1282 files changed, 14981 insertions, 10591 deletions
diff --git a/Documentation/CodeOfConflict b/Documentation/CodeOfConflict
new file mode 100644
index 000000000000..1684d0b4efa6
--- /dev/null
+++ b/Documentation/CodeOfConflict
@@ -0,0 +1,27 @@
+Code of Conflict
+----------------
+
+The Linux kernel development effort is a very personal process compared
+to "traditional" ways of developing software. Your code and ideas
+behind it will be carefully reviewed, often resulting in critique and
+criticism. The review will almost always require improvements to the
+code before it can be included in the kernel. Know that this happens
+because everyone involved wants to see the best possible solution for
+the overall success of Linux. This development process has been proven
+to create the most robust operating system kernel ever, and we do not
+want to do anything to cause the quality of submission and eventual
+result to ever decrease.
+
+If however, anyone feels personally abused, threatened, or otherwise
+uncomfortable due to this process, that is not acceptable. If so,
+please contact the Linux Foundation's Technical Advisory Board at
+<tab@lists.linux-foundation.org>, or the individual members, and they
+will work to resolve the issue to the best of their ability. For more
+information on who is on the Technical Advisory Board and what their
+role is, please see:
+http://www.linuxfoundation.org/programs/advisory-councils/tab
+
+As a reviewer of code, please strive to keep things civil and focused on
+the technical issues involved. We are all humans, and frustrations can
+be high on both sides of the process. Try to keep in mind the immortal
+words of Bill and Ted, "Be excellent to each other."
diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt
index 71daa35ec2d9..eb102fb72213 100644
--- a/Documentation/cgroups/unified-hierarchy.txt
+++ b/Documentation/cgroups/unified-hierarchy.txt
@@ -404,8 +404,8 @@ supported and the interface files "release_agent" and
 be understood as an underflow into the highest possible value, -2 or
 -10M etc. do not work, so it's not consistent.

-memory.low, memory.high, and memory.max will use the string
-"infinity" to indicate and set the highest possible value.
+memory.low, memory.high, and memory.max will use the string "max" to
+indicate and set the highest possible value.

 5. Planned Changes

diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index f4445e5a2bbb..1e097037349c 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -22,6 +22,8 @@ Optional Properties:
 - pclkN, clkN: Pairs of parent of input clock and input clock to the
 devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
 are supported currently.
+- power-domains: phandle pointing to the parent power domain, for more details
+see Documentation/devicetree/bindings/power/power_domain.txt

 Node of a device using power domains must have a power-domains property
 defined with a phandle to respective power domain.
diff --git a/Documentation/devicetree/bindings/arm/sti.txt b/Documentation/devicetree/bindings/arm/sti.txt
index d70ec358736c..8d27f6b084c7 100644
--- a/Documentation/devicetree/bindings/arm/sti.txt
+++ b/Documentation/devicetree/bindings/arm/sti.txt
@@ -13,6 +13,10 @@ Boards with the ST STiH407 SoC shall have the following properties:
 Required root node property:
 compatible = "st,stih407";

+Boards with the ST STiH410 SoC shall have the following properties:
+Required root node property:
+compatible = "st,stih410";
+
 Boards with the ST STiH418 SoC shall have the following properties:
 Required root node property:
 compatible = "st,stih418";
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.txt b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
index 52d37fd8d3e5..ce4311d726ae 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
@@ -7,6 +7,7 @@ Required properties:
 - "fsl,vf610-i2c" for I2C compatible with the one integrated on Vybrid vf610 SoC
 - reg : Should contain I2C/HS-I2C registers location and length
 - interrupts : Should contain I2C/HS-I2C interrupt
+- clocks : Should contain the I2C/HS-I2C clock specifier

 Optional properties:
 - clock-frequency : Constains desired I2C/HS-I2C bus clock frequency in Hz.
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
index 33df3932168e..8db32384a486 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
@@ -27,6 +27,8 @@ property is used.
 - amd,serdes-cdr-rate: CDR rate speed selection
 - amd,serdes-pq-skew: PQ (data sampling) skew
 - amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable

 Example:
 xgbe_phy@e1240800 {
@@ -41,4 +43,6 @@ Example:
 amd,serdes-cdr-rate = <2>, <2>, <7>;
 amd,serdes-pq-skew = <10>, <10>, <30>;
 amd,serdes-tx-amp = <15>, <15>, <10>;
+amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
 };
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index cfcc52705ed8..6151999c5dca 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -4,7 +4,10 @@ Ethernet nodes are defined to describe on-chip ethernet interfaces in
 APM X-Gene SoC.

 Required properties for all the ethernet interfaces:
-- compatible: Should be "apm,xgene-enet"
+- compatible: Should state binding information from the following list,
+  - "apm,xgene-enet": RGMII based 1G interface
+  - "apm,xgene1-sgenet": SGMII based 1G interface
+  - "apm,xgene1-xgenet": XFI based 10G interface
 - reg: Address and length of the register set for the device. It contains the
 information of registers in the same order as described by reg-names
 - reg-names: Should contain the register set names
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index e124847443f8..f0b4cd72411d 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4
 (DSA_MAX_SWITCHES).
 Each of these switch child nodes should have the following required properties:

-- reg : Describes the switch address on the MII bus
+- reg : Contains two fields. The first one describes the
+        address on the MII bus. The second is the switch
+        number that must be unique in cascaded configurations
 - #address-cells : Must be 1
 - #size-cells : Must be 0

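As a rough illustration of the two reg cells described above, a consumer could read them with the generic OF helpers; this is only a sketch, not the in-kernel DSA parser:

#include <linux/of.h>

/* Sketch: pull the two cells out of a switch child node's reg property. */
static int read_switch_reg(struct device_node *child,
			   u32 *mii_addr, u32 *sw_index)
{
	u32 reg[2];
	int err;

	err = of_property_read_u32_array(child, "reg", reg, 2);
	if (err)
		return err;

	*mii_addr = reg[0];	/* address on the MII bus */
	*sw_index = reg[1];	/* switch number, unique per cascade */
	return 0;
}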
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 98c16672ab5f..0f8ed3710c66 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -19,6 +19,16 @@ Required properties:
 providing multiple PM domains (e.g. power controllers), but can be any value
 as specified by device tree binding documentation of particular provider.

+Optional properties:
+- power-domains : A phandle and PM domain specifier as defined by bindings of
+  the power controller specified by phandle.
+  Some power domains might be powered from another power domain (or have
+  other hardware specific dependencies). For representing such dependency
+  a standard PM domain consumer binding is used. When provided, all domains
+  created by the given provider should be subdomains of the domain
+  specified by this binding. More details about power domain specifier are
+  available in the next section.
+
 Example:

 power: power-controller@12340000 {
@@ -30,6 +40,25 @@ Example:
 The node above defines a power controller that is a PM domain provider and
 expects one cell as its phandle argument.

+Example 2:
+
+	parent: power-controller@12340000 {
+		compatible = "foo,power-controller";
+		reg = <0x12340000 0x1000>;
+		#power-domain-cells = <1>;
+	};
+
+	child: power-controller@12340000 {
+		compatible = "foo,power-controller";
+		reg = <0x12341000 0x1000>;
+		power-domains = <&parent 0>;
+		#power-domain-cells = <1>;
+	};
+
+The nodes above define two power controllers: 'parent' and 'child'.
+Domains created by the 'child' power controller are subdomains of '0' power
+domain provided by the 'parent' power controller.
+
 ==PM domain consumers==

 Required properties:
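On the kernel side, a provider like the 'child' controller above would typically register its domain and then link it under the parent's domain. A hedged sketch using the generic PM domain API follows; how the parent genpd is looked up is provider specific and omitted here:

#include <linux/pm_domain.h>

/* Sketch only: initialize a freshly created domain and make it a subdomain
 * of the domain provided by the parent controller. */
static int link_subdomain(struct generic_pm_domain *parent_pd,
			  struct generic_pm_domain *child_pd)
{
	pm_genpd_init(child_pd, NULL, false);	/* no governor, starts powered on */
	return pm_genpd_add_subdomain(parent_pd, child_pd);
}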
diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/8250.txt
index 91d5ab0e60fc..91d5ab0e60fc 100644
--- a/Documentation/devicetree/bindings/serial/of-serial.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
new file mode 100644
index 000000000000..ebcbb62c0a76
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
@@ -0,0 +1,19 @@
+ETRAX FS UART
+
+Required properties:
+- compatible : "axis,etraxfs-uart"
+- reg: offset and length of the register set for the device.
+- interrupts: device interrupt
+
+Optional properties:
+- {dtr,dsr,ri,cd}-gpios: specify a GPIO for DTR/DSR/RI/CD
+  line respectively.
+
+Example:
+
+serial@b00260000 {
+	compatible = "axis,etraxfs-uart";
+	reg = <0xb0026000 0x1000>;
+	interrupts = <68>;
+	status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
index 7f76214f728a..289c40ed7470 100644
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
@@ -21,6 +21,18 @@ Optional properties:
 - reg-io-width : the size (in bytes) of the IO accesses that should be
 performed on the device. If this property is not present then single byte
 accesses are used.
+- dcd-override : Override the DCD modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- dsr-override : Override the DTS modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- cts-override : Override the CTS modem status signal. This signal will always
+  be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+- ri-override : Override the RI modem status signal. This signal will always be
+  reported as inactive instead of being obtained from the modem status register.
+  Define this if your serial port does not use this pin.

 Example:

@@ -31,6 +43,10 @@ Example:
 interrupts = <10>;
 reg-shift = <2>;
 reg-io-width = <4>;
+dcd-override;
+dsr-override;
+cts-override;
+ri-override;
 };

 Example with one clock:
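The override flags above are boolean properties, so a consumer only needs to test for their presence. The following is an illustrative sketch using the standard OF helpers, not the actual 8250/DesignWare driver code:

#include <linux/of.h>

/* Illustrative sketch: read the four modem-status override flags documented
 * above; presence of the property is all that matters for a boolean flag. */
static void dw_uart_read_overrides(struct device_node *np,
				   bool *dcd, bool *dsr, bool *cts, bool *ri)
{
	*dcd = of_property_read_bool(np, "dcd-override");
	*dsr = of_property_read_bool(np, "dsr-override");
	*cts = of_property_read_bool(np, "cts-override");
	*ri  = of_property_read_bool(np, "ri-override");
}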
diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
index aad527b357a0..523341a0e113 100644
--- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
@@ -2,11 +2,21 @@
 (CSPI/eCSPI) for i.MX

 Required properties:
-- compatible : Should be "fsl,<soc>-cspi" or "fsl,<soc>-ecspi"
+- compatible :
+  - "fsl,imx1-cspi" for SPI compatible with the one integrated on i.MX1
+  - "fsl,imx21-cspi" for SPI compatible with the one integrated on i.MX21
+  - "fsl,imx27-cspi" for SPI compatible with the one integrated on i.MX27
+  - "fsl,imx31-cspi" for SPI compatible with the one integrated on i.MX31
+  - "fsl,imx35-cspi" for SPI compatible with the one integrated on i.MX35
+  - "fsl,imx51-ecspi" for SPI compatible with the one integrated on i.MX51
 - reg : Offset and length of the register set for the device
 - interrupts : Should contain CSPI/eCSPI interrupt
 - fsl,spi-num-chipselects : Contains the number of the chipselect
 - cs-gpios : Specifies the gpio pins to be used for chipselects.
+- clocks : Clock specifiers for both ipg and per clocks.
+- clock-names : Clock names should include both "ipg" and "per"
+  See the clock consumer binding,
+  Documentation/devicetree/bindings/clock/clock-bindings.txt
 - dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
 Documentation/devicetree/bindings/dma/dma.txt
 - dma-names: DMA request names should include "tx" and "rx" if present.
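The clocks/clock-names pair above follows the common clock consumer binding; a minimal sketch of how a driver would acquire the two named clocks (error handling abbreviated, function name is illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Minimal sketch: look up the "ipg" and "per" clocks named in the binding
 * and enable them before touching the controller registers. */
static int spi_imx_clk_setup(struct platform_device *pdev)
{
	struct clk *clk_ipg, *clk_per;

	clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(clk_ipg))
		return PTR_ERR(clk_ipg);

	clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(clk_per))
		return PTR_ERR(clk_per);

	clk_prepare_enable(clk_ipg);
	clk_prepare_enable(clk_per);
	return 0;
}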
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
index e2c88df2cc15..5c090771c016 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
@@ -33,6 +33,11 @@ Optional properties:
 nodes. If unspecified, a single SPI device without a chip
 select can be used.

+- dmas: Two DMA channel specifiers following the convention outlined
+  in bindings/dma/dma.txt
+- dma-names: Names for the dma channels, if present. There must be at
+  least one channel named "tx" for transmit and named "rx" for
+  receive.

 SPI slave nodes must be children of the SPI master node and can contain
 properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -51,6 +56,9 @@ Example:
 clocks = <&gcc GCC_BLSP2_QUP2_SPI_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
 clock-names = "core", "iface";

+dmas = <&blsp1_bam 13>, <&blsp1_bam 12>;
+dma-names = "rx", "tx";
+
 pinctrl-names = "default";
 pinctrl-0 = <&spi8_default>;

diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
index cbbe16ed3874..70af78a9185e 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
@@ -16,6 +16,12 @@ Optional property:
 in big endian mode, otherwise in native mode(same with CPU), for more
 detail please see: Documentation/devicetree/bindings/regmap/regmap.txt.

+Optional SPI slave node properties:
+- fsl,spi-cs-sck-delay: a delay in nanoseconds between activating chip
+  select and the start of clock signal, at the start of a transfer.
+- fsl,spi-sck-cs-delay: a delay in nanoseconds between stopping the clock
+  signal and deactivating chip select, at the end of a transfer.
+
 Example:

 dspi0@4002c000 {
@@ -43,6 +49,8 @@ dspi0@4002c000 {
 reg = <0>;
 linux,modalias = "m25p80";
 modal = "at26df081a";
+fsl,spi-cs-sck-delay = <100>;
+fsl,spi-sck-cs-delay = <50>;
 };
 };

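The two delay properties above live on the SPI slave node. A sketch of how a controller driver might read them for a given child device follows; the property names come from the binding, while the surrounding helper is hypothetical:

#include <linux/of.h>
#include <linux/spi/spi.h>

/* Hypothetical helper: fetch the per-chip-select delays, defaulting to 0 ns
 * when a property is absent. */
static void dspi_get_cs_delays(struct spi_device *spi,
			       u32 *cs_sck_ns, u32 *sck_cs_ns)
{
	struct device_node *np = spi->dev.of_node;

	*cs_sck_ns = 0;
	*sck_cs_ns = 0;
	of_property_read_u32(np, "fsl,spi-cs-sck-delay", cs_sck_ns);
	of_property_read_u32(np, "fsl,spi-sck-cs-delay", sck_cs_ns);
}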
diff --git a/Documentation/devicetree/bindings/spi/spi-img-spfi.txt b/Documentation/devicetree/bindings/spi/spi-img-spfi.txt
index c7dd50fb8eb2..e02fbf18c82c 100644
--- a/Documentation/devicetree/bindings/spi/spi-img-spfi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-img-spfi.txt
@@ -14,6 +14,7 @@ Required properties:
 - dma-names: Must include the following entries:
 - rx
 - tx
+- cs-gpios: Must specify the GPIOs used for chipselect lines.
 - #address-cells: Must be 1.
 - #size-cells: Must be 0.

diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.txt b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
index 467dec441c62..0c491bda4c65 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
@@ -24,6 +24,9 @@ Optional Properties:
 - dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
 Documentation/devicetree/bindings/dma/dma.txt
 - dma-names: DMA request names should include "tx" and "rx" if present.
+- rx-sample-delay-ns: nanoseconds to delay after the SCLK edge before sampling
+  Rx data (may need to be fine tuned for high capacitance lines).
+  No delay (0) by default.


 Example:
@@ -33,6 +36,7 @@ Example:
 reg = <0xff110000 0x1000>;
 dmas = <&pdma1 11>, <&pdma1 12>;
 dma-names = "tx", "rx";
+rx-sample-delay-ns = <10>;
 #address-cells = <1>;
 #size-cells = <0>;
 interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index 56742bc70218..7d44eae7ab0b 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -12,6 +12,9 @@ I. For patch submitters

 devicetree@vger.kernel.org

+and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify
+all of the DT maintainers.
+
 3) The Documentation/ portion of the patch should come in the series before
 the code implementing the binding.

diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
index 43404b197933..332e625f6ed0 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
@@ -4,7 +4,7 @@ Required properties:
 - compatible : "renesas,thermal-<soctype>", "renesas,rcar-thermal"
 as fallback.
 Examples with soctypes are:
-- "renesas,thermal-r8a73a4" (R-Mobile AP6)
+- "renesas,thermal-r8a73a4" (R-Mobile APE6)
 - "renesas,thermal-r8a7779" (R-Car H1)
 - "renesas,thermal-r8a7790" (R-Car H2)
 - "renesas,thermal-r8a7791" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 389ca1347a77..fae26d014aaf 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -20,6 +20,7 @@ amlogic Amlogic, Inc.
 ams AMS AG
 amstaos AMS-Taos Inc.
 apm Applied Micro Circuits Corporation (APM)
+arasan Arasan Chip Systems
 arm ARM Ltd.
 armadeus ARMadeus Systems SARL
 asahi-kasei Asahi Kasei Corp.
@@ -27,6 +28,7 @@ atmel Atmel Corporation
 auo AU Optronics Corporation
 avago Avago Technologies
 avic Shanghai AVIC Optoelectronics Co., Ltd.
+axis Axis Communications AB
 bosch Bosch Sensortec GmbH
 brcm Broadcom Corporation
 buffalo Buffalo, Inc.
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
index f90e294d7631..a4d869744f59 100644
--- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
@@ -26,6 +26,11 @@ Optional properties:
 - atmel,disable : Should be present if you want to disable the watchdog.
 - atmel,idle-halt : Should be present if you want to stop the watchdog when
 entering idle state.
+  CAUTION: This property should be used with care, it actually makes the
+  watchdog not counting when the CPU is in idle state, therefore the
+  watchdog reset time depends on mean CPU usage and will not reset at all
+  if the CPU stop working while it is in idle state, which is probably
+  not what you want.
 - atmel,dbg-halt : Should be present if you want to stop the watchdog when
 entering debug state.

diff --git a/Documentation/filesystems/dlmfs.txt b/Documentation/filesystems/dlmfs.txt
index 1b528b2ad809..fcf4d509d118 100644
--- a/Documentation/filesystems/dlmfs.txt
+++ b/Documentation/filesystems/dlmfs.txt
@@ -5,8 +5,8 @@ system.

 dlmfs is built with OCFS2 as it requires most of its infrastructure.

-Project web page: http://oss.oracle.com/projects/ocfs2
-Tools web page: http://oss.oracle.com/projects/ocfs2-tools
+Project web page: http://ocfs2.wiki.kernel.org
+Tools web page: https://github.com/markfasheh/ocfs2-tools
 OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/

 All code copyright 2005 Oracle except when otherwise noted.
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index 28f8c08201e2..4c49e5410595 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -8,8 +8,8 @@ also make it attractive for non-clustered use.
 You'll want to install the ocfs2-tools package in order to at least
 get "mount.ocfs2" and "ocfs2_hb_ctl".

-Project web page: http://oss.oracle.com/projects/ocfs2
-Tools web page: http://oss.oracle.com/projects/ocfs2-tools
+Project web page: http://ocfs2.wiki.kernel.org
+Tools git tree: https://github.com/markfasheh/ocfs2-tools
 OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/

 All code copyright 2005 Oracle except when otherwise noted.
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index a63e5e013a8c..92ae734c00c3 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2
 byte 4: 0 y6 y5 y4 y3 y2 y1 y0
 byte 5: 0 z6 z5 z4 z3 z2 z1 z0

+Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
+the DualPoint Stick.
+
 Dualpoint device -- interleaved packet format
 ---------------------------------------------

@@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format
 byte 7: 0 y6 y5 y4 y3 y2 y1 y0
 byte 8: 0 z6 z5 z4 z3 z2 z1 z0

+Devices which use the interleaving format normally send standard PS/2 mouse
+packets for the DualPoint Stick + ALPS Absolute Mode packets for the
+touchpad, switching to the interleaved packet format when both the stick and
+the touchpad are used at the same time.
+
 ALPS Absolute Mode - Protocol Version 3
 ---------------------------------------

diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index c587a966413e..96705616f582 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
 The kernel does not provide button emulation for such devices but treats
 them as any other INPUT_PROP_BUTTONPAD device.

+INPUT_PROP_ACCELEROMETER
+-------------------------
+Directional axes on this device (absolute and/or relative x, y, z) represent
+accelerometer data. All other axes retain their meaning. A device must not mix
+regular directional axes and accelerometer axes on the same event node.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
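For a driver, honouring INPUT_PROP_ACCELEROMETER mostly means flagging the device and exposing only accelerometer axes on that node. A minimal sketch follows; the axis ranges and fuzz values are made-up placeholders:

#include <linux/input.h>

/* Sketch: mark an input device as an accelerometer and register X/Y/Z
 * absolute axes; range/fuzz values below are illustrative only. */
static void setup_accel_node(struct input_dev *dev)
{
	__set_bit(INPUT_PROP_ACCELEROMETER, dev->propbit);

	input_set_abs_params(dev, ABS_X, -512, 512, 4, 0);
	input_set_abs_params(dev, ABS_Y, -512, 512, 4, 0);
	input_set_abs_params(dev, ABS_Z, -512, 512, 4, 0);
}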
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 7b4f59c09ee2..b85d000faeb4 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE

 The type of approaching tool. A lot of kernel drivers cannot distinguish
 between different tool types, such as a finger or a pen. In such cases, the
-event should be omitted. The protocol currently supports MT_TOOL_FINGER and
-MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
-drivers should instead use input_mt_report_slot_state().
+event should be omitted. The protocol currently supports MT_TOOL_FINGER,
+MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
+by input core; drivers should instead use input_mt_report_slot_state().
+A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
+device, because the firmware may not be able to determine which tool is being
+used when it first appears.

 ABS_MT_BLOB_ID

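For a type B driver this reduces to passing the current tool type to input_mt_report_slot_state() each time the contact is reported. A sketch follows; the palm-versus-finger decision itself is device specific and only assumed here:

#include <linux/input/mt.h>

/* Sketch: report one contact in a given slot, letting the tool type change
 * between MT_TOOL_FINGER and MT_TOOL_PALM while the contact persists. */
static void report_contact(struct input_dev *dev, int slot, bool active,
			   bool is_palm, int x, int y)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_state(dev,
				   is_palm ? MT_TOOL_PALM : MT_TOOL_FINGER,
				   active);
	if (active) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
}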
diff --git a/Documentation/power/suspend-and-interrupts.txt b/Documentation/power/suspend-and-interrupts.txt
index 2f9c5a5fcb25..8afb29a8604a 100644
--- a/Documentation/power/suspend-and-interrupts.txt
+++ b/Documentation/power/suspend-and-interrupts.txt
@@ -40,8 +40,10 @@ but also to IPIs and to some other special-purpose interrupts.

 The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when
 requesting a special-purpose interrupt. It causes suspend_device_irqs() to
-leave the corresponding IRQ enabled so as to allow the interrupt to work all
-the time as expected.
+leave the corresponding IRQ enabled so as to allow the interrupt to work as
+expected during the suspend-resume cycle, but does not guarantee that the
+interrupt will wake the system from a suspended state -- for such cases it is
+necessary to use enable_irq_wake().

 Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one
 user of it. Thus, if the IRQ is shared, all of the interrupt handlers installed
@@ -110,8 +112,9 @@ any special interrupt handling logic for it to work.
 IRQF_NO_SUSPEND and enable_irq_wake()
 -------------------------------------

-There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND
-flag on the same IRQ.
+There are very few valid reasons to use both enable_irq_wake() and the
+IRQF_NO_SUSPEND flag on the same IRQ, and it is never valid to use both for the
+same device.

 First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND
 interrupts (interrupt handlers are invoked after suspend_device_irqs()) are
@@ -120,4 +123,13 @@ handlers are not invoked after suspend_device_irqs()).

 Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not
 to individual interrupt handlers, so sharing an IRQ between a system wakeup
-interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense.
+interrupt source and an IRQF_NO_SUSPEND interrupt source does not generally
+make sense.
+
+In rare cases an IRQ can be shared between a wakeup device driver and an
+IRQF_NO_SUSPEND user. In order for this to be safe, the wakeup device driver
+must be able to discern spurious IRQs from genuine wakeup events (signalling
+the latter to the core with pm_system_wakeup()), must use enable_irq_wake() to
+ensure that the IRQ will function as a wakeup source, and must request the IRQ
+with IRQF_COND_SUSPEND to tell the core that it meets these requirements. If
+these requirements are not met, it is not valid to use IRQF_COND_SUSPEND.
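Putting the three requirements together, a hedged sketch of a wakeup driver sharing its IRQ with an IRQF_NO_SUSPEND user might look like this; device_signalled_wakeup() is a hypothetical, device-specific check:

#include <linux/interrupt.h>
#include <linux/suspend.h>

static bool device_signalled_wakeup(void *dev_id);	/* hypothetical helper */

static irqreturn_t wake_capable_handler(int irq, void *dev_id)
{
	if (!device_signalled_wakeup(dev_id))
		return IRQ_NONE;	/* spurious: not ours, let other sharers run */

	pm_system_wakeup();		/* report the genuine wakeup event to the core */
	return IRQ_HANDLED;
}

static int setup_wake_irq(int irq, void *dev_id)
{
	int ret;

	ret = request_irq(irq, wake_capable_handler,
			  IRQF_SHARED | IRQF_COND_SUSPEND, "wake-dev", dev_id);
	if (ret)
		return ret;

	return enable_irq_wake(irq);	/* arm the IRQ as a system wakeup source */
}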
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index d29734bff28c..d1824b399b2d 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -342,12 +342,11 @@ SPI protocol drivers somewhat resemble platform device drivers:
 	.driver = {
 		.name = "CHIP",
 		.owner = THIS_MODULE,
+		.pm = &CHIP_pm_ops,
 	},

 	.probe = CHIP_probe,
 	.remove = CHIP_remove,
-	.suspend = CHIP_suspend,
-	.resume = CHIP_resume,
 };

 The driver core will automatically attempt to bind this driver to any SPI
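One plausible definition of the CHIP_pm_ops object referenced above, assuming the usual SIMPLE_DEV_PM_OPS() helper; note that dev_pm_ops callbacks take a struct device * rather than the spi_device passed to the old legacy hooks:

#include <linux/pm.h>
#include <linux/spi/spi.h>

static int CHIP_suspend(struct device *dev)
{
	/* quiesce the chip; to_spi_device(dev) gives back the spi_device */
	return 0;
}

static int CHIP_resume(struct device *dev)
{
	/* reprogram the chip after resume */
	return 0;
}

static SIMPLE_DEV_PM_OPS(CHIP_pm_ops, CHIP_suspend, CHIP_resume);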
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c
index 3a2f9d59edab..94f574b0fdb2 100644
--- a/Documentation/spi/spidev_test.c
+++ b/Documentation/spi/spidev_test.c
@@ -15,6 +15,7 @@
 #include <unistd.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <getopt.h>
 #include <fcntl.h>
 #include <sys/ioctl.h>
@@ -34,24 +35,79 @@ static uint32_t mode;
 static uint8_t bits = 8;
 static uint32_t speed = 500000;
 static uint16_t delay;
+static int verbose;

-static void transfer(int fd)
+uint8_t default_tx[] = {
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0x40, 0x00, 0x00, 0x00, 0x00, 0x95,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xF0, 0x0D,
+};
+
+uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, };
+char *input_tx;
+
+static void hex_dump(const void *src, size_t length, size_t line_size, char *prefix)
+{
+	int i = 0;
+	const unsigned char *address = src;
+	const unsigned char *line = address;
+	unsigned char c;
+
+	printf("%s | ", prefix);
+	while (length-- > 0) {
+		printf("%02X ", *address++);
+		if (!(++i % line_size) || (length == 0 && i % line_size)) {
+			if (length == 0) {
+				while (i++ % line_size)
+					printf("__ ");
+			}
+			printf(" | ");  /* right close */
+			while (line < address) {
+				c = *line++;
+				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
+			}
+			printf("\n");
+			if (length > 0)
+				printf("%s | ", prefix);
+		}
+	}
+}
+
+/*
+ * Unescape - process hexadecimal escape character
+ * converts shell input "\x23" -> 0x23
+ */
+int unespcape(char *_dst, char *_src, size_t len)
+{
+	int ret = 0;
+	char *src = _src;
+	char *dst = _dst;
+	unsigned int ch;
+
+	while (*src) {
+		if (*src == '\\' && *(src+1) == 'x') {
+			sscanf(src + 2, "%2x", &ch);
+			src += 4;
+			*dst++ = (unsigned char)ch;
+		} else {
+			*dst++ = *src++;
+		}
+		ret++;
+	}
+	return ret;
+}
+
+static void transfer(int fd, uint8_t const *tx, uint8_t const *rx, size_t len)
 {
 	int ret;
-	uint8_t tx[] = {
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0x40, 0x00, 0x00, 0x00, 0x00, 0x95,
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0xDE, 0xAD, 0xBE, 0xEF, 0xBA, 0xAD,
-		0xF0, 0x0D,
-	};
-	uint8_t rx[ARRAY_SIZE(tx)] = {0, };
+
 	struct spi_ioc_transfer tr = {
 		.tx_buf = (unsigned long)tx,
 		.rx_buf = (unsigned long)rx,
-		.len = ARRAY_SIZE(tx),
+		.len = len,
 		.delay_usecs = delay,
 		.speed_hz = speed,
 		.bits_per_word = bits,
@@ -76,12 +132,9 @@ static void transfer(int fd)
 	if (ret < 1)
 		pabort("can't send spi message");

-	for (ret = 0; ret < ARRAY_SIZE(tx); ret++) {
-		if (!(ret % 6))
-			puts("");
-		printf("%.2X ", rx[ret]);
-	}
-	puts("");
+	if (verbose)
+		hex_dump(tx, len, 32, "TX");
+	hex_dump(rx, len, 32, "RX");
 }

 static void print_usage(const char *prog)
@@ -97,6 +150,8 @@ static void print_usage(const char *prog)
 	" -L --lsb least significant bit first\n"
 	" -C --cs-high chip select active high\n"
 	" -3 --3wire SI/SO signals shared\n"
+	" -v --verbose Verbose (show tx buffer)\n"
+	" -p Send data (e.g. \"1234\\xde\\xad\")\n"
 	" -N --no-cs no chip select\n"
 	" -R --ready slave pulls low to pause\n"
 	" -2 --dual dual transfer\n"
@@ -121,12 +176,13 @@ static void parse_opts(int argc, char *argv[])
 		{ "no-cs", 0, 0, 'N' },
 		{ "ready", 0, 0, 'R' },
 		{ "dual", 0, 0, '2' },
+		{ "verbose", 0, 0, 'v' },
 		{ "quad", 0, 0, '4' },
 		{ NULL, 0, 0, 0 },
 	};
 	int c;

-	c = getopt_long(argc, argv, "D:s:d:b:lHOLC3NR24", lopts, NULL);
+	c = getopt_long(argc, argv, "D:s:d:b:lHOLC3NR24p:v", lopts, NULL);

 	if (c == -1)
 		break;
@@ -165,9 +221,15 @@ static void parse_opts(int argc, char *argv[])
 	case 'N':
 		mode |= SPI_NO_CS;
 		break;
+	case 'v':
+		verbose = 1;
+		break;
 	case 'R':
 		mode |= SPI_READY;
 		break;
+	case 'p':
+		input_tx = optarg;
+		break;
 	case '2':
 		mode |= SPI_TX_DUAL;
 		break;
@@ -191,6 +253,9 @@ int main(int argc, char *argv[])
 {
 	int ret = 0;
 	int fd;
+	uint8_t *tx;
+	uint8_t *rx;
+	int size;

 	parse_opts(argc, argv);

@@ -235,7 +300,17 @@ int main(int argc, char *argv[])
 	printf("bits per word: %d\n", bits);
 	printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000);

-	transfer(fd);
+	if (input_tx) {
+		size = strlen(input_tx+1);
+		tx = malloc(size);
+		rx = malloc(size);
+		size = unespcape((char *)tx, input_tx, size);
+		transfer(fd, tx, rx, size);
+		free(rx);
+		free(tx);
+	} else {
+		transfer(fd, default_tx, default_rx, sizeof(default_tx));
+	}

 	close(fd);

diff --git a/MAINTAINERS b/MAINTAINERS
index ddc5a8cf9a8a..efbcb50e4969 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -637,8 +637,7 @@ F: drivers/gpu/drm/radeon/radeon_kfd.h
 F: include/uapi/linux/kfd_ioctl.h

 AMD MICROCODE UPDATE SUPPORT
-M: Andreas Herrmann <herrmann.der.user@googlemail.com>
-L: amd64-microcode@amd64.org
+M: Borislav Petkov <bp@alien8.de>
 S: Maintained
 F: arch/x86/kernel/cpu/microcode/amd*

@@ -1030,6 +1029,16 @@ F: arch/arm/mach-mxs/
 F: arch/arm/boot/dts/imx*
 F: arch/arm/configs/imx*_defconfig

+ARM/FREESCALE VYBRID ARM ARCHITECTURE
+M: Shawn Guo <shawn.guo@linaro.org>
+M: Sascha Hauer <kernel@pengutronix.de>
+R: Stefan Agner <stefan@agner.ch>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
+F: arch/arm/mach-imx/*vf610*
+F: arch/arm/boot/dts/vf*
+
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 M: Lennert Buytenhek <kernel@wantstofly.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1176,7 +1185,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-mvebu/
-F: drivers/rtc/armada38x-rtc
+F: drivers/rtc/rtc-armada38x.c

 ARM/Marvell Berlin SoC support
 M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1188,6 +1197,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support
 M: Jason Cooper <jason@lakedaemon.net>
 M: Andrew Lunn <andrew@lunn.ch>
 M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+M: Gregory Clement <gregory.clement@free-electrons.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-dove/
@@ -1351,6 +1361,7 @@ F: drivers/i2c/busses/i2c-rk3x.c
 F: drivers/*/*rockchip*
 F: drivers/*/*/*rockchip*
 F: sound/soc/rockchip/
+N: rockchip

 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M: Kukjin Kim <kgene@kernel.org>
@@ -1664,8 +1675,8 @@ F: drivers/misc/eeprom/at24.c
 F: include/linux/platform_data/at24.h

 ATA OVER ETHERNET (AOE) DRIVER
-M: "Ed L. Cashin" <ecashin@coraid.com>
-W: http://support.coraid.com/support/linux
+M: "Ed L. Cashin" <ed.cashin@acm.org>
+W: http://www.openaoe.org/
 S: Supported
 F: Documentation/aoe/
 F: drivers/block/aoe/
@@ -1730,7 +1741,7 @@ S: Maintained
 F: drivers/net/ethernet/atheros/

 ATM
-M: Chas Williams <chas@cmf.nrl.navy.mil>
+M: Chas Williams <3chas3@gmail.com>
 L: linux-atm-general@lists.sourceforge.net (moderated for non-subscribers)
 L: netdev@vger.kernel.org
 W: http://linux-atm.sourceforge.net
@@ -2065,7 +2076,7 @@ F: include/net/bluetooth/
 BONDING DRIVER
 M: Jay Vosburgh <j.vosburgh@gmail.com>
 M: Veaceslav Falico <vfalico@gmail.com>
-M: Andy Gospodarek <andy@greyhouse.net>
+M: Andy Gospodarek <gospo@cumulusnetworks.com>
 L: netdev@vger.kernel.org
 W: http://sourceforge.net/projects/bonding/
 S: Supported
@@ -2107,7 +2118,6 @@ F: drivers/net/ethernet/broadcom/bnx2x/

 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M: Christian Daudt <bcm@fixthebug.org>
-M: Matt Porter <mporter@linaro.org>
 M: Florian Fainelli <f.fainelli@gmail.com>
 L: bcm-kernel-feedback-list@broadcom.com
 T: git git://github.com/broadcom/mach-bcm
@@ -2369,8 +2379,9 @@ F: arch/x86/include/asm/tce.h

 CAN NETWORK LAYER
 M: Oliver Hartkopp <socketcan@hartkopp.net>
+M: Marc Kleine-Budde <mkl@pengutronix.de>
 L: linux-can@vger.kernel.org
-W: http://gitorious.org/linux-can
+W: https://github.com/linux-can
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S: Maintained
@@ -2386,7 +2397,7 @@ CAN NETWORK DRIVERS
 M: Wolfgang Grandegger <wg@grandegger.com>
 M: Marc Kleine-Budde <mkl@pengutronix.de>
 L: linux-can@vger.kernel.org
-W: http://gitorious.org/linux-can
+W: https://github.com/linux-can
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S: Maintained
@@ -3241,6 +3252,13 @@ S: Maintained
 F: Documentation/hwmon/dme1737
 F: drivers/hwmon/dme1737.c

+DMI/SMBIOS SUPPORT
+M: Jean Delvare <jdelvare@suse.de>
+S: Maintained
+F: drivers/firmware/dmi-id.c
+F: drivers/firmware/dmi_scan.c
+F: include/linux/dmi.h
+
 DOCKING STATION DRIVER
 M: Shaohua Li <shaohua.li@intel.com>
 L: linux-acpi@vger.kernel.org
@@ -5076,7 +5094,7 @@ S: Supported
 F: drivers/platform/x86/intel_menlow.c

 INTEL IA32 MICROCODE UPDATE SUPPORT
-M: Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+M: Borislav Petkov <bp@alien8.de>
 S: Maintained
 F: arch/x86/kernel/cpu/microcode/core*
 F: arch/x86/kernel/cpu/microcode/intel*
@@ -5117,22 +5135,21 @@ M: Deepak Saxena <dsaxena@plexity.net>
 S: Maintained
 F: drivers/char/hw_random/ixp4xx-rng.c

-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
+INTEL ETHERNET DRIVERS
 M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-M: Jesse Brandeburg <jesse.brandeburg@intel.com>
-M: Bruce Allan <bruce.w.allan@intel.com>
-M: Carolyn Wyborny <carolyn.wyborny@intel.com>
-M: Don Skidmore <donald.c.skidmore@intel.com>
-M: Greg Rose <gregory.v.rose@intel.com>
-M: Matthew Vick <matthew.vick@intel.com>
-M: John Ronciak <john.ronciak@intel.com>
-M: Mitch Williams <mitch.a.williams@intel.com>
-M: Linux NICS <linux.nics@intel.com>
-L: e1000-devel@lists.sourceforge.net
+R: Jesse Brandeburg <jesse.brandeburg@intel.com>
+R: Shannon Nelson <shannon.nelson@intel.com>
+R: Carolyn Wyborny <carolyn.wyborny@intel.com>
+R: Don Skidmore <donald.c.skidmore@intel.com>
+R: Matthew Vick <matthew.vick@intel.com>
+R: John Ronciak <john.ronciak@intel.com>
+R: Mitch Williams <mitch.a.williams@intel.com>
+L: intel-wired-lan@lists.osuosl.org
 W: http://www.intel.com/support/feedback.htm
 W: http://e1000.sourceforge.net/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
+Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
 S: Supported
 F: Documentation/networking/e100.txt
 F: Documentation/networking/e1000.txt
@@ -7213,8 +7230,7 @@ ORACLE CLUSTER FILESYSTEM 2 (OCFS2) | |||
7213 | M: Mark Fasheh <mfasheh@suse.com> | 7230 | M: Mark Fasheh <mfasheh@suse.com> |
7214 | M: Joel Becker <jlbec@evilplan.org> | 7231 | M: Joel Becker <jlbec@evilplan.org> |
7215 | L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers) | 7232 | L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers) |
7216 | W: http://oss.oracle.com/projects/ocfs2/ | 7233 | W: http://ocfs2.wiki.kernel.org |
7217 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git | ||
7218 | S: Supported | 7234 | S: Supported |
7219 | F: Documentation/filesystems/ocfs2.txt | 7235 | F: Documentation/filesystems/ocfs2.txt |
7220 | F: Documentation/filesystems/dlmfs.txt | 7236 | F: Documentation/filesystems/dlmfs.txt |
@@ -8481,6 +8497,14 @@ S: Supported | |||
8481 | L: netdev@vger.kernel.org | 8497 | L: netdev@vger.kernel.org |
8482 | F: drivers/net/ethernet/samsung/sxgbe/ | 8498 | F: drivers/net/ethernet/samsung/sxgbe/ |
8483 | 8499 | ||
8500 | SAMSUNG THERMAL DRIVER | ||
8501 | M: Lukasz Majewski <l.majewski@samsung.com> | ||
8502 | L: linux-pm@vger.kernel.org | ||
8503 | L: linux-samsung-soc@vger.kernel.org | ||
8504 | S: Supported | ||
8505 | T: https://github.com/lmajewski/linux-samsung-thermal.git | ||
8506 | F: drivers/thermal/samsung/ | ||
8507 | |||
8484 | SAMSUNG USB2 PHY DRIVER | 8508 | SAMSUNG USB2 PHY DRIVER |
8485 | M: Kamil Debski <k.debski@samsung.com> | 8509 | M: Kamil Debski <k.debski@samsung.com> |
8486 | L: linux-kernel@vger.kernel.org | 8510 | L: linux-kernel@vger.kernel.org |
@@ -10189,6 +10213,13 @@ S: Maintained | |||
10189 | F: Documentation/usb/ohci.txt | 10213 | F: Documentation/usb/ohci.txt |
10190 | F: drivers/usb/host/ohci* | 10214 | F: drivers/usb/host/ohci* |
10191 | 10215 | ||
10216 | USB OTG FSM (Finite State Machine) | ||
10217 | M: Peter Chen <Peter.Chen@freescale.com> | ||
10218 | T: git git://github.com/hzpeterchen/linux-usb.git | ||
10219 | L: linux-usb@vger.kernel.org | ||
10220 | S: Maintained | ||
10221 | F: drivers/usb/common/usb-otg-fsm.c | ||
10222 | |||
10192 | USB OVER IP DRIVER | 10223 | USB OVER IP DRIVER |
10193 | M: Valentina Manea <valentina.manea.m@gmail.com> | 10224 | M: Valentina Manea <valentina.manea.m@gmail.com> |
10194 | M: Shuah Khan <shuah.kh@samsung.com> | 10225 | M: Shuah Khan <shuah.kh@samsung.com> |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 0 | 2 | PATCHLEVEL = 0 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = |
5 | NAME = Hurr durr I'ma sheep | 5 | NAME = Hurr durr I'ma sheep |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index 4e547296831d..52312cb5dbe2 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h | |||
@@ -47,9 +47,6 @@ struct thread_struct { | |||
47 | /* Forward declaration, a strange C thing */ | 47 | /* Forward declaration, a strange C thing */ |
48 | struct task_struct; | 48 | struct task_struct; |
49 | 49 | ||
50 | /* Return saved PC of a blocked thread */ | ||
51 | unsigned long thread_saved_pc(struct task_struct *t); | ||
52 | |||
53 | #define task_pt_regs(p) \ | 50 | #define task_pt_regs(p) \ |
54 | ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1) | 51 | ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1) |
55 | 52 | ||
@@ -72,18 +69,21 @@ unsigned long thread_saved_pc(struct task_struct *t); | |||
72 | #define release_segments(mm) do { } while (0) | 69 | #define release_segments(mm) do { } while (0) |
73 | 70 | ||
74 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) | 71 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) |
72 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) | ||
75 | 73 | ||
76 | /* | 74 | /* |
77 | * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode. | 75 | * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode. |
78 | * Look in process.c for details of kernel stack layout | 76 | * Look in process.c for details of kernel stack layout |
79 | */ | 77 | */ |
80 | #define KSTK_ESP(tsk) (tsk->thread.ksp) | 78 | #define TSK_K_ESP(tsk) (tsk->thread.ksp) |
81 | 79 | ||
82 | #define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \ | 80 | #define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \ |
83 | sizeof(struct callee_regs) + off))) | 81 | sizeof(struct callee_regs) + off))) |
84 | 82 | ||
85 | #define KSTK_BLINK(tsk) KSTK_REG(tsk, 4) | 83 | #define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4) |
86 | #define KSTK_FP(tsk) KSTK_REG(tsk, 0) | 84 | #define TSK_K_FP(tsk) TSK_K_REG(tsk, 0) |
85 | |||
86 | #define thread_saved_pc(tsk) TSK_K_BLINK(tsk) | ||
87 | 87 | ||
88 | extern void start_thread(struct pt_regs * regs, unsigned long pc, | 88 | extern void start_thread(struct pt_regs * regs, unsigned long pc, |
89 | unsigned long usp); | 89 | unsigned long usp); |
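The processor.h change above renames the kernel-stack accessors from KSTK_* to TSK_K_* (KSTK_ESP now reports the user-mode SP from pt_regs) and turns thread_saved_pc() into a macro over TSK_K_BLINK(). A minimal sketch of how the new accessors could be used follows; show_sleeping_task() is a hypothetical helper added here for illustration only, not part of this patch.

/* Hedged sketch: report the saved kernel-mode state of a switched-out task
 * using the TSK_K_* macros and the thread_saved_pc() macro defined above. */
static void show_sleeping_task(struct task_struct *t)
{
	/* Only meaningful for a task that is not currently executing */
	if (t == current || t->state == TASK_RUNNING)
		return;

	pr_info("%s[%d]: ksp=%#lx fp=%#lx blink=%#lx saved_pc=%#lx\n",
		t->comm, task_pid_nr(t),
		(unsigned long)TSK_K_ESP(t), (unsigned long)TSK_K_FP(t),
		(unsigned long)TSK_K_BLINK(t),
		(unsigned long)thread_saved_pc(t));
}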
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h new file mode 100644 index 000000000000..b29b6064ea14 --- /dev/null +++ b/arch/arc/include/asm/stacktrace.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) | ||
3 | * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #ifndef __ASM_STACKTRACE_H | ||
11 | #define __ASM_STACKTRACE_H | ||
12 | |||
13 | #include <linux/sched.h> | ||
14 | |||
15 | /** | ||
16 | * arc_unwind_core - Unwind the kernel mode stack for an execution context | ||
17 | * @tsk: NULL for current task, specific task otherwise | ||
18 | * @regs: pt_regs used to seed the unwinder {SP, FP, BLINK, PC} | ||
19 | * If NULL, use pt_regs of @tsk (if !NULL) otherwise | ||
20 | * use the current values of {SP, FP, BLINK, PC} | ||
21 | * @consumer_fn: Callback invoked for each frame unwound | ||
22 | * Returns 0 to continue unwinding, -1 to stop | ||
23 | * @arg: Arg to callback | ||
24 | * | ||
25 | * Returns the address of the first function in the stack | ||
26 | * | ||
27 | * Semantics: | ||
28 | * - synchronous unwinding (e.g. dump_stack): @tsk NULL, @regs NULL | ||
29 | * - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs NULL | ||
30 | * - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL | ||
31 | */ | ||
32 | notrace noinline unsigned int arc_unwind_core( | ||
33 | struct task_struct *tsk, struct pt_regs *regs, | ||
34 | int (*consumer_fn) (unsigned int, void *), | ||
35 | void *arg); | ||
36 | |||
37 | #endif /* __ASM_STACKTRACE_H */ | ||
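The kernel-doc above spells out the consumer contract for arc_unwind_core() (return 0 to keep unwinding, -1 to stop). As a hedged illustration of that contract, here is a hypothetical caller; print_frame() and dump_current_stack() are example names, not part of this patch.

/* Hypothetical consumer: print each unwound address, stop after 16 frames */
struct frame_ctx {
	int depth;
};

static int print_frame(unsigned int address, void *arg)
{
	struct frame_ctx *ctx = arg;

	pr_cont(" [<%08x>]", address);
	return (++ctx->depth < 16) ? 0 : -1;	/* 0: continue, -1: stop */
}

/* Synchronous unwind of the calling context: @tsk NULL, @regs NULL */
static void dump_current_stack(void)
{
	struct frame_ctx ctx = { .depth = 0 };

	pr_info("Call trace:");
	arc_unwind_core(NULL, NULL, print_frame, &ctx);
	pr_cont("\n");
}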
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index fdd89715d2d3..98c00a2d4dd9 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c | |||
@@ -192,29 +192,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | |||
192 | return 0; | 192 | return 0; |
193 | } | 193 | } |
194 | 194 | ||
195 | /* | ||
196 | * API: expected by scheduler code: if a thread is sleeping, where is it. | ||
197 | * What is this good for? It will always be the scheduler or ret_from_fork. | ||
198 | * So we hard-code that anyway. | ||
199 | */ | ||
200 | unsigned long thread_saved_pc(struct task_struct *t) | ||
201 | { | ||
202 | struct pt_regs *regs = task_pt_regs(t); | ||
203 | unsigned long blink = 0; | ||
204 | |||
205 | /* | ||
206 | * If the thread being queried for is not itself calling this, then it | ||
207 | * implies it is not executing, which in turn implies it is sleeping, | ||
208 | * which in turn implies it got switched OUT by the scheduler. | ||
209 | * In that case, its kernel mode blink can be reliably retrieved as per | ||
210 | * the picture above (right above pt_regs). | ||
211 | */ | ||
212 | if (t != current && t->state != TASK_RUNNING) | ||
213 | blink = *((unsigned int *)regs - 1); | ||
214 | |||
215 | return blink; | ||
216 | } | ||
217 | |||
218 | int elf_check_arch(const struct elf32_hdr *x) | 195 | int elf_check_arch(const struct elf32_hdr *x) |
219 | { | 196 | { |
220 | unsigned int eflags; | 197 | unsigned int eflags; |
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 114234e83caa..edda76fae83f 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c | |||
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs, | |||
67 | sigset_t *set) | 67 | sigset_t *set) |
68 | { | 68 | { |
69 | int err; | 69 | int err; |
70 | err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs, | 70 | err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs, |
71 | sizeof(sf->uc.uc_mcontext.regs.scratch)); | 71 | sizeof(sf->uc.uc_mcontext.regs.scratch)); |
72 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); | 72 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); |
73 | 73 | ||
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) | |||
83 | if (!err) | 83 | if (!err) |
84 | set_current_blocked(&set); | 84 | set_current_blocked(&set); |
85 | 85 | ||
86 | err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs), | 86 | err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch), |
87 | sizeof(sf->uc.uc_mcontext.regs.scratch)); | 87 | sizeof(sf->uc.uc_mcontext.regs.scratch)); |
88 | 88 | ||
89 | return err; | 89 | return err; |
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn) | |||
131 | /* Don't restart from sigreturn */ | 131 | /* Don't restart from sigreturn */ |
132 | syscall_wont_restart(regs); | 132 | syscall_wont_restart(regs); |
133 | 133 | ||
134 | /* | ||
135 | * Ensure that sigreturn always returns to user mode (in case the | ||
136 | * regs saved on user stack got fudged between save and sigreturn) | ||
137 | * Otherwise it is easy to panic the kernel with a custom | ||
138 | * signal handler and/or restorer which clobbers the status32/ret | ||
139 | * to return to a bogus location in kernel mode. | ||
140 | */ | ||
141 | regs->status32 |= STATUS_U_MASK; | ||
142 | |||
134 | return regs->r0; | 143 | return regs->r0; |
135 | 144 | ||
136 | badframe: | 145 | badframe: |
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) | |||
229 | 238 | ||
230 | /* | 239 | /* |
231 | * handler returns using sigreturn stub provided already by userspace | 240 | * handler returns using sigreturn stub provided already by userspace |
241 | * If not, nuke the process right away | ||
232 | */ | 242 | */ |
233 | BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER)); | 243 | if (!(ksig->ka.sa.sa_flags & SA_RESTORER)) |
244 | return 1; | ||
245 | |||
234 | regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; | 246 | regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; |
235 | 247 | ||
236 | /* User Stack for signal handler will be above the frame just carved */ | 248 | /* User Stack for signal handler will be above the frame just carved */ |
@@ -296,12 +308,12 @@ static void | |||
296 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) | 308 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
297 | { | 309 | { |
298 | sigset_t *oldset = sigmask_to_save(); | 310 | sigset_t *oldset = sigmask_to_save(); |
299 | int ret; | 311 | int failed; |
300 | 312 | ||
301 | /* Set up the stack frame */ | 313 | /* Set up the stack frame */ |
302 | ret = setup_rt_frame(ksig, oldset, regs); | 314 | failed = setup_rt_frame(ksig, oldset, regs); |
303 | 315 | ||
304 | signal_setup_done(ret, ksig, 0); | 316 | signal_setup_done(failed, ksig, 0); |
305 | } | 317 | } |
306 | 318 | ||
307 | void do_signal(struct pt_regs *regs) | 319 | void do_signal(struct pt_regs *regs) |
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index 9ce47cfe2303..92320d6f737c 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c | |||
@@ -43,6 +43,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk, | |||
43 | struct pt_regs *regs, | 43 | struct pt_regs *regs, |
44 | struct unwind_frame_info *frame_info) | 44 | struct unwind_frame_info *frame_info) |
45 | { | 45 | { |
46 | /* | ||
47 | * synchronous unwinding (e.g. dump_stack) | ||
48 | * - uses current values of SP and friends | ||
49 | */ | ||
46 | if (tsk == NULL && regs == NULL) { | 50 | if (tsk == NULL && regs == NULL) { |
47 | unsigned long fp, sp, blink, ret; | 51 | unsigned long fp, sp, blink, ret; |
48 | frame_info->task = current; | 52 | frame_info->task = current; |
@@ -61,12 +65,17 @@ static void seed_unwind_frame_info(struct task_struct *tsk, | |||
61 | frame_info->regs.r63 = ret; | 65 | frame_info->regs.r63 = ret; |
62 | frame_info->call_frame = 0; | 66 | frame_info->call_frame = 0; |
63 | } else if (regs == NULL) { | 67 | } else if (regs == NULL) { |
68 | /* | ||
69 | * Asynchronous unwinding of sleeping task | ||
70 | * - Gets SP etc from task's pt_regs (saved bottom of kernel | ||
71 | * mode stack of task) | ||
72 | */ | ||
64 | 73 | ||
65 | frame_info->task = tsk; | 74 | frame_info->task = tsk; |
66 | 75 | ||
67 | frame_info->regs.r27 = KSTK_FP(tsk); | 76 | frame_info->regs.r27 = TSK_K_FP(tsk); |
68 | frame_info->regs.r28 = KSTK_ESP(tsk); | 77 | frame_info->regs.r28 = TSK_K_ESP(tsk); |
69 | frame_info->regs.r31 = KSTK_BLINK(tsk); | 78 | frame_info->regs.r31 = TSK_K_BLINK(tsk); |
70 | frame_info->regs.r63 = (unsigned int)__switch_to; | 79 | frame_info->regs.r63 = (unsigned int)__switch_to; |
71 | 80 | ||
72 | /* In the prologue of __switch_to, first FP is saved on stack | 81 | /* In the prologue of __switch_to, first FP is saved on stack |
@@ -83,6 +92,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk, | |||
83 | frame_info->call_frame = 0; | 92 | frame_info->call_frame = 0; |
84 | 93 | ||
85 | } else { | 94 | } else { |
95 | /* | ||
96 | * Asynchronous unwinding of intr/exception | ||
97 | * - Just uses the pt_regs passed | ||
98 | */ | ||
86 | frame_info->task = tsk; | 99 | frame_info->task = tsk; |
87 | 100 | ||
88 | frame_info->regs.r27 = regs->fp; | 101 | frame_info->regs.r27 = regs->fp; |
@@ -95,7 +108,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk, | |||
95 | 108 | ||
96 | #endif | 109 | #endif |
97 | 110 | ||
98 | static noinline unsigned int | 111 | notrace noinline unsigned int |
99 | arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, | 112 | arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, |
100 | int (*consumer_fn) (unsigned int, void *), void *arg) | 113 | int (*consumer_fn) (unsigned int, void *), void *arg) |
101 | { | 114 | { |
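The comments added to seed_unwind_frame_info() above enumerate the three seeding modes. For the second mode (asynchronous unwind of a sleeping task), a hedged sketch of a caller is shown below; dump_all_sleeping_tasks() is a hypothetical helper and print_frame()/frame_ctx are the example consumer sketched earlier, none of which is part of this patch.

/* Hedged sketch: unwind every task except current via the
 * "@tsk != NULL, @regs == NULL" seeding mode described above. */
static void dump_all_sleeping_tasks(void)
{
	struct task_struct *g, *t;
	struct frame_ctx ctx;

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (t == current)
			continue;	/* current would use the synchronous mode */
		ctx.depth = 0;
		pr_info("%s[%d]:", t->comm, task_pid_nr(t));
		arc_unwind_core(t, NULL, print_frame, &ctx);
		pr_cont("\n");
	}
	rcu_read_unlock();
}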
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index 7ff5b5c183bb..74db59b6f392 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/perf_event.h> | ||
15 | #include <linux/ptrace.h> | 16 | #include <linux/ptrace.h> |
16 | #include <linux/uaccess.h> | 17 | #include <linux/uaccess.h> |
17 | #include <asm/disasm.h> | 18 | #include <asm/disasm.h> |
@@ -253,6 +254,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, | |||
253 | } | 254 | } |
254 | } | 255 | } |
255 | 256 | ||
257 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); | ||
256 | return 0; | 258 | return 0; |
257 | 259 | ||
258 | fault: | 260 | fault: |
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 563cb27e37f5..6a2e006cbcce 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/ptrace.h> | 14 | #include <linux/ptrace.h> |
15 | #include <linux/uaccess.h> | 15 | #include <linux/uaccess.h> |
16 | #include <linux/kdebug.h> | 16 | #include <linux/kdebug.h> |
17 | #include <linux/perf_event.h> | ||
17 | #include <asm/pgalloc.h> | 18 | #include <asm/pgalloc.h> |
18 | #include <asm/mmu.h> | 19 | #include <asm/mmu.h> |
19 | 20 | ||
@@ -139,13 +140,20 @@ good_area: | |||
139 | return; | 140 | return; |
140 | } | 141 | } |
141 | 142 | ||
143 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | ||
144 | |||
142 | if (likely(!(fault & VM_FAULT_ERROR))) { | 145 | if (likely(!(fault & VM_FAULT_ERROR))) { |
143 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | 146 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
144 | /* To avoid updating stats twice for retry case */ | 147 | /* To avoid updating stats twice for retry case */ |
145 | if (fault & VM_FAULT_MAJOR) | 148 | if (fault & VM_FAULT_MAJOR) { |
146 | tsk->maj_flt++; | 149 | tsk->maj_flt++; |
147 | else | 150 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
151 | regs, address); | ||
152 | } else { | ||
148 | tsk->min_flt++; | 153 | tsk->min_flt++; |
154 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | ||
155 | regs, address); | ||
156 | } | ||
149 | 157 | ||
150 | if (fault & VM_FAULT_RETRY) { | 158 | if (fault & VM_FAULT_RETRY) { |
151 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | 159 | flags &= ~FAULT_FLAG_ALLOW_RETRY; |
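The unaligned.c and fault.c hunks above make ARC emit the generic software perf events for alignment faults and for total/minor/major page faults. Once in place they can be read with the standard tooling, e.g. perf stat -e minor-faults,major-faults,alignment-faults <cmd>, or directly through perf_event_open(2). Below is a hedged userspace sketch (error handling trimmed, helper names chosen for illustration), not part of this patch.

/* Count minor page faults of the current task via perf_event_open(2) */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static int open_sw_counter(unsigned long long config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = config;		/* e.g. PERF_COUNT_SW_PAGE_FAULTS_MIN */
	attr.disabled = 1;

	/* pid = 0 (this task), cpu = -1 (any CPU), no group leader, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	int fd = open_sw_counter(PERF_COUNT_SW_PAGE_FAULTS_MIN);
	long long count = 0;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload of interest here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("minor page faults: %lld\n", count);
	return 0;
}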
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9f1f09a2bc9b..cf4c0c99aa25 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -619,6 +619,7 @@ config ARCH_PXA | |||
619 | select GENERIC_CLOCKEVENTS | 619 | select GENERIC_CLOCKEVENTS |
620 | select GPIO_PXA | 620 | select GPIO_PXA |
621 | select HAVE_IDE | 621 | select HAVE_IDE |
622 | select IRQ_DOMAIN | ||
622 | select MULTI_IRQ_HANDLER | 623 | select MULTI_IRQ_HANDLER |
623 | select PLAT_PXA | 624 | select PLAT_PXA |
624 | select SPARSE_IRQ | 625 | select SPARSE_IRQ |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 7f99cd652203..eb7bb511f853 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -150,6 +150,7 @@ machine-$(CONFIG_ARCH_BERLIN) += berlin | |||
150 | machine-$(CONFIG_ARCH_CLPS711X) += clps711x | 150 | machine-$(CONFIG_ARCH_CLPS711X) += clps711x |
151 | machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx | 151 | machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx |
152 | machine-$(CONFIG_ARCH_DAVINCI) += davinci | 152 | machine-$(CONFIG_ARCH_DAVINCI) += davinci |
153 | machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor | ||
153 | machine-$(CONFIG_ARCH_DOVE) += dove | 154 | machine-$(CONFIG_ARCH_DOVE) += dove |
154 | machine-$(CONFIG_ARCH_EBSA110) += ebsa110 | 155 | machine-$(CONFIG_ARCH_EBSA110) += ebsa110 |
155 | machine-$(CONFIG_ARCH_EFM32) += efm32 | 156 | machine-$(CONFIG_ARCH_EFM32) += efm32 |
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index 6cc25ed912ee..c3255e0c90aa 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi | |||
@@ -195,6 +195,7 @@ | |||
195 | 195 | ||
196 | &usb0 { | 196 | &usb0 { |
197 | status = "okay"; | 197 | status = "okay"; |
198 | dr_mode = "peripheral"; | ||
198 | }; | 199 | }; |
199 | 200 | ||
200 | &usb1 { | 201 | &usb1 { |
@@ -300,3 +301,11 @@ | |||
300 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; | 301 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; |
301 | cd-inverted; | 302 | cd-inverted; |
302 | }; | 303 | }; |
304 | |||
305 | &aes { | ||
306 | status = "okay"; | ||
307 | }; | ||
308 | |||
309 | &sham { | ||
310 | status = "okay"; | ||
311 | }; | ||
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts index 83d40f7655e5..6b8493720424 100644 --- a/arch/arm/boot/dts/am335x-bone.dts +++ b/arch/arm/boot/dts/am335x-bone.dts | |||
@@ -24,11 +24,3 @@ | |||
24 | &mmc1 { | 24 | &mmc1 { |
25 | vmmc-supply = <&ldo3_reg>; | 25 | vmmc-supply = <&ldo3_reg>; |
26 | }; | 26 | }; |
27 | |||
28 | &sham { | ||
29 | status = "okay"; | ||
30 | }; | ||
31 | |||
32 | &aes { | ||
33 | status = "okay"; | ||
34 | }; | ||
diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts index 7266a00aab2e..5c5667a3624d 100644 --- a/arch/arm/boot/dts/am335x-lxm.dts +++ b/arch/arm/boot/dts/am335x-lxm.dts | |||
@@ -328,6 +328,10 @@ | |||
328 | dual_emac_res_vlan = <3>; | 328 | dual_emac_res_vlan = <3>; |
329 | }; | 329 | }; |
330 | 330 | ||
331 | &phy_sel { | ||
332 | rmii-clock-ext; | ||
333 | }; | ||
334 | |||
331 | &mac { | 335 | &mac { |
332 | pinctrl-names = "default", "sleep"; | 336 | pinctrl-names = "default", "sleep"; |
333 | pinctrl-0 = <&cpsw_default>; | 337 | pinctrl-0 = <&cpsw_default>; |
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi index 712edce7d6fb..071b56aa0c7e 100644 --- a/arch/arm/boot/dts/am33xx-clocks.dtsi +++ b/arch/arm/boot/dts/am33xx-clocks.dtsi | |||
@@ -99,7 +99,7 @@ | |||
99 | ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 { | 99 | ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 { |
100 | #clock-cells = <0>; | 100 | #clock-cells = <0>; |
101 | compatible = "ti,gate-clock"; | 101 | compatible = "ti,gate-clock"; |
102 | clocks = <&dpll_per_m2_ck>; | 102 | clocks = <&l4ls_gclk>; |
103 | ti,bit-shift = <0>; | 103 | ti,bit-shift = <0>; |
104 | reg = <0x0664>; | 104 | reg = <0x0664>; |
105 | }; | 105 | }; |
@@ -107,7 +107,7 @@ | |||
107 | ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 { | 107 | ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 { |
108 | #clock-cells = <0>; | 108 | #clock-cells = <0>; |
109 | compatible = "ti,gate-clock"; | 109 | compatible = "ti,gate-clock"; |
110 | clocks = <&dpll_per_m2_ck>; | 110 | clocks = <&l4ls_gclk>; |
111 | ti,bit-shift = <1>; | 111 | ti,bit-shift = <1>; |
112 | reg = <0x0664>; | 112 | reg = <0x0664>; |
113 | }; | 113 | }; |
@@ -115,7 +115,7 @@ | |||
115 | ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 { | 115 | ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 { |
116 | #clock-cells = <0>; | 116 | #clock-cells = <0>; |
117 | compatible = "ti,gate-clock"; | 117 | compatible = "ti,gate-clock"; |
118 | clocks = <&dpll_per_m2_ck>; | 118 | clocks = <&l4ls_gclk>; |
119 | ti,bit-shift = <2>; | 119 | ti,bit-shift = <2>; |
120 | reg = <0x0664>; | 120 | reg = <0x0664>; |
121 | }; | 121 | }; |
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts index f9a17e2ca8cb..0198f5a62b96 100644 --- a/arch/arm/boot/dts/am437x-idk-evm.dts +++ b/arch/arm/boot/dts/am437x-idk-evm.dts | |||
@@ -133,20 +133,6 @@ | |||
133 | >; | 133 | >; |
134 | }; | 134 | }; |
135 | 135 | ||
136 | i2c1_pins_default: i2c1_pins_default { | ||
137 | pinctrl-single,pins = < | ||
138 | 0x15c (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_cs0.i2c1_scl */ | ||
139 | 0x158 (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_d1.i2c1_sda */ | ||
140 | >; | ||
141 | }; | ||
142 | |||
143 | i2c1_pins_sleep: i2c1_pins_sleep { | ||
144 | pinctrl-single,pins = < | ||
145 | 0x15c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_cs0.i2c1_scl */ | ||
146 | 0x158 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d1.i2c1_sda */ | ||
147 | >; | ||
148 | }; | ||
149 | |||
150 | mmc1_pins_default: pinmux_mmc1_pins_default { | 136 | mmc1_pins_default: pinmux_mmc1_pins_default { |
151 | pinctrl-single,pins = < | 137 | pinctrl-single,pins = < |
152 | 0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */ | 138 | 0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */ |
@@ -254,7 +240,7 @@ | |||
254 | status = "okay"; | 240 | status = "okay"; |
255 | pinctrl-names = "default", "sleep"; | 241 | pinctrl-names = "default", "sleep"; |
256 | pinctrl-0 = <&i2c0_pins_default>; | 242 | pinctrl-0 = <&i2c0_pins_default>; |
257 | pinctrl-1 = <&i2c0_pins_default>; | 243 | pinctrl-1 = <&i2c0_pins_sleep>; |
258 | clock-frequency = <400000>; | 244 | clock-frequency = <400000>; |
259 | 245 | ||
260 | at24@50 { | 246 | at24@50 { |
@@ -262,17 +248,10 @@ | |||
262 | pagesize = <64>; | 248 | pagesize = <64>; |
263 | reg = <0x50>; | 249 | reg = <0x50>; |
264 | }; | 250 | }; |
265 | }; | ||
266 | |||
267 | &i2c1 { | ||
268 | status = "okay"; | ||
269 | pinctrl-names = "default", "sleep"; | ||
270 | pinctrl-0 = <&i2c1_pins_default>; | ||
271 | pinctrl-1 = <&i2c1_pins_default>; | ||
272 | clock-frequency = <400000>; | ||
273 | 251 | ||
274 | tps: tps62362@60 { | 252 | tps: tps62362@60 { |
275 | compatible = "ti,tps62362"; | 253 | compatible = "ti,tps62362"; |
254 | reg = <0x60>; | ||
276 | regulator-name = "VDD_MPU"; | 255 | regulator-name = "VDD_MPU"; |
277 | regulator-min-microvolt = <950000>; | 256 | regulator-min-microvolt = <950000>; |
278 | regulator-max-microvolt = <1330000>; | 257 | regulator-max-microvolt = <1330000>; |
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi index c7dc9dab93a4..cfb49686ab6a 100644 --- a/arch/arm/boot/dts/am43xx-clocks.dtsi +++ b/arch/arm/boot/dts/am43xx-clocks.dtsi | |||
@@ -107,7 +107,7 @@ | |||
107 | ehrpwm0_tbclk: ehrpwm0_tbclk { | 107 | ehrpwm0_tbclk: ehrpwm0_tbclk { |
108 | #clock-cells = <0>; | 108 | #clock-cells = <0>; |
109 | compatible = "ti,gate-clock"; | 109 | compatible = "ti,gate-clock"; |
110 | clocks = <&dpll_per_m2_ck>; | 110 | clocks = <&l4ls_gclk>; |
111 | ti,bit-shift = <0>; | 111 | ti,bit-shift = <0>; |
112 | reg = <0x0664>; | 112 | reg = <0x0664>; |
113 | }; | 113 | }; |
@@ -115,7 +115,7 @@ | |||
115 | ehrpwm1_tbclk: ehrpwm1_tbclk { | 115 | ehrpwm1_tbclk: ehrpwm1_tbclk { |
116 | #clock-cells = <0>; | 116 | #clock-cells = <0>; |
117 | compatible = "ti,gate-clock"; | 117 | compatible = "ti,gate-clock"; |
118 | clocks = <&dpll_per_m2_ck>; | 118 | clocks = <&l4ls_gclk>; |
119 | ti,bit-shift = <1>; | 119 | ti,bit-shift = <1>; |
120 | reg = <0x0664>; | 120 | reg = <0x0664>; |
121 | }; | 121 | }; |
@@ -123,7 +123,7 @@ | |||
123 | ehrpwm2_tbclk: ehrpwm2_tbclk { | 123 | ehrpwm2_tbclk: ehrpwm2_tbclk { |
124 | #clock-cells = <0>; | 124 | #clock-cells = <0>; |
125 | compatible = "ti,gate-clock"; | 125 | compatible = "ti,gate-clock"; |
126 | clocks = <&dpll_per_m2_ck>; | 126 | clocks = <&l4ls_gclk>; |
127 | ti,bit-shift = <2>; | 127 | ti,bit-shift = <2>; |
128 | reg = <0x0664>; | 128 | reg = <0x0664>; |
129 | }; | 129 | }; |
@@ -131,7 +131,7 @@ | |||
131 | ehrpwm3_tbclk: ehrpwm3_tbclk { | 131 | ehrpwm3_tbclk: ehrpwm3_tbclk { |
132 | #clock-cells = <0>; | 132 | #clock-cells = <0>; |
133 | compatible = "ti,gate-clock"; | 133 | compatible = "ti,gate-clock"; |
134 | clocks = <&dpll_per_m2_ck>; | 134 | clocks = <&l4ls_gclk>; |
135 | ti,bit-shift = <4>; | 135 | ti,bit-shift = <4>; |
136 | reg = <0x0664>; | 136 | reg = <0x0664>; |
137 | }; | 137 | }; |
@@ -139,7 +139,7 @@ | |||
139 | ehrpwm4_tbclk: ehrpwm4_tbclk { | 139 | ehrpwm4_tbclk: ehrpwm4_tbclk { |
140 | #clock-cells = <0>; | 140 | #clock-cells = <0>; |
141 | compatible = "ti,gate-clock"; | 141 | compatible = "ti,gate-clock"; |
142 | clocks = <&dpll_per_m2_ck>; | 142 | clocks = <&l4ls_gclk>; |
143 | ti,bit-shift = <5>; | 143 | ti,bit-shift = <5>; |
144 | reg = <0x0664>; | 144 | reg = <0x0664>; |
145 | }; | 145 | }; |
@@ -147,7 +147,7 @@ | |||
147 | ehrpwm5_tbclk: ehrpwm5_tbclk { | 147 | ehrpwm5_tbclk: ehrpwm5_tbclk { |
148 | #clock-cells = <0>; | 148 | #clock-cells = <0>; |
149 | compatible = "ti,gate-clock"; | 149 | compatible = "ti,gate-clock"; |
150 | clocks = <&dpll_per_m2_ck>; | 150 | clocks = <&l4ls_gclk>; |
151 | ti,bit-shift = <6>; | 151 | ti,bit-shift = <6>; |
152 | reg = <0x0664>; | 152 | reg = <0x0664>; |
153 | }; | 153 | }; |
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 03750af3b49a..6463f9ef2b54 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts | |||
@@ -549,14 +549,6 @@ | |||
549 | pinctrl-0 = <&usb1_pins>; | 549 | pinctrl-0 = <&usb1_pins>; |
550 | }; | 550 | }; |
551 | 551 | ||
552 | &omap_dwc3_1 { | ||
553 | extcon = <&extcon_usb1>; | ||
554 | }; | ||
555 | |||
556 | &omap_dwc3_2 { | ||
557 | extcon = <&extcon_usb2>; | ||
558 | }; | ||
559 | |||
560 | &usb2 { | 552 | &usb2 { |
561 | dr_mode = "peripheral"; | 553 | dr_mode = "peripheral"; |
562 | }; | 554 | }; |
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index fff0ee69aab4..e7f0a4ae271c 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi | |||
@@ -494,12 +494,12 @@ | |||
494 | 494 | ||
495 | pinctrl_usart3_rts: usart3_rts-0 { | 495 | pinctrl_usart3_rts: usart3_rts-0 { |
496 | atmel,pins = | 496 | atmel,pins = |
497 | <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC8 periph B */ | 497 | <AT91_PIOC 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; |
498 | }; | 498 | }; |
499 | 499 | ||
500 | pinctrl_usart3_cts: usart3_cts-0 { | 500 | pinctrl_usart3_cts: usart3_cts-0 { |
501 | atmel,pins = | 501 | atmel,pins = |
502 | <AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC10 periph B */ | 502 | <AT91_PIOC 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; |
503 | }; | 503 | }; |
504 | }; | 504 | }; |
505 | 505 | ||
@@ -853,7 +853,7 @@ | |||
853 | }; | 853 | }; |
854 | 854 | ||
855 | usb1: gadget@fffa4000 { | 855 | usb1: gadget@fffa4000 { |
856 | compatible = "atmel,at91rm9200-udc"; | 856 | compatible = "atmel,at91sam9260-udc"; |
857 | reg = <0xfffa4000 0x4000>; | 857 | reg = <0xfffa4000 0x4000>; |
858 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; | 858 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; |
859 | clocks = <&udc_clk>, <&udpck>; | 859 | clocks = <&udc_clk>, <&udpck>; |
@@ -976,7 +976,6 @@ | |||
976 | atmel,watchdog-type = "hardware"; | 976 | atmel,watchdog-type = "hardware"; |
977 | atmel,reset-type = "all"; | 977 | atmel,reset-type = "all"; |
978 | atmel,dbg-halt; | 978 | atmel,dbg-halt; |
979 | atmel,idle-halt; | ||
980 | status = "disabled"; | 979 | status = "disabled"; |
981 | }; | 980 | }; |
982 | 981 | ||
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi index e247b0b5fdab..d55fdf2487ef 100644 --- a/arch/arm/boot/dts/at91sam9261.dtsi +++ b/arch/arm/boot/dts/at91sam9261.dtsi | |||
@@ -124,11 +124,12 @@ | |||
124 | }; | 124 | }; |
125 | 125 | ||
126 | usb1: gadget@fffa4000 { | 126 | usb1: gadget@fffa4000 { |
127 | compatible = "atmel,at91rm9200-udc"; | 127 | compatible = "atmel,at91sam9261-udc"; |
128 | reg = <0xfffa4000 0x4000>; | 128 | reg = <0xfffa4000 0x4000>; |
129 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; | 129 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; |
130 | clocks = <&usb>, <&udc_clk>, <&udpck>; | 130 | clocks = <&udc_clk>, <&udpck>; |
131 | clock-names = "usb_clk", "udc_clk", "udpck"; | 131 | clock-names = "pclk", "hclk"; |
132 | atmel,matrix = <&matrix>; | ||
132 | status = "disabled"; | 133 | status = "disabled"; |
133 | }; | 134 | }; |
134 | 135 | ||
@@ -262,7 +263,7 @@ | |||
262 | }; | 263 | }; |
263 | 264 | ||
264 | matrix: matrix@ffffee00 { | 265 | matrix: matrix@ffffee00 { |
265 | compatible = "atmel,at91sam9260-bus-matrix"; | 266 | compatible = "atmel,at91sam9260-bus-matrix", "syscon"; |
266 | reg = <0xffffee00 0x200>; | 267 | reg = <0xffffee00 0x200>; |
267 | }; | 268 | }; |
268 | 269 | ||
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index 1f67bb4c144e..fce301c4e9d6 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi | |||
@@ -69,7 +69,7 @@ | |||
69 | 69 | ||
70 | sram1: sram@00500000 { | 70 | sram1: sram@00500000 { |
71 | compatible = "mmio-sram"; | 71 | compatible = "mmio-sram"; |
72 | reg = <0x00300000 0x4000>; | 72 | reg = <0x00500000 0x4000>; |
73 | }; | 73 | }; |
74 | 74 | ||
75 | ahb { | 75 | ahb { |
@@ -856,7 +856,7 @@ | |||
856 | }; | 856 | }; |
857 | 857 | ||
858 | usb1: gadget@fff78000 { | 858 | usb1: gadget@fff78000 { |
859 | compatible = "atmel,at91rm9200-udc"; | 859 | compatible = "atmel,at91sam9263-udc"; |
860 | reg = <0xfff78000 0x4000>; | 860 | reg = <0xfff78000 0x4000>; |
861 | interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>; | 861 | interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>; |
862 | clocks = <&udc_clk>, <&udpck>; | 862 | clocks = <&udc_clk>, <&udpck>; |
@@ -905,7 +905,6 @@ | |||
905 | atmel,watchdog-type = "hardware"; | 905 | atmel,watchdog-type = "hardware"; |
906 | atmel,reset-type = "all"; | 906 | atmel,reset-type = "all"; |
907 | atmel,dbg-halt; | 907 | atmel,dbg-halt; |
908 | atmel,idle-halt; | ||
909 | status = "disabled"; | 908 | status = "disabled"; |
910 | }; | 909 | }; |
911 | 910 | ||
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index ee80aa9c0759..488af63d5174 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi | |||
@@ -1116,7 +1116,6 @@ | |||
1116 | atmel,watchdog-type = "hardware"; | 1116 | atmel,watchdog-type = "hardware"; |
1117 | atmel,reset-type = "all"; | 1117 | atmel,reset-type = "all"; |
1118 | atmel,dbg-halt; | 1118 | atmel,dbg-halt; |
1119 | atmel,idle-halt; | ||
1120 | status = "disabled"; | 1119 | status = "disabled"; |
1121 | }; | 1120 | }; |
1122 | 1121 | ||
@@ -1301,7 +1300,7 @@ | |||
1301 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 1300 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
1302 | reg = <0x00800000 0x100000>; | 1301 | reg = <0x00800000 0x100000>; |
1303 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; | 1302 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; |
1304 | clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; | 1303 | clocks = <&utmi>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; |
1305 | clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck"; | 1304 | clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck"; |
1306 | status = "disabled"; | 1305 | status = "disabled"; |
1307 | }; | 1306 | }; |
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi index c2666a7cb5b1..0c53a375ba99 100644 --- a/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/arch/arm/boot/dts/at91sam9n12.dtsi | |||
@@ -894,7 +894,6 @@ | |||
894 | atmel,watchdog-type = "hardware"; | 894 | atmel,watchdog-type = "hardware"; |
895 | atmel,reset-type = "all"; | 895 | atmel,reset-type = "all"; |
896 | atmel,dbg-halt; | 896 | atmel,dbg-halt; |
897 | atmel,idle-halt; | ||
898 | status = "disabled"; | 897 | status = "disabled"; |
899 | }; | 898 | }; |
900 | 899 | ||
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index 818dabdd8c0e..d221179d0f1a 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi | |||
@@ -1066,7 +1066,7 @@ | |||
1066 | reg = <0x00500000 0x80000 | 1066 | reg = <0x00500000 0x80000 |
1067 | 0xf803c000 0x400>; | 1067 | 0xf803c000 0x400>; |
1068 | interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; | 1068 | interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; |
1069 | clocks = <&usb>, <&udphs_clk>; | 1069 | clocks = <&utmi>, <&udphs_clk>; |
1070 | clock-names = "hclk", "pclk"; | 1070 | clock-names = "hclk", "pclk"; |
1071 | status = "disabled"; | 1071 | status = "disabled"; |
1072 | 1072 | ||
@@ -1130,7 +1130,6 @@ | |||
1130 | atmel,watchdog-type = "hardware"; | 1130 | atmel,watchdog-type = "hardware"; |
1131 | atmel,reset-type = "all"; | 1131 | atmel,reset-type = "all"; |
1132 | atmel,dbg-halt; | 1132 | atmel,dbg-halt; |
1133 | atmel,idle-halt; | ||
1134 | status = "disabled"; | 1133 | status = "disabled"; |
1135 | }; | 1134 | }; |
1136 | 1135 | ||
@@ -1186,7 +1185,7 @@ | |||
1186 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 1185 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
1187 | reg = <0x00700000 0x100000>; | 1186 | reg = <0x00700000 0x100000>; |
1188 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; | 1187 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; |
1189 | clocks = <&usb>, <&uhphs_clk>, <&uhpck>; | 1188 | clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; |
1190 | clock-names = "usb_clk", "ehci_clk", "uhpck"; | 1189 | clock-names = "usb_clk", "ehci_clk", "uhpck"; |
1191 | status = "disabled"; | 1190 | status = "disabled"; |
1192 | }; | 1191 | }; |
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts index 857d0289ad4d..afe678f6d2e9 100644 --- a/arch/arm/boot/dts/dm8168-evm.dts +++ b/arch/arm/boot/dts/dm8168-evm.dts | |||
@@ -35,6 +35,32 @@ | |||
35 | DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */ | 35 | DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */ |
36 | >; | 36 | >; |
37 | }; | 37 | }; |
38 | |||
39 | mmc_pins: pinmux_mmc_pins { | ||
40 | pinctrl-single,pins = < | ||
41 | DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */ | ||
42 | DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */ | ||
43 | DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */ | ||
44 | DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */ | ||
45 | DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */ | ||
46 | DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */ | ||
47 | DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT3 */ | ||
48 | DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */ | ||
49 | DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */ | ||
50 | >; | ||
51 | }; | ||
52 | |||
53 | usb0_pins: pinmux_usb0_pins { | ||
54 | pinctrl-single,pins = < | ||
55 | DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ | ||
56 | >; | ||
57 | }; | ||
58 | |||
59 | usb1_pins: pinmux_usb1_pins { | ||
60 | pinctrl-single,pins = < | ||
61 | DM816X_IOPAD(0x0d04, MUX_MODE0) /* USB1_DRVVBUS */ | ||
62 | >; | ||
63 | }; | ||
38 | }; | 64 | }; |
39 | 65 | ||
40 | &i2c1 { | 66 | &i2c1 { |
@@ -125,5 +151,23 @@ | |||
125 | }; | 151 | }; |
126 | 152 | ||
127 | &mmc1 { | 153 | &mmc1 { |
154 | pinctrl-names = "default"; | ||
155 | pinctrl-0 = <&mmc_pins>; | ||
128 | vmmc-supply = <&vmmcsd_fixed>; | 156 | vmmc-supply = <&vmmcsd_fixed>; |
157 | bus-width = <4>; | ||
158 | cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; | ||
159 | wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; | ||
160 | }; | ||
161 | |||
162 | /* At least dm8168-evm rev c won't support multipoint, later revisions may */ | ||
163 | &usb0 { | ||
164 | pinctrl-names = "default"; | ||
165 | pinctrl-0 = <&usb0_pins>; | ||
166 | mentor,multipoint = <0>; | ||
167 | }; | ||
168 | |||
169 | &usb1 { | ||
170 | pinctrl-names = "default"; | ||
171 | pinctrl-0 = <&usb1_pins>; | ||
172 | mentor,multipoint = <0>; | ||
129 | }; | 173 | }; |
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index d98d0f7de380..f35715bc6992 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi | |||
@@ -97,10 +97,31 @@ | |||
97 | 97 | ||
98 | /* Device Configuration Registers */ | 98 | /* Device Configuration Registers */ |
99 | scm_conf: syscon@600 { | 99 | scm_conf: syscon@600 { |
100 | compatible = "syscon"; | 100 | compatible = "syscon", "simple-bus"; |
101 | reg = <0x600 0x110>; | 101 | reg = <0x600 0x110>; |
102 | #address-cells = <1>; | 102 | #address-cells = <1>; |
103 | #size-cells = <1>; | 103 | #size-cells = <1>; |
104 | ranges = <0 0x600 0x110>; | ||
105 | |||
106 | usb_phy0: usb-phy@20 { | ||
107 | compatible = "ti,dm8168-usb-phy"; | ||
108 | reg = <0x20 0x8>; | ||
109 | reg-names = "phy"; | ||
110 | clocks = <&main_fapll 6>; | ||
111 | clock-names = "refclk"; | ||
112 | #phy-cells = <0>; | ||
113 | syscon = <&scm_conf>; | ||
114 | }; | ||
115 | |||
116 | usb_phy1: usb-phy@28 { | ||
117 | compatible = "ti,dm8168-usb-phy"; | ||
118 | reg = <0x28 0x8>; | ||
119 | reg-names = "phy"; | ||
120 | clocks = <&main_fapll 6>; | ||
121 | clock-names = "refclk"; | ||
122 | #phy-cells = <0>; | ||
123 | syscon = <&scm_conf>; | ||
124 | }; | ||
104 | }; | 125 | }; |
105 | 126 | ||
106 | scrm_clocks: clocks { | 127 | scrm_clocks: clocks { |
@@ -129,17 +150,27 @@ | |||
129 | }; | 150 | }; |
130 | 151 | ||
131 | gpio1: gpio@48032000 { | 152 | gpio1: gpio@48032000 { |
132 | compatible = "ti,omap3-gpio"; | 153 | compatible = "ti,omap4-gpio"; |
133 | ti,hwmods = "gpio1"; | 154 | ti,hwmods = "gpio1"; |
155 | ti,gpio-always-on; | ||
134 | reg = <0x48032000 0x1000>; | 156 | reg = <0x48032000 0x1000>; |
135 | interrupts = <97>; | 157 | interrupts = <96>; |
158 | gpio-controller; | ||
159 | #gpio-cells = <2>; | ||
160 | interrupt-controller; | ||
161 | #interrupt-cells = <2>; | ||
136 | }; | 162 | }; |
137 | 163 | ||
138 | gpio2: gpio@4804c000 { | 164 | gpio2: gpio@4804c000 { |
139 | compatible = "ti,omap3-gpio"; | 165 | compatible = "ti,omap4-gpio"; |
140 | ti,hwmods = "gpio2"; | 166 | ti,hwmods = "gpio2"; |
167 | ti,gpio-always-on; | ||
141 | reg = <0x4804c000 0x1000>; | 168 | reg = <0x4804c000 0x1000>; |
142 | interrupts = <99>; | 169 | interrupts = <98>; |
170 | gpio-controller; | ||
171 | #gpio-cells = <2>; | ||
172 | interrupt-controller; | ||
173 | #interrupt-cells = <2>; | ||
143 | }; | 174 | }; |
144 | 175 | ||
145 | gpmc: gpmc@50000000 { | 176 | gpmc: gpmc@50000000 { |
@@ -357,7 +388,10 @@ | |||
357 | reg-names = "mc", "control"; | 388 | reg-names = "mc", "control"; |
358 | interrupts = <18>; | 389 | interrupts = <18>; |
359 | interrupt-names = "mc"; | 390 | interrupt-names = "mc"; |
360 | dr_mode = "otg"; | 391 | dr_mode = "host"; |
392 | interface-type = <0>; | ||
393 | phys = <&usb_phy0>; | ||
394 | phy-names = "usb2-phy"; | ||
361 | mentor,multipoint = <1>; | 395 | mentor,multipoint = <1>; |
362 | mentor,num-eps = <16>; | 396 | mentor,num-eps = <16>; |
363 | mentor,ram-bits = <12>; | 397 | mentor,ram-bits = <12>; |
@@ -366,13 +400,15 @@ | |||
366 | 400 | ||
367 | usb1: usb@47401800 { | 401 | usb1: usb@47401800 { |
368 | compatible = "ti,musb-am33xx"; | 402 | compatible = "ti,musb-am33xx"; |
369 | status = "disabled"; | ||
370 | reg = <0x47401c00 0x400 | 403 | reg = <0x47401c00 0x400 |
371 | 0x47401800 0x200>; | 404 | 0x47401800 0x200>; |
372 | reg-names = "mc", "control"; | 405 | reg-names = "mc", "control"; |
373 | interrupts = <19>; | 406 | interrupts = <19>; |
374 | interrupt-names = "mc"; | 407 | interrupt-names = "mc"; |
375 | dr_mode = "otg"; | 408 | dr_mode = "host"; |
409 | interface-type = <0>; | ||
410 | phys = <&usb_phy1>; | ||
411 | phy-names = "usb2-phy"; | ||
376 | mentor,multipoint = <1>; | 412 | mentor,multipoint = <1>; |
377 | mentor,num-eps = <16>; | 413 | mentor,num-eps = <16>; |
378 | mentor,ram-bits = <12>; | 414 | mentor,ram-bits = <12>; |
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 746cddb1b8f5..7563d7ce01bb 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts | |||
@@ -263,17 +263,15 @@ | |||
263 | 263 | ||
264 | dcan1_pins_default: dcan1_pins_default { | 264 | dcan1_pins_default: dcan1_pins_default { |
265 | pinctrl-single,pins = < | 265 | pinctrl-single,pins = < |
266 | 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ | 266 | 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */ |
267 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 267 | 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */ |
268 | 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */ | ||
269 | >; | 268 | >; |
270 | }; | 269 | }; |
271 | 270 | ||
272 | dcan1_pins_sleep: dcan1_pins_sleep { | 271 | dcan1_pins_sleep: dcan1_pins_sleep { |
273 | pinctrl-single,pins = < | 272 | pinctrl-single,pins = < |
274 | 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ | 273 | 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */ |
275 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 274 | 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */ |
276 | 0x418 (MUX_MODE15) /* wakeup0.off */ | ||
277 | >; | 275 | >; |
278 | }; | 276 | }; |
279 | }; | 277 | }; |
@@ -543,14 +541,6 @@ | |||
543 | }; | 541 | }; |
544 | }; | 542 | }; |
545 | 543 | ||
546 | &omap_dwc3_1 { | ||
547 | extcon = <&extcon_usb1>; | ||
548 | }; | ||
549 | |||
550 | &omap_dwc3_2 { | ||
551 | extcon = <&extcon_usb2>; | ||
552 | }; | ||
553 | |||
554 | &usb1 { | 544 | &usb1 { |
555 | dr_mode = "peripheral"; | 545 | dr_mode = "peripheral"; |
556 | pinctrl-names = "default"; | 546 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 5827fedafd43..c4659a979c41 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi | |||
@@ -249,8 +249,8 @@ | |||
249 | <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, | 249 | <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, |
250 | <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; | 250 | <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; |
251 | #dma-cells = <1>; | 251 | #dma-cells = <1>; |
252 | #dma-channels = <32>; | 252 | dma-channels = <32>; |
253 | #dma-requests = <127>; | 253 | dma-requests = <127>; |
254 | }; | 254 | }; |
255 | 255 | ||
256 | gpio1: gpio@4ae10000 { | 256 | gpio1: gpio@4ae10000 { |
@@ -1090,8 +1090,8 @@ | |||
1090 | <0x4A096800 0x40>; /* pll_ctrl */ | 1090 | <0x4A096800 0x40>; /* pll_ctrl */ |
1091 | reg-names = "phy_rx", "phy_tx", "pll_ctrl"; | 1091 | reg-names = "phy_rx", "phy_tx", "pll_ctrl"; |
1092 | ctrl-module = <&omap_control_sata>; | 1092 | ctrl-module = <&omap_control_sata>; |
1093 | clocks = <&sys_clkin1>; | 1093 | clocks = <&sys_clkin1>, <&sata_ref_clk>; |
1094 | clock-names = "sysclk"; | 1094 | clock-names = "sysclk", "refclk"; |
1095 | #phy-cells = <0>; | 1095 | #phy-cells = <0>; |
1096 | }; | 1096 | }; |
1097 | 1097 | ||
@@ -1111,7 +1111,6 @@ | |||
1111 | "wkupclk", "refclk", | 1111 | "wkupclk", "refclk", |
1112 | "div-clk", "phy-div"; | 1112 | "div-clk", "phy-div"; |
1113 | #phy-cells = <0>; | 1113 | #phy-cells = <0>; |
1114 | ti,hwmods = "pcie1-phy"; | ||
1115 | }; | 1114 | }; |
1116 | 1115 | ||
1117 | pcie2_phy: pciephy@4a095000 { | 1116 | pcie2_phy: pciephy@4a095000 { |
@@ -1130,7 +1129,6 @@ | |||
1130 | "wkupclk", "refclk", | 1129 | "wkupclk", "refclk", |
1131 | "div-clk", "phy-div"; | 1130 | "div-clk", "phy-div"; |
1132 | #phy-cells = <0>; | 1131 | #phy-cells = <0>; |
1133 | ti,hwmods = "pcie2-phy"; | ||
1134 | status = "disabled"; | 1132 | status = "disabled"; |
1135 | }; | 1133 | }; |
1136 | }; | 1134 | }; |
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index 4d8711713610..40ed539ce474 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ b/arch/arm/boot/dts/dra72-evm.dts | |||
@@ -119,17 +119,15 @@ | |||
119 | 119 | ||
120 | dcan1_pins_default: dcan1_pins_default { | 120 | dcan1_pins_default: dcan1_pins_default { |
121 | pinctrl-single,pins = < | 121 | pinctrl-single,pins = < |
122 | 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ | 122 | 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */ |
123 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 123 | 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */ |
124 | 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */ | ||
125 | >; | 124 | >; |
126 | }; | 125 | }; |
127 | 126 | ||
128 | dcan1_pins_sleep: dcan1_pins_sleep { | 127 | dcan1_pins_sleep: dcan1_pins_sleep { |
129 | pinctrl-single,pins = < | 128 | pinctrl-single,pins = < |
130 | 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ | 129 | 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */ |
131 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 130 | 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */ |
132 | 0x418 (MUX_MODE15) /* wakeup0.off */ | ||
133 | >; | 131 | >; |
134 | }; | 132 | }; |
135 | 133 | ||
@@ -380,14 +378,6 @@ | |||
380 | phy-supply = <&ldo4_reg>; | 378 | phy-supply = <&ldo4_reg>; |
381 | }; | 379 | }; |
382 | 380 | ||
383 | &omap_dwc3_1 { | ||
384 | extcon = <&extcon_usb1>; | ||
385 | }; | ||
386 | |||
387 | &omap_dwc3_2 { | ||
388 | extcon = <&extcon_usb2>; | ||
389 | }; | ||
390 | |||
391 | &usb1 { | 381 | &usb1 { |
392 | dr_mode = "peripheral"; | 382 | dr_mode = "peripheral"; |
393 | pinctrl-names = "default"; | 383 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index 4bdcbd61ce47..99b09a44e269 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi | |||
@@ -243,10 +243,18 @@ | |||
243 | ti,invert-autoidle-bit; | 243 | ti,invert-autoidle-bit; |
244 | }; | 244 | }; |
245 | 245 | ||
246 | dpll_core_byp_mux: dpll_core_byp_mux { | ||
247 | #clock-cells = <0>; | ||
248 | compatible = "ti,mux-clock"; | ||
249 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
250 | ti,bit-shift = <23>; | ||
251 | reg = <0x012c>; | ||
252 | }; | ||
253 | |||
246 | dpll_core_ck: dpll_core_ck { | 254 | dpll_core_ck: dpll_core_ck { |
247 | #clock-cells = <0>; | 255 | #clock-cells = <0>; |
248 | compatible = "ti,omap4-dpll-core-clock"; | 256 | compatible = "ti,omap4-dpll-core-clock"; |
249 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 257 | clocks = <&sys_clkin1>, <&dpll_core_byp_mux>; |
250 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; | 258 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; |
251 | }; | 259 | }; |
252 | 260 | ||
@@ -309,10 +317,18 @@ | |||
309 | clock-div = <1>; | 317 | clock-div = <1>; |
310 | }; | 318 | }; |
311 | 319 | ||
320 | dpll_dsp_byp_mux: dpll_dsp_byp_mux { | ||
321 | #clock-cells = <0>; | ||
322 | compatible = "ti,mux-clock"; | ||
323 | clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; | ||
324 | ti,bit-shift = <23>; | ||
325 | reg = <0x0240>; | ||
326 | }; | ||
327 | |||
312 | dpll_dsp_ck: dpll_dsp_ck { | 328 | dpll_dsp_ck: dpll_dsp_ck { |
313 | #clock-cells = <0>; | 329 | #clock-cells = <0>; |
314 | compatible = "ti,omap4-dpll-clock"; | 330 | compatible = "ti,omap4-dpll-clock"; |
315 | clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; | 331 | clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>; |
316 | reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>; | 332 | reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>; |
317 | }; | 333 | }; |
318 | 334 | ||
@@ -335,10 +351,18 @@ | |||
335 | clock-div = <1>; | 351 | clock-div = <1>; |
336 | }; | 352 | }; |
337 | 353 | ||
354 | dpll_iva_byp_mux: dpll_iva_byp_mux { | ||
355 | #clock-cells = <0>; | ||
356 | compatible = "ti,mux-clock"; | ||
357 | clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; | ||
358 | ti,bit-shift = <23>; | ||
359 | reg = <0x01ac>; | ||
360 | }; | ||
361 | |||
338 | dpll_iva_ck: dpll_iva_ck { | 362 | dpll_iva_ck: dpll_iva_ck { |
339 | #clock-cells = <0>; | 363 | #clock-cells = <0>; |
340 | compatible = "ti,omap4-dpll-clock"; | 364 | compatible = "ti,omap4-dpll-clock"; |
341 | clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; | 365 | clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>; |
342 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; | 366 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; |
343 | }; | 367 | }; |
344 | 368 | ||
@@ -361,10 +385,18 @@ | |||
361 | clock-div = <1>; | 385 | clock-div = <1>; |
362 | }; | 386 | }; |
363 | 387 | ||
388 | dpll_gpu_byp_mux: dpll_gpu_byp_mux { | ||
389 | #clock-cells = <0>; | ||
390 | compatible = "ti,mux-clock"; | ||
391 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
392 | ti,bit-shift = <23>; | ||
393 | reg = <0x02e4>; | ||
394 | }; | ||
395 | |||
364 | dpll_gpu_ck: dpll_gpu_ck { | 396 | dpll_gpu_ck: dpll_gpu_ck { |
365 | #clock-cells = <0>; | 397 | #clock-cells = <0>; |
366 | compatible = "ti,omap4-dpll-clock"; | 398 | compatible = "ti,omap4-dpll-clock"; |
367 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 399 | clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>; |
368 | reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>; | 400 | reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>; |
369 | }; | 401 | }; |
370 | 402 | ||
@@ -398,10 +430,18 @@ | |||
398 | clock-div = <1>; | 430 | clock-div = <1>; |
399 | }; | 431 | }; |
400 | 432 | ||
433 | dpll_ddr_byp_mux: dpll_ddr_byp_mux { | ||
434 | #clock-cells = <0>; | ||
435 | compatible = "ti,mux-clock"; | ||
436 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
437 | ti,bit-shift = <23>; | ||
438 | reg = <0x021c>; | ||
439 | }; | ||
440 | |||
401 | dpll_ddr_ck: dpll_ddr_ck { | 441 | dpll_ddr_ck: dpll_ddr_ck { |
402 | #clock-cells = <0>; | 442 | #clock-cells = <0>; |
403 | compatible = "ti,omap4-dpll-clock"; | 443 | compatible = "ti,omap4-dpll-clock"; |
404 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 444 | clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>; |
405 | reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>; | 445 | reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>; |
406 | }; | 446 | }; |
407 | 447 | ||
@@ -416,10 +456,18 @@ | |||
416 | ti,invert-autoidle-bit; | 456 | ti,invert-autoidle-bit; |
417 | }; | 457 | }; |
418 | 458 | ||
459 | dpll_gmac_byp_mux: dpll_gmac_byp_mux { | ||
460 | #clock-cells = <0>; | ||
461 | compatible = "ti,mux-clock"; | ||
462 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
463 | ti,bit-shift = <23>; | ||
464 | reg = <0x02b4>; | ||
465 | }; | ||
466 | |||
419 | dpll_gmac_ck: dpll_gmac_ck { | 467 | dpll_gmac_ck: dpll_gmac_ck { |
420 | #clock-cells = <0>; | 468 | #clock-cells = <0>; |
421 | compatible = "ti,omap4-dpll-clock"; | 469 | compatible = "ti,omap4-dpll-clock"; |
422 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 470 | clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>; |
423 | reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>; | 471 | reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>; |
424 | }; | 472 | }; |
425 | 473 | ||
@@ -482,10 +530,18 @@ | |||
482 | clock-div = <1>; | 530 | clock-div = <1>; |
483 | }; | 531 | }; |
484 | 532 | ||
533 | dpll_eve_byp_mux: dpll_eve_byp_mux { | ||
534 | #clock-cells = <0>; | ||
535 | compatible = "ti,mux-clock"; | ||
536 | clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; | ||
537 | ti,bit-shift = <23>; | ||
538 | reg = <0x0290>; | ||
539 | }; | ||
540 | |||
485 | dpll_eve_ck: dpll_eve_ck { | 541 | dpll_eve_ck: dpll_eve_ck { |
486 | #clock-cells = <0>; | 542 | #clock-cells = <0>; |
487 | compatible = "ti,omap4-dpll-clock"; | 543 | compatible = "ti,omap4-dpll-clock"; |
488 | clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; | 544 | clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>; |
489 | reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>; | 545 | reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>; |
490 | }; | 546 | }; |
491 | 547 | ||
@@ -1249,10 +1305,18 @@ | |||
1249 | clock-div = <1>; | 1305 | clock-div = <1>; |
1250 | }; | 1306 | }; |
1251 | 1307 | ||
1308 | dpll_per_byp_mux: dpll_per_byp_mux { | ||
1309 | #clock-cells = <0>; | ||
1310 | compatible = "ti,mux-clock"; | ||
1311 | clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; | ||
1312 | ti,bit-shift = <23>; | ||
1313 | reg = <0x014c>; | ||
1314 | }; | ||
1315 | |||
1252 | dpll_per_ck: dpll_per_ck { | 1316 | dpll_per_ck: dpll_per_ck { |
1253 | #clock-cells = <0>; | 1317 | #clock-cells = <0>; |
1254 | compatible = "ti,omap4-dpll-clock"; | 1318 | compatible = "ti,omap4-dpll-clock"; |
1255 | clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; | 1319 | clocks = <&sys_clkin1>, <&dpll_per_byp_mux>; |
1256 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; | 1320 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; |
1257 | }; | 1321 | }; |
1258 | 1322 | ||
@@ -1275,10 +1339,18 @@ | |||
1275 | clock-div = <1>; | 1339 | clock-div = <1>; |
1276 | }; | 1340 | }; |
1277 | 1341 | ||
1342 | dpll_usb_byp_mux: dpll_usb_byp_mux { | ||
1343 | #clock-cells = <0>; | ||
1344 | compatible = "ti,mux-clock"; | ||
1345 | clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; | ||
1346 | ti,bit-shift = <23>; | ||
1347 | reg = <0x018c>; | ||
1348 | }; | ||
1349 | |||
1278 | dpll_usb_ck: dpll_usb_ck { | 1350 | dpll_usb_ck: dpll_usb_ck { |
1279 | #clock-cells = <0>; | 1351 | #clock-cells = <0>; |
1280 | compatible = "ti,omap4-dpll-j-type-clock"; | 1352 | compatible = "ti,omap4-dpll-j-type-clock"; |
1281 | clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; | 1353 | clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>; |
1282 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; | 1354 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; |
1283 | }; | 1355 | }; |
1284 | 1356 | ||
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index 277b48b0b6f9..ac6b0ae42caf 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include "skeleton.dtsi" | 20 | #include "skeleton.dtsi" |
21 | #include "exynos4-cpu-thermal.dtsi" | ||
21 | #include <dt-bindings/clock/exynos3250.h> | 22 | #include <dt-bindings/clock/exynos3250.h> |
22 | 23 | ||
23 | / { | 24 | / { |
@@ -193,6 +194,7 @@ | |||
193 | interrupts = <0 216 0>; | 194 | interrupts = <0 216 0>; |
194 | clocks = <&cmu CLK_TMU_APBIF>; | 195 | clocks = <&cmu CLK_TMU_APBIF>; |
195 | clock-names = "tmu_apbif"; | 196 | clock-names = "tmu_apbif"; |
197 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
196 | status = "disabled"; | 198 | status = "disabled"; |
197 | }; | 199 | }; |
198 | 200 | ||
diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi new file mode 100644 index 000000000000..735cb2f10817 --- /dev/null +++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Device tree sources for Exynos4 thermal zone | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <dt-bindings/thermal/thermal.h> | ||
13 | |||
14 | / { | ||
15 | thermal-zones { | ||
16 | cpu_thermal: cpu-thermal { | ||
17 | thermal-sensors = <&tmu 0>; | ||
18 | polling-delay-passive = <0>; | ||
19 | polling-delay = <0>; | ||
20 | trips { | ||
21 | cpu_alert0: cpu-alert-0 { | ||
22 | temperature = <70000>; /* millicelsius */ | ||
23 | hysteresis = <10000>; /* millicelsius */ | ||
24 | type = "active"; | ||
25 | }; | ||
26 | cpu_alert1: cpu-alert-1 { | ||
27 | temperature = <95000>; /* millicelsius */ | ||
28 | hysteresis = <10000>; /* millicelsius */ | ||
29 | type = "active"; | ||
30 | }; | ||
31 | cpu_alert2: cpu-alert-2 { | ||
32 | temperature = <110000>; /* millicelsius */ | ||
33 | hysteresis = <10000>; /* millicelsius */ | ||
34 | type = "active"; | ||
35 | }; | ||
36 | cpu_crit0: cpu-crit-0 { | ||
37 | temperature = <120000>; /* millicelsius */ | ||
38 | hysteresis = <0>; /* millicelsius */ | ||
39 | type = "critical"; | ||
40 | }; | ||
41 | }; | ||
42 | cooling-maps { | ||
43 | map0 { | ||
44 | trip = <&cpu_alert0>; | ||
45 | }; | ||
46 | map1 { | ||
47 | trip = <&cpu_alert1>; | ||
48 | }; | ||
49 | }; | ||
50 | }; | ||
51 | }; | ||
52 | }; | ||
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index 76173cacd450..77ea547768f4 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi | |||
@@ -38,6 +38,7 @@ | |||
38 | i2c5 = &i2c_5; | 38 | i2c5 = &i2c_5; |
39 | i2c6 = &i2c_6; | 39 | i2c6 = &i2c_6; |
40 | i2c7 = &i2c_7; | 40 | i2c7 = &i2c_7; |
41 | i2c8 = &i2c_8; | ||
41 | csis0 = &csis_0; | 42 | csis0 = &csis_0; |
42 | csis1 = &csis_1; | 43 | csis1 = &csis_1; |
43 | fimc0 = &fimc_0; | 44 | fimc0 = &fimc_0; |
@@ -104,6 +105,7 @@ | |||
104 | compatible = "samsung,exynos4210-pd"; | 105 | compatible = "samsung,exynos4210-pd"; |
105 | reg = <0x10023C20 0x20>; | 106 | reg = <0x10023C20 0x20>; |
106 | #power-domain-cells = <0>; | 107 | #power-domain-cells = <0>; |
108 | power-domains = <&pd_lcd0>; | ||
107 | }; | 109 | }; |
108 | 110 | ||
109 | pd_cam: cam-power-domain@10023C00 { | 111 | pd_cam: cam-power-domain@10023C00 { |
@@ -554,6 +556,22 @@ | |||
554 | status = "disabled"; | 556 | status = "disabled"; |
555 | }; | 557 | }; |
556 | 558 | ||
559 | i2c_8: i2c@138E0000 { | ||
560 | #address-cells = <1>; | ||
561 | #size-cells = <0>; | ||
562 | compatible = "samsung,s3c2440-hdmiphy-i2c"; | ||
563 | reg = <0x138E0000 0x100>; | ||
564 | interrupts = <0 93 0>; | ||
565 | clocks = <&clock CLK_I2C_HDMI>; | ||
566 | clock-names = "i2c"; | ||
567 | status = "disabled"; | ||
568 | |||
569 | hdmi_i2c_phy: hdmiphy@38 { | ||
570 | compatible = "exynos4210-hdmiphy"; | ||
571 | reg = <0x38>; | ||
572 | }; | ||
573 | }; | ||
574 | |||
557 | spi_0: spi@13920000 { | 575 | spi_0: spi@13920000 { |
558 | compatible = "samsung,exynos4210-spi"; | 576 | compatible = "samsung,exynos4210-spi"; |
559 | reg = <0x13920000 0x100>; | 577 | reg = <0x13920000 0x100>; |
@@ -663,6 +681,33 @@ | |||
663 | status = "disabled"; | 681 | status = "disabled"; |
664 | }; | 682 | }; |
665 | 683 | ||
684 | tmu: tmu@100C0000 { | ||
685 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
686 | }; | ||
687 | |||
688 | hdmi: hdmi@12D00000 { | ||
689 | compatible = "samsung,exynos4210-hdmi"; | ||
690 | reg = <0x12D00000 0x70000>; | ||
691 | interrupts = <0 92 0>; | ||
692 | clock-names = "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy", | ||
693 | "mout_hdmi"; | ||
694 | clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, | ||
695 | <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, | ||
696 | <&clock CLK_MOUT_HDMI>; | ||
697 | phy = <&hdmi_i2c_phy>; | ||
698 | power-domains = <&pd_tv>; | ||
699 | samsung,syscon-phandle = <&pmu_system_controller>; | ||
700 | status = "disabled"; | ||
701 | }; | ||
702 | |||
703 | mixer: mixer@12C10000 { | ||
704 | compatible = "samsung,exynos4210-mixer"; | ||
705 | interrupts = <0 91 0>; | ||
706 | reg = <0x12C10000 0x2100>, <0x12c00000 0x300>; | ||
707 | power-domains = <&pd_tv>; | ||
708 | status = "disabled"; | ||
709 | }; | ||
710 | |||
666 | ppmu_dmc0: ppmu_dmc0@106a0000 { | 711 | ppmu_dmc0: ppmu_dmc0@106a0000 { |
667 | compatible = "samsung,exynos-ppmu"; | 712 | compatible = "samsung,exynos-ppmu"; |
668 | reg = <0x106a0000 0x2000>; | 713 | reg = <0x106a0000 0x2000>; |
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts index 3d6652a4b6cb..32c5fd8f6269 100644 --- a/arch/arm/boot/dts/exynos4210-trats.dts +++ b/arch/arm/boot/dts/exynos4210-trats.dts | |||
@@ -426,6 +426,25 @@ | |||
426 | status = "okay"; | 426 | status = "okay"; |
427 | }; | 427 | }; |
428 | 428 | ||
429 | tmu@100C0000 { | ||
430 | status = "okay"; | ||
431 | }; | ||
432 | |||
433 | thermal-zones { | ||
434 | cpu_thermal: cpu-thermal { | ||
435 | cooling-maps { | ||
436 | map0 { | ||
437 | /* Corresponds to 800MHz at freq_table */ | ||
438 | cooling-device = <&cpu0 2 2>; | ||
439 | }; | ||
440 | map1 { | ||
441 | /* Corresponds to 200MHz at freq_table */ | ||
442 | cooling-device = <&cpu0 4 4>; | ||
443 | }; | ||
444 | }; | ||
445 | }; | ||
446 | }; | ||
447 | |||
429 | camera { | 448 | camera { |
430 | pinctrl-names = "default"; | 449 | pinctrl-names = "default"; |
431 | pinctrl-0 = <>; | 450 | pinctrl-0 = <>; |
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts index b57e6b82ea20..d4f2b11319dd 100644 --- a/arch/arm/boot/dts/exynos4210-universal_c210.dts +++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts | |||
@@ -505,6 +505,63 @@ | |||
505 | assigned-clock-rates = <0>, <160000000>; | 505 | assigned-clock-rates = <0>, <160000000>; |
506 | }; | 506 | }; |
507 | }; | 507 | }; |
508 | |||
509 | hdmi_en: voltage-regulator-hdmi-5v { | ||
510 | compatible = "regulator-fixed"; | ||
511 | regulator-name = "HDMI_5V"; | ||
512 | regulator-min-microvolt = <5000000>; | ||
513 | regulator-max-microvolt = <5000000>; | ||
514 | gpio = <&gpe0 1 0>; | ||
515 | enable-active-high; | ||
516 | }; | ||
517 | |||
518 | hdmi_ddc: i2c-ddc { | ||
519 | compatible = "i2c-gpio"; | ||
520 | gpios = <&gpe4 2 0 &gpe4 3 0>; | ||
521 | i2c-gpio,delay-us = <100>; | ||
522 | #address-cells = <1>; | ||
523 | #size-cells = <0>; | ||
524 | |||
525 | pinctrl-0 = <&i2c_ddc_bus>; | ||
526 | pinctrl-names = "default"; | ||
527 | status = "okay"; | ||
528 | }; | ||
529 | |||
530 | mixer@12C10000 { | ||
531 | status = "okay"; | ||
532 | }; | ||
533 | |||
534 | hdmi@12D00000 { | ||
535 | hpd-gpio = <&gpx3 7 0>; | ||
536 | pinctrl-names = "default"; | ||
537 | pinctrl-0 = <&hdmi_hpd>; | ||
538 | hdmi-en-supply = <&hdmi_en>; | ||
539 | vdd-supply = <&ldo3_reg>; | ||
540 | vdd_osc-supply = <&ldo4_reg>; | ||
541 | vdd_pll-supply = <&ldo3_reg>; | ||
542 | ddc = <&hdmi_ddc>; | ||
543 | status = "okay"; | ||
544 | }; | ||
545 | |||
546 | i2c@138E0000 { | ||
547 | status = "okay"; | ||
548 | }; | ||
549 | }; | ||
550 | |||
551 | &pinctrl_1 { | ||
552 | hdmi_hpd: hdmi-hpd { | ||
553 | samsung,pins = "gpx3-7"; | ||
554 | samsung,pin-pud = <0>; | ||
555 | }; | ||
556 | }; | ||
557 | |||
558 | &pinctrl_0 { | ||
559 | i2c_ddc_bus: i2c-ddc-bus { | ||
560 | samsung,pins = "gpe4-2", "gpe4-3"; | ||
561 | samsung,pin-function = <2>; | ||
562 | samsung,pin-pud = <3>; | ||
563 | samsung,pin-drv = <0>; | ||
564 | }; | ||
508 | }; | 565 | }; |
509 | 566 | ||
510 | &mdma1 { | 567 | &mdma1 { |
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi index 67c832c9dcf1..be89f83f70e7 100644 --- a/arch/arm/boot/dts/exynos4210.dtsi +++ b/arch/arm/boot/dts/exynos4210.dtsi | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include "exynos4.dtsi" | 22 | #include "exynos4.dtsi" |
23 | #include "exynos4210-pinctrl.dtsi" | 23 | #include "exynos4210-pinctrl.dtsi" |
24 | #include "exynos4-cpu-thermal.dtsi" | ||
24 | 25 | ||
25 | / { | 26 | / { |
26 | compatible = "samsung,exynos4210", "samsung,exynos4"; | 27 | compatible = "samsung,exynos4210", "samsung,exynos4"; |
@@ -35,10 +36,13 @@ | |||
35 | #address-cells = <1>; | 36 | #address-cells = <1>; |
36 | #size-cells = <0>; | 37 | #size-cells = <0>; |
37 | 38 | ||
38 | cpu@900 { | 39 | cpu0: cpu@900 { |
39 | device_type = "cpu"; | 40 | device_type = "cpu"; |
40 | compatible = "arm,cortex-a9"; | 41 | compatible = "arm,cortex-a9"; |
41 | reg = <0x900>; | 42 | reg = <0x900>; |
43 | cooling-min-level = <4>; | ||
44 | cooling-max-level = <2>; | ||
45 | #cooling-cells = <2>; /* min followed by max */ | ||
42 | }; | 46 | }; |
43 | 47 | ||
44 | cpu@901 { | 48 | cpu@901 { |
@@ -153,16 +157,38 @@ | |||
153 | reg = <0x03860000 0x1000>; | 157 | reg = <0x03860000 0x1000>; |
154 | }; | 158 | }; |
155 | 159 | ||
156 | tmu@100C0000 { | 160 | tmu: tmu@100C0000 { |
157 | compatible = "samsung,exynos4210-tmu"; | 161 | compatible = "samsung,exynos4210-tmu"; |
158 | interrupt-parent = <&combiner>; | 162 | interrupt-parent = <&combiner>; |
159 | reg = <0x100C0000 0x100>; | 163 | reg = <0x100C0000 0x100>; |
160 | interrupts = <2 4>; | 164 | interrupts = <2 4>; |
161 | clocks = <&clock CLK_TMU_APBIF>; | 165 | clocks = <&clock CLK_TMU_APBIF>; |
162 | clock-names = "tmu_apbif"; | 166 | clock-names = "tmu_apbif"; |
167 | samsung,tmu_gain = <15>; | ||
168 | samsung,tmu_reference_voltage = <7>; | ||
163 | status = "disabled"; | 169 | status = "disabled"; |
164 | }; | 170 | }; |
165 | 171 | ||
172 | thermal-zones { | ||
173 | cpu_thermal: cpu-thermal { | ||
174 | polling-delay-passive = <0>; | ||
175 | polling-delay = <0>; | ||
176 | thermal-sensors = <&tmu 0>; | ||
177 | |||
178 | trips { | ||
179 | cpu_alert0: cpu-alert-0 { | ||
180 | temperature = <85000>; /* millicelsius */ | ||
181 | }; | ||
182 | cpu_alert1: cpu-alert-1 { | ||
183 | temperature = <100000>; /* millicelsius */ | ||
184 | }; | ||
185 | cpu_alert2: cpu-alert-2 { | ||
186 | temperature = <110000>; /* millicelsius */ | ||
187 | }; | ||
188 | }; | ||
189 | }; | ||
190 | }; | ||
191 | |||
166 | g2d@12800000 { | 192 | g2d@12800000 { |
167 | compatible = "samsung,s5pv210-g2d"; | 193 | compatible = "samsung,s5pv210-g2d"; |
168 | reg = <0x12800000 0x1000>; | 194 | reg = <0x12800000 0x1000>; |
@@ -203,6 +229,14 @@ | |||
203 | }; | 229 | }; |
204 | }; | 230 | }; |
205 | 231 | ||
232 | mixer: mixer@12C10000 { | ||
233 | clock-names = "mixer", "hdmi", "sclk_hdmi", "vp", "mout_mixer", | ||
234 | "sclk_mixer"; | ||
235 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, | ||
236 | <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>, | ||
237 | <&clock CLK_MOUT_MIXER>, <&clock CLK_SCLK_MIXER>; | ||
238 | }; | ||
239 | |||
206 | ppmu_lcd1: ppmu_lcd1@12240000 { | 240 | ppmu_lcd1: ppmu_lcd1@12240000 { |
207 | compatible = "samsung,exynos-ppmu"; | 241 | compatible = "samsung,exynos-ppmu"; |
208 | reg = <0x12240000 0x2000>; | 242 | reg = <0x12240000 0x2000>; |
diff --git a/arch/arm/boot/dts/exynos4212.dtsi b/arch/arm/boot/dts/exynos4212.dtsi index dd0a43ec56da..5be03288f1ee 100644 --- a/arch/arm/boot/dts/exynos4212.dtsi +++ b/arch/arm/boot/dts/exynos4212.dtsi | |||
@@ -26,10 +26,13 @@ | |||
26 | #address-cells = <1>; | 26 | #address-cells = <1>; |
27 | #size-cells = <0>; | 27 | #size-cells = <0>; |
28 | 28 | ||
29 | cpu@A00 { | 29 | cpu0: cpu@A00 { |
30 | device_type = "cpu"; | 30 | device_type = "cpu"; |
31 | compatible = "arm,cortex-a9"; | 31 | compatible = "arm,cortex-a9"; |
32 | reg = <0xA00>; | 32 | reg = <0xA00>; |
33 | cooling-min-level = <13>; | ||
34 | cooling-max-level = <7>; | ||
35 | #cooling-cells = <2>; /* min followed by max */ | ||
33 | }; | 36 | }; |
34 | 37 | ||
35 | cpu@A01 { | 38 | cpu@A01 { |
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index de80b5bba204..adb4f6a97a1d 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi | |||
@@ -249,6 +249,20 @@ | |||
249 | regulator-always-on; | 249 | regulator-always-on; |
250 | }; | 250 | }; |
251 | 251 | ||
252 | ldo8_reg: ldo@8 { | ||
253 | regulator-compatible = "LDO8"; | ||
254 | regulator-name = "VDD10_HDMI_1.0V"; | ||
255 | regulator-min-microvolt = <1000000>; | ||
256 | regulator-max-microvolt = <1000000>; | ||
257 | }; | ||
258 | |||
259 | ldo10_reg: ldo@10 { | ||
260 | regulator-compatible = "LDO10"; | ||
261 | regulator-name = "VDDQ_MIPIHSI_1.8V"; | ||
262 | regulator-min-microvolt = <1800000>; | ||
263 | regulator-max-microvolt = <1800000>; | ||
264 | }; | ||
265 | |||
252 | ldo11_reg: LDO11 { | 266 | ldo11_reg: LDO11 { |
253 | regulator-name = "VDD18_ABB1_1.8V"; | 267 | regulator-name = "VDD18_ABB1_1.8V"; |
254 | regulator-min-microvolt = <1800000>; | 268 | regulator-min-microvolt = <1800000>; |
@@ -411,6 +425,51 @@ | |||
411 | ehci: ehci@12580000 { | 425 | ehci: ehci@12580000 { |
412 | status = "okay"; | 426 | status = "okay"; |
413 | }; | 427 | }; |
428 | |||
429 | tmu@100C0000 { | ||
430 | vtmu-supply = <&ldo10_reg>; | ||
431 | status = "okay"; | ||
432 | }; | ||
433 | |||
434 | thermal-zones { | ||
435 | cpu_thermal: cpu-thermal { | ||
436 | cooling-maps { | ||
437 | map0 { | ||
438 | /* Corresponds to 800MHz at freq_table */ | ||
439 | cooling-device = <&cpu0 7 7>; | ||
440 | }; | ||
441 | map1 { | ||
442 | /* Corresponds to 200MHz at freq_table */ | ||
443 | cooling-device = <&cpu0 13 13>; | ||
444 | }; | ||
445 | }; | ||
446 | }; | ||
447 | }; | ||
448 | |||
449 | mixer: mixer@12C10000 { | ||
450 | status = "okay"; | ||
451 | }; | ||
452 | |||
453 | hdmi@12D00000 { | ||
454 | hpd-gpio = <&gpx3 7 0>; | ||
455 | pinctrl-names = "default"; | ||
456 | pinctrl-0 = <&hdmi_hpd>; | ||
457 | vdd-supply = <&ldo8_reg>; | ||
458 | vdd_osc-supply = <&ldo10_reg>; | ||
459 | vdd_pll-supply = <&ldo8_reg>; | ||
460 | ddc = <&hdmi_ddc>; | ||
461 | status = "okay"; | ||
462 | }; | ||
463 | |||
464 | hdmi_ddc: i2c@13880000 { | ||
465 | status = "okay"; | ||
466 | pinctrl-names = "default"; | ||
467 | pinctrl-0 = <&i2c2_bus>; | ||
468 | }; | ||
469 | |||
470 | i2c@138E0000 { | ||
471 | status = "okay"; | ||
472 | }; | ||
414 | }; | 473 | }; |
415 | 474 | ||
416 | &pinctrl_1 { | 475 | &pinctrl_1 { |
@@ -425,4 +484,9 @@ | |||
425 | samsung,pin-pud = <0>; | 484 | samsung,pin-pud = <0>; |
426 | samsung,pin-drv = <0>; | 485 | samsung,pin-drv = <0>; |
427 | }; | 486 | }; |
487 | |||
488 | hdmi_hpd: hdmi-hpd { | ||
489 | samsung,pins = "gpx3-7"; | ||
490 | samsung,pin-pud = <1>; | ||
491 | }; | ||
428 | }; | 492 | }; |
diff --git a/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi new file mode 100644 index 000000000000..e3f7934d19d0 --- /dev/null +++ b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Device tree sources for Exynos4412 TMU sensor configuration | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <dt-bindings/thermal/thermal_exynos.h> | ||
13 | |||
14 | #thermal-sensor-cells = <0>; | ||
15 | samsung,tmu_gain = <8>; | ||
16 | samsung,tmu_reference_voltage = <16>; | ||
17 | samsung,tmu_noise_cancel_mode = <4>; | ||
18 | samsung,tmu_efuse_value = <55>; | ||
19 | samsung,tmu_min_efuse_value = <40>; | ||
20 | samsung,tmu_max_efuse_value = <100>; | ||
21 | samsung,tmu_first_point_trim = <25>; | ||
22 | samsung,tmu_second_point_trim = <85>; | ||
23 | samsung,tmu_default_temp_offset = <50>; | ||
24 | samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>; | ||
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index 21f748083586..173ffa479ad3 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts | |||
@@ -927,6 +927,21 @@ | |||
927 | pulldown-ohm = <100000>; /* 100K */ | 927 | pulldown-ohm = <100000>; /* 100K */ |
928 | io-channels = <&adc 2>; /* Battery temperature */ | 928 | io-channels = <&adc 2>; /* Battery temperature */ |
929 | }; | 929 | }; |
930 | |||
931 | thermal-zones { | ||
932 | cpu_thermal: cpu-thermal { | ||
933 | cooling-maps { | ||
934 | map0 { | ||
935 | /* Corresponds to 800MHz at freq_table */ | ||
936 | cooling-device = <&cpu0 7 7>; | ||
937 | }; | ||
938 | map1 { | ||
939 | /* Corresponds to 200MHz at freq_table */ | ||
940 | cooling-device = <&cpu0 13 13>; | ||
941 | }; | ||
942 | }; | ||
943 | }; | ||
944 | }; | ||
930 | }; | 945 | }; |
931 | 946 | ||
932 | &pmu_system_controller { | 947 | &pmu_system_controller { |
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi index 0f6ec93bb1d8..68ad43b391ae 100644 --- a/arch/arm/boot/dts/exynos4412.dtsi +++ b/arch/arm/boot/dts/exynos4412.dtsi | |||
@@ -26,10 +26,13 @@ | |||
26 | #address-cells = <1>; | 26 | #address-cells = <1>; |
27 | #size-cells = <0>; | 27 | #size-cells = <0>; |
28 | 28 | ||
29 | cpu@A00 { | 29 | cpu0: cpu@A00 { |
30 | device_type = "cpu"; | 30 | device_type = "cpu"; |
31 | compatible = "arm,cortex-a9"; | 31 | compatible = "arm,cortex-a9"; |
32 | reg = <0xA00>; | 32 | reg = <0xA00>; |
33 | cooling-min-level = <13>; | ||
34 | cooling-max-level = <7>; | ||
35 | #cooling-cells = <2>; /* min followed by max */ | ||
33 | }; | 36 | }; |
34 | 37 | ||
35 | cpu@A01 { | 38 | cpu@A01 { |
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi index f5e0ae780d6c..6a6abe14fd9b 100644 --- a/arch/arm/boot/dts/exynos4x12.dtsi +++ b/arch/arm/boot/dts/exynos4x12.dtsi | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include "exynos4.dtsi" | 20 | #include "exynos4.dtsi" |
21 | #include "exynos4x12-pinctrl.dtsi" | 21 | #include "exynos4x12-pinctrl.dtsi" |
22 | #include "exynos4-cpu-thermal.dtsi" | ||
22 | 23 | ||
23 | / { | 24 | / { |
24 | aliases { | 25 | aliases { |
@@ -297,4 +298,15 @@ | |||
297 | clock-names = "tmu_apbif"; | 298 | clock-names = "tmu_apbif"; |
298 | status = "disabled"; | 299 | status = "disabled"; |
299 | }; | 300 | }; |
301 | |||
302 | hdmi: hdmi@12D00000 { | ||
303 | compatible = "samsung,exynos4212-hdmi"; | ||
304 | }; | ||
305 | |||
306 | mixer: mixer@12C10000 { | ||
307 | compatible = "samsung,exynos4212-mixer"; | ||
308 | clock-names = "mixer", "hdmi", "sclk_hdmi", "vp"; | ||
309 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, | ||
310 | <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>; | ||
311 | }; | ||
300 | }; | 312 | }; |
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 9bb1b0b738f5..adbde1adad95 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <dt-bindings/clock/exynos5250.h> | 20 | #include <dt-bindings/clock/exynos5250.h> |
21 | #include "exynos5.dtsi" | 21 | #include "exynos5.dtsi" |
22 | #include "exynos5250-pinctrl.dtsi" | 22 | #include "exynos5250-pinctrl.dtsi" |
23 | 23 | #include "exynos4-cpu-thermal.dtsi" | |
24 | #include <dt-bindings/clock/exynos-audss-clk.h> | 24 | #include <dt-bindings/clock/exynos-audss-clk.h> |
25 | 25 | ||
26 | / { | 26 | / { |
@@ -58,11 +58,14 @@ | |||
58 | #address-cells = <1>; | 58 | #address-cells = <1>; |
59 | #size-cells = <0>; | 59 | #size-cells = <0>; |
60 | 60 | ||
61 | cpu@0 { | 61 | cpu0: cpu@0 { |
62 | device_type = "cpu"; | 62 | device_type = "cpu"; |
63 | compatible = "arm,cortex-a15"; | 63 | compatible = "arm,cortex-a15"; |
64 | reg = <0>; | 64 | reg = <0>; |
65 | clock-frequency = <1700000000>; | 65 | clock-frequency = <1700000000>; |
66 | cooling-min-level = <15>; | ||
67 | cooling-max-level = <9>; | ||
68 | #cooling-cells = <2>; /* min followed by max */ | ||
66 | }; | 69 | }; |
67 | cpu@1 { | 70 | cpu@1 { |
68 | device_type = "cpu"; | 71 | device_type = "cpu"; |
@@ -102,6 +105,12 @@ | |||
102 | #power-domain-cells = <0>; | 105 | #power-domain-cells = <0>; |
103 | }; | 106 | }; |
104 | 107 | ||
108 | pd_disp1: disp1-power-domain@100440A0 { | ||
109 | compatible = "samsung,exynos4210-pd"; | ||
110 | reg = <0x100440A0 0x20>; | ||
111 | #power-domain-cells = <0>; | ||
112 | }; | ||
113 | |||
105 | clock: clock-controller@10010000 { | 114 | clock: clock-controller@10010000 { |
106 | compatible = "samsung,exynos5250-clock"; | 115 | compatible = "samsung,exynos5250-clock"; |
107 | reg = <0x10010000 0x30000>; | 116 | reg = <0x10010000 0x30000>; |
@@ -235,12 +244,32 @@ | |||
235 | status = "disabled"; | 244 | status = "disabled"; |
236 | }; | 245 | }; |
237 | 246 | ||
238 | tmu@10060000 { | 247 | tmu: tmu@10060000 { |
239 | compatible = "samsung,exynos5250-tmu"; | 248 | compatible = "samsung,exynos5250-tmu"; |
240 | reg = <0x10060000 0x100>; | 249 | reg = <0x10060000 0x100>; |
241 | interrupts = <0 65 0>; | 250 | interrupts = <0 65 0>; |
242 | clocks = <&clock CLK_TMU>; | 251 | clocks = <&clock CLK_TMU>; |
243 | clock-names = "tmu_apbif"; | 252 | clock-names = "tmu_apbif"; |
253 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
254 | }; | ||
255 | |||
256 | thermal-zones { | ||
257 | cpu_thermal: cpu-thermal { | ||
258 | polling-delay-passive = <0>; | ||
259 | polling-delay = <0>; | ||
260 | thermal-sensors = <&tmu 0>; | ||
261 | |||
262 | cooling-maps { | ||
263 | map0 { | ||
264 | /* Corresponds to 800MHz at freq_table */ | ||
265 | cooling-device = <&cpu0 9 9>; | ||
266 | }; | ||
267 | map1 { | ||
268 | /* Corresponds to 200MHz at freq_table */ | ||
269 | cooling-device = <&cpu0 15 15>; | ||
270 | }; | ||
271 | }; | ||
272 | }; | ||
244 | }; | 273 | }; |
245 | 274 | ||
246 | serial@12C00000 { | 275 | serial@12C00000 { |
@@ -719,6 +748,7 @@ | |||
719 | hdmi: hdmi { | 748 | hdmi: hdmi { |
720 | compatible = "samsung,exynos4212-hdmi"; | 749 | compatible = "samsung,exynos4212-hdmi"; |
721 | reg = <0x14530000 0x70000>; | 750 | reg = <0x14530000 0x70000>; |
751 | power-domains = <&pd_disp1>; | ||
722 | interrupts = <0 95 0>; | 752 | interrupts = <0 95 0>; |
723 | clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, | 753 | clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, |
724 | <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, | 754 | <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, |
@@ -731,9 +761,11 @@ | |||
731 | mixer { | 761 | mixer { |
732 | compatible = "samsung,exynos5250-mixer"; | 762 | compatible = "samsung,exynos5250-mixer"; |
733 | reg = <0x14450000 0x10000>; | 763 | reg = <0x14450000 0x10000>; |
764 | power-domains = <&pd_disp1>; | ||
734 | interrupts = <0 94 0>; | 765 | interrupts = <0 94 0>; |
735 | clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; | 766 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, |
736 | clock-names = "mixer", "sclk_hdmi"; | 767 | <&clock CLK_SCLK_HDMI>; |
768 | clock-names = "mixer", "hdmi", "sclk_hdmi"; | ||
737 | }; | 769 | }; |
738 | 770 | ||
739 | dp_phy: video-phy@10040720 { | 771 | dp_phy: video-phy@10040720 { |
@@ -743,6 +775,7 @@ | |||
743 | }; | 775 | }; |
744 | 776 | ||
745 | dp: dp-controller@145B0000 { | 777 | dp: dp-controller@145B0000 { |
778 | power-domains = <&pd_disp1>; | ||
746 | clocks = <&clock CLK_DP>; | 779 | clocks = <&clock CLK_DP>; |
747 | clock-names = "dp"; | 780 | clock-names = "dp"; |
748 | phys = <&dp_phy>; | 781 | phys = <&dp_phy>; |
@@ -750,6 +783,7 @@ | |||
750 | }; | 783 | }; |
751 | 784 | ||
752 | fimd: fimd@14400000 { | 785 | fimd: fimd@14400000 { |
786 | power-domains = <&pd_disp1>; | ||
753 | clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>; | 787 | clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>; |
754 | clock-names = "sclk_fimd", "fimd"; | 788 | clock-names = "sclk_fimd", "fimd"; |
755 | }; | 789 | }; |
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi new file mode 100644 index 000000000000..5d31fc140823 --- /dev/null +++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Device tree sources for default Exynos5420 thermal zone definition | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | polling-delay-passive = <0>; | ||
13 | polling-delay = <0>; | ||
14 | trips { | ||
15 | cpu-alert-0 { | ||
16 | temperature = <85000>; /* millicelsius */ | ||
17 | hysteresis = <10000>; /* millicelsius */ | ||
18 | type = "active"; | ||
19 | }; | ||
20 | cpu-alert-1 { | ||
21 | temperature = <103000>; /* millicelsius */ | ||
22 | hysteresis = <10000>; /* millicelsius */ | ||
23 | type = "active"; | ||
24 | }; | ||
25 | cpu-alert-2 { | ||
26 | temperature = <110000>; /* millicelsius */ | ||
27 | hysteresis = <10000>; /* millicelsius */ | ||
28 | type = "active"; | ||
29 | }; | ||
30 | cpu-crit-0 { | ||
31 | temperature = <120000>; /* millicelsius */ | ||
32 | hysteresis = <0>; /* millicelsius */ | ||
33 | type = "critical"; | ||
34 | }; | ||
35 | }; | ||
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index 9dc2e9773b30..c0e98cf3514f 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi | |||
@@ -740,8 +740,9 @@ | |||
740 | compatible = "samsung,exynos5420-mixer"; | 740 | compatible = "samsung,exynos5420-mixer"; |
741 | reg = <0x14450000 0x10000>; | 741 | reg = <0x14450000 0x10000>; |
742 | interrupts = <0 94 0>; | 742 | interrupts = <0 94 0>; |
743 | clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; | 743 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, |
744 | clock-names = "mixer", "sclk_hdmi"; | 744 | <&clock CLK_SCLK_HDMI>; |
745 | clock-names = "mixer", "hdmi", "sclk_hdmi"; | ||
745 | power-domains = <&disp_pd>; | 746 | power-domains = <&disp_pd>; |
746 | }; | 747 | }; |
747 | 748 | ||
@@ -782,6 +783,7 @@ | |||
782 | interrupts = <0 65 0>; | 783 | interrupts = <0 65 0>; |
783 | clocks = <&clock CLK_TMU>; | 784 | clocks = <&clock CLK_TMU>; |
784 | clock-names = "tmu_apbif"; | 785 | clock-names = "tmu_apbif"; |
786 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
785 | }; | 787 | }; |
786 | 788 | ||
787 | tmu_cpu1: tmu@10064000 { | 789 | tmu_cpu1: tmu@10064000 { |
@@ -790,6 +792,7 @@ | |||
790 | interrupts = <0 183 0>; | 792 | interrupts = <0 183 0>; |
791 | clocks = <&clock CLK_TMU>; | 793 | clocks = <&clock CLK_TMU>; |
792 | clock-names = "tmu_apbif"; | 794 | clock-names = "tmu_apbif"; |
795 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
793 | }; | 796 | }; |
794 | 797 | ||
795 | tmu_cpu2: tmu@10068000 { | 798 | tmu_cpu2: tmu@10068000 { |
@@ -798,6 +801,7 @@ | |||
798 | interrupts = <0 184 0>; | 801 | interrupts = <0 184 0>; |
799 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU>; | 802 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU>; |
800 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 803 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
804 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
801 | }; | 805 | }; |
802 | 806 | ||
803 | tmu_cpu3: tmu@1006c000 { | 807 | tmu_cpu3: tmu@1006c000 { |
@@ -806,6 +810,7 @@ | |||
806 | interrupts = <0 185 0>; | 810 | interrupts = <0 185 0>; |
807 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>; | 811 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>; |
808 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 812 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
813 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
809 | }; | 814 | }; |
810 | 815 | ||
811 | tmu_gpu: tmu@100a0000 { | 816 | tmu_gpu: tmu@100a0000 { |
@@ -814,6 +819,30 @@ | |||
814 | interrupts = <0 215 0>; | 819 | interrupts = <0 215 0>; |
815 | clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>; | 820 | clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>; |
816 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 821 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
822 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
823 | }; | ||
824 | |||
825 | thermal-zones { | ||
826 | cpu0_thermal: cpu0-thermal { | ||
827 | thermal-sensors = <&tmu_cpu0>; | ||
828 | #include "exynos5420-trip-points.dtsi" | ||
829 | }; | ||
830 | cpu1_thermal: cpu1-thermal { | ||
831 | thermal-sensors = <&tmu_cpu1>; | ||
832 | #include "exynos5420-trip-points.dtsi" | ||
833 | }; | ||
834 | cpu2_thermal: cpu2-thermal { | ||
835 | thermal-sensors = <&tmu_cpu2>; | ||
836 | #include "exynos5420-trip-points.dtsi" | ||
837 | }; | ||
838 | cpu3_thermal: cpu3-thermal { | ||
839 | thermal-sensors = <&tmu_cpu3>; | ||
840 | #include "exynos5420-trip-points.dtsi" | ||
841 | }; | ||
842 | gpu_thermal: gpu-thermal { | ||
843 | thermal-sensors = <&tmu_gpu>; | ||
844 | #include "exynos5420-trip-points.dtsi" | ||
845 | }; | ||
817 | }; | 846 | }; |
818 | 847 | ||
819 | watchdog: watchdog@101D0000 { | 848 | watchdog: watchdog@101D0000 { |
diff --git a/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi new file mode 100644 index 000000000000..7b2fba0ae92b --- /dev/null +++ b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Device tree sources for Exynos5440 TMU sensor configuration | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <dt-bindings/thermal/thermal_exynos.h> | ||
13 | |||
14 | #thermal-sensor-cells = <0>; | ||
15 | samsung,tmu_gain = <5>; | ||
16 | samsung,tmu_reference_voltage = <16>; | ||
17 | samsung,tmu_noise_cancel_mode = <4>; | ||
18 | samsung,tmu_efuse_value = <0x5d2d>; | ||
19 | samsung,tmu_min_efuse_value = <16>; | ||
20 | samsung,tmu_max_efuse_value = <76>; | ||
21 | samsung,tmu_first_point_trim = <25>; | ||
22 | samsung,tmu_second_point_trim = <70>; | ||
23 | samsung,tmu_default_temp_offset = <25>; | ||
24 | samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>; | ||
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi new file mode 100644 index 000000000000..48adfa8f4300 --- /dev/null +++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Device tree sources for default Exynos5440 thermal zone definition | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | polling-delay-passive = <0>; | ||
13 | polling-delay = <0>; | ||
14 | trips { | ||
15 | cpu-alert-0 { | ||
16 | temperature = <100000>; /* millicelsius */ | ||
17 | hysteresis = <0>; /* millicelsius */ | ||
18 | type = "active"; | ||
19 | }; | ||
20 | cpu-crit-0 { | ||
21 | temperature = <105000>; /* millicelsius */ | ||
22 | hysteresis = <0>; /* millicelsius */ | ||
23 | type = "critical"; | ||
24 | }; | ||
25 | }; | ||
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi index 8f3373cd7b87..59d9416b3b03 100644 --- a/arch/arm/boot/dts/exynos5440.dtsi +++ b/arch/arm/boot/dts/exynos5440.dtsi | |||
@@ -219,6 +219,7 @@ | |||
219 | interrupts = <0 58 0>; | 219 | interrupts = <0 58 0>; |
220 | clocks = <&clock CLK_B_125>; | 220 | clocks = <&clock CLK_B_125>; |
221 | clock-names = "tmu_apbif"; | 221 | clock-names = "tmu_apbif"; |
222 | #include "exynos5440-tmu-sensor-conf.dtsi" | ||
222 | }; | 223 | }; |
223 | 224 | ||
224 | tmuctrl_1: tmuctrl@16011C { | 225 | tmuctrl_1: tmuctrl@16011C { |
@@ -227,6 +228,7 @@ | |||
227 | interrupts = <0 58 0>; | 228 | interrupts = <0 58 0>; |
228 | clocks = <&clock CLK_B_125>; | 229 | clocks = <&clock CLK_B_125>; |
229 | clock-names = "tmu_apbif"; | 230 | clock-names = "tmu_apbif"; |
231 | #include "exynos5440-tmu-sensor-conf.dtsi" | ||
230 | }; | 232 | }; |
231 | 233 | ||
232 | tmuctrl_2: tmuctrl@160120 { | 234 | tmuctrl_2: tmuctrl@160120 { |
@@ -235,6 +237,22 @@ | |||
235 | interrupts = <0 58 0>; | 237 | interrupts = <0 58 0>; |
236 | clocks = <&clock CLK_B_125>; | 238 | clocks = <&clock CLK_B_125>; |
237 | clock-names = "tmu_apbif"; | 239 | clock-names = "tmu_apbif"; |
240 | #include "exynos5440-tmu-sensor-conf.dtsi" | ||
241 | }; | ||
242 | |||
243 | thermal-zones { | ||
244 | cpu0_thermal: cpu0-thermal { | ||
245 | thermal-sensors = <&tmuctrl_0>; | ||
246 | #include "exynos5440-trip-points.dtsi" | ||
247 | }; | ||
248 | cpu1_thermal: cpu1-thermal { | ||
249 | thermal-sensors = <&tmuctrl_1>; | ||
250 | #include "exynos5440-trip-points.dtsi" | ||
251 | }; | ||
252 | cpu2_thermal: cpu2-thermal { | ||
253 | thermal-sensors = <&tmuctrl_2>; | ||
254 | #include "exynos5440-trip-points.dtsi" | ||
255 | }; | ||
238 | }; | 256 | }; |
239 | 257 | ||
240 | sata@210000 { | 258 | sata@210000 { |
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index f1cd2147421d..a626e6dd8022 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi | |||
@@ -35,6 +35,7 @@ | |||
35 | regulator-max-microvolt = <5000000>; | 35 | regulator-max-microvolt = <5000000>; |
36 | gpio = <&gpio3 22 0>; | 36 | gpio = <&gpio3 22 0>; |
37 | enable-active-high; | 37 | enable-active-high; |
38 | vin-supply = <&swbst_reg>; | ||
38 | }; | 39 | }; |
39 | 40 | ||
40 | reg_usb_h1_vbus: regulator@1 { | 41 | reg_usb_h1_vbus: regulator@1 { |
@@ -45,6 +46,7 @@ | |||
45 | regulator-max-microvolt = <5000000>; | 46 | regulator-max-microvolt = <5000000>; |
46 | gpio = <&gpio1 29 0>; | 47 | gpio = <&gpio1 29 0>; |
47 | enable-active-high; | 48 | enable-active-high; |
49 | vin-supply = <&swbst_reg>; | ||
48 | }; | 50 | }; |
49 | 51 | ||
50 | reg_audio: regulator@2 { | 52 | reg_audio: regulator@2 { |
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index fda4932faefd..945887d3fdb3 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts | |||
@@ -52,6 +52,7 @@ | |||
52 | regulator-max-microvolt = <5000000>; | 52 | regulator-max-microvolt = <5000000>; |
53 | gpio = <&gpio4 0 0>; | 53 | gpio = <&gpio4 0 0>; |
54 | enable-active-high; | 54 | enable-active-high; |
55 | vin-supply = <&swbst_reg>; | ||
55 | }; | 56 | }; |
56 | 57 | ||
57 | reg_usb_otg2_vbus: regulator@1 { | 58 | reg_usb_otg2_vbus: regulator@1 { |
@@ -62,6 +63,7 @@ | |||
62 | regulator-max-microvolt = <5000000>; | 63 | regulator-max-microvolt = <5000000>; |
63 | gpio = <&gpio4 2 0>; | 64 | gpio = <&gpio4 2 0>; |
64 | enable-active-high; | 65 | enable-active-high; |
66 | vin-supply = <&swbst_reg>; | ||
65 | }; | 67 | }; |
66 | 68 | ||
67 | reg_aud3v: regulator@2 { | 69 | reg_aud3v: regulator@2 { |
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi index 59d1c297bb30..578fa2a54dce 100644 --- a/arch/arm/boot/dts/omap2.dtsi +++ b/arch/arm/boot/dts/omap2.dtsi | |||
@@ -87,8 +87,8 @@ | |||
87 | <14>, | 87 | <14>, |
88 | <15>; | 88 | <15>; |
89 | #dma-cells = <1>; | 89 | #dma-cells = <1>; |
90 | #dma-channels = <32>; | 90 | dma-channels = <32>; |
91 | #dma-requests = <64>; | 91 | dma-requests = <64>; |
92 | }; | 92 | }; |
93 | 93 | ||
94 | i2c1: i2c@48070000 { | 94 | i2c1: i2c@48070000 { |
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 60403273f83e..db80f9d376fa 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts | |||
@@ -16,6 +16,13 @@ | |||
16 | model = "Nokia N900"; | 16 | model = "Nokia N900"; |
17 | compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3"; | 17 | compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3"; |
18 | 18 | ||
19 | aliases { | ||
20 | i2c0; | ||
21 | i2c1 = &i2c1; | ||
22 | i2c2 = &i2c2; | ||
23 | i2c3 = &i2c3; | ||
24 | }; | ||
25 | |||
19 | cpus { | 26 | cpus { |
20 | cpu@0 { | 27 | cpu@0 { |
21 | cpu0-supply = <&vcc>; | 28 | cpu0-supply = <&vcc>; |
@@ -704,7 +711,7 @@ | |||
704 | compatible = "smsc,lan91c94"; | 711 | compatible = "smsc,lan91c94"; |
705 | interrupt-parent = <&gpio2>; | 712 | interrupt-parent = <&gpio2>; |
706 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */ | 713 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */ |
707 | reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */ | 714 | reg = <1 0 0xf>; /* 16 byte IO range */ |
708 | bank-width = <2>; | 715 | bank-width = <2>; |
709 | pinctrl-names = "default"; | 716 | pinctrl-names = "default"; |
710 | pinctrl-0 = <ðernet_pins>; | 717 | pinctrl-0 = <ðernet_pins>; |
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index 01b71111bd55..3fdc84fddb70 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi | |||
@@ -92,6 +92,8 @@ | |||
92 | ti,hwmods = "aes"; | 92 | ti,hwmods = "aes"; |
93 | reg = <0x480c5000 0x50>; | 93 | reg = <0x480c5000 0x50>; |
94 | interrupts = <0>; | 94 | interrupts = <0>; |
95 | dmas = <&sdma 65 &sdma 66>; | ||
96 | dma-names = "tx", "rx"; | ||
95 | }; | 97 | }; |
96 | 98 | ||
97 | prm: prm@48306000 { | 99 | prm: prm@48306000 { |
@@ -155,8 +157,8 @@ | |||
155 | <14>, | 157 | <14>, |
156 | <15>; | 158 | <15>; |
157 | #dma-cells = <1>; | 159 | #dma-cells = <1>; |
158 | #dma-channels = <32>; | 160 | dma-channels = <32>; |
159 | #dma-requests = <96>; | 161 | dma-requests = <96>; |
160 | }; | 162 | }; |
161 | 163 | ||
162 | omap3_pmx_core: pinmux@48002030 { | 164 | omap3_pmx_core: pinmux@48002030 { |
@@ -550,6 +552,8 @@ | |||
550 | ti,hwmods = "sham"; | 552 | ti,hwmods = "sham"; |
551 | reg = <0x480c3000 0x64>; | 553 | reg = <0x480c3000 0x64>; |
552 | interrupts = <49>; | 554 | interrupts = <49>; |
555 | dmas = <&sdma 69>; | ||
556 | dma-names = "rx"; | ||
553 | }; | 557 | }; |
554 | 558 | ||
555 | smartreflex_core: smartreflex@480cb000 { | 559 | smartreflex_core: smartreflex@480cb000 { |
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 074147cebae4..87401d9f4d8b 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi | |||
@@ -223,8 +223,8 @@ | |||
223 | <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, | 223 | <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, |
224 | <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; | 224 | <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; |
225 | #dma-cells = <1>; | 225 | #dma-cells = <1>; |
226 | #dma-channels = <32>; | 226 | dma-channels = <32>; |
227 | #dma-requests = <127>; | 227 | dma-requests = <127>; |
228 | }; | 228 | }; |
229 | 229 | ||
230 | gpio1: gpio@4a310000 { | 230 | gpio1: gpio@4a310000 { |
diff --git a/arch/arm/boot/dts/omap5-core-thermal.dtsi b/arch/arm/boot/dts/omap5-core-thermal.dtsi index 19212ac6eef0..de8a3d456cf7 100644 --- a/arch/arm/boot/dts/omap5-core-thermal.dtsi +++ b/arch/arm/boot/dts/omap5-core-thermal.dtsi | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | core_thermal: core_thermal { | 14 | core_thermal: core_thermal { |
15 | polling-delay-passive = <250>; /* milliseconds */ | 15 | polling-delay-passive = <250>; /* milliseconds */ |
16 | polling-delay = <1000>; /* milliseconds */ | 16 | polling-delay = <500>; /* milliseconds */ |
17 | 17 | ||
18 | /* sensor ID */ | 18 | /* sensor ID */ |
19 | thermal-sensors = <&bandgap 2>; | 19 | thermal-sensors = <&bandgap 2>; |
diff --git a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi index 1b87aca88b77..bc3090f2e84b 100644 --- a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi +++ b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | gpu_thermal: gpu_thermal { | 14 | gpu_thermal: gpu_thermal { |
15 | polling-delay-passive = <250>; /* milliseconds */ | 15 | polling-delay-passive = <250>; /* milliseconds */ |
16 | polling-delay = <1000>; /* milliseconds */ | 16 | polling-delay = <500>; /* milliseconds */ |
17 | 17 | ||
18 | /* sensor ID */ | 18 | /* sensor ID */ |
19 | thermal-sensors = <&bandgap 1>; | 19 | thermal-sensors = <&bandgap 1>; |
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index b321fdf42c9f..4a485b63a141 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi | |||
@@ -238,8 +238,8 @@ | |||
238 | <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, | 238 | <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, |
239 | <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; | 239 | <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; |
240 | #dma-cells = <1>; | 240 | #dma-cells = <1>; |
241 | #dma-channels = <32>; | 241 | dma-channels = <32>; |
242 | #dma-requests = <127>; | 242 | dma-requests = <127>; |
243 | }; | 243 | }; |
244 | 244 | ||
245 | gpio1: gpio@4ae10000 { | 245 | gpio1: gpio@4ae10000 { |
@@ -929,8 +929,8 @@ | |||
929 | <0x4A096800 0x40>; /* pll_ctrl */ | 929 | <0x4A096800 0x40>; /* pll_ctrl */ |
930 | reg-names = "phy_rx", "phy_tx", "pll_ctrl"; | 930 | reg-names = "phy_rx", "phy_tx", "pll_ctrl"; |
931 | ctrl-module = <&omap_control_sata>; | 931 | ctrl-module = <&omap_control_sata>; |
932 | clocks = <&sys_clkin>; | 932 | clocks = <&sys_clkin>, <&sata_ref_clk>; |
933 | clock-names = "sysclk"; | 933 | clock-names = "sysclk", "refclk"; |
934 | #phy-cells = <0>; | 934 | #phy-cells = <0>; |
935 | }; | 935 | }; |
936 | }; | 936 | }; |
@@ -1079,4 +1079,8 @@ | |||
1079 | }; | 1079 | }; |
1080 | }; | 1080 | }; |
1081 | 1081 | ||
1082 | &cpu_thermal { | ||
1083 | polling-delay = <500>; /* milliseconds */ | ||
1084 | }; | ||
1085 | |||
1082 | /include/ "omap54xx-clocks.dtsi" | 1086 | /include/ "omap54xx-clocks.dtsi" |
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi index 58c27466f012..83b425fb3ac2 100644 --- a/arch/arm/boot/dts/omap54xx-clocks.dtsi +++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi | |||
@@ -167,10 +167,18 @@ | |||
167 | ti,index-starts-at-one; | 167 | ti,index-starts-at-one; |
168 | }; | 168 | }; |
169 | 169 | ||
170 | dpll_core_byp_mux: dpll_core_byp_mux { | ||
171 | #clock-cells = <0>; | ||
172 | compatible = "ti,mux-clock"; | ||
173 | clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; | ||
174 | ti,bit-shift = <23>; | ||
175 | reg = <0x012c>; | ||
176 | }; | ||
177 | |||
170 | dpll_core_ck: dpll_core_ck { | 178 | dpll_core_ck: dpll_core_ck { |
171 | #clock-cells = <0>; | 179 | #clock-cells = <0>; |
172 | compatible = "ti,omap4-dpll-core-clock"; | 180 | compatible = "ti,omap4-dpll-core-clock"; |
173 | clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; | 181 | clocks = <&sys_clkin>, <&dpll_core_byp_mux>; |
174 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; | 182 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; |
175 | }; | 183 | }; |
176 | 184 | ||
@@ -294,10 +302,18 @@ | |||
294 | clock-div = <1>; | 302 | clock-div = <1>; |
295 | }; | 303 | }; |
296 | 304 | ||
305 | dpll_iva_byp_mux: dpll_iva_byp_mux { | ||
306 | #clock-cells = <0>; | ||
307 | compatible = "ti,mux-clock"; | ||
308 | clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; | ||
309 | ti,bit-shift = <23>; | ||
310 | reg = <0x01ac>; | ||
311 | }; | ||
312 | |||
297 | dpll_iva_ck: dpll_iva_ck { | 313 | dpll_iva_ck: dpll_iva_ck { |
298 | #clock-cells = <0>; | 314 | #clock-cells = <0>; |
299 | compatible = "ti,omap4-dpll-clock"; | 315 | compatible = "ti,omap4-dpll-clock"; |
300 | clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; | 316 | clocks = <&sys_clkin>, <&dpll_iva_byp_mux>; |
301 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; | 317 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; |
302 | }; | 318 | }; |
303 | 319 | ||
@@ -599,10 +615,19 @@ | |||
599 | }; | 615 | }; |
600 | }; | 616 | }; |
601 | &cm_core_clocks { | 617 | &cm_core_clocks { |
618 | |||
619 | dpll_per_byp_mux: dpll_per_byp_mux { | ||
620 | #clock-cells = <0>; | ||
621 | compatible = "ti,mux-clock"; | ||
622 | clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; | ||
623 | ti,bit-shift = <23>; | ||
624 | reg = <0x014c>; | ||
625 | }; | ||
626 | |||
602 | dpll_per_ck: dpll_per_ck { | 627 | dpll_per_ck: dpll_per_ck { |
603 | #clock-cells = <0>; | 628 | #clock-cells = <0>; |
604 | compatible = "ti,omap4-dpll-clock"; | 629 | compatible = "ti,omap4-dpll-clock"; |
605 | clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; | 630 | clocks = <&sys_clkin>, <&dpll_per_byp_mux>; |
606 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; | 631 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; |
607 | }; | 632 | }; |
608 | 633 | ||
@@ -714,10 +739,18 @@ | |||
714 | ti,index-starts-at-one; | 739 | ti,index-starts-at-one; |
715 | }; | 740 | }; |
716 | 741 | ||
742 | dpll_usb_byp_mux: dpll_usb_byp_mux { | ||
743 | #clock-cells = <0>; | ||
744 | compatible = "ti,mux-clock"; | ||
745 | clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; | ||
746 | ti,bit-shift = <23>; | ||
747 | reg = <0x018c>; | ||
748 | }; | ||
749 | |||
717 | dpll_usb_ck: dpll_usb_ck { | 750 | dpll_usb_ck: dpll_usb_ck { |
718 | #clock-cells = <0>; | 751 | #clock-cells = <0>; |
719 | compatible = "ti,omap4-dpll-j-type-clock"; | 752 | compatible = "ti,omap4-dpll-j-type-clock"; |
720 | clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; | 753 | clocks = <&sys_clkin>, <&dpll_usb_byp_mux>; |
721 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; | 754 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; |
722 | }; | 755 | }; |
723 | 756 | ||
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index d771f687a13b..eccc78d3220b 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi | |||
@@ -411,6 +411,7 @@ | |||
411 | "mac_clk_rx", "mac_clk_tx", | 411 | "mac_clk_rx", "mac_clk_tx", |
412 | "clk_mac_ref", "clk_mac_refout", | 412 | "clk_mac_ref", "clk_mac_refout", |
413 | "aclk_mac", "pclk_mac"; | 413 | "aclk_mac", "pclk_mac"; |
414 | status = "disabled"; | ||
414 | }; | 415 | }; |
415 | 416 | ||
416 | usb_host0_ehci: usb@ff500000 { | 417 | usb_host0_ehci: usb@ff500000 { |
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 261311bdf65b..367af53c1b84 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi | |||
@@ -1248,7 +1248,6 @@ | |||
1248 | atmel,watchdog-type = "hardware"; | 1248 | atmel,watchdog-type = "hardware"; |
1249 | atmel,reset-type = "all"; | 1249 | atmel,reset-type = "all"; |
1250 | atmel,dbg-halt; | 1250 | atmel,dbg-halt; |
1251 | atmel,idle-halt; | ||
1252 | status = "disabled"; | 1251 | status = "disabled"; |
1253 | }; | 1252 | }; |
1254 | 1253 | ||
@@ -1416,7 +1415,7 @@ | |||
1416 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 1415 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
1417 | reg = <0x00700000 0x100000>; | 1416 | reg = <0x00700000 0x100000>; |
1418 | interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; | 1417 | interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; |
1419 | clocks = <&usb>, <&uhphs_clk>, <&uhpck>; | 1418 | clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; |
1420 | clock-names = "usb_clk", "ehci_clk", "uhpck"; | 1419 | clock-names = "usb_clk", "ehci_clk", "uhpck"; |
1421 | status = "disabled"; | 1420 | status = "disabled"; |
1422 | }; | 1421 | }; |
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index d986b41b9654..4303874889c6 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi | |||
@@ -66,6 +66,7 @@ | |||
66 | gpio4 = &pioE; | 66 | gpio4 = &pioE; |
67 | tcb0 = &tcb0; | 67 | tcb0 = &tcb0; |
68 | tcb1 = &tcb1; | 68 | tcb1 = &tcb1; |
69 | i2c0 = &i2c0; | ||
69 | i2c2 = &i2c2; | 70 | i2c2 = &i2c2; |
70 | }; | 71 | }; |
71 | cpus { | 72 | cpus { |
@@ -259,7 +260,7 @@ | |||
259 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 260 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
260 | reg = <0x00600000 0x100000>; | 261 | reg = <0x00600000 0x100000>; |
261 | interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>; | 262 | interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>; |
262 | clocks = <&usb>, <&uhphs_clk>, <&uhpck>; | 263 | clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; |
263 | clock-names = "usb_clk", "ehci_clk", "uhpck"; | 264 | clock-names = "usb_clk", "ehci_clk", "uhpck"; |
264 | status = "disabled"; | 265 | status = "disabled"; |
265 | }; | 266 | }; |
@@ -461,8 +462,8 @@ | |||
461 | 462 | ||
462 | lcdck: lcdck { | 463 | lcdck: lcdck { |
463 | #clock-cells = <0>; | 464 | #clock-cells = <0>; |
464 | reg = <4>; | 465 | reg = <3>; |
465 | clocks = <&smd>; | 466 | clocks = <&mck>; |
466 | }; | 467 | }; |
467 | 468 | ||
468 | smdck: smdck { | 469 | smdck: smdck { |
@@ -770,7 +771,7 @@ | |||
770 | reg = <50>; | 771 | reg = <50>; |
771 | }; | 772 | }; |
772 | 773 | ||
773 | lcd_clk: lcd_clk { | 774 | lcdc_clk: lcdc_clk { |
774 | #clock-cells = <0>; | 775 | #clock-cells = <0>; |
775 | reg = <51>; | 776 | reg = <51>; |
776 | }; | 777 | }; |
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 252c3d1bda50..d9176e606173 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi | |||
@@ -660,7 +660,7 @@ | |||
660 | #address-cells = <1>; | 660 | #address-cells = <1>; |
661 | #size-cells = <0>; | 661 | #size-cells = <0>; |
662 | reg = <0xfff01000 0x1000>; | 662 | reg = <0xfff01000 0x1000>; |
663 | interrupts = <0 156 4>; | 663 | interrupts = <0 155 4>; |
664 | num-cs = <4>; | 664 | num-cs = <4>; |
665 | clocks = <&spi_m_clk>; | 665 | clocks = <&spi_m_clk>; |
666 | status = "disabled"; | 666 | status = "disabled"; |
@@ -713,6 +713,9 @@ | |||
713 | reg-shift = <2>; | 713 | reg-shift = <2>; |
714 | reg-io-width = <4>; | 714 | reg-io-width = <4>; |
715 | clocks = <&l4_sp_clk>; | 715 | clocks = <&l4_sp_clk>; |
716 | dmas = <&pdma 28>, | ||
717 | <&pdma 29>; | ||
718 | dma-names = "tx", "rx"; | ||
716 | }; | 719 | }; |
717 | 720 | ||
718 | uart1: serial1@ffc03000 { | 721 | uart1: serial1@ffc03000 { |
@@ -722,6 +725,9 @@ | |||
722 | reg-shift = <2>; | 725 | reg-shift = <2>; |
723 | reg-io-width = <4>; | 726 | reg-io-width = <4>; |
724 | clocks = <&l4_sp_clk>; | 727 | clocks = <&l4_sp_clk>; |
728 | dmas = <&pdma 30>, | ||
729 | <&pdma 31>; | ||
730 | dma-names = "tx", "rx"; | ||
725 | }; | 731 | }; |
726 | 732 | ||
727 | rst: rstmgr@ffd05000 { | 733 | rst: rstmgr@ffd05000 { |
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts index ab7891c43231..75742f8f96f3 100644 --- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts +++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts | |||
@@ -56,6 +56,22 @@ | |||
56 | model = "Olimex A10-OLinuXino-LIME"; | 56 | model = "Olimex A10-OLinuXino-LIME"; |
57 | compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; | 57 | compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; |
58 | 58 | ||
59 | cpus { | ||
60 | cpu0: cpu@0 { | ||
61 | /* | ||
62 | * The A10-Lime is known to be unstable | ||
63 | * when running at 1008 MHz | ||
64 | */ | ||
65 | operating-points = < | ||
66 | /* kHz uV */ | ||
67 | 912000 1350000 | ||
68 | 864000 1300000 | ||
69 | 624000 1250000 | ||
70 | >; | ||
71 | cooling-max-level = <2>; | ||
72 | }; | ||
73 | }; | ||
74 | |||
59 | soc@01c00000 { | 75 | soc@01c00000 { |
60 | emac: ethernet@01c0b000 { | 76 | emac: ethernet@01c0b000 { |
61 | pinctrl-names = "default"; | 77 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 5c2925831f20..eebb7853e00b 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi | |||
@@ -75,7 +75,6 @@ | |||
75 | clock-latency = <244144>; /* 8 32k periods */ | 75 | clock-latency = <244144>; /* 8 32k periods */ |
76 | operating-points = < | 76 | operating-points = < |
77 | /* kHz uV */ | 77 | /* kHz uV */ |
78 | 1056000 1500000 | ||
79 | 1008000 1400000 | 78 | 1008000 1400000 |
80 | 912000 1350000 | 79 | 912000 1350000 |
81 | 864000 1300000 | 80 | 864000 1300000 |
@@ -83,7 +82,7 @@ | |||
83 | >; | 82 | >; |
84 | #cooling-cells = <2>; | 83 | #cooling-cells = <2>; |
85 | cooling-min-level = <0>; | 84 | cooling-min-level = <0>; |
86 | cooling-max-level = <4>; | 85 | cooling-max-level = <3>; |
87 | }; | 86 | }; |
88 | }; | 87 | }; |
89 | 88 | ||
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index f8818f1edbbe..883cb4873688 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi | |||
@@ -47,7 +47,6 @@ | |||
47 | clock-latency = <244144>; /* 8 32k periods */ | 47 | clock-latency = <244144>; /* 8 32k periods */ |
48 | operating-points = < | 48 | operating-points = < |
49 | /* kHz uV */ | 49 | /* kHz uV */ |
50 | 1104000 1500000 | ||
51 | 1008000 1400000 | 50 | 1008000 1400000 |
52 | 912000 1350000 | 51 | 912000 1350000 |
53 | 864000 1300000 | 52 | 864000 1300000 |
@@ -57,7 +56,7 @@ | |||
57 | >; | 56 | >; |
58 | #cooling-cells = <2>; | 57 | #cooling-cells = <2>; |
59 | cooling-min-level = <0>; | 58 | cooling-min-level = <0>; |
60 | cooling-max-level = <6>; | 59 | cooling-max-level = <5>; |
61 | }; | 60 | }; |
62 | }; | 61 | }; |
63 | 62 | ||
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 3a8530b79f1c..fdd181792b4b 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi | |||
@@ -105,7 +105,6 @@ | |||
105 | clock-latency = <244144>; /* 8 32k periods */ | 105 | clock-latency = <244144>; /* 8 32k periods */ |
106 | operating-points = < | 106 | operating-points = < |
107 | /* kHz uV */ | 107 | /* kHz uV */ |
108 | 1008000 1450000 | ||
109 | 960000 1400000 | 108 | 960000 1400000 |
110 | 912000 1400000 | 109 | 912000 1400000 |
111 | 864000 1300000 | 110 | 864000 1300000 |
@@ -116,7 +115,7 @@ | |||
116 | >; | 115 | >; |
117 | #cooling-cells = <2>; | 116 | #cooling-cells = <2>; |
118 | cooling-min-level = <0>; | 117 | cooling-min-level = <0>; |
119 | cooling-max-level = <7>; | 118 | cooling-max-level = <6>; |
120 | }; | 119 | }; |
121 | 120 | ||
122 | cpu@1 { | 121 | cpu@1 { |
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index f2670f638e97..811e72bbe642 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig | |||
@@ -70,6 +70,7 @@ CONFIG_SCSI=y | |||
70 | CONFIG_BLK_DEV_SD=y | 70 | CONFIG_BLK_DEV_SD=y |
71 | # CONFIG_SCSI_LOWLEVEL is not set | 71 | # CONFIG_SCSI_LOWLEVEL is not set |
72 | CONFIG_NETDEVICES=y | 72 | CONFIG_NETDEVICES=y |
73 | CONFIG_ARM_AT91_ETHER=y | ||
73 | CONFIG_MACB=y | 74 | CONFIG_MACB=y |
74 | # CONFIG_NET_VENDOR_BROADCOM is not set | 75 | # CONFIG_NET_VENDOR_BROADCOM is not set |
75 | CONFIG_DM9000=y | 76 | CONFIG_DM9000=y |
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index e8a4c955241b..06075b6d2463 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig | |||
@@ -62,6 +62,17 @@ CONFIG_MACH_SPEAR1340=y | |||
62 | CONFIG_ARCH_STI=y | 62 | CONFIG_ARCH_STI=y |
63 | CONFIG_ARCH_EXYNOS=y | 63 | CONFIG_ARCH_EXYNOS=y |
64 | CONFIG_EXYNOS5420_MCPM=y | 64 | CONFIG_EXYNOS5420_MCPM=y |
65 | CONFIG_ARCH_SHMOBILE_MULTI=y | ||
66 | CONFIG_ARCH_EMEV2=y | ||
67 | CONFIG_ARCH_R7S72100=y | ||
68 | CONFIG_ARCH_R8A73A4=y | ||
69 | CONFIG_ARCH_R8A7740=y | ||
70 | CONFIG_ARCH_R8A7779=y | ||
71 | CONFIG_ARCH_R8A7790=y | ||
72 | CONFIG_ARCH_R8A7791=y | ||
73 | CONFIG_ARCH_R8A7794=y | ||
74 | CONFIG_ARCH_SH73A0=y | ||
75 | CONFIG_MACH_MARZEN=y | ||
65 | CONFIG_ARCH_SUNXI=y | 76 | CONFIG_ARCH_SUNXI=y |
66 | CONFIG_ARCH_SIRF=y | 77 | CONFIG_ARCH_SIRF=y |
67 | CONFIG_ARCH_TEGRA=y | 78 | CONFIG_ARCH_TEGRA=y |
@@ -84,9 +95,11 @@ CONFIG_PCI_KEYSTONE=y | |||
84 | CONFIG_PCI_MSI=y | 95 | CONFIG_PCI_MSI=y |
85 | CONFIG_PCI_MVEBU=y | 96 | CONFIG_PCI_MVEBU=y |
86 | CONFIG_PCI_TEGRA=y | 97 | CONFIG_PCI_TEGRA=y |
98 | CONFIG_PCI_RCAR_GEN2=y | ||
99 | CONFIG_PCI_RCAR_GEN2_PCIE=y | ||
87 | CONFIG_PCIEPORTBUS=y | 100 | CONFIG_PCIEPORTBUS=y |
88 | CONFIG_SMP=y | 101 | CONFIG_SMP=y |
89 | CONFIG_NR_CPUS=8 | 102 | CONFIG_NR_CPUS=16 |
90 | CONFIG_HIGHPTE=y | 103 | CONFIG_HIGHPTE=y |
91 | CONFIG_CMA=y | 104 | CONFIG_CMA=y |
92 | CONFIG_ARM_APPENDED_DTB=y | 105 | CONFIG_ARM_APPENDED_DTB=y |
@@ -130,6 +143,7 @@ CONFIG_DEVTMPFS_MOUNT=y | |||
130 | CONFIG_DMA_CMA=y | 143 | CONFIG_DMA_CMA=y |
131 | CONFIG_CMA_SIZE_MBYTES=64 | 144 | CONFIG_CMA_SIZE_MBYTES=64 |
132 | CONFIG_OMAP_OCP2SCP=y | 145 | CONFIG_OMAP_OCP2SCP=y |
146 | CONFIG_SIMPLE_PM_BUS=y | ||
133 | CONFIG_MTD=y | 147 | CONFIG_MTD=y |
134 | CONFIG_MTD_CMDLINE_PARTS=y | 148 | CONFIG_MTD_CMDLINE_PARTS=y |
135 | CONFIG_MTD_BLOCK=y | 149 | CONFIG_MTD_BLOCK=y |
@@ -157,6 +171,7 @@ CONFIG_AHCI_SUNXI=y | |||
157 | CONFIG_AHCI_TEGRA=y | 171 | CONFIG_AHCI_TEGRA=y |
158 | CONFIG_SATA_HIGHBANK=y | 172 | CONFIG_SATA_HIGHBANK=y |
159 | CONFIG_SATA_MV=y | 173 | CONFIG_SATA_MV=y |
174 | CONFIG_SATA_RCAR=y | ||
160 | CONFIG_NETDEVICES=y | 175 | CONFIG_NETDEVICES=y |
161 | CONFIG_HIX5HD2_GMAC=y | 176 | CONFIG_HIX5HD2_GMAC=y |
162 | CONFIG_SUN4I_EMAC=y | 177 | CONFIG_SUN4I_EMAC=y |
@@ -167,14 +182,17 @@ CONFIG_MV643XX_ETH=y | |||
167 | CONFIG_MVNETA=y | 182 | CONFIG_MVNETA=y |
168 | CONFIG_KS8851=y | 183 | CONFIG_KS8851=y |
169 | CONFIG_R8169=y | 184 | CONFIG_R8169=y |
185 | CONFIG_SH_ETH=y | ||
170 | CONFIG_SMSC911X=y | 186 | CONFIG_SMSC911X=y |
171 | CONFIG_STMMAC_ETH=y | 187 | CONFIG_STMMAC_ETH=y |
172 | CONFIG_TI_CPSW=y | 188 | CONFIG_TI_CPSW=y |
173 | CONFIG_XILINX_EMACLITE=y | 189 | CONFIG_XILINX_EMACLITE=y |
174 | CONFIG_AT803X_PHY=y | 190 | CONFIG_AT803X_PHY=y |
175 | CONFIG_MARVELL_PHY=y | 191 | CONFIG_MARVELL_PHY=y |
192 | CONFIG_SMSC_PHY=y | ||
176 | CONFIG_BROADCOM_PHY=y | 193 | CONFIG_BROADCOM_PHY=y |
177 | CONFIG_ICPLUS_PHY=y | 194 | CONFIG_ICPLUS_PHY=y |
195 | CONFIG_MICREL_PHY=y | ||
178 | CONFIG_USB_PEGASUS=y | 196 | CONFIG_USB_PEGASUS=y |
179 | CONFIG_USB_USBNET=y | 197 | CONFIG_USB_USBNET=y |
180 | CONFIG_USB_NET_SMSC75XX=y | 198 | CONFIG_USB_NET_SMSC75XX=y |
@@ -192,15 +210,18 @@ CONFIG_KEYBOARD_CROS_EC=y | |||
192 | CONFIG_MOUSE_PS2_ELANTECH=y | 210 | CONFIG_MOUSE_PS2_ELANTECH=y |
193 | CONFIG_INPUT_TOUCHSCREEN=y | 211 | CONFIG_INPUT_TOUCHSCREEN=y |
194 | CONFIG_TOUCHSCREEN_ATMEL_MXT=y | 212 | CONFIG_TOUCHSCREEN_ATMEL_MXT=y |
213 | CONFIG_TOUCHSCREEN_ST1232=m | ||
195 | CONFIG_TOUCHSCREEN_STMPE=y | 214 | CONFIG_TOUCHSCREEN_STMPE=y |
196 | CONFIG_TOUCHSCREEN_SUN4I=y | 215 | CONFIG_TOUCHSCREEN_SUN4I=y |
197 | CONFIG_INPUT_MISC=y | 216 | CONFIG_INPUT_MISC=y |
198 | CONFIG_INPUT_MPU3050=y | 217 | CONFIG_INPUT_MPU3050=y |
199 | CONFIG_INPUT_AXP20X_PEK=y | 218 | CONFIG_INPUT_AXP20X_PEK=y |
219 | CONFIG_INPUT_ADXL34X=m | ||
200 | CONFIG_SERIO_AMBAKMI=y | 220 | CONFIG_SERIO_AMBAKMI=y |
201 | CONFIG_SERIAL_8250=y | 221 | CONFIG_SERIAL_8250=y |
202 | CONFIG_SERIAL_8250_CONSOLE=y | 222 | CONFIG_SERIAL_8250_CONSOLE=y |
203 | CONFIG_SERIAL_8250_DW=y | 223 | CONFIG_SERIAL_8250_DW=y |
224 | CONFIG_SERIAL_8250_EM=y | ||
204 | CONFIG_SERIAL_8250_MT6577=y | 225 | CONFIG_SERIAL_8250_MT6577=y |
205 | CONFIG_SERIAL_AMBA_PL011=y | 226 | CONFIG_SERIAL_AMBA_PL011=y |
206 | CONFIG_SERIAL_AMBA_PL011_CONSOLE=y | 227 | CONFIG_SERIAL_AMBA_PL011_CONSOLE=y |
@@ -213,6 +234,9 @@ CONFIG_SERIAL_SIRFSOC_CONSOLE=y | |||
213 | CONFIG_SERIAL_TEGRA=y | 234 | CONFIG_SERIAL_TEGRA=y |
214 | CONFIG_SERIAL_IMX=y | 235 | CONFIG_SERIAL_IMX=y |
215 | CONFIG_SERIAL_IMX_CONSOLE=y | 236 | CONFIG_SERIAL_IMX_CONSOLE=y |
237 | CONFIG_SERIAL_SH_SCI=y | ||
238 | CONFIG_SERIAL_SH_SCI_NR_UARTS=20 | ||
239 | CONFIG_SERIAL_SH_SCI_CONSOLE=y | ||
216 | CONFIG_SERIAL_MSM=y | 240 | CONFIG_SERIAL_MSM=y |
217 | CONFIG_SERIAL_MSM_CONSOLE=y | 241 | CONFIG_SERIAL_MSM_CONSOLE=y |
218 | CONFIG_SERIAL_VT8500=y | 242 | CONFIG_SERIAL_VT8500=y |
@@ -233,19 +257,26 @@ CONFIG_I2C_MUX_PCA954x=y | |||
233 | CONFIG_I2C_MUX_PINCTRL=y | 257 | CONFIG_I2C_MUX_PINCTRL=y |
234 | CONFIG_I2C_CADENCE=y | 258 | CONFIG_I2C_CADENCE=y |
235 | CONFIG_I2C_DESIGNWARE_PLATFORM=y | 259 | CONFIG_I2C_DESIGNWARE_PLATFORM=y |
260 | CONFIG_I2C_GPIO=m | ||
236 | CONFIG_I2C_EXYNOS5=y | 261 | CONFIG_I2C_EXYNOS5=y |
237 | CONFIG_I2C_MV64XXX=y | 262 | CONFIG_I2C_MV64XXX=y |
263 | CONFIG_I2C_RIIC=y | ||
238 | CONFIG_I2C_S3C2410=y | 264 | CONFIG_I2C_S3C2410=y |
265 | CONFIG_I2C_SH_MOBILE=y | ||
239 | CONFIG_I2C_SIRF=y | 266 | CONFIG_I2C_SIRF=y |
240 | CONFIG_I2C_TEGRA=y | ||
241 | CONFIG_I2C_ST=y | 267 | CONFIG_I2C_ST=y |
242 | CONFIG_SPI=y | 268 | CONFIG_I2C_TEGRA=y |
243 | CONFIG_I2C_XILINX=y | 269 | CONFIG_I2C_XILINX=y |
244 | CONFIG_SPI_DAVINCI=y | 270 | CONFIG_I2C_RCAR=y |
271 | CONFIG_SPI=y | ||
245 | CONFIG_SPI_CADENCE=y | 272 | CONFIG_SPI_CADENCE=y |
273 | CONFIG_SPI_DAVINCI=y | ||
246 | CONFIG_SPI_OMAP24XX=y | 274 | CONFIG_SPI_OMAP24XX=y |
247 | CONFIG_SPI_ORION=y | 275 | CONFIG_SPI_ORION=y |
248 | CONFIG_SPI_PL022=y | 276 | CONFIG_SPI_PL022=y |
277 | CONFIG_SPI_RSPI=y | ||
278 | CONFIG_SPI_SH_MSIOF=m | ||
279 | CONFIG_SPI_SH_HSPI=y | ||
249 | CONFIG_SPI_SIRF=y | 280 | CONFIG_SPI_SIRF=y |
250 | CONFIG_SPI_SUN4I=y | 281 | CONFIG_SPI_SUN4I=y |
251 | CONFIG_SPI_SUN6I=y | 282 | CONFIG_SPI_SUN6I=y |
@@ -259,12 +290,15 @@ CONFIG_PINCTRL_PALMAS=y | |||
259 | CONFIG_PINCTRL_APQ8084=y | 290 | CONFIG_PINCTRL_APQ8084=y |
260 | CONFIG_GPIO_SYSFS=y | 291 | CONFIG_GPIO_SYSFS=y |
261 | CONFIG_GPIO_GENERIC_PLATFORM=y | 292 | CONFIG_GPIO_GENERIC_PLATFORM=y |
262 | CONFIG_GPIO_DWAPB=y | ||
263 | CONFIG_GPIO_DAVINCI=y | 293 | CONFIG_GPIO_DAVINCI=y |
294 | CONFIG_GPIO_DWAPB=y | ||
295 | CONFIG_GPIO_EM=y | ||
296 | CONFIG_GPIO_RCAR=y | ||
264 | CONFIG_GPIO_XILINX=y | 297 | CONFIG_GPIO_XILINX=y |
265 | CONFIG_GPIO_ZYNQ=y | 298 | CONFIG_GPIO_ZYNQ=y |
266 | CONFIG_GPIO_PCA953X=y | 299 | CONFIG_GPIO_PCA953X=y |
267 | CONFIG_GPIO_PCA953X_IRQ=y | 300 | CONFIG_GPIO_PCA953X_IRQ=y |
301 | CONFIG_GPIO_PCF857X=y | ||
268 | CONFIG_GPIO_TWL4030=y | 302 | CONFIG_GPIO_TWL4030=y |
269 | CONFIG_GPIO_PALMAS=y | 303 | CONFIG_GPIO_PALMAS=y |
270 | CONFIG_GPIO_SYSCON=y | 304 | CONFIG_GPIO_SYSCON=y |
@@ -276,10 +310,12 @@ CONFIG_POWER_RESET_AS3722=y | |||
276 | CONFIG_POWER_RESET_GPIO=y | 310 | CONFIG_POWER_RESET_GPIO=y |
277 | CONFIG_POWER_RESET_KEYSTONE=y | 311 | CONFIG_POWER_RESET_KEYSTONE=y |
278 | CONFIG_POWER_RESET_SUN6I=y | 312 | CONFIG_POWER_RESET_SUN6I=y |
313 | CONFIG_POWER_RESET_RMOBILE=y | ||
279 | CONFIG_SENSORS_LM90=y | 314 | CONFIG_SENSORS_LM90=y |
280 | CONFIG_SENSORS_LM95245=y | 315 | CONFIG_SENSORS_LM95245=y |
281 | CONFIG_THERMAL=y | 316 | CONFIG_THERMAL=y |
282 | CONFIG_CPU_THERMAL=y | 317 | CONFIG_CPU_THERMAL=y |
318 | CONFIG_RCAR_THERMAL=y | ||
283 | CONFIG_ARMADA_THERMAL=y | 319 | CONFIG_ARMADA_THERMAL=y |
284 | CONFIG_DAVINCI_WATCHDOG | 320 | CONFIG_DAVINCI_WATCHDOG |
285 | CONFIG_ST_THERMAL_SYSCFG=y | 321 | CONFIG_ST_THERMAL_SYSCFG=y |
@@ -290,6 +326,7 @@ CONFIG_ARM_SP805_WATCHDOG=y | |||
290 | CONFIG_ORION_WATCHDOG=y | 326 | CONFIG_ORION_WATCHDOG=y |
291 | CONFIG_SUNXI_WATCHDOG=y | 327 | CONFIG_SUNXI_WATCHDOG=y |
292 | CONFIG_MESON_WATCHDOG=y | 328 | CONFIG_MESON_WATCHDOG=y |
329 | CONFIG_MFD_AS3711=y | ||
293 | CONFIG_MFD_AS3722=y | 330 | CONFIG_MFD_AS3722=y |
294 | CONFIG_MFD_BCM590XX=y | 331 | CONFIG_MFD_BCM590XX=y |
295 | CONFIG_MFD_AXP20X=y | 332 | CONFIG_MFD_AXP20X=y |
@@ -304,13 +341,16 @@ CONFIG_MFD_TPS65090=y | |||
304 | CONFIG_MFD_TPS6586X=y | 341 | CONFIG_MFD_TPS6586X=y |
305 | CONFIG_MFD_TPS65910=y | 342 | CONFIG_MFD_TPS65910=y |
306 | CONFIG_REGULATOR_AB8500=y | 343 | CONFIG_REGULATOR_AB8500=y |
344 | CONFIG_REGULATOR_AS3711=y | ||
307 | CONFIG_REGULATOR_AS3722=y | 345 | CONFIG_REGULATOR_AS3722=y |
308 | CONFIG_REGULATOR_AXP20X=y | 346 | CONFIG_REGULATOR_AXP20X=y |
309 | CONFIG_REGULATOR_BCM590XX=y | 347 | CONFIG_REGULATOR_BCM590XX=y |
348 | CONFIG_REGULATOR_DA9210=y | ||
310 | CONFIG_REGULATOR_GPIO=y | 349 | CONFIG_REGULATOR_GPIO=y |
311 | CONFIG_MFD_SYSCON=y | 350 | CONFIG_MFD_SYSCON=y |
312 | CONFIG_POWER_RESET_SYSCON=y | 351 | CONFIG_POWER_RESET_SYSCON=y |
313 | CONFIG_REGULATOR_MAX8907=y | 352 | CONFIG_REGULATOR_MAX8907=y |
353 | CONFIG_REGULATOR_MAX8973=y | ||
314 | CONFIG_REGULATOR_MAX77686=y | 354 | CONFIG_REGULATOR_MAX77686=y |
315 | CONFIG_REGULATOR_PALMAS=y | 355 | CONFIG_REGULATOR_PALMAS=y |
316 | CONFIG_REGULATOR_S2MPS11=y | 356 | CONFIG_REGULATOR_S2MPS11=y |
@@ -324,18 +364,32 @@ CONFIG_REGULATOR_TWL4030=y | |||
324 | CONFIG_REGULATOR_VEXPRESS=y | 364 | CONFIG_REGULATOR_VEXPRESS=y |
325 | CONFIG_MEDIA_SUPPORT=y | 365 | CONFIG_MEDIA_SUPPORT=y |
326 | CONFIG_MEDIA_CAMERA_SUPPORT=y | 366 | CONFIG_MEDIA_CAMERA_SUPPORT=y |
367 | CONFIG_MEDIA_CONTROLLER=y | ||
368 | CONFIG_VIDEO_V4L2_SUBDEV_API=y | ||
327 | CONFIG_MEDIA_USB_SUPPORT=y | 369 | CONFIG_MEDIA_USB_SUPPORT=y |
328 | CONFIG_USB_VIDEO_CLASS=y | 370 | CONFIG_USB_VIDEO_CLASS=y |
329 | CONFIG_USB_GSPCA=y | 371 | CONFIG_USB_GSPCA=y |
372 | CONFIG_V4L_PLATFORM_DRIVERS=y | ||
373 | CONFIG_SOC_CAMERA=m | ||
374 | CONFIG_SOC_CAMERA_PLATFORM=m | ||
375 | CONFIG_VIDEO_RCAR_VIN=m | ||
376 | CONFIG_V4L_MEM2MEM_DRIVERS=y | ||
377 | CONFIG_VIDEO_RENESAS_VSP1=m | ||
378 | # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set | ||
379 | CONFIG_VIDEO_ADV7180=m | ||
330 | CONFIG_DRM=y | 380 | CONFIG_DRM=y |
381 | CONFIG_DRM_RCAR_DU=m | ||
331 | CONFIG_DRM_TEGRA=y | 382 | CONFIG_DRM_TEGRA=y |
332 | CONFIG_DRM_PANEL_SIMPLE=y | 383 | CONFIG_DRM_PANEL_SIMPLE=y |
333 | CONFIG_FB_ARMCLCD=y | 384 | CONFIG_FB_ARMCLCD=y |
334 | CONFIG_FB_WM8505=y | 385 | CONFIG_FB_WM8505=y |
386 | CONFIG_FB_SH_MOBILE_LCDC=y | ||
335 | CONFIG_FB_SIMPLE=y | 387 | CONFIG_FB_SIMPLE=y |
388 | CONFIG_FB_SH_MOBILE_MERAM=y | ||
336 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 389 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
337 | CONFIG_BACKLIGHT_CLASS_DEVICE=y | 390 | CONFIG_BACKLIGHT_CLASS_DEVICE=y |
338 | CONFIG_BACKLIGHT_PWM=y | 391 | CONFIG_BACKLIGHT_PWM=y |
392 | CONFIG_BACKLIGHT_AS3711=y | ||
339 | CONFIG_FRAMEBUFFER_CONSOLE=y | 393 | CONFIG_FRAMEBUFFER_CONSOLE=y |
340 | CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y | 394 | CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y |
341 | CONFIG_SOUND=y | 395 | CONFIG_SOUND=y |
@@ -343,6 +397,8 @@ CONFIG_SND=y | |||
343 | CONFIG_SND_DYNAMIC_MINORS=y | 397 | CONFIG_SND_DYNAMIC_MINORS=y |
344 | CONFIG_SND_USB_AUDIO=y | 398 | CONFIG_SND_USB_AUDIO=y |
345 | CONFIG_SND_SOC=y | 399 | CONFIG_SND_SOC=y |
400 | CONFIG_SND_SOC_SH4_FSI=m | ||
401 | CONFIG_SND_SOC_RCAR=m | ||
346 | CONFIG_SND_SOC_TEGRA=y | 402 | CONFIG_SND_SOC_TEGRA=y |
347 | CONFIG_SND_SOC_TEGRA_RT5640=y | 403 | CONFIG_SND_SOC_TEGRA_RT5640=y |
348 | CONFIG_SND_SOC_TEGRA_WM8753=y | 404 | CONFIG_SND_SOC_TEGRA_WM8753=y |
@@ -350,6 +406,8 @@ CONFIG_SND_SOC_TEGRA_WM8903=y | |||
350 | CONFIG_SND_SOC_TEGRA_TRIMSLICE=y | 406 | CONFIG_SND_SOC_TEGRA_TRIMSLICE=y |
351 | CONFIG_SND_SOC_TEGRA_ALC5632=y | 407 | CONFIG_SND_SOC_TEGRA_ALC5632=y |
352 | CONFIG_SND_SOC_TEGRA_MAX98090=y | 408 | CONFIG_SND_SOC_TEGRA_MAX98090=y |
409 | CONFIG_SND_SOC_AK4642=m | ||
410 | CONFIG_SND_SOC_WM8978=m | ||
353 | CONFIG_USB=y | 411 | CONFIG_USB=y |
354 | CONFIG_USB_XHCI_HCD=y | 412 | CONFIG_USB_XHCI_HCD=y |
355 | CONFIG_USB_XHCI_MVEBU=y | 413 | CONFIG_USB_XHCI_MVEBU=y |
@@ -362,6 +420,8 @@ CONFIG_USB_ISP1760_HCD=y | |||
362 | CONFIG_USB_OHCI_HCD=y | 420 | CONFIG_USB_OHCI_HCD=y |
363 | CONFIG_USB_OHCI_HCD_STI=y | 421 | CONFIG_USB_OHCI_HCD_STI=y |
364 | CONFIG_USB_OHCI_HCD_PLATFORM=y | 422 | CONFIG_USB_OHCI_HCD_PLATFORM=y |
423 | CONFIG_USB_R8A66597_HCD=m | ||
424 | CONFIG_USB_RENESAS_USBHS=m | ||
365 | CONFIG_USB_STORAGE=y | 425 | CONFIG_USB_STORAGE=y |
366 | CONFIG_USB_DWC3=y | 426 | CONFIG_USB_DWC3=y |
367 | CONFIG_USB_CHIPIDEA=y | 427 | CONFIG_USB_CHIPIDEA=y |
@@ -374,6 +434,10 @@ CONFIG_SAMSUNG_USB3PHY=y | |||
374 | CONFIG_USB_GPIO_VBUS=y | 434 | CONFIG_USB_GPIO_VBUS=y |
375 | CONFIG_USB_ISP1301=y | 435 | CONFIG_USB_ISP1301=y |
376 | CONFIG_USB_MXS_PHY=y | 436 | CONFIG_USB_MXS_PHY=y |
437 | CONFIG_USB_RCAR_PHY=m | ||
438 | CONFIG_USB_RCAR_GEN2_PHY=m | ||
439 | CONFIG_USB_GADGET=y | ||
440 | CONFIG_USB_RENESAS_USBHS_UDC=m | ||
377 | CONFIG_MMC=y | 441 | CONFIG_MMC=y |
378 | CONFIG_MMC_BLOCK_MINORS=16 | 442 | CONFIG_MMC_BLOCK_MINORS=16 |
379 | CONFIG_MMC_ARMMMCI=y | 443 | CONFIG_MMC_ARMMMCI=y |
@@ -392,12 +456,14 @@ CONFIG_MMC_SDHCI_ST=y | |||
392 | CONFIG_MMC_OMAP=y | 456 | CONFIG_MMC_OMAP=y |
393 | CONFIG_MMC_OMAP_HS=y | 457 | CONFIG_MMC_OMAP_HS=y |
394 | CONFIG_MMC_MVSDIO=y | 458 | CONFIG_MMC_MVSDIO=y |
395 | CONFIG_MMC_SUNXI=y | 459 | CONFIG_MMC_SDHI=y |
396 | CONFIG_MMC_DW=y | 460 | CONFIG_MMC_DW=y |
397 | CONFIG_MMC_DW_IDMAC=y | 461 | CONFIG_MMC_DW_IDMAC=y |
398 | CONFIG_MMC_DW_PLTFM=y | 462 | CONFIG_MMC_DW_PLTFM=y |
399 | CONFIG_MMC_DW_EXYNOS=y | 463 | CONFIG_MMC_DW_EXYNOS=y |
400 | CONFIG_MMC_DW_ROCKCHIP=y | 464 | CONFIG_MMC_DW_ROCKCHIP=y |
465 | CONFIG_MMC_SH_MMCIF=y | ||
466 | CONFIG_MMC_SUNXI=y | ||
401 | CONFIG_NEW_LEDS=y | 467 | CONFIG_NEW_LEDS=y |
402 | CONFIG_LEDS_CLASS=y | 468 | CONFIG_LEDS_CLASS=y |
403 | CONFIG_LEDS_GPIO=y | 469 | CONFIG_LEDS_GPIO=y |
@@ -421,10 +487,12 @@ CONFIG_RTC_DRV_AS3722=y | |||
421 | CONFIG_RTC_DRV_DS1307=y | 487 | CONFIG_RTC_DRV_DS1307=y |
422 | CONFIG_RTC_DRV_MAX8907=y | 488 | CONFIG_RTC_DRV_MAX8907=y |
423 | CONFIG_RTC_DRV_MAX77686=y | 489 | CONFIG_RTC_DRV_MAX77686=y |
490 | CONFIG_RTC_DRV_RS5C372=m | ||
424 | CONFIG_RTC_DRV_PALMAS=y | 491 | CONFIG_RTC_DRV_PALMAS=y |
425 | CONFIG_RTC_DRV_TWL4030=y | 492 | CONFIG_RTC_DRV_TWL4030=y |
426 | CONFIG_RTC_DRV_TPS6586X=y | 493 | CONFIG_RTC_DRV_TPS6586X=y |
427 | CONFIG_RTC_DRV_TPS65910=y | 494 | CONFIG_RTC_DRV_TPS65910=y |
495 | CONFIG_RTC_DRV_S35390A=m | ||
428 | CONFIG_RTC_DRV_EM3027=y | 496 | CONFIG_RTC_DRV_EM3027=y |
429 | CONFIG_RTC_DRV_PL031=y | 497 | CONFIG_RTC_DRV_PL031=y |
430 | CONFIG_RTC_DRV_VT8500=y | 498 | CONFIG_RTC_DRV_VT8500=y |
@@ -436,6 +504,9 @@ CONFIG_DMADEVICES=y | |||
436 | CONFIG_DW_DMAC=y | 504 | CONFIG_DW_DMAC=y |
437 | CONFIG_MV_XOR=y | 505 | CONFIG_MV_XOR=y |
438 | CONFIG_TEGRA20_APB_DMA=y | 506 | CONFIG_TEGRA20_APB_DMA=y |
507 | CONFIG_SH_DMAE=y | ||
508 | CONFIG_RCAR_AUDMAC_PP=m | ||
509 | CONFIG_RCAR_DMAC=y | ||
439 | CONFIG_STE_DMA40=y | 510 | CONFIG_STE_DMA40=y |
440 | CONFIG_SIRF_DMA=y | 511 | CONFIG_SIRF_DMA=y |
441 | CONFIG_TI_EDMA=y | 512 | CONFIG_TI_EDMA=y |
@@ -468,6 +539,7 @@ CONFIG_IIO=y | |||
468 | CONFIG_XILINX_XADC=y | 539 | CONFIG_XILINX_XADC=y |
469 | CONFIG_AK8975=y | 540 | CONFIG_AK8975=y |
470 | CONFIG_PWM=y | 541 | CONFIG_PWM=y |
542 | CONFIG_PWM_RENESAS_TPU=y | ||
471 | CONFIG_PWM_TEGRA=y | 543 | CONFIG_PWM_TEGRA=y |
472 | CONFIG_PWM_VT8500=y | 544 | CONFIG_PWM_VT8500=y |
473 | CONFIG_PHY_HIX5HD2_SATA=y | 545 | CONFIG_PHY_HIX5HD2_SATA=y |
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index b7386524c356..8e108599e1af 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig | |||
@@ -114,6 +114,7 @@ CONFIG_MTD_PHYSMAP_OF=y | |||
114 | CONFIG_MTD_NAND=y | 114 | CONFIG_MTD_NAND=y |
115 | CONFIG_MTD_NAND_ECC_BCH=y | 115 | CONFIG_MTD_NAND_ECC_BCH=y |
116 | CONFIG_MTD_NAND_OMAP2=y | 116 | CONFIG_MTD_NAND_OMAP2=y |
117 | CONFIG_MTD_NAND_OMAP_BCH=y | ||
117 | CONFIG_MTD_ONENAND=y | 118 | CONFIG_MTD_ONENAND=y |
118 | CONFIG_MTD_ONENAND_VERIFY_WRITE=y | 119 | CONFIG_MTD_ONENAND_VERIFY_WRITE=y |
119 | CONFIG_MTD_ONENAND_OMAP2=y | 120 | CONFIG_MTD_ONENAND_OMAP2=y |
@@ -248,6 +249,7 @@ CONFIG_TWL6040_CORE=y | |||
248 | CONFIG_REGULATOR_PALMAS=y | 249 | CONFIG_REGULATOR_PALMAS=y |
249 | CONFIG_REGULATOR_PBIAS=y | 250 | CONFIG_REGULATOR_PBIAS=y |
250 | CONFIG_REGULATOR_TI_ABB=y | 251 | CONFIG_REGULATOR_TI_ABB=y |
252 | CONFIG_REGULATOR_TPS62360=m | ||
251 | CONFIG_REGULATOR_TPS65023=y | 253 | CONFIG_REGULATOR_TPS65023=y |
252 | CONFIG_REGULATOR_TPS6507X=y | 254 | CONFIG_REGULATOR_TPS6507X=y |
253 | CONFIG_REGULATOR_TPS65217=y | 255 | CONFIG_REGULATOR_TPS65217=y |
@@ -374,7 +376,8 @@ CONFIG_PWM_TIEHRPWM=m | |||
374 | CONFIG_PWM_TWL=m | 376 | CONFIG_PWM_TWL=m |
375 | CONFIG_PWM_TWL_LED=m | 377 | CONFIG_PWM_TWL_LED=m |
376 | CONFIG_OMAP_USB2=m | 378 | CONFIG_OMAP_USB2=m |
377 | CONFIG_TI_PIPE3=m | 379 | CONFIG_TI_PIPE3=y |
380 | CONFIG_TWL4030_USB=m | ||
378 | CONFIG_EXT2_FS=y | 381 | CONFIG_EXT2_FS=y |
379 | CONFIG_EXT3_FS=y | 382 | CONFIG_EXT3_FS=y |
380 | # CONFIG_EXT3_FS_XATTR is not set | 383 | # CONFIG_EXT3_FS_XATTR is not set |
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index 41d856effe6c..510c747c65b4 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig | |||
@@ -3,8 +3,6 @@ | |||
3 | CONFIG_SYSVIPC=y | 3 | CONFIG_SYSVIPC=y |
4 | CONFIG_IRQ_DOMAIN_DEBUG=y | 4 | CONFIG_IRQ_DOMAIN_DEBUG=y |
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_SYSFS_DEPRECATED=y | ||
7 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
8 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
9 | CONFIG_EMBEDDED=y | 7 | CONFIG_EMBEDDED=y |
10 | CONFIG_SLAB=y | 8 | CONFIG_SLAB=y |
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 38840a812924..8f6a5702b696 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig | |||
@@ -4,6 +4,7 @@ CONFIG_BLK_DEV_INITRD=y | |||
4 | CONFIG_PERF_EVENTS=y | 4 | CONFIG_PERF_EVENTS=y |
5 | CONFIG_ARCH_SUNXI=y | 5 | CONFIG_ARCH_SUNXI=y |
6 | CONFIG_SMP=y | 6 | CONFIG_SMP=y |
7 | CONFIG_NR_CPUS=8 | ||
7 | CONFIG_AEABI=y | 8 | CONFIG_AEABI=y |
8 | CONFIG_HIGHMEM=y | 9 | CONFIG_HIGHMEM=y |
9 | CONFIG_HIGHPTE=y | 10 | CONFIG_HIGHPTE=y |
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig index f489fdaa19b8..37fe607a4ede 100644 --- a/arch/arm/configs/vexpress_defconfig +++ b/arch/arm/configs/vexpress_defconfig | |||
@@ -118,8 +118,8 @@ CONFIG_HID_ZEROPLUS=y | |||
118 | CONFIG_USB=y | 118 | CONFIG_USB=y |
119 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y | 119 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y |
120 | CONFIG_USB_MON=y | 120 | CONFIG_USB_MON=y |
121 | CONFIG_USB_ISP1760_HCD=y | ||
122 | CONFIG_USB_STORAGE=y | 121 | CONFIG_USB_STORAGE=y |
122 | CONFIG_USB_ISP1760=y | ||
123 | CONFIG_MMC=y | 123 | CONFIG_MMC=y |
124 | CONFIG_MMC_ARMMMCI=y | 124 | CONFIG_MMC_ARMMMCI=y |
125 | CONFIG_NEW_LEDS=y | 125 | CONFIG_NEW_LEDS=y |
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped index 71e5fc7cfb18..1d1800f71c5b 100644 --- a/arch/arm/crypto/aesbs-core.S_shipped +++ b/arch/arm/crypto/aesbs-core.S_shipped | |||
@@ -58,14 +58,18 @@ | |||
58 | # define VFP_ABI_FRAME 0 | 58 | # define VFP_ABI_FRAME 0 |
59 | # define BSAES_ASM_EXTENDED_KEY | 59 | # define BSAES_ASM_EXTENDED_KEY |
60 | # define XTS_CHAIN_TWEAK | 60 | # define XTS_CHAIN_TWEAK |
61 | # define __ARM_ARCH__ 7 | 61 | # define __ARM_ARCH__ __LINUX_ARM_ARCH__ |
62 | # define __ARM_MAX_ARCH__ 7 | ||
62 | #endif | 63 | #endif |
63 | 64 | ||
64 | #ifdef __thumb__ | 65 | #ifdef __thumb__ |
65 | # define adrl adr | 66 | # define adrl adr |
66 | #endif | 67 | #endif |
67 | 68 | ||
68 | #if __ARM_ARCH__>=7 | 69 | #if __ARM_MAX_ARCH__>=7 |
70 | .arch armv7-a | ||
71 | .fpu neon | ||
72 | |||
69 | .text | 73 | .text |
70 | .syntax unified @ ARMv7-capable assembler is expected to handle this | 74 | .syntax unified @ ARMv7-capable assembler is expected to handle this |
71 | #ifdef __thumb2__ | 75 | #ifdef __thumb2__ |
@@ -74,8 +78,6 @@ | |||
74 | .code 32 | 78 | .code 32 |
75 | #endif | 79 | #endif |
76 | 80 | ||
77 | .fpu neon | ||
78 | |||
79 | .type _bsaes_decrypt8,%function | 81 | .type _bsaes_decrypt8,%function |
80 | .align 4 | 82 | .align 4 |
81 | _bsaes_decrypt8: | 83 | _bsaes_decrypt8: |
@@ -2095,9 +2097,11 @@ bsaes_xts_decrypt: | |||
2095 | vld1.8 {q8}, [r0] @ initial tweak | 2097 | vld1.8 {q8}, [r0] @ initial tweak |
2096 | adr r2, .Lxts_magic | 2098 | adr r2, .Lxts_magic |
2097 | 2099 | ||
2100 | #ifndef XTS_CHAIN_TWEAK | ||
2098 | tst r9, #0xf @ if not multiple of 16 | 2101 | tst r9, #0xf @ if not multiple of 16 |
2099 | it ne @ Thumb2 thing, sanity check in ARM | 2102 | it ne @ Thumb2 thing, sanity check in ARM |
2100 | subne r9, #0x10 @ subtract another 16 bytes | 2103 | subne r9, #0x10 @ subtract another 16 bytes |
2104 | #endif | ||
2101 | subs r9, #0x80 | 2105 | subs r9, #0x80 |
2102 | 2106 | ||
2103 | blo .Lxts_dec_short | 2107 | blo .Lxts_dec_short |
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl index be068db960ee..a4d3856e7d24 100644 --- a/arch/arm/crypto/bsaes-armv7.pl +++ b/arch/arm/crypto/bsaes-armv7.pl | |||
@@ -701,14 +701,18 @@ $code.=<<___; | |||
701 | # define VFP_ABI_FRAME 0 | 701 | # define VFP_ABI_FRAME 0 |
702 | # define BSAES_ASM_EXTENDED_KEY | 702 | # define BSAES_ASM_EXTENDED_KEY |
703 | # define XTS_CHAIN_TWEAK | 703 | # define XTS_CHAIN_TWEAK |
704 | # define __ARM_ARCH__ 7 | 704 | # define __ARM_ARCH__ __LINUX_ARM_ARCH__ |
705 | # define __ARM_MAX_ARCH__ 7 | ||
705 | #endif | 706 | #endif |
706 | 707 | ||
707 | #ifdef __thumb__ | 708 | #ifdef __thumb__ |
708 | # define adrl adr | 709 | # define adrl adr |
709 | #endif | 710 | #endif |
710 | 711 | ||
711 | #if __ARM_ARCH__>=7 | 712 | #if __ARM_MAX_ARCH__>=7 |
713 | .arch armv7-a | ||
714 | .fpu neon | ||
715 | |||
712 | .text | 716 | .text |
713 | .syntax unified @ ARMv7-capable assembler is expected to handle this | 717 | .syntax unified @ ARMv7-capable assembler is expected to handle this |
714 | #ifdef __thumb2__ | 718 | #ifdef __thumb2__ |
@@ -717,8 +721,6 @@ $code.=<<___; | |||
717 | .code 32 | 721 | .code 32 |
718 | #endif | 722 | #endif |
719 | 723 | ||
720 | .fpu neon | ||
721 | |||
722 | .type _bsaes_decrypt8,%function | 724 | .type _bsaes_decrypt8,%function |
723 | .align 4 | 725 | .align 4 |
724 | _bsaes_decrypt8: | 726 | _bsaes_decrypt8: |
@@ -2076,9 +2078,11 @@ bsaes_xts_decrypt: | |||
2076 | vld1.8 {@XMM[8]}, [r0] @ initial tweak | 2078 | vld1.8 {@XMM[8]}, [r0] @ initial tweak |
2077 | adr $magic, .Lxts_magic | 2079 | adr $magic, .Lxts_magic |
2078 | 2080 | ||
2081 | #ifndef XTS_CHAIN_TWEAK | ||
2079 | tst $len, #0xf @ if not multiple of 16 | 2082 | tst $len, #0xf @ if not multiple of 16 |
2080 | it ne @ Thumb2 thing, sanity check in ARM | 2083 | it ne @ Thumb2 thing, sanity check in ARM |
2081 | subne $len, #0x10 @ subtract another 16 bytes | 2084 | subne $len, #0x10 @ subtract another 16 bytes |
2085 | #endif | ||
2082 | subs $len, #0x80 | 2086 | subs $len, #0x80 |
2083 | 2087 | ||
2084 | blo .Lxts_dec_short | 2088 | blo .Lxts_dec_short |
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 37ca2a4c6f09..4cf48c3aca13 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h | |||
@@ -149,29 +149,28 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) | |||
149 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ | 149 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ |
150 | }) | 150 | }) |
151 | 151 | ||
152 | #define kvm_pgd_index(addr) pgd_index(addr) | ||
153 | |||
152 | static inline bool kvm_page_empty(void *ptr) | 154 | static inline bool kvm_page_empty(void *ptr) |
153 | { | 155 | { |
154 | struct page *ptr_page = virt_to_page(ptr); | 156 | struct page *ptr_page = virt_to_page(ptr); |
155 | return page_count(ptr_page) == 1; | 157 | return page_count(ptr_page) == 1; |
156 | } | 158 | } |
157 | 159 | ||
158 | |||
159 | #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) | 160 | #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) |
160 | #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) | 161 | #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) |
161 | #define kvm_pud_table_empty(kvm, pudp) (0) | 162 | #define kvm_pud_table_empty(kvm, pudp) (0) |
162 | 163 | ||
163 | #define KVM_PREALLOC_LEVEL 0 | 164 | #define KVM_PREALLOC_LEVEL 0 |
164 | 165 | ||
165 | static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) | 166 | static inline void *kvm_get_hwpgd(struct kvm *kvm) |
166 | { | 167 | { |
167 | return 0; | 168 | return kvm->arch.pgd; |
168 | } | 169 | } |
169 | 170 | ||
170 | static inline void kvm_free_hwpgd(struct kvm *kvm) { } | 171 | static inline unsigned int kvm_get_hwpgd_size(void) |
171 | |||
172 | static inline void *kvm_get_hwpgd(struct kvm *kvm) | ||
173 | { | 172 | { |
174 | return kvm->arch.pgd; | 173 | return PTRS_PER_S2_PGD * sizeof(pgd_t); |
175 | } | 174 | } |
176 | 175 | ||
177 | struct kvm; | 176 | struct kvm; |
@@ -207,7 +206,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, | |||
207 | 206 | ||
208 | bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; | 207 | bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; |
209 | 208 | ||
210 | VM_BUG_ON(size & PAGE_MASK); | 209 | VM_BUG_ON(size & ~PAGE_MASK); |
211 | 210 | ||
212 | if (!need_flush && !icache_is_pipt()) | 211 | if (!need_flush && !icache_is_pipt()) |
213 | goto vipt_cache; | 212 | goto vipt_cache; |
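The VM_BUG_ON() change in __coherent_cache_guest_page() above fixes an inverted alignment test: PAGE_MASK keeps the page-number bits, so "size & PAGE_MASK" is non-zero for any size of a page or more and the old assertion would trip (with CONFIG_DEBUG_VM) even on perfectly aligned sizes, while "size & ~PAGE_MASK" isolates the sub-page remainder the check is actually after. A small stand-alone demonstration with the usual 4K-page definitions, for illustration only:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)   /* assumes 4K pages */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long aligned = 0x2000, unaligned = 0x2010;

        /* old check, size & PAGE_MASK: non-zero even for the aligned size */
        printf("old: %#lx %#lx\n", aligned & PAGE_MASK, unaligned & PAGE_MASK);
        /* new check, size & ~PAGE_MASK: non-zero only when not page aligned */
        printf("new: %#lx %#lx\n", aligned & ~PAGE_MASK, unaligned & ~PAGE_MASK);
        return 0;
}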
diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S index 80a6501b4d50..c3c45e628e33 100644 --- a/arch/arm/include/debug/at91.S +++ b/arch/arm/include/debug/at91.S | |||
@@ -18,8 +18,11 @@ | |||
18 | #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */ | 18 | #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */ |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | /* Keep in sync with mach-at91/include/mach/hardware.h */ | 21 | #ifdef CONFIG_MMU |
22 | #define AT91_IO_P2V(x) ((x) - 0x01000000) | 22 | #define AT91_IO_P2V(x) ((x) - 0x01000000) |
23 | #else | ||
24 | #define AT91_IO_P2V(x) (x) | ||
25 | #endif | ||
23 | 26 | ||
24 | #define AT91_DBGU_SR (0x14) /* Status Register */ | 27 | #define AT91_DBGU_SR (0x14) /* Status Register */ |
25 | #define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */ | 28 | #define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */ |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index e55408e96559..1d60bebea4b8 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -246,12 +246,9 @@ static int __get_cpu_architecture(void) | |||
246 | if (cpu_arch) | 246 | if (cpu_arch) |
247 | cpu_arch += CPU_ARCH_ARMv3; | 247 | cpu_arch += CPU_ARCH_ARMv3; |
248 | } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { | 248 | } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { |
249 | unsigned int mmfr0; | ||
250 | |||
251 | /* Revised CPUID format. Read the Memory Model Feature | 249 | /* Revised CPUID format. Read the Memory Model Feature |
252 | * Register 0 and check for VMSAv7 or PMSAv7 */ | 250 | * Register 0 and check for VMSAv7 or PMSAv7 */ |
253 | asm("mrc p15, 0, %0, c0, c1, 4" | 251 | unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0); |
254 | : "=r" (mmfr0)); | ||
255 | if ((mmfr0 & 0x0000000f) >= 0x00000003 || | 252 | if ((mmfr0 & 0x0000000f) >= 0x00000003 || |
256 | (mmfr0 & 0x000000f0) >= 0x00000030) | 253 | (mmfr0 & 0x000000f0) >= 0x00000030) |
257 | cpu_arch = CPU_ARCH_ARMv7; | 254 | cpu_arch = CPU_ARCH_ARMv7; |
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 07e7eb1d7ab6..5560f74f9eee 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
@@ -540,7 +540,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
540 | 540 | ||
541 | vcpu->mode = OUTSIDE_GUEST_MODE; | 541 | vcpu->mode = OUTSIDE_GUEST_MODE; |
542 | kvm_guest_exit(); | 542 | kvm_guest_exit(); |
543 | trace_kvm_exit(*vcpu_pc(vcpu)); | 543 | trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); |
544 | /* | 544 | /* |
545 | * We may have taken a host interrupt in HYP mode (ie | 545 | * We may have taken a host interrupt in HYP mode (ie |
546 | * while executing the guest). This interrupt is still | 546 | * while executing the guest). This interrupt is still |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 3e6859bc3e11..5656d79c5a44 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, | |||
290 | phys_addr_t addr = start, end = start + size; | 290 | phys_addr_t addr = start, end = start + size; |
291 | phys_addr_t next; | 291 | phys_addr_t next; |
292 | 292 | ||
293 | pgd = pgdp + pgd_index(addr); | 293 | pgd = pgdp + kvm_pgd_index(addr); |
294 | do { | 294 | do { |
295 | next = kvm_pgd_addr_end(addr, end); | 295 | next = kvm_pgd_addr_end(addr, end); |
296 | if (!pgd_none(*pgd)) | 296 | if (!pgd_none(*pgd)) |
@@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm, | |||
355 | phys_addr_t next; | 355 | phys_addr_t next; |
356 | pgd_t *pgd; | 356 | pgd_t *pgd; |
357 | 357 | ||
358 | pgd = kvm->arch.pgd + pgd_index(addr); | 358 | pgd = kvm->arch.pgd + kvm_pgd_index(addr); |
359 | do { | 359 | do { |
360 | next = kvm_pgd_addr_end(addr, end); | 360 | next = kvm_pgd_addr_end(addr, end); |
361 | stage2_flush_puds(kvm, pgd, addr, next); | 361 | stage2_flush_puds(kvm, pgd, addr, next); |
@@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) | |||
632 | __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); | 632 | __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); |
633 | } | 633 | } |
634 | 634 | ||
635 | /* Free the HW pgd, one page at a time */ | ||
636 | static void kvm_free_hwpgd(void *hwpgd) | ||
637 | { | ||
638 | free_pages_exact(hwpgd, kvm_get_hwpgd_size()); | ||
639 | } | ||
640 | |||
641 | /* Allocate the HW PGD, making sure that each page gets its own refcount */ | ||
642 | static void *kvm_alloc_hwpgd(void) | ||
643 | { | ||
644 | unsigned int size = kvm_get_hwpgd_size(); | ||
645 | |||
646 | return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); | ||
647 | } | ||
648 | |||
635 | /** | 649 | /** |
636 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. | 650 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. |
637 | * @kvm: The KVM struct pointer for the VM. | 651 | * @kvm: The KVM struct pointer for the VM. |
@@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) | |||
645 | */ | 659 | */ |
646 | int kvm_alloc_stage2_pgd(struct kvm *kvm) | 660 | int kvm_alloc_stage2_pgd(struct kvm *kvm) |
647 | { | 661 | { |
648 | int ret; | ||
649 | pgd_t *pgd; | 662 | pgd_t *pgd; |
663 | void *hwpgd; | ||
650 | 664 | ||
651 | if (kvm->arch.pgd != NULL) { | 665 | if (kvm->arch.pgd != NULL) { |
652 | kvm_err("kvm_arch already initialized?\n"); | 666 | kvm_err("kvm_arch already initialized?\n"); |
653 | return -EINVAL; | 667 | return -EINVAL; |
654 | } | 668 | } |
655 | 669 | ||
670 | hwpgd = kvm_alloc_hwpgd(); | ||
671 | if (!hwpgd) | ||
672 | return -ENOMEM; | ||
673 | |||
674 | /* When the kernel uses more levels of page tables than the | ||
675 | * guest, we allocate a fake PGD and pre-populate it to point | ||
676 | * to the next-level page table, which will be the real | ||
677 | * initial page table pointed to by the VTTBR. | ||
678 | * | ||
679 | * When KVM_PREALLOC_LEVEL==2, we allocate a single page for | ||
680 | * the PMD and the kernel will use folded pud. | ||
681 | * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD | ||
682 | * pages. | ||
683 | */ | ||
656 | if (KVM_PREALLOC_LEVEL > 0) { | 684 | if (KVM_PREALLOC_LEVEL > 0) { |
685 | int i; | ||
686 | |||
657 | /* | 687 | /* |
658 | * Allocate fake pgd for the page table manipulation macros to | 688 | * Allocate fake pgd for the page table manipulation macros to |
659 | * work. This is not used by the hardware and we have no | 689 | * work. This is not used by the hardware and we have no |
@@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) | |||
661 | */ | 691 | */ |
662 | pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), | 692 | pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), |
663 | GFP_KERNEL | __GFP_ZERO); | 693 | GFP_KERNEL | __GFP_ZERO); |
694 | |||
695 | if (!pgd) { | ||
696 | kvm_free_hwpgd(hwpgd); | ||
697 | return -ENOMEM; | ||
698 | } | ||
699 | |||
700 | /* Plug the HW PGD into the fake one. */ | ||
701 | for (i = 0; i < PTRS_PER_S2_PGD; i++) { | ||
702 | if (KVM_PREALLOC_LEVEL == 1) | ||
703 | pgd_populate(NULL, pgd + i, | ||
704 | (pud_t *)hwpgd + i * PTRS_PER_PUD); | ||
705 | else if (KVM_PREALLOC_LEVEL == 2) | ||
706 | pud_populate(NULL, pud_offset(pgd, 0) + i, | ||
707 | (pmd_t *)hwpgd + i * PTRS_PER_PMD); | ||
708 | } | ||
664 | } else { | 709 | } else { |
665 | /* | 710 | /* |
666 | * Allocate actual first-level Stage-2 page table used by the | 711 | * Allocate actual first-level Stage-2 page table used by the |
667 | * hardware for Stage-2 page table walks. | 712 | * hardware for Stage-2 page table walks. |
668 | */ | 713 | */ |
669 | pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); | 714 | pgd = (pgd_t *)hwpgd; |
670 | } | 715 | } |
671 | 716 | ||
672 | if (!pgd) | ||
673 | return -ENOMEM; | ||
674 | |||
675 | ret = kvm_prealloc_hwpgd(kvm, pgd); | ||
676 | if (ret) | ||
677 | goto out_err; | ||
678 | |||
679 | kvm_clean_pgd(pgd); | 717 | kvm_clean_pgd(pgd); |
680 | kvm->arch.pgd = pgd; | 718 | kvm->arch.pgd = pgd; |
681 | return 0; | 719 | return 0; |
682 | out_err: | ||
683 | if (KVM_PREALLOC_LEVEL > 0) | ||
684 | kfree(pgd); | ||
685 | else | ||
686 | free_pages((unsigned long)pgd, S2_PGD_ORDER); | ||
687 | return ret; | ||
688 | } | 720 | } |
689 | 721 | ||
690 | /** | 722 | /** |
@@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm) | |||
785 | return; | 817 | return; |
786 | 818 | ||
787 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); | 819 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); |
788 | kvm_free_hwpgd(kvm); | 820 | kvm_free_hwpgd(kvm_get_hwpgd(kvm)); |
789 | if (KVM_PREALLOC_LEVEL > 0) | 821 | if (KVM_PREALLOC_LEVEL > 0) |
790 | kfree(kvm->arch.pgd); | 822 | kfree(kvm->arch.pgd); |
791 | else | 823 | |
792 | free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); | ||
793 | kvm->arch.pgd = NULL; | 824 | kvm->arch.pgd = NULL; |
794 | } | 825 | } |
795 | 826 | ||
@@ -799,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache | |||
799 | pgd_t *pgd; | 830 | pgd_t *pgd; |
800 | pud_t *pud; | 831 | pud_t *pud; |
801 | 832 | ||
802 | pgd = kvm->arch.pgd + pgd_index(addr); | 833 | pgd = kvm->arch.pgd + kvm_pgd_index(addr); |
803 | if (WARN_ON(pgd_none(*pgd))) { | 834 | if (WARN_ON(pgd_none(*pgd))) { |
804 | if (!cache) | 835 | if (!cache) |
805 | return NULL; | 836 | return NULL; |
@@ -1089,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) | |||
1089 | pgd_t *pgd; | 1120 | pgd_t *pgd; |
1090 | phys_addr_t next; | 1121 | phys_addr_t next; |
1091 | 1122 | ||
1092 | pgd = kvm->arch.pgd + pgd_index(addr); | 1123 | pgd = kvm->arch.pgd + kvm_pgd_index(addr); |
1093 | do { | 1124 | do { |
1094 | /* | 1125 | /* |
1095 | * Release kvm_mmu_lock periodically if the memory region is | 1126 | * Release kvm_mmu_lock periodically if the memory region is |
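The new kvm_alloc_hwpgd()/kvm_free_hwpgd() pair above switches the stage-2 hardware PGD from one high-order allocation to alloc_pages_exact(). The point, as the patch comment states, is that each backing page needs its own reference count: kvm_page_empty() (see the kvm_mmu.h hunk earlier) decides whether a table page can be freed by testing page_count() == 1 on that page alone. A minimal sketch of the allocation pattern, not part of the patch:

#include <linux/gfp.h>
#include <linux/mm.h>

/* alloc_pages_exact() allocates a high-order block without __GFP_COMP and
 * splits it into order-0 pages, so every page in the range carries an
 * independent refcount and can be tested with page_count() on its own. */
static void *alloc_table_pages(size_t size)
{
        return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
}

static void free_table_pages(void *virt, size_t size)
{
        free_pages_exact(virt, size);
}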
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h index 881874b1a036..6817664b46b8 100644 --- a/arch/arm/kvm/trace.h +++ b/arch/arm/kvm/trace.h | |||
@@ -25,18 +25,22 @@ TRACE_EVENT(kvm_entry, | |||
25 | ); | 25 | ); |
26 | 26 | ||
27 | TRACE_EVENT(kvm_exit, | 27 | TRACE_EVENT(kvm_exit, |
28 | TP_PROTO(unsigned long vcpu_pc), | 28 | TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc), |
29 | TP_ARGS(vcpu_pc), | 29 | TP_ARGS(exit_reason, vcpu_pc), |
30 | 30 | ||
31 | TP_STRUCT__entry( | 31 | TP_STRUCT__entry( |
32 | __field( unsigned int, exit_reason ) | ||
32 | __field( unsigned long, vcpu_pc ) | 33 | __field( unsigned long, vcpu_pc ) |
33 | ), | 34 | ), |
34 | 35 | ||
35 | TP_fast_assign( | 36 | TP_fast_assign( |
37 | __entry->exit_reason = exit_reason; | ||
36 | __entry->vcpu_pc = vcpu_pc; | 38 | __entry->vcpu_pc = vcpu_pc; |
37 | ), | 39 | ), |
38 | 40 | ||
39 | TP_printk("PC: 0x%08lx", __entry->vcpu_pc) | 41 | TP_printk("HSR_EC: 0x%04x, PC: 0x%08lx", |
42 | __entry->exit_reason, | ||
43 | __entry->vcpu_pc) | ||
40 | ); | 44 | ); |
41 | 45 | ||
42 | TRACE_EVENT(kvm_guest_fault, | 46 | TRACE_EVENT(kvm_guest_fault, |
diff --git a/arch/arm/mach-asm9260/Kconfig b/arch/arm/mach-asm9260/Kconfig index 8423be76080e..52241207a82a 100644 --- a/arch/arm/mach-asm9260/Kconfig +++ b/arch/arm/mach-asm9260/Kconfig | |||
@@ -2,5 +2,7 @@ config MACH_ASM9260 | |||
2 | bool "Alphascale ASM9260" | 2 | bool "Alphascale ASM9260" |
3 | depends on ARCH_MULTI_V5 | 3 | depends on ARCH_MULTI_V5 |
4 | select CPU_ARM926T | 4 | select CPU_ARM926T |
5 | select ASM9260_TIMER | ||
6 | select GENERIC_CLOCKEVENTS | ||
5 | help | 7 | help |
6 | Support for Alphascale ASM9260 based platform. | 8 | Support for Alphascale ASM9260 based platform. |
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 5e34fb143309..aa4116e9452f 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c | |||
@@ -270,37 +270,35 @@ static void __init at91_pm_sram_init(void) | |||
270 | phys_addr_t sram_pbase; | 270 | phys_addr_t sram_pbase; |
271 | unsigned long sram_base; | 271 | unsigned long sram_base; |
272 | struct device_node *node; | 272 | struct device_node *node; |
273 | struct platform_device *pdev; | 273 | struct platform_device *pdev = NULL; |
274 | 274 | ||
275 | node = of_find_compatible_node(NULL, NULL, "mmio-sram"); | 275 | for_each_compatible_node(node, NULL, "mmio-sram") { |
276 | if (!node) { | 276 | pdev = of_find_device_by_node(node); |
277 | pr_warn("%s: failed to find sram node!\n", __func__); | 277 | if (pdev) { |
278 | return; | 278 | of_node_put(node); |
279 | break; | ||
280 | } | ||
279 | } | 281 | } |
280 | 282 | ||
281 | pdev = of_find_device_by_node(node); | ||
282 | if (!pdev) { | 283 | if (!pdev) { |
283 | pr_warn("%s: failed to find sram device!\n", __func__); | 284 | pr_warn("%s: failed to find sram device!\n", __func__); |
284 | goto put_node; | 285 | return; |
285 | } | 286 | } |
286 | 287 | ||
287 | sram_pool = dev_get_gen_pool(&pdev->dev); | 288 | sram_pool = dev_get_gen_pool(&pdev->dev); |
288 | if (!sram_pool) { | 289 | if (!sram_pool) { |
289 | pr_warn("%s: sram pool unavailable!\n", __func__); | 290 | pr_warn("%s: sram pool unavailable!\n", __func__); |
290 | goto put_node; | 291 | return; |
291 | } | 292 | } |
292 | 293 | ||
293 | sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz); | 294 | sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz); |
294 | if (!sram_base) { | 295 | if (!sram_base) { |
295 | pr_warn("%s: unable to alloc ocram!\n", __func__); | 296 | pr_warn("%s: unable to alloc ocram!\n", __func__); |
296 | goto put_node; | 297 | return; |
297 | } | 298 | } |
298 | 299 | ||
299 | sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); | 300 | sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); |
300 | slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false); | 301 | slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false); |
301 | |||
302 | put_node: | ||
303 | of_node_put(node); | ||
304 | } | 302 | } |
305 | #endif | 303 | #endif |
306 | 304 | ||
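The reworked at91_pm_sram_init() above now walks every "mmio-sram" node instead of taking only the first match. The detail worth noting is the of_node_put() before the break: for_each_compatible_node() drops the previous node's reference as it advances, so a node still held when leaving the loop early must be released explicitly once it is no longer needed. A minimal sketch of the pattern (names illustrative):

#include <linux/of.h>
#include <linux/of_platform.h>

static struct platform_device *find_sram_pdev(void)
{
        struct device_node *node;
        struct platform_device *pdev = NULL;

        for_each_compatible_node(node, NULL, "mmio-sram") {
                pdev = of_find_device_by_node(node);
                if (pdev) {
                        /* keeping pdev, but done with the node itself */
                        of_node_put(node);
                        break;
                }
        }
        return pdev;            /* NULL if no matching node has a device */
}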
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h index d2c89963af2d..86c0aa819d25 100644 --- a/arch/arm/mach-at91/pm.h +++ b/arch/arm/mach-at91/pm.h | |||
@@ -44,7 +44,7 @@ static inline void at91rm9200_standby(void) | |||
44 | " mcr p15, 0, %0, c7, c0, 4\n\t" | 44 | " mcr p15, 0, %0, c7, c0, 4\n\t" |
45 | " str %5, [%1, %2]" | 45 | " str %5, [%1, %2]" |
46 | : | 46 | : |
47 | : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR), | 47 | : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR), |
48 | "r" (1), "r" (AT91RM9200_SDRAMC_SRR), | 48 | "r" (1), "r" (AT91RM9200_SDRAMC_SRR), |
49 | "r" (lpr)); | 49 | "r" (lpr)); |
50 | } | 50 | } |
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S index 556151e85ec4..931f0e302c03 100644 --- a/arch/arm/mach-at91/pm_slowclock.S +++ b/arch/arm/mach-at91/pm_slowclock.S | |||
@@ -25,11 +25,6 @@ | |||
25 | */ | 25 | */ |
26 | #undef SLOWDOWN_MASTER_CLOCK | 26 | #undef SLOWDOWN_MASTER_CLOCK |
27 | 27 | ||
28 | #define MCKRDY_TIMEOUT 1000 | ||
29 | #define MOSCRDY_TIMEOUT 1000 | ||
30 | #define PLLALOCK_TIMEOUT 1000 | ||
31 | #define PLLBLOCK_TIMEOUT 1000 | ||
32 | |||
33 | pmc .req r0 | 28 | pmc .req r0 |
34 | sdramc .req r1 | 29 | sdramc .req r1 |
35 | ramc1 .req r2 | 30 | ramc1 .req r2 |
@@ -41,60 +36,42 @@ tmp2 .req r5 | |||
41 | * Wait until master clock is ready (after switching master clock source) | 36 | * Wait until master clock is ready (after switching master clock source) |
42 | */ | 37 | */ |
43 | .macro wait_mckrdy | 38 | .macro wait_mckrdy |
44 | mov tmp2, #MCKRDY_TIMEOUT | 39 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
45 | 1: sub tmp2, tmp2, #1 | ||
46 | cmp tmp2, #0 | ||
47 | beq 2f | ||
48 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
49 | tst tmp1, #AT91_PMC_MCKRDY | 40 | tst tmp1, #AT91_PMC_MCKRDY |
50 | beq 1b | 41 | beq 1b |
51 | 2: | ||
52 | .endm | 42 | .endm |
53 | 43 | ||
54 | /* | 44 | /* |
55 | * Wait until master oscillator has stabilized. | 45 | * Wait until master oscillator has stabilized. |
56 | */ | 46 | */ |
57 | .macro wait_moscrdy | 47 | .macro wait_moscrdy |
58 | mov tmp2, #MOSCRDY_TIMEOUT | 48 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
59 | 1: sub tmp2, tmp2, #1 | ||
60 | cmp tmp2, #0 | ||
61 | beq 2f | ||
62 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
63 | tst tmp1, #AT91_PMC_MOSCS | 49 | tst tmp1, #AT91_PMC_MOSCS |
64 | beq 1b | 50 | beq 1b |
65 | 2: | ||
66 | .endm | 51 | .endm |
67 | 52 | ||
68 | /* | 53 | /* |
69 | * Wait until PLLA has locked. | 54 | * Wait until PLLA has locked. |
70 | */ | 55 | */ |
71 | .macro wait_pllalock | 56 | .macro wait_pllalock |
72 | mov tmp2, #PLLALOCK_TIMEOUT | 57 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
73 | 1: sub tmp2, tmp2, #1 | ||
74 | cmp tmp2, #0 | ||
75 | beq 2f | ||
76 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
77 | tst tmp1, #AT91_PMC_LOCKA | 58 | tst tmp1, #AT91_PMC_LOCKA |
78 | beq 1b | 59 | beq 1b |
79 | 2: | ||
80 | .endm | 60 | .endm |
81 | 61 | ||
82 | /* | 62 | /* |
83 | * Wait until PLLB has locked. | 63 | * Wait until PLLB has locked. |
84 | */ | 64 | */ |
85 | .macro wait_pllblock | 65 | .macro wait_pllblock |
86 | mov tmp2, #PLLBLOCK_TIMEOUT | 66 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
87 | 1: sub tmp2, tmp2, #1 | ||
88 | cmp tmp2, #0 | ||
89 | beq 2f | ||
90 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
91 | tst tmp1, #AT91_PMC_LOCKB | 67 | tst tmp1, #AT91_PMC_LOCKB |
92 | beq 1b | 68 | beq 1b |
93 | 2: | ||
94 | .endm | 69 | .endm |
95 | 70 | ||
96 | .text | 71 | .text |
97 | 72 | ||
73 | .arm | ||
74 | |||
98 | /* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc, | 75 | /* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc, |
99 | * void __iomem *ramc1, int memctrl) | 76 | * void __iomem *ramc1, int memctrl) |
100 | */ | 77 | */ |
@@ -134,6 +111,16 @@ ddr_sr_enable: | |||
134 | cmp memctrl, #AT91_MEMCTRL_DDRSDR | 111 | cmp memctrl, #AT91_MEMCTRL_DDRSDR |
135 | bne sdr_sr_enable | 112 | bne sdr_sr_enable |
136 | 113 | ||
114 | /* LPDDR1 --> force DDR2 mode during self-refresh */ | ||
115 | ldr tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
116 | str tmp1, .saved_sam9_mdr | ||
117 | bic tmp1, tmp1, #~AT91_DDRSDRC_MD | ||
118 | cmp tmp1, #AT91_DDRSDRC_MD_LOW_POWER_DDR | ||
119 | ldreq tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
120 | biceq tmp1, tmp1, #AT91_DDRSDRC_MD | ||
121 | orreq tmp1, tmp1, #AT91_DDRSDRC_MD_DDR2 | ||
122 | streq tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
123 | |||
137 | /* prepare for DDRAM self-refresh mode */ | 124 | /* prepare for DDRAM self-refresh mode */ |
138 | ldr tmp1, [sdramc, #AT91_DDRSDRC_LPR] | 125 | ldr tmp1, [sdramc, #AT91_DDRSDRC_LPR] |
139 | str tmp1, .saved_sam9_lpr | 126 | str tmp1, .saved_sam9_lpr |
@@ -142,14 +129,26 @@ ddr_sr_enable: | |||
142 | 129 | ||
143 | /* figure out if we use the second ram controller */ | 130 | /* figure out if we use the second ram controller */ |
144 | cmp ramc1, #0 | 131 | cmp ramc1, #0 |
145 | ldrne tmp2, [ramc1, #AT91_DDRSDRC_LPR] | 132 | beq ddr_no_2nd_ctrl |
146 | strne tmp2, .saved_sam9_lpr1 | 133 | |
147 | bicne tmp2, #AT91_DDRSDRC_LPCB | 134 | ldr tmp2, [ramc1, #AT91_DDRSDRC_MDR] |
148 | orrne tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH | 135 | str tmp2, .saved_sam9_mdr1 |
136 | bic tmp2, tmp2, #~AT91_DDRSDRC_MD | ||
137 | cmp tmp2, #AT91_DDRSDRC_MD_LOW_POWER_DDR | ||
138 | ldreq tmp2, [ramc1, #AT91_DDRSDRC_MDR] | ||
139 | biceq tmp2, tmp2, #AT91_DDRSDRC_MD | ||
140 | orreq tmp2, tmp2, #AT91_DDRSDRC_MD_DDR2 | ||
141 | streq tmp2, [ramc1, #AT91_DDRSDRC_MDR] | ||
142 | |||
143 | ldr tmp2, [ramc1, #AT91_DDRSDRC_LPR] | ||
144 | str tmp2, .saved_sam9_lpr1 | ||
145 | bic tmp2, #AT91_DDRSDRC_LPCB | ||
146 | orr tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH | ||
149 | 147 | ||
150 | /* Enable DDRAM self-refresh mode */ | 148 | /* Enable DDRAM self-refresh mode */ |
149 | str tmp2, [ramc1, #AT91_DDRSDRC_LPR] | ||
150 | ddr_no_2nd_ctrl: | ||
151 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] | 151 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] |
152 | strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] | ||
153 | 152 | ||
154 | b sdr_sr_done | 153 | b sdr_sr_done |
155 | 154 | ||
@@ -208,6 +207,7 @@ sdr_sr_done: | |||
208 | /* Turn off the main oscillator */ | 207 | /* Turn off the main oscillator */ |
209 | ldr tmp1, [pmc, #AT91_CKGR_MOR] | 208 | ldr tmp1, [pmc, #AT91_CKGR_MOR] |
210 | bic tmp1, tmp1, #AT91_PMC_MOSCEN | 209 | bic tmp1, tmp1, #AT91_PMC_MOSCEN |
210 | orr tmp1, tmp1, #AT91_PMC_KEY | ||
211 | str tmp1, [pmc, #AT91_CKGR_MOR] | 211 | str tmp1, [pmc, #AT91_CKGR_MOR] |
212 | 212 | ||
213 | /* Wait for interrupt */ | 213 | /* Wait for interrupt */ |
@@ -216,6 +216,7 @@ sdr_sr_done: | |||
216 | /* Turn on the main oscillator */ | 216 | /* Turn on the main oscillator */ |
217 | ldr tmp1, [pmc, #AT91_CKGR_MOR] | 217 | ldr tmp1, [pmc, #AT91_CKGR_MOR] |
218 | orr tmp1, tmp1, #AT91_PMC_MOSCEN | 218 | orr tmp1, tmp1, #AT91_PMC_MOSCEN |
219 | orr tmp1, tmp1, #AT91_PMC_KEY | ||
219 | str tmp1, [pmc, #AT91_CKGR_MOR] | 220 | str tmp1, [pmc, #AT91_CKGR_MOR] |
220 | 221 | ||
221 | wait_moscrdy | 222 | wait_moscrdy |
@@ -280,12 +281,17 @@ sdr_sr_done: | |||
280 | */ | 281 | */ |
281 | cmp memctrl, #AT91_MEMCTRL_DDRSDR | 282 | cmp memctrl, #AT91_MEMCTRL_DDRSDR |
282 | bne sdr_en_restore | 283 | bne sdr_en_restore |
284 | /* Restore MDR in case of LPDDR1 */ | ||
285 | ldr tmp1, .saved_sam9_mdr | ||
286 | str tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
283 | /* Restore LPR on AT91 with DDRAM */ | 287 | /* Restore LPR on AT91 with DDRAM */ |
284 | ldr tmp1, .saved_sam9_lpr | 288 | ldr tmp1, .saved_sam9_lpr |
285 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] | 289 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] |
286 | 290 | ||
287 | /* if we use the second ram controller */ | 291 | /* if we use the second ram controller */ |
288 | cmp ramc1, #0 | 292 | cmp ramc1, #0 |
293 | ldrne tmp2, .saved_sam9_mdr1 | ||
294 | strne tmp2, [ramc1, #AT91_DDRSDRC_MDR] | ||
289 | ldrne tmp2, .saved_sam9_lpr1 | 295 | ldrne tmp2, .saved_sam9_lpr1 |
290 | strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] | 296 | strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] |
291 | 297 | ||
@@ -319,5 +325,11 @@ ram_restored: | |||
319 | .saved_sam9_lpr1: | 325 | .saved_sam9_lpr1: |
320 | .word 0 | 326 | .word 0 |
321 | 327 | ||
328 | .saved_sam9_mdr: | ||
329 | .word 0 | ||
330 | |||
331 | .saved_sam9_mdr1: | ||
332 | .word 0 | ||
333 | |||
322 | ENTRY(at91_slow_clock_sz) | 334 | ENTRY(at91_slow_clock_sz) |
323 | .word .-at91_slow_clock | 335 | .word .-at91_slow_clock |
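The pm_slowclock.S hunks above also OR AT91_PMC_KEY into the CKGR_MOR writes that turn the main oscillator off and on. Assuming, as the AT91 datasheets describe, that MOR is a key-protected register whose writes are discarded unless the KEY field carries the 0x37 password, every read-modify-write of the register has to carry the key. A C rendering of the same sequence, for illustration only:

#include <linux/io.h>
#include <linux/clk/at91_pmc.h>

static void main_osc_enable(void __iomem *pmc, bool on)
{
        u32 mor = readl(pmc + AT91_CKGR_MOR);

        if (on)
                mor |= AT91_PMC_MOSCEN;
        else
                mor &= ~AT91_PMC_MOSCEN;
        mor |= AT91_PMC_KEY;    /* assumed: write ignored without the key */
        writel(mor, pmc + AT91_CKGR_MOR);
}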
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 3f32c47a6d74..d2e9f12d12f1 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c | |||
@@ -126,8 +126,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious) | |||
126 | */ | 126 | */ |
127 | void exynos_cpu_power_down(int cpu) | 127 | void exynos_cpu_power_down(int cpu) |
128 | { | 128 | { |
129 | if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") || | 129 | if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) { |
130 | of_machine_is_compatible("samsung,exynos5800"))) { | ||
131 | /* | 130 | /* |
132 | * Bypass power down for CPU0 during suspend. Check for | 131 | * Bypass power down for CPU0 during suspend. Check for |
133 | * the SYS_PWR_REG value to decide if we are suspending | 132 | * the SYS_PWR_REG value to decide if we are suspending |
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 20f267121b3e..37266a826437 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c | |||
@@ -161,6 +161,34 @@ no_clk: | |||
161 | of_genpd_add_provider_simple(np, &pd->pd); | 161 | of_genpd_add_provider_simple(np, &pd->pd); |
162 | } | 162 | } |
163 | 163 | ||
164 | /* Assign the child power domains to their parents */ | ||
165 | for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") { | ||
166 | struct generic_pm_domain *child_domain, *parent_domain; | ||
167 | struct of_phandle_args args; | ||
168 | |||
169 | args.np = np; | ||
170 | args.args_count = 0; | ||
171 | child_domain = of_genpd_get_from_provider(&args); | ||
172 | if (!child_domain) | ||
173 | continue; | ||
174 | |||
175 | if (of_parse_phandle_with_args(np, "power-domains", | ||
176 | "#power-domain-cells", 0, &args) != 0) | ||
177 | continue; | ||
178 | |||
179 | parent_domain = of_genpd_get_from_provider(&args); | ||
180 | if (!parent_domain) | ||
181 | continue; | ||
182 | |||
183 | if (pm_genpd_add_subdomain(parent_domain, child_domain)) | ||
184 | pr_warn("%s failed to add subdomain: %s\n", | ||
185 | parent_domain->name, child_domain->name); | ||
186 | else | ||
187 | pr_info("%s has as child subdomain: %s.\n", | ||
188 | parent_domain->name, child_domain->name); | ||
189 | of_node_put(np); | ||
190 | } | ||
191 | |||
164 | return 0; | 192 | return 0; |
165 | } | 193 | } |
166 | arch_initcall(exynos4_pm_init_power_domain); | 194 | arch_initcall(exynos4_pm_init_power_domain); |
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 52e2b1a2fddb..318d127df147 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c | |||
@@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3; | |||
87 | static u32 exynos_irqwake_intmask = 0xffffffff; | 87 | static u32 exynos_irqwake_intmask = 0xffffffff; |
88 | 88 | ||
89 | static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { | 89 | static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { |
90 | { 73, BIT(1) }, /* RTC alarm */ | 90 | { 105, BIT(1) }, /* RTC alarm */ |
91 | { 74, BIT(2) }, /* RTC tick */ | 91 | { 106, BIT(2) }, /* RTC tick */ |
92 | { /* sentinel */ }, | 92 | { /* sentinel */ }, |
93 | }; | 93 | }; |
94 | 94 | ||
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 4ad6e473cf83..9de3412af406 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c | |||
@@ -211,8 +211,9 @@ static void __init imx6q_1588_init(void) | |||
211 | * set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad | 211 | * set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad |
212 | * (external OSC), and we need to clear the bit. | 212 | * (external OSC), and we need to clear the bit. |
213 | */ | 213 | */ |
214 | clksel = ptp_clk == enet_ref ? IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : | 214 | clksel = clk_is_match(ptp_clk, enet_ref) ? |
215 | IMX6Q_GPR1_ENET_CLK_SEL_PAD; | 215 | IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : |
216 | IMX6Q_GPR1_ENET_CLK_SEL_PAD; | ||
216 | gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); | 217 | gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); |
217 | if (!IS_ERR(gpr)) | 218 | if (!IS_ERR(gpr)) |
218 | regmap_update_bits(gpr, IOMUXC_GPR1, | 219 | regmap_update_bits(gpr, IOMUXC_GPR1, |
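The imx6q_1588_init() change above stops comparing struct clk pointers directly. With per-user clk handles, two clk_get() calls that resolve to the same hardware clock return distinct struct clk pointers, so identity has to be tested with clk_is_match(). A minimal sketch (clock names illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static bool ptp_uses_internal_ref(struct device *dev)
{
        struct clk *ptp = devm_clk_get(dev, "ptp");
        struct clk *ref = devm_clk_get(dev, "enet_ref");

        if (IS_ERR(ptp) || IS_ERR(ref))
                return false;

        /* true when both handles wrap the same underlying clock */
        return clk_is_match(ptp, ref);
}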
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c index 61bfe584a9d7..fc832040c6e9 100644 --- a/arch/arm/mach-msm/board-halibut.c +++ b/arch/arm/mach-msm/board-halibut.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/input.h> | 20 | #include <linux/input.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/smc91x.h> | ||
23 | 24 | ||
24 | #include <mach/hardware.h> | 25 | #include <mach/hardware.h> |
25 | #include <asm/mach-types.h> | 26 | #include <asm/mach-types.h> |
@@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = { | |||
46 | [1] = { | 47 | [1] = { |
47 | .start = MSM_GPIO_TO_INT(49), | 48 | .start = MSM_GPIO_TO_INT(49), |
48 | .end = MSM_GPIO_TO_INT(49), | 49 | .end = MSM_GPIO_TO_INT(49), |
49 | .flags = IORESOURCE_IRQ, | 50 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, |
50 | }, | 51 | }, |
51 | }; | 52 | }; |
52 | 53 | ||
54 | static struct smc91x_platdata smc91x_platdata = { | ||
55 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, | ||
56 | }; | ||
57 | |||
53 | static struct platform_device smc91x_device = { | 58 | static struct platform_device smc91x_device = { |
54 | .name = "smc91x", | 59 | .name = "smc91x", |
55 | .id = 0, | 60 | .id = 0, |
56 | .num_resources = ARRAY_SIZE(smc91x_resources), | 61 | .num_resources = ARRAY_SIZE(smc91x_resources), |
57 | .resource = smc91x_resources, | 62 | .resource = smc91x_resources, |
63 | .dev.platform_data = &smc91x_platdata, | ||
58 | }; | 64 | }; |
59 | 65 | ||
60 | static struct platform_device *devices[] __initdata = { | 66 | static struct platform_device *devices[] __initdata = { |
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c index 4c748616ef47..10016a3bc698 100644 --- a/arch/arm/mach-msm/board-qsd8x50.c +++ b/arch/arm/mach-msm/board-qsd8x50.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/usb/msm_hsusb.h> | 22 | #include <linux/usb/msm_hsusb.h> |
23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <linux/clkdev.h> | 24 | #include <linux/clkdev.h> |
25 | #include <linux/smc91x.h> | ||
25 | 26 | ||
26 | #include <asm/mach-types.h> | 27 | #include <asm/mach-types.h> |
27 | #include <asm/mach/arch.h> | 28 | #include <asm/mach/arch.h> |
@@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = { | |||
49 | .flags = IORESOURCE_MEM, | 50 | .flags = IORESOURCE_MEM, |
50 | }, | 51 | }, |
51 | [1] = { | 52 | [1] = { |
52 | .flags = IORESOURCE_IRQ, | 53 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, |
53 | }, | 54 | }, |
54 | }; | 55 | }; |
55 | 56 | ||
57 | static struct smc91x_platdata smc91x_platdata = { | ||
58 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, | ||
59 | }; | ||
60 | |||
56 | static struct platform_device smc91x_device = { | 61 | static struct platform_device smc91x_device = { |
57 | .name = "smc91x", | 62 | .name = "smc91x", |
58 | .id = 0, | 63 | .id = 0, |
59 | .num_resources = ARRAY_SIZE(smc91x_resources), | 64 | .num_resources = ARRAY_SIZE(smc91x_resources), |
60 | .resource = smc91x_resources, | 65 | .resource = smc91x_resources, |
66 | .dev.platform_data = &smc91x_platdata, | ||
61 | }; | 67 | }; |
62 | 68 | ||
63 | static int __init msm_init_smc91x(void) | 69 | static int __init msm_init_smc91x(void) |
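Both MSM board files above now hand the SMC91x driver an explicit smc91x_platdata (16-bit bus, no wait states) and mark the interrupt resource as high-level triggered rather than leaving the driver to guess. On the driver side the data comes back through dev_get_platdata(); a hedged sketch of that consumer path (function name illustrative):

#include <linux/platform_device.h>
#include <linux/smc91x.h>

static int smc_probe_sketch(struct platform_device *pdev)
{
        struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);

        if (pd && (pd->flags & SMC91X_USE_16BIT))
                dev_info(&pdev->dev, "16-bit bus access requested\n");

        return 0;
}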
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 2a2f4d56e4c8..25f1beea453e 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c | |||
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void) | |||
720 | return kasprintf(GFP_KERNEL, "OMAP4"); | 720 | return kasprintf(GFP_KERNEL, "OMAP4"); |
721 | else if (soc_is_omap54xx()) | 721 | else if (soc_is_omap54xx()) |
722 | return kasprintf(GFP_KERNEL, "OMAP5"); | 722 | return kasprintf(GFP_KERNEL, "OMAP5"); |
723 | else if (soc_is_am33xx() || soc_is_am335x()) | ||
724 | return kasprintf(GFP_KERNEL, "AM33xx"); | ||
723 | else if (soc_is_am43xx()) | 725 | else if (soc_is_am43xx()) |
724 | return kasprintf(GFP_KERNEL, "AM43xx"); | 726 | return kasprintf(GFP_KERNEL, "AM43xx"); |
725 | else if (soc_is_dra7xx()) | 727 | else if (soc_is_dra7xx()) |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 92afb723dcfc..355b08936871 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -1692,16 +1692,15 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name) | |||
1692 | if (ret == -EBUSY) | 1692 | if (ret == -EBUSY) |
1693 | pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name); | 1693 | pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name); |
1694 | 1694 | ||
1695 | if (!ret) { | 1695 | if (oh->clkdm) { |
1696 | /* | 1696 | /* |
1697 | * Set the clockdomain to HW_AUTO, assuming that the | 1697 | * Set the clockdomain to HW_AUTO, assuming that the |
1698 | * previous state was HW_AUTO. | 1698 | * previous state was HW_AUTO. |
1699 | */ | 1699 | */ |
1700 | if (oh->clkdm && hwsup) | 1700 | if (hwsup) |
1701 | clkdm_allow_idle(oh->clkdm); | 1701 | clkdm_allow_idle(oh->clkdm); |
1702 | } else { | 1702 | |
1703 | if (oh->clkdm) | 1703 | clkdm_hwmod_disable(oh->clkdm, oh); |
1704 | clkdm_hwmod_disable(oh->clkdm, oh); | ||
1705 | } | 1704 | } |
1706 | 1705 | ||
1707 | return ret; | 1706 | return ret; |
@@ -2698,6 +2697,7 @@ static int __init _register(struct omap_hwmod *oh) | |||
2698 | INIT_LIST_HEAD(&oh->master_ports); | 2697 | INIT_LIST_HEAD(&oh->master_ports); |
2699 | INIT_LIST_HEAD(&oh->slave_ports); | 2698 | INIT_LIST_HEAD(&oh->slave_ports); |
2700 | spin_lock_init(&oh->_lock); | 2699 | spin_lock_init(&oh->_lock); |
2700 | lockdep_set_class(&oh->_lock, &oh->hwmod_key); | ||
2701 | 2701 | ||
2702 | oh->_state = _HWMOD_STATE_REGISTERED; | 2702 | oh->_state = _HWMOD_STATE_REGISTERED; |
2703 | 2703 | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index 9d4bec6ee742..9611c91d9b82 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h | |||
@@ -674,6 +674,7 @@ struct omap_hwmod { | |||
674 | u32 _sysc_cache; | 674 | u32 _sysc_cache; |
675 | void __iomem *_mpu_rt_va; | 675 | void __iomem *_mpu_rt_va; |
676 | spinlock_t _lock; | 676 | spinlock_t _lock; |
677 | struct lock_class_key hwmod_key; /* unique lock class */ | ||
677 | struct list_head node; | 678 | struct list_head node; |
678 | struct omap_hwmod_ocp_if *_mpu_port; | 679 | struct omap_hwmod_ocp_if *_mpu_port; |
679 | unsigned int (*xlate_irq)(unsigned int); | 680 | unsigned int (*xlate_irq)(unsigned int); |
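The omap_hwmod changes above embed a lock_class_key in every hwmod and register it with lockdep_set_class(). The usual reason for this pattern is that all spinlocks initialised at one spin_lock_init() call site share a single lockdep class, so taking two different hwmods' locks nested would otherwise be flagged as possible recursive locking; a per-instance key lets lockdep tell them apart. A generic sketch (structure name illustrative):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct widget {                         /* stands in for omap_hwmod */
        spinlock_t lock;
        struct lock_class_key key;      /* unique lock class per instance */
};

static void widget_init(struct widget *w)
{
        spin_lock_init(&w->lock);
        lockdep_set_class(&w->lock, &w->key);
}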
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index e8692e7675b8..16fe7a1b7a35 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c | |||
@@ -1466,55 +1466,18 @@ static struct omap_hwmod dra7xx_ocp2scp3_hwmod = { | |||
1466 | * | 1466 | * |
1467 | */ | 1467 | */ |
1468 | 1468 | ||
1469 | static struct omap_hwmod_class dra7xx_pcie_hwmod_class = { | 1469 | static struct omap_hwmod_class dra7xx_pciess_hwmod_class = { |
1470 | .name = "pcie", | 1470 | .name = "pcie", |
1471 | }; | 1471 | }; |
1472 | 1472 | ||
1473 | /* pcie1 */ | 1473 | /* pcie1 */ |
1474 | static struct omap_hwmod dra7xx_pcie1_hwmod = { | 1474 | static struct omap_hwmod dra7xx_pciess1_hwmod = { |
1475 | .name = "pcie1", | 1475 | .name = "pcie1", |
1476 | .class = &dra7xx_pcie_hwmod_class, | 1476 | .class = &dra7xx_pciess_hwmod_class, |
1477 | .clkdm_name = "pcie_clkdm", | 1477 | .clkdm_name = "pcie_clkdm", |
1478 | .main_clk = "l4_root_clk_div", | 1478 | .main_clk = "l4_root_clk_div", |
1479 | .prcm = { | 1479 | .prcm = { |
1480 | .omap4 = { | 1480 | .omap4 = { |
1481 | .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET, | ||
1482 | .modulemode = MODULEMODE_SWCTRL, | ||
1483 | }, | ||
1484 | }, | ||
1485 | }; | ||
1486 | |||
1487 | /* pcie2 */ | ||
1488 | static struct omap_hwmod dra7xx_pcie2_hwmod = { | ||
1489 | .name = "pcie2", | ||
1490 | .class = &dra7xx_pcie_hwmod_class, | ||
1491 | .clkdm_name = "pcie_clkdm", | ||
1492 | .main_clk = "l4_root_clk_div", | ||
1493 | .prcm = { | ||
1494 | .omap4 = { | ||
1495 | .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET, | ||
1496 | .modulemode = MODULEMODE_SWCTRL, | ||
1497 | }, | ||
1498 | }, | ||
1499 | }; | ||
1500 | |||
1501 | /* | ||
1502 | * 'PCIE PHY' class | ||
1503 | * | ||
1504 | */ | ||
1505 | |||
1506 | static struct omap_hwmod_class dra7xx_pcie_phy_hwmod_class = { | ||
1507 | .name = "pcie-phy", | ||
1508 | }; | ||
1509 | |||
1510 | /* pcie1 phy */ | ||
1511 | static struct omap_hwmod dra7xx_pcie1_phy_hwmod = { | ||
1512 | .name = "pcie1-phy", | ||
1513 | .class = &dra7xx_pcie_phy_hwmod_class, | ||
1514 | .clkdm_name = "l3init_clkdm", | ||
1515 | .main_clk = "l4_root_clk_div", | ||
1516 | .prcm = { | ||
1517 | .omap4 = { | ||
1518 | .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET, | 1481 | .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET, |
1519 | .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET, | 1482 | .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET, |
1520 | .modulemode = MODULEMODE_SWCTRL, | 1483 | .modulemode = MODULEMODE_SWCTRL, |
@@ -1522,11 +1485,11 @@ static struct omap_hwmod dra7xx_pcie1_phy_hwmod = { | |||
1522 | }, | 1485 | }, |
1523 | }; | 1486 | }; |
1524 | 1487 | ||
1525 | /* pcie2 phy */ | 1488 | /* pcie2 */ |
1526 | static struct omap_hwmod dra7xx_pcie2_phy_hwmod = { | 1489 | static struct omap_hwmod dra7xx_pciess2_hwmod = { |
1527 | .name = "pcie2-phy", | 1490 | .name = "pcie2", |
1528 | .class = &dra7xx_pcie_phy_hwmod_class, | 1491 | .class = &dra7xx_pciess_hwmod_class, |
1529 | .clkdm_name = "l3init_clkdm", | 1492 | .clkdm_name = "pcie_clkdm", |
1530 | .main_clk = "l4_root_clk_div", | 1493 | .main_clk = "l4_root_clk_div", |
1531 | .prcm = { | 1494 | .prcm = { |
1532 | .omap4 = { | 1495 | .omap4 = { |
@@ -2877,50 +2840,34 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp3 = { | |||
2877 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2840 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2878 | }; | 2841 | }; |
2879 | 2842 | ||
2880 | /* l3_main_1 -> pcie1 */ | 2843 | /* l3_main_1 -> pciess1 */ |
2881 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie1 = { | 2844 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = { |
2882 | .master = &dra7xx_l3_main_1_hwmod, | 2845 | .master = &dra7xx_l3_main_1_hwmod, |
2883 | .slave = &dra7xx_pcie1_hwmod, | 2846 | .slave = &dra7xx_pciess1_hwmod, |
2884 | .clk = "l3_iclk_div", | 2847 | .clk = "l3_iclk_div", |
2885 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2848 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2886 | }; | 2849 | }; |
2887 | 2850 | ||
2888 | /* l4_cfg -> pcie1 */ | 2851 | /* l4_cfg -> pciess1 */ |
2889 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1 = { | 2852 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = { |
2890 | .master = &dra7xx_l4_cfg_hwmod, | 2853 | .master = &dra7xx_l4_cfg_hwmod, |
2891 | .slave = &dra7xx_pcie1_hwmod, | 2854 | .slave = &dra7xx_pciess1_hwmod, |
2892 | .clk = "l4_root_clk_div", | 2855 | .clk = "l4_root_clk_div", |
2893 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2856 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2894 | }; | 2857 | }; |
2895 | 2858 | ||
2896 | /* l3_main_1 -> pcie2 */ | 2859 | /* l3_main_1 -> pciess2 */ |
2897 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie2 = { | 2860 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = { |
2898 | .master = &dra7xx_l3_main_1_hwmod, | 2861 | .master = &dra7xx_l3_main_1_hwmod, |
2899 | .slave = &dra7xx_pcie2_hwmod, | 2862 | .slave = &dra7xx_pciess2_hwmod, |
2900 | .clk = "l3_iclk_div", | 2863 | .clk = "l3_iclk_div", |
2901 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2864 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2902 | }; | 2865 | }; |
2903 | 2866 | ||
2904 | /* l4_cfg -> pcie2 */ | 2867 | /* l4_cfg -> pciess2 */ |
2905 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2 = { | 2868 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = { |
2906 | .master = &dra7xx_l4_cfg_hwmod, | ||
2907 | .slave = &dra7xx_pcie2_hwmod, | ||
2908 | .clk = "l4_root_clk_div", | ||
2909 | .user = OCP_USER_MPU | OCP_USER_SDMA, | ||
2910 | }; | ||
2911 | |||
2912 | /* l4_cfg -> pcie1 phy */ | ||
2913 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1_phy = { | ||
2914 | .master = &dra7xx_l4_cfg_hwmod, | ||
2915 | .slave = &dra7xx_pcie1_phy_hwmod, | ||
2916 | .clk = "l4_root_clk_div", | ||
2917 | .user = OCP_USER_MPU | OCP_USER_SDMA, | ||
2918 | }; | ||
2919 | |||
2920 | /* l4_cfg -> pcie2 phy */ | ||
2921 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2_phy = { | ||
2922 | .master = &dra7xx_l4_cfg_hwmod, | 2869 | .master = &dra7xx_l4_cfg_hwmod, |
2923 | .slave = &dra7xx_pcie2_phy_hwmod, | 2870 | .slave = &dra7xx_pciess2_hwmod, |
2924 | .clk = "l4_root_clk_div", | 2871 | .clk = "l4_root_clk_div", |
2925 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2872 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2926 | }; | 2873 | }; |
@@ -3327,12 +3274,10 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { | |||
3327 | &dra7xx_l4_cfg__mpu, | 3274 | &dra7xx_l4_cfg__mpu, |
3328 | &dra7xx_l4_cfg__ocp2scp1, | 3275 | &dra7xx_l4_cfg__ocp2scp1, |
3329 | &dra7xx_l4_cfg__ocp2scp3, | 3276 | &dra7xx_l4_cfg__ocp2scp3, |
3330 | &dra7xx_l3_main_1__pcie1, | 3277 | &dra7xx_l3_main_1__pciess1, |
3331 | &dra7xx_l4_cfg__pcie1, | 3278 | &dra7xx_l4_cfg__pciess1, |
3332 | &dra7xx_l3_main_1__pcie2, | 3279 | &dra7xx_l3_main_1__pciess2, |
3333 | &dra7xx_l4_cfg__pcie2, | 3280 | &dra7xx_l4_cfg__pciess2, |
3334 | &dra7xx_l4_cfg__pcie1_phy, | ||
3335 | &dra7xx_l4_cfg__pcie2_phy, | ||
3336 | &dra7xx_l3_main_1__qspi, | 3281 | &dra7xx_l3_main_1__qspi, |
3337 | &dra7xx_l4_per3__rtcss, | 3282 | &dra7xx_l4_per3__rtcss, |
3338 | &dra7xx_l4_cfg__sata, | 3283 | &dra7xx_l4_cfg__sata, |
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 190fa43e7479..e642b079e9f3 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c | |||
@@ -173,6 +173,7 @@ static void __init omap3_igep0030_rev_g_legacy_init(void) | |||
173 | 173 | ||
174 | static void __init omap3_evm_legacy_init(void) | 174 | static void __init omap3_evm_legacy_init(void) |
175 | { | 175 | { |
176 | hsmmc2_internal_input_clk(); | ||
176 | legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149); | 177 | legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149); |
177 | } | 178 | } |
178 | 179 | ||
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index a08a617a6c11..d6d6bc39e05c 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c | |||
@@ -252,10 +252,10 @@ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) | |||
252 | { | 252 | { |
253 | saved_mask[0] = | 253 | saved_mask[0] = |
254 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, | 254 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, |
255 | OMAP4_PRM_IRQSTATUS_MPU_OFFSET); | 255 | OMAP4_PRM_IRQENABLE_MPU_OFFSET); |
256 | saved_mask[1] = | 256 | saved_mask[1] = |
257 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, | 257 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, |
258 | OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); | 258 | OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); |
259 | 259 | ||
260 | omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, | 260 | omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, |
261 | OMAP4_PRM_IRQENABLE_MPU_OFFSET); | 261 | OMAP4_PRM_IRQENABLE_MPU_OFFSET); |
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c index 343c4e3a7c5d..f6d02e4cbcda 100644 --- a/arch/arm/mach-pxa/idp.c +++ b/arch/arm/mach-pxa/idp.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/platform_data/video-pxafb.h> | 36 | #include <linux/platform_data/video-pxafb.h> |
37 | #include <mach/bitfield.h> | 37 | #include <mach/bitfield.h> |
38 | #include <linux/platform_data/mmc-pxamci.h> | 38 | #include <linux/platform_data/mmc-pxamci.h> |
39 | #include <linux/smc91x.h> | ||
39 | 40 | ||
40 | #include "generic.h" | 41 | #include "generic.h" |
41 | #include "devices.h" | 42 | #include "devices.h" |
@@ -81,11 +82,16 @@ static struct resource smc91x_resources[] = { | |||
81 | } | 82 | } |
82 | }; | 83 | }; |
83 | 84 | ||
85 | static struct smc91x_platdata smc91x_platdata = { | ||
86 | .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT, | ||
87 | }; | ||
88 | |||
84 | static struct platform_device smc91x_device = { | 89 | static struct platform_device smc91x_device = { |
85 | .name = "smc91x", | 90 | .name = "smc91x", |
86 | .id = 0, | 91 | .id = 0, |
87 | .num_resources = ARRAY_SIZE(smc91x_resources), | 92 | .num_resources = ARRAY_SIZE(smc91x_resources), |
88 | .resource = smc91x_resources, | 93 | .resource = smc91x_resources, |
94 | .dev.platform_data = &smc91x_platdata, | ||
89 | }; | 95 | }; |
90 | 96 | ||
91 | static void idp_backlight_power(int on) | 97 | static void idp_backlight_power(int on) |
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 0eecd83c624e..89a7c06570d3 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | #include <linux/bitops.h> | ||
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
@@ -40,7 +41,6 @@ | |||
40 | #define ICHP_VAL_IRQ (1 << 31) | 41 | #define ICHP_VAL_IRQ (1 << 31) |
41 | #define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) | 42 | #define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) |
42 | #define IPR_VALID (1 << 31) | 43 | #define IPR_VALID (1 << 31) |
43 | #define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f) | ||
44 | 44 | ||
45 | #define MAX_INTERNAL_IRQS 128 | 45 | #define MAX_INTERNAL_IRQS 128 |
46 | 46 | ||
@@ -51,6 +51,7 @@ | |||
51 | static void __iomem *pxa_irq_base; | 51 | static void __iomem *pxa_irq_base; |
52 | static int pxa_internal_irq_nr; | 52 | static int pxa_internal_irq_nr; |
53 | static bool cpu_has_ipr; | 53 | static bool cpu_has_ipr; |
54 | static struct irq_domain *pxa_irq_domain; | ||
54 | 55 | ||
55 | static inline void __iomem *irq_base(int i) | 56 | static inline void __iomem *irq_base(int i) |
56 | { | 57 | { |
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i) | |||
66 | void pxa_mask_irq(struct irq_data *d) | 67 | void pxa_mask_irq(struct irq_data *d) |
67 | { | 68 | { |
68 | void __iomem *base = irq_data_get_irq_chip_data(d); | 69 | void __iomem *base = irq_data_get_irq_chip_data(d); |
70 | irq_hw_number_t irq = irqd_to_hwirq(d); | ||
69 | uint32_t icmr = __raw_readl(base + ICMR); | 71 | uint32_t icmr = __raw_readl(base + ICMR); |
70 | 72 | ||
71 | icmr &= ~(1 << IRQ_BIT(d->irq)); | 73 | icmr &= ~BIT(irq & 0x1f); |
72 | __raw_writel(icmr, base + ICMR); | 74 | __raw_writel(icmr, base + ICMR); |
73 | } | 75 | } |
74 | 76 | ||
75 | void pxa_unmask_irq(struct irq_data *d) | 77 | void pxa_unmask_irq(struct irq_data *d) |
76 | { | 78 | { |
77 | void __iomem *base = irq_data_get_irq_chip_data(d); | 79 | void __iomem *base = irq_data_get_irq_chip_data(d); |
80 | irq_hw_number_t irq = irqd_to_hwirq(d); | ||
78 | uint32_t icmr = __raw_readl(base + ICMR); | 81 | uint32_t icmr = __raw_readl(base + ICMR); |
79 | 82 | ||
80 | icmr |= 1 << IRQ_BIT(d->irq); | 83 | icmr |= BIT(irq & 0x1f); |
81 | __raw_writel(icmr, base + ICMR); | 84 | __raw_writel(icmr, base + ICMR); |
82 | } | 85 | } |
83 | 86 | ||
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs) | |||
118 | } while (1); | 121 | } while (1); |
119 | } | 122 | } |
120 | 123 | ||
121 | void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) | 124 | static int pxa_irq_map(struct irq_domain *h, unsigned int virq, |
125 | irq_hw_number_t hw) | ||
122 | { | 126 | { |
123 | int irq, i, n; | 127 | void __iomem *base = irq_base(hw / 32); |
124 | 128 | ||
125 | BUG_ON(irq_nr > MAX_INTERNAL_IRQS); | 129 | /* initialize interrupt priority */ |
130 | if (cpu_has_ipr) | ||
131 | __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw)); | ||
132 | |||
133 | irq_set_chip_and_handler(virq, &pxa_internal_irq_chip, | ||
134 | handle_level_irq); | ||
135 | irq_set_chip_data(virq, base); | ||
136 | set_irq_flags(virq, IRQF_VALID); | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static struct irq_domain_ops pxa_irq_ops = { | ||
142 | .map = pxa_irq_map, | ||
143 | .xlate = irq_domain_xlate_onecell, | ||
144 | }; | ||
145 | |||
146 | static __init void | ||
147 | pxa_init_irq_common(struct device_node *node, int irq_nr, | ||
148 | int (*fn)(struct irq_data *, unsigned int)) | ||
149 | { | ||
150 | int n; | ||
126 | 151 | ||
127 | pxa_internal_irq_nr = irq_nr; | 152 | pxa_internal_irq_nr = irq_nr; |
128 | cpu_has_ipr = !cpu_is_pxa25x(); | 153 | pxa_irq_domain = irq_domain_add_legacy(node, irq_nr, |
129 | pxa_irq_base = io_p2v(0x40d00000); | 154 | PXA_IRQ(0), 0, |
155 | &pxa_irq_ops, NULL); | ||
156 | if (!pxa_irq_domain) | ||
157 | panic("Unable to add PXA IRQ domain\n"); | ||
158 | irq_set_default_host(pxa_irq_domain); | ||
130 | 159 | ||
131 | for (n = 0; n < irq_nr; n += 32) { | 160 | for (n = 0; n < irq_nr; n += 32) { |
132 | void __iomem *base = irq_base(n >> 5); | 161 | void __iomem *base = irq_base(n >> 5); |
133 | 162 | ||
134 | __raw_writel(0, base + ICMR); /* disable all IRQs */ | 163 | __raw_writel(0, base + ICMR); /* disable all IRQs */ |
135 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ | 164 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ |
136 | for (i = n; (i < (n + 32)) && (i < irq_nr); i++) { | ||
137 | /* initialize interrupt priority */ | ||
138 | if (cpu_has_ipr) | ||
139 | __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i)); | ||
140 | |||
141 | irq = PXA_IRQ(i); | ||
142 | irq_set_chip_and_handler(irq, &pxa_internal_irq_chip, | ||
143 | handle_level_irq); | ||
144 | irq_set_chip_data(irq, base); | ||
145 | set_irq_flags(irq, IRQF_VALID); | ||
146 | } | ||
147 | } | 165 | } |
148 | |||
149 | /* only unmasked interrupts kick us out of idle */ | 166 | /* only unmasked interrupts kick us out of idle */ |
150 | __raw_writel(1, irq_base(0) + ICCR); | 167 | __raw_writel(1, irq_base(0) + ICCR); |
151 | 168 | ||
152 | pxa_internal_irq_chip.irq_set_wake = fn; | 169 | pxa_internal_irq_chip.irq_set_wake = fn; |
153 | } | 170 | } |
154 | 171 | ||
172 | void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) | ||
173 | { | ||
174 | BUG_ON(irq_nr > MAX_INTERNAL_IRQS); | ||
175 | |||
176 | pxa_irq_base = io_p2v(0x40d00000); | ||
177 | cpu_has_ipr = !cpu_is_pxa25x(); | ||
178 | pxa_init_irq_common(NULL, irq_nr, fn); | ||
179 | } | ||
180 | |||
155 | #ifdef CONFIG_PM | 181 | #ifdef CONFIG_PM |
156 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; | 182 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; |
157 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; | 183 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; |
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = { | |||
203 | }; | 229 | }; |
204 | 230 | ||
205 | #ifdef CONFIG_OF | 231 | #ifdef CONFIG_OF |
206 | static struct irq_domain *pxa_irq_domain; | ||
207 | |||
208 | static int pxa_irq_map(struct irq_domain *h, unsigned int virq, | ||
209 | irq_hw_number_t hw) | ||
210 | { | ||
211 | void __iomem *base = irq_base(hw / 32); | ||
212 | |||
213 | /* initialize interrupt priority */ | ||
214 | if (cpu_has_ipr) | ||
215 | __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw)); | ||
216 | |||
217 | irq_set_chip_and_handler(hw, &pxa_internal_irq_chip, | ||
218 | handle_level_irq); | ||
219 | irq_set_chip_data(hw, base); | ||
220 | set_irq_flags(hw, IRQF_VALID); | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | |||
225 | static struct irq_domain_ops pxa_irq_ops = { | ||
226 | .map = pxa_irq_map, | ||
227 | .xlate = irq_domain_xlate_onecell, | ||
228 | }; | ||
229 | |||
230 | static const struct of_device_id intc_ids[] __initconst = { | 232 | static const struct of_device_id intc_ids[] __initconst = { |
231 | { .compatible = "marvell,pxa-intc", }, | 233 | { .compatible = "marvell,pxa-intc", }, |
232 | {} | 234 | {} |
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)) | |||
236 | { | 238 | { |
237 | struct device_node *node; | 239 | struct device_node *node; |
238 | struct resource res; | 240 | struct resource res; |
239 | int n, ret; | 241 | int ret; |
240 | 242 | ||
241 | node = of_find_matching_node(NULL, intc_ids); | 243 | node = of_find_matching_node(NULL, intc_ids); |
242 | if (!node) { | 244 | if (!node) { |
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)) | |||
267 | return; | 269 | return; |
268 | } | 270 | } |
269 | 271 | ||
270 | pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0, | 272 | pxa_init_irq_common(node, pxa_internal_irq_nr, fn); |
271 | &pxa_irq_ops, NULL); | ||
272 | if (!pxa_irq_domain) | ||
273 | panic("Unable to add PXA IRQ domain\n"); | ||
274 | |||
275 | irq_set_default_host(pxa_irq_domain); | ||
276 | |||
277 | for (n = 0; n < pxa_internal_irq_nr; n += 32) { | ||
278 | void __iomem *base = irq_base(n >> 5); | ||
279 | |||
280 | __raw_writel(0, base + ICMR); /* disable all IRQs */ | ||
281 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ | ||
282 | } | ||
283 | |||
284 | /* only unmasked interrupts kick us out of idle */ | ||
285 | __raw_writel(1, irq_base(0) + ICCR); | ||
286 | |||
287 | pxa_internal_irq_chip.irq_set_wake = fn; | ||
288 | } | 273 | } |
289 | #endif /* CONFIG_OF */ | 274 | #endif /* CONFIG_OF */ |
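The irq.c rework above removes the PXA_IRQ()-relative IRQ_BIT() macro and derives everything from the hardware IRQ number handed out by the new irq domain: hw / 32 selects the register bank passed to irq_base(), and BIT(hw & 0x1f) is the bit to toggle in that bank's ICMR. A small self-contained illustration of that arithmetic (plain C, example IRQ numbers chosen arbitrarily, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int hwirqs[] = { 0, 17, 31, 32, 70 };
	unsigned int i;

	for (i = 0; i < sizeof(hwirqs) / sizeof(hwirqs[0]); i++) {
		unsigned int hw = hwirqs[i];
		unsigned int bank = hw / 32;	/* which ICMR register block */
		unsigned int bit = hw & 0x1f;	/* bit within that ICMR */

		printf("hwirq %3u -> bank %u, mask bit %2u (0x%08x)\n",
		       hw, bank, bit, 1u << bit);
	}
	return 0;
}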
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index ad777b353bd5..eaee2c20b189 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/mtd/mtd.h> | 24 | #include <linux/mtd/mtd.h> |
25 | #include <linux/mtd/partitions.h> | 25 | #include <linux/mtd/partitions.h> |
26 | #include <linux/pwm_backlight.h> | 26 | #include <linux/pwm_backlight.h> |
27 | #include <linux/smc91x.h> | ||
27 | 28 | ||
28 | #include <asm/types.h> | 29 | #include <asm/types.h> |
29 | #include <asm/setup.h> | 30 | #include <asm/setup.h> |
@@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = { | |||
189 | [1] = { | 190 | [1] = { |
190 | .start = LPD270_ETHERNET_IRQ, | 191 | .start = LPD270_ETHERNET_IRQ, |
191 | .end = LPD270_ETHERNET_IRQ, | 192 | .end = LPD270_ETHERNET_IRQ, |
192 | .flags = IORESOURCE_IRQ, | 193 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, |
193 | }, | 194 | }, |
194 | }; | 195 | }; |
195 | 196 | ||
197 | struct smc91x_platdata smc91x_platdata = { | ||
198 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, | ||
199 | }; | ||
200 | |||
196 | static struct platform_device smc91x_device = { | 201 | static struct platform_device smc91x_device = { |
197 | .name = "smc91x", | 202 | .name = "smc91x", |
198 | .id = 0, | 203 | .id = 0, |
199 | .num_resources = ARRAY_SIZE(smc91x_resources), | 204 | .num_resources = ARRAY_SIZE(smc91x_resources), |
200 | .resource = smc91x_resources, | 205 | .resource = smc91x_resources, |
206 | .dev.platform_data = &smc91x_platdata, | ||
201 | }; | 207 | }; |
202 | 208 | ||
203 | static struct resource lpd270_flash_resources[] = { | 209 | static struct resource lpd270_flash_resources[] = { |
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 205f9bf3821e..ac2ae5c71ab4 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c | |||
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = { | |||
412 | }; | 412 | }; |
413 | 413 | ||
414 | static struct platform_device can_regulator_device = { | 414 | static struct platform_device can_regulator_device = { |
415 | .name = "reg-fixed-volage", | 415 | .name = "reg-fixed-voltage", |
416 | .id = 0, | 416 | .id = 0, |
417 | .dev = { | 417 | .dev = { |
418 | .platform_data = &can_regulator_pdata, | 418 | .platform_data = &can_regulator_pdata, |
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 850e506926df..c309593abdb2 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/platform_data/video-clcd-versatile.h> | 28 | #include <linux/platform_data/video-clcd-versatile.h> |
29 | #include <linux/io.h> | 29 | #include <linux/io.h> |
30 | #include <linux/smsc911x.h> | 30 | #include <linux/smsc911x.h> |
31 | #include <linux/smc91x.h> | ||
31 | #include <linux/ata_platform.h> | 32 | #include <linux/ata_platform.h> |
32 | #include <linux/amba/mmci.h> | 33 | #include <linux/amba/mmci.h> |
33 | #include <linux/gfp.h> | 34 | #include <linux/gfp.h> |
@@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = { | |||
94 | .phy_interface = PHY_INTERFACE_MODE_MII, | 95 | .phy_interface = PHY_INTERFACE_MODE_MII, |
95 | }; | 96 | }; |
96 | 97 | ||
98 | static struct smc91x_platdata smc91x_platdata = { | ||
99 | .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT, | ||
100 | }; | ||
101 | |||
97 | static struct platform_device realview_eth_device = { | 102 | static struct platform_device realview_eth_device = { |
98 | .name = "smsc911x", | 103 | .name = "smsc911x", |
99 | .id = 0, | 104 | .id = 0, |
@@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res) | |||
107 | realview_eth_device.resource = res; | 112 | realview_eth_device.resource = res; |
108 | if (strcmp(realview_eth_device.name, "smsc911x") == 0) | 113 | if (strcmp(realview_eth_device.name, "smsc911x") == 0) |
109 | realview_eth_device.dev.platform_data = &smsc911x_config; | 114 | realview_eth_device.dev.platform_data = &smsc911x_config; |
115 | else | ||
116 | realview_eth_device.dev.platform_data = &smc91x_platdata; | ||
110 | 117 | ||
111 | return platform_device_register(&realview_eth_device); | 118 | return platform_device_register(&realview_eth_device); |
112 | } | 119 | } |
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 64c88d657f9e..b3869cbbcc68 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c | |||
@@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = { | |||
234 | [1] = { | 234 | [1] = { |
235 | .start = IRQ_EB_ETH, | 235 | .start = IRQ_EB_ETH, |
236 | .end = IRQ_EB_ETH, | 236 | .end = IRQ_EB_ETH, |
237 | .flags = IORESOURCE_IRQ, | 237 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, |
238 | }, | 238 | }, |
239 | }; | 239 | }; |
240 | 240 | ||
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 169262e3040d..af868d258e66 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/pm.h> | 12 | #include <linux/pm.h> |
13 | #include <linux/serial_core.h> | 13 | #include <linux/serial_core.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/smc91x.h> | ||
15 | 16 | ||
16 | #include <asm/mach-types.h> | 17 | #include <asm/mach-types.h> |
17 | #include <asm/mach/map.h> | 18 | #include <asm/mach/map.h> |
@@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev) | |||
258 | 0x02000000, "smc91x-attrib"), | 259 | 0x02000000, "smc91x-attrib"), |
259 | { .flags = IORESOURCE_IRQ }, | 260 | { .flags = IORESOURCE_IRQ }, |
260 | }; | 261 | }; |
262 | struct smc91x_platdata smc91x_platdata = { | ||
263 | .flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT, | ||
264 | }; | ||
261 | struct platform_device_info smc91x_devinfo = { | 265 | struct platform_device_info smc91x_devinfo = { |
262 | .parent = &dev->dev, | 266 | .parent = &dev->dev, |
263 | .name = "smc91x", | 267 | .name = "smc91x", |
264 | .id = 0, | 268 | .id = 0, |
265 | .res = smc91x_resources, | 269 | .res = smc91x_resources, |
266 | .num_res = ARRAY_SIZE(smc91x_resources), | 270 | .num_res = ARRAY_SIZE(smc91x_resources), |
271 | .data = &smc91x_platdata, | ||
272 | .size_data = sizeof(smc91x_platdata), | ||
267 | }; | 273 | }; |
268 | int ret, irq; | 274 | int ret, irq; |
269 | 275 | ||
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 091261878eff..1525d7b5f1b7 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
12 | #include <linux/io.h> | 12 | #include <linux/io.h> |
13 | #include <linux/mtd/partitions.h> | 13 | #include <linux/mtd/partitions.h> |
14 | #include <linux/smc91x.h> | ||
14 | 15 | ||
15 | #include <mach/hardware.h> | 16 | #include <mach/hardware.h> |
16 | #include <asm/setup.h> | 17 | #include <asm/setup.h> |
@@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = { | |||
43 | #endif | 44 | #endif |
44 | }; | 45 | }; |
45 | 46 | ||
47 | static struct smc91x_platdata smc91x_platdata = { | ||
48 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, | ||
49 | }; | ||
46 | 50 | ||
47 | static struct platform_device smc91x_device = { | 51 | static struct platform_device smc91x_device = { |
48 | .name = "smc91x", | 52 | .name = "smc91x", |
49 | .id = 0, | 53 | .id = 0, |
50 | .num_resources = ARRAY_SIZE(smc91x_resources), | 54 | .num_resources = ARRAY_SIZE(smc91x_resources), |
51 | .resource = smc91x_resources, | 55 | .resource = smc91x_resources, |
56 | .dev = { | ||
57 | .platform_data = &smc91x_platdata, | ||
58 | }, | ||
52 | }; | 59 | }; |
53 | 60 | ||
54 | static struct platform_device *devices[] __initdata = { | 61 | static struct platform_device *devices[] __initdata = { |
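The smc91x changes in this series (idp, lpd270, realview, neponset, pleb above) all follow one pattern: describe the board's bus width and wait-state wiring in a struct smc91x_platdata and hand it to the device through platform_data rather than leaving the driver to guess. A hedged sketch of the consumer side, with a hypothetical probe function standing in for the real driver:

#include <linux/platform_device.h>
#include <linux/smc91x.h>

static int example_probe(struct platform_device *pdev)
{
	/* picks up the flags the board files above now register */
	struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);

	if (pd && (pd->flags & SMC91X_USE_16BIT))
		dev_info(&pdev->dev, "board wired for 16-bit accesses\n");

	return 0;
}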
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h index 483cb467bf65..a0f3b1cd497c 100644 --- a/arch/arm/mach-socfpga/core.h +++ b/arch/arm/mach-socfpga/core.h | |||
@@ -45,6 +45,6 @@ extern char secondary_trampoline, secondary_trampoline_end; | |||
45 | 45 | ||
46 | extern unsigned long socfpga_cpu1start_addr; | 46 | extern unsigned long socfpga_cpu1start_addr; |
47 | 47 | ||
48 | #define SOCFPGA_SCU_VIRT_BASE 0xfffec000 | 48 | #define SOCFPGA_SCU_VIRT_BASE 0xfee00000 |
49 | 49 | ||
50 | #endif | 50 | #endif |
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 383d61e138af..f5e597c207b9 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/hardware/cache-l2x0.h> | 23 | #include <asm/hardware/cache-l2x0.h> |
24 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
25 | #include <asm/mach/map.h> | 25 | #include <asm/mach/map.h> |
26 | #include <asm/cacheflush.h> | ||
26 | 27 | ||
27 | #include "core.h" | 28 | #include "core.h" |
28 | 29 | ||
@@ -73,6 +74,10 @@ void __init socfpga_sysmgr_init(void) | |||
73 | (u32 *) &socfpga_cpu1start_addr)) | 74 | (u32 *) &socfpga_cpu1start_addr)) |
74 | pr_err("SMP: Need cpu1-start-addr in device tree.\n"); | 75 | pr_err("SMP: Need cpu1-start-addr in device tree.\n"); |
75 | 76 | ||
77 | /* Ensure that socfpga_cpu1start_addr is visible to other CPUs */ | ||
78 | smp_wmb(); | ||
79 | sync_cache_w(&socfpga_cpu1start_addr); | ||
80 | |||
76 | sys_manager_base_addr = of_iomap(np, 0); | 81 | sys_manager_base_addr = of_iomap(np, 0); |
77 | 82 | ||
78 | np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); | 83 | np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); |
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c index b067390cef4e..b373acade338 100644 --- a/arch/arm/mach-sti/board-dt.c +++ b/arch/arm/mach-sti/board-dt.c | |||
@@ -18,6 +18,7 @@ static const char *stih41x_dt_match[] __initdata = { | |||
18 | "st,stih415", | 18 | "st,stih415", |
19 | "st,stih416", | 19 | "st,stih416", |
20 | "st,stih407", | 20 | "st,stih407", |
21 | "st,stih410", | ||
21 | "st,stih418", | 22 | "st,stih418", |
22 | NULL | 23 | NULL |
23 | }; | 24 | }; |
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index a77604fbaf25..81502b90dd91 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig | |||
@@ -1,10 +1,12 @@ | |||
1 | menuconfig ARCH_SUNXI | 1 | menuconfig ARCH_SUNXI |
2 | bool "Allwinner SoCs" if ARCH_MULTI_V7 | 2 | bool "Allwinner SoCs" if ARCH_MULTI_V7 |
3 | select ARCH_REQUIRE_GPIOLIB | 3 | select ARCH_REQUIRE_GPIOLIB |
4 | select ARCH_HAS_RESET_CONTROLLER | ||
4 | select CLKSRC_MMIO | 5 | select CLKSRC_MMIO |
5 | select GENERIC_IRQ_CHIP | 6 | select GENERIC_IRQ_CHIP |
6 | select PINCTRL | 7 | select PINCTRL |
7 | select SUN4I_TIMER | 8 | select SUN4I_TIMER |
9 | select RESET_CONTROLLER | ||
8 | 10 | ||
9 | if ARCH_SUNXI | 11 | if ARCH_SUNXI |
10 | 12 | ||
@@ -20,10 +22,8 @@ config MACH_SUN5I | |||
20 | config MACH_SUN6I | 22 | config MACH_SUN6I |
21 | bool "Allwinner A31 (sun6i) SoCs support" | 23 | bool "Allwinner A31 (sun6i) SoCs support" |
22 | default ARCH_SUNXI | 24 | default ARCH_SUNXI |
23 | select ARCH_HAS_RESET_CONTROLLER | ||
24 | select ARM_GIC | 25 | select ARM_GIC |
25 | select MFD_SUN6I_PRCM | 26 | select MFD_SUN6I_PRCM |
26 | select RESET_CONTROLLER | ||
27 | select SUN5I_HSTIMER | 27 | select SUN5I_HSTIMER |
28 | 28 | ||
29 | config MACH_SUN7I | 29 | config MACH_SUN7I |
@@ -37,16 +37,12 @@ config MACH_SUN7I | |||
37 | config MACH_SUN8I | 37 | config MACH_SUN8I |
38 | bool "Allwinner A23 (sun8i) SoCs support" | 38 | bool "Allwinner A23 (sun8i) SoCs support" |
39 | default ARCH_SUNXI | 39 | default ARCH_SUNXI |
40 | select ARCH_HAS_RESET_CONTROLLER | ||
41 | select ARM_GIC | 40 | select ARM_GIC |
42 | select MFD_SUN6I_PRCM | 41 | select MFD_SUN6I_PRCM |
43 | select RESET_CONTROLLER | ||
44 | 42 | ||
45 | config MACH_SUN9I | 43 | config MACH_SUN9I |
46 | bool "Allwinner (sun9i) SoCs support" | 44 | bool "Allwinner (sun9i) SoCs support" |
47 | default ARCH_SUNXI | 45 | default ARCH_SUNXI |
48 | select ARCH_HAS_RESET_CONTROLLER | ||
49 | select ARM_GIC | 46 | select ARM_GIC |
50 | select RESET_CONTROLLER | ||
51 | 47 | ||
52 | endif | 48 | endif |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c6c7696b8db9..8f15f70622a6 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -1131,23 +1131,22 @@ static void __init l2c310_of_parse(const struct device_node *np, | |||
1131 | } | 1131 | } |
1132 | 1132 | ||
1133 | ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); | 1133 | ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); |
1134 | if (ret) | 1134 | if (!ret) { |
1135 | return; | 1135 | switch (assoc) { |
1136 | 1136 | case 16: | |
1137 | switch (assoc) { | 1137 | *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; |
1138 | case 16: | 1138 | *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; |
1139 | *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; | 1139 | *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; |
1140 | *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; | 1140 | break; |
1141 | *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; | 1141 | case 8: |
1142 | break; | 1142 | *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; |
1143 | case 8: | 1143 | *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; |
1144 | *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; | 1144 | break; |
1145 | *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; | 1145 | default: |
1146 | break; | 1146 | pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", |
1147 | default: | 1147 | assoc); |
1148 | pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", | 1148 | break; |
1149 | assoc); | 1149 | } |
1150 | break; | ||
1151 | } | 1150 | } |
1152 | 1151 | ||
1153 | prefetch = l2x0_saved_regs.prefetch_ctrl; | 1152 | prefetch = l2x0_saved_regs.prefetch_ctrl; |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 170a116d1b29..c27447653903 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -171,7 +171,7 @@ static int __dma_supported(struct device *dev, u64 mask, bool warn) | |||
171 | */ | 171 | */ |
172 | if (sizeof(mask) != sizeof(dma_addr_t) && | 172 | if (sizeof(mask) != sizeof(dma_addr_t) && |
173 | mask > (dma_addr_t)~0 && | 173 | mask > (dma_addr_t)~0 && |
174 | dma_to_pfn(dev, ~0) < max_pfn) { | 174 | dma_to_pfn(dev, ~0) < max_pfn - 1) { |
175 | if (warn) { | 175 | if (warn) { |
176 | dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", | 176 | dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", |
177 | mask); | 177 | mask); |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index a982dc3190df..6333d9c17875 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -552,6 +552,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
552 | 552 | ||
553 | pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", | 553 | pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", |
554 | inf->name, fsr, addr); | 554 | inf->name, fsr, addr); |
555 | show_pte(current->mm, addr); | ||
555 | 556 | ||
556 | info.si_signo = inf->sig; | 557 | info.si_signo = inf->sig; |
557 | info.si_errno = 0; | 558 | info.si_errno = 0; |
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c index 004e35cdcfff..cf30daff8932 100644 --- a/arch/arm/mm/pageattr.c +++ b/arch/arm/mm/pageattr.c | |||
@@ -49,7 +49,10 @@ static int change_memory_common(unsigned long addr, int numpages, | |||
49 | WARN_ON_ONCE(1); | 49 | WARN_ON_ONCE(1); |
50 | } | 50 | } |
51 | 51 | ||
52 | if (!is_module_address(start) || !is_module_address(end - 1)) | 52 | if (start < MODULES_VADDR || start >= MODULES_END) |
53 | return -EINVAL; | ||
54 | |||
55 | if (end < MODULES_VADDR || start >= MODULES_END) | ||
53 | return -EINVAL; | 56 | return -EINVAL; |
54 | 57 | ||
55 | data.set_mask = set_mask; | 58 | data.set_mask = set_mask; |
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index db10169a08de..8ca94d379bc3 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c | |||
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
799 | struct device *dev = &pdev->dev; | 799 | struct device *dev = &pdev->dev; |
800 | const struct of_device_id *match; | 800 | const struct of_device_id *match; |
801 | const struct dmtimer_platform_data *pdata; | 801 | const struct dmtimer_platform_data *pdata; |
802 | int ret; | ||
802 | 803 | ||
803 | match = of_match_device(of_match_ptr(omap_timer_match), dev); | 804 | match = of_match_device(of_match_ptr(omap_timer_match), dev); |
804 | pdata = match ? match->data : dev->platform_data; | 805 | pdata = match ? match->data : dev->platform_data; |
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
860 | } | 861 | } |
861 | 862 | ||
862 | if (!timer->reserved) { | 863 | if (!timer->reserved) { |
863 | pm_runtime_get_sync(dev); | 864 | ret = pm_runtime_get_sync(dev); |
865 | if (ret < 0) { | ||
866 | dev_err(dev, "%s: pm_runtime_get_sync failed!\n", | ||
867 | __func__); | ||
868 | goto err_get_sync; | ||
869 | } | ||
864 | __omap_dm_timer_init_regs(timer); | 870 | __omap_dm_timer_init_regs(timer); |
865 | pm_runtime_put(dev); | 871 | pm_runtime_put(dev); |
866 | } | 872 | } |
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
873 | dev_dbg(dev, "Device Probed.\n"); | 879 | dev_dbg(dev, "Device Probed.\n"); |
874 | 880 | ||
875 | return 0; | 881 | return 0; |
882 | |||
883 | err_get_sync: | ||
884 | pm_runtime_put_noidle(dev); | ||
885 | pm_runtime_disable(dev); | ||
886 | return ret; | ||
876 | } | 887 | } |
877 | 888 | ||
878 | /** | 889 | /** |
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev) | |||
899 | } | 910 | } |
900 | spin_unlock_irqrestore(&dm_timer_lock, flags); | 911 | spin_unlock_irqrestore(&dm_timer_lock, flags); |
901 | 912 | ||
913 | pm_runtime_disable(&pdev->dev); | ||
914 | |||
902 | return ret; | 915 | return ret; |
903 | } | 916 | } |
904 | 917 | ||
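The dmtimer probe now checks the pm_runtime_get_sync() return value and unwinds with pm_runtime_put_noidle() plus pm_runtime_disable() when it fails, and omap_dm_timer_remove() disables runtime PM as well. A schematic probe following the same error-handling pattern (kernel-style sketch, hypothetical driver, not the dmtimer code itself):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		/*
		 * get_sync raises the usage count even when it fails, so
		 * drop it without scheduling idle, then disable runtime PM.
		 */
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* ... access the hardware while it is guaranteed powered ... */

	pm_runtime_put(&pdev->dev);
	return 0;
}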
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index f1ad9c2ab2e9..a857794432d6 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi | |||
@@ -622,7 +622,7 @@ | |||
622 | }; | 622 | }; |
623 | 623 | ||
624 | sgenet0: ethernet@1f210000 { | 624 | sgenet0: ethernet@1f210000 { |
625 | compatible = "apm,xgene-enet"; | 625 | compatible = "apm,xgene1-sgenet"; |
626 | status = "disabled"; | 626 | status = "disabled"; |
627 | reg = <0x0 0x1f210000 0x0 0xd100>, | 627 | reg = <0x0 0x1f210000 0x0 0xd100>, |
628 | <0x0 0x1f200000 0x0 0Xc300>, | 628 | <0x0 0x1f200000 0x0 0Xc300>, |
@@ -636,7 +636,7 @@ | |||
636 | }; | 636 | }; |
637 | 637 | ||
638 | xgenet: ethernet@1f610000 { | 638 | xgenet: ethernet@1f610000 { |
639 | compatible = "apm,xgene-enet"; | 639 | compatible = "apm,xgene1-xgenet"; |
640 | status = "disabled"; | 640 | status = "disabled"; |
641 | reg = <0x0 0x1f610000 0x0 0xd100>, | 641 | reg = <0x0 0x1f610000 0x0 0xd100>, |
642 | <0x0 0x1f600000 0x0 0Xc300>, | 642 | <0x0 0x1f600000 0x0 0Xc300>, |
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dts b/arch/arm64/boot/dts/arm/foundation-v8.dts index 27f32962e55c..4eac8dcea423 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dts +++ b/arch/arm64/boot/dts/arm/foundation-v8.dts | |||
@@ -34,6 +34,7 @@ | |||
34 | reg = <0x0 0x0>; | 34 | reg = <0x0 0x0>; |
35 | enable-method = "spin-table"; | 35 | enable-method = "spin-table"; |
36 | cpu-release-addr = <0x0 0x8000fff8>; | 36 | cpu-release-addr = <0x0 0x8000fff8>; |
37 | next-level-cache = <&L2_0>; | ||
37 | }; | 38 | }; |
38 | cpu@1 { | 39 | cpu@1 { |
39 | device_type = "cpu"; | 40 | device_type = "cpu"; |
@@ -41,6 +42,7 @@ | |||
41 | reg = <0x0 0x1>; | 42 | reg = <0x0 0x1>; |
42 | enable-method = "spin-table"; | 43 | enable-method = "spin-table"; |
43 | cpu-release-addr = <0x0 0x8000fff8>; | 44 | cpu-release-addr = <0x0 0x8000fff8>; |
45 | next-level-cache = <&L2_0>; | ||
44 | }; | 46 | }; |
45 | cpu@2 { | 47 | cpu@2 { |
46 | device_type = "cpu"; | 48 | device_type = "cpu"; |
@@ -48,6 +50,7 @@ | |||
48 | reg = <0x0 0x2>; | 50 | reg = <0x0 0x2>; |
49 | enable-method = "spin-table"; | 51 | enable-method = "spin-table"; |
50 | cpu-release-addr = <0x0 0x8000fff8>; | 52 | cpu-release-addr = <0x0 0x8000fff8>; |
53 | next-level-cache = <&L2_0>; | ||
51 | }; | 54 | }; |
52 | cpu@3 { | 55 | cpu@3 { |
53 | device_type = "cpu"; | 56 | device_type = "cpu"; |
@@ -55,6 +58,11 @@ | |||
55 | reg = <0x0 0x3>; | 58 | reg = <0x0 0x3>; |
56 | enable-method = "spin-table"; | 59 | enable-method = "spin-table"; |
57 | cpu-release-addr = <0x0 0x8000fff8>; | 60 | cpu-release-addr = <0x0 0x8000fff8>; |
61 | next-level-cache = <&L2_0>; | ||
62 | }; | ||
63 | |||
64 | L2_0: l2-cache0 { | ||
65 | compatible = "cache"; | ||
58 | }; | 66 | }; |
59 | }; | 67 | }; |
60 | 68 | ||
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi index ea2b5666a16f..c9b89efe0f56 100644 --- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi +++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi | |||
@@ -8,7 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* SoC fixed clocks */ | 10 | /* SoC fixed clocks */ |
11 | soc_uartclk: refclk72738khz { | 11 | soc_uartclk: refclk7273800hz { |
12 | compatible = "fixed-clock"; | 12 | compatible = "fixed-clock"; |
13 | #clock-cells = <0>; | 13 | #clock-cells = <0>; |
14 | clock-frequency = <7273800>; | 14 | clock-frequency = <7273800>; |
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index d429129ecb3d..133ee59de2d7 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts | |||
@@ -39,6 +39,7 @@ | |||
39 | reg = <0x0 0x0>; | 39 | reg = <0x0 0x0>; |
40 | device_type = "cpu"; | 40 | device_type = "cpu"; |
41 | enable-method = "psci"; | 41 | enable-method = "psci"; |
42 | next-level-cache = <&A57_L2>; | ||
42 | }; | 43 | }; |
43 | 44 | ||
44 | A57_1: cpu@1 { | 45 | A57_1: cpu@1 { |
@@ -46,6 +47,7 @@ | |||
46 | reg = <0x0 0x1>; | 47 | reg = <0x0 0x1>; |
47 | device_type = "cpu"; | 48 | device_type = "cpu"; |
48 | enable-method = "psci"; | 49 | enable-method = "psci"; |
50 | next-level-cache = <&A57_L2>; | ||
49 | }; | 51 | }; |
50 | 52 | ||
51 | A53_0: cpu@100 { | 53 | A53_0: cpu@100 { |
@@ -53,6 +55,7 @@ | |||
53 | reg = <0x0 0x100>; | 55 | reg = <0x0 0x100>; |
54 | device_type = "cpu"; | 56 | device_type = "cpu"; |
55 | enable-method = "psci"; | 57 | enable-method = "psci"; |
58 | next-level-cache = <&A53_L2>; | ||
56 | }; | 59 | }; |
57 | 60 | ||
58 | A53_1: cpu@101 { | 61 | A53_1: cpu@101 { |
@@ -60,6 +63,7 @@ | |||
60 | reg = <0x0 0x101>; | 63 | reg = <0x0 0x101>; |
61 | device_type = "cpu"; | 64 | device_type = "cpu"; |
62 | enable-method = "psci"; | 65 | enable-method = "psci"; |
66 | next-level-cache = <&A53_L2>; | ||
63 | }; | 67 | }; |
64 | 68 | ||
65 | A53_2: cpu@102 { | 69 | A53_2: cpu@102 { |
@@ -67,6 +71,7 @@ | |||
67 | reg = <0x0 0x102>; | 71 | reg = <0x0 0x102>; |
68 | device_type = "cpu"; | 72 | device_type = "cpu"; |
69 | enable-method = "psci"; | 73 | enable-method = "psci"; |
74 | next-level-cache = <&A53_L2>; | ||
70 | }; | 75 | }; |
71 | 76 | ||
72 | A53_3: cpu@103 { | 77 | A53_3: cpu@103 { |
@@ -74,6 +79,15 @@ | |||
74 | reg = <0x0 0x103>; | 79 | reg = <0x0 0x103>; |
75 | device_type = "cpu"; | 80 | device_type = "cpu"; |
76 | enable-method = "psci"; | 81 | enable-method = "psci"; |
82 | next-level-cache = <&A53_L2>; | ||
83 | }; | ||
84 | |||
85 | A57_L2: l2-cache0 { | ||
86 | compatible = "cache"; | ||
87 | }; | ||
88 | |||
89 | A53_L2: l2-cache1 { | ||
90 | compatible = "cache"; | ||
77 | }; | 91 | }; |
78 | }; | 92 | }; |
79 | 93 | ||
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts index efc59b3baf63..20addabbd127 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts +++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts | |||
@@ -37,6 +37,7 @@ | |||
37 | reg = <0x0 0x0>; | 37 | reg = <0x0 0x0>; |
38 | enable-method = "spin-table"; | 38 | enable-method = "spin-table"; |
39 | cpu-release-addr = <0x0 0x8000fff8>; | 39 | cpu-release-addr = <0x0 0x8000fff8>; |
40 | next-level-cache = <&L2_0>; | ||
40 | }; | 41 | }; |
41 | cpu@1 { | 42 | cpu@1 { |
42 | device_type = "cpu"; | 43 | device_type = "cpu"; |
@@ -44,6 +45,7 @@ | |||
44 | reg = <0x0 0x1>; | 45 | reg = <0x0 0x1>; |
45 | enable-method = "spin-table"; | 46 | enable-method = "spin-table"; |
46 | cpu-release-addr = <0x0 0x8000fff8>; | 47 | cpu-release-addr = <0x0 0x8000fff8>; |
48 | next-level-cache = <&L2_0>; | ||
47 | }; | 49 | }; |
48 | cpu@2 { | 50 | cpu@2 { |
49 | device_type = "cpu"; | 51 | device_type = "cpu"; |
@@ -51,6 +53,7 @@ | |||
51 | reg = <0x0 0x2>; | 53 | reg = <0x0 0x2>; |
52 | enable-method = "spin-table"; | 54 | enable-method = "spin-table"; |
53 | cpu-release-addr = <0x0 0x8000fff8>; | 55 | cpu-release-addr = <0x0 0x8000fff8>; |
56 | next-level-cache = <&L2_0>; | ||
54 | }; | 57 | }; |
55 | cpu@3 { | 58 | cpu@3 { |
56 | device_type = "cpu"; | 59 | device_type = "cpu"; |
@@ -58,6 +61,11 @@ | |||
58 | reg = <0x0 0x3>; | 61 | reg = <0x0 0x3>; |
59 | enable-method = "spin-table"; | 62 | enable-method = "spin-table"; |
60 | cpu-release-addr = <0x0 0x8000fff8>; | 63 | cpu-release-addr = <0x0 0x8000fff8>; |
64 | next-level-cache = <&L2_0>; | ||
65 | }; | ||
66 | |||
67 | L2_0: l2-cache0 { | ||
68 | compatible = "cache"; | ||
61 | }; | 69 | }; |
62 | }; | 70 | }; |
63 | 71 | ||
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index 5720608c50b1..abb79b3cfcfe 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile | |||
@@ -29,7 +29,7 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o | |||
29 | obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o | 29 | obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o |
30 | aes-neon-blk-y := aes-glue-neon.o aes-neon.o | 30 | aes-neon-blk-y := aes-glue-neon.o aes-neon.o |
31 | 31 | ||
32 | AFLAGS_aes-ce.o := -DINTERLEAVE=2 -DINTERLEAVE_INLINE | 32 | AFLAGS_aes-ce.o := -DINTERLEAVE=4 |
33 | AFLAGS_aes-neon.o := -DINTERLEAVE=4 | 33 | AFLAGS_aes-neon.o := -DINTERLEAVE=4 |
34 | 34 | ||
35 | CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS | 35 | CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS |
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5901480bfdca..750bac4e637e 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h | |||
@@ -20,6 +20,9 @@ | |||
20 | #error "Only include this from assembly code" | 20 | #error "Only include this from assembly code" |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #ifndef __ASM_ASSEMBLER_H | ||
24 | #define __ASM_ASSEMBLER_H | ||
25 | |||
23 | #include <asm/ptrace.h> | 26 | #include <asm/ptrace.h> |
24 | #include <asm/thread_info.h> | 27 | #include <asm/thread_info.h> |
25 | 28 | ||
@@ -155,3 +158,5 @@ lr .req x30 // link register | |||
155 | #endif | 158 | #endif |
156 | orr \rd, \lbits, \hbits, lsl #32 | 159 | orr \rd, \lbits, \hbits, lsl #32 |
157 | .endm | 160 | .endm |
161 | |||
162 | #endif /* __ASM_ASSEMBLER_H */ | ||
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index cb9593079f29..d8c25b7b18fb 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h | |||
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, | |||
246 | __ret; \ | 246 | __ret; \ |
247 | }) | 247 | }) |
248 | 248 | ||
249 | #define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 249 | #define _protect_cmpxchg_local(pcp, o, n) \ |
250 | #define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 250 | ({ \ |
251 | #define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 251 | typeof(*raw_cpu_ptr(&(pcp))) __ret; \ |
252 | #define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 252 | preempt_disable(); \ |
253 | 253 | __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ | |
254 | #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ | 254 | preempt_enable(); \ |
255 | cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \ | 255 | __ret; \ |
256 | o1, o2, n1, n2) | 256 | }) |
257 | |||
258 | #define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
259 | #define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
260 | #define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
261 | #define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
262 | |||
263 | #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ | ||
264 | ({ \ | ||
265 | int __ret; \ | ||
266 | preempt_disable(); \ | ||
267 | __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \ | ||
268 | raw_cpu_ptr(&(ptr2)), \ | ||
269 | o1, o2, n1, n2); \ | ||
270 | preempt_enable(); \ | ||
271 | __ret; \ | ||
272 | }) | ||
257 | 273 | ||
258 | #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) | 274 | #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) |
259 | #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) | 275 | #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) |
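_protect_cmpxchg_local() above brackets cmpxchg_local() on the per-CPU address with preempt_disable()/preempt_enable(); without that, the task could migrate between raw_cpu_ptr() computing the address and the compare-and-exchange running, so the operation would silently hit the previous CPU's copy of the variable. A schematic use of the resulting this_cpu_cmpxchg(), with a hypothetical per-CPU token purely for illustration:

#include <linux/types.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_token);

/* Claim this CPU's slot only if it is currently free (0). */
static bool demo_try_claim(unsigned long token)
{
	/* this_cpu_cmpxchg() returns the old value; 0 means the swap happened */
	return this_cpu_cmpxchg(demo_token, 0, token) == 0;
}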
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h index 0710654631e7..c60643f14cda 100644 --- a/arch/arm64/include/asm/cpuidle.h +++ b/arch/arm64/include/asm/cpuidle.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __ASM_CPUIDLE_H | 1 | #ifndef __ASM_CPUIDLE_H |
2 | #define __ASM_CPUIDLE_H | 2 | #define __ASM_CPUIDLE_H |
3 | 3 | ||
4 | #include <asm/proc-fns.h> | ||
5 | |||
4 | #ifdef CONFIG_CPU_IDLE | 6 | #ifdef CONFIG_CPU_IDLE |
5 | extern int cpu_init_idle(unsigned int cpu); | 7 | extern int cpu_init_idle(unsigned int cpu); |
6 | extern int cpu_suspend(unsigned long arg); | 8 | extern int cpu_suspend(unsigned long arg); |
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index e2ff32a93b5c..d2f49423c5dc 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h | |||
@@ -264,8 +264,10 @@ __AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000) | |||
264 | __AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000) | 264 | __AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000) |
265 | __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) | 265 | __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) |
266 | __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) | 266 | __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) |
267 | __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) | 267 | __AARCH64_INSN_FUNCS(cbz, 0x7F000000, 0x34000000) |
268 | __AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000) | 268 | __AARCH64_INSN_FUNCS(cbnz, 0x7F000000, 0x35000000) |
269 | __AARCH64_INSN_FUNCS(tbz, 0x7F000000, 0x36000000) | ||
270 | __AARCH64_INSN_FUNCS(tbnz, 0x7F000000, 0x37000000) | ||
269 | __AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000) | 271 | __AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000) |
270 | __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) | 272 | __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) |
271 | __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) | 273 | __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) |
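Relaxing the cbz/cbnz masks from 0xFE000000 to 0x7F000000 drops bit 31 from the comparison; for CBZ/CBNZ that bit is the sf field selecting the 32-bit (Wt) or 64-bit (Xt) register form, so the old mask only recognised the 32-bit encoding. A quick stand-alone check of both forms against both masks (plain C; the low instruction bits are an arbitrary imm19/Rt example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cbz_w = 0x34000000u | 0x00000841u;	/* CBZ Wt, label */
	uint32_t cbz_x = 0xB4000000u | 0x00000841u;	/* CBZ Xt, label (sf = 1) */
	uint32_t old_mask = 0xFE000000u, new_mask = 0x7F000000u;
	uint32_t value = 0x34000000u;

	printf("old mask: W %s, X %s\n",
	       (cbz_w & old_mask) == value ? "match" : "miss",
	       (cbz_x & old_mask) == value ? "match" : "miss");
	printf("new mask: W %s, X %s\n",
	       (cbz_w & new_mask) == value ? "match" : "miss",
	       (cbz_x & new_mask) == value ? "match" : "miss");
	return 0;
}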
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 94674eb7e7bb..54bb4ba97441 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h | |||
@@ -129,6 +129,9 @@ | |||
129 | * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are | 129 | * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are |
130 | * not known to exist and will break with this configuration. | 130 | * not known to exist and will break with this configuration. |
131 | * | 131 | * |
132 | * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time | ||
133 | * (see hyp-init.S). | ||
134 | * | ||
132 | * Note that when using 4K pages, we concatenate two first level page tables | 135 | * Note that when using 4K pages, we concatenate two first level page tables |
133 | * together. | 136 | * together. |
134 | * | 137 | * |
@@ -138,7 +141,6 @@ | |||
138 | #ifdef CONFIG_ARM64_64K_PAGES | 141 | #ifdef CONFIG_ARM64_64K_PAGES |
139 | /* | 142 | /* |
140 | * Stage2 translation configuration: | 143 | * Stage2 translation configuration: |
141 | * 40bits output (PS = 2) | ||
142 | * 40bits input (T0SZ = 24) | 144 | * 40bits input (T0SZ = 24) |
143 | * 64kB pages (TG0 = 1) | 145 | * 64kB pages (TG0 = 1) |
144 | * 2 level page tables (SL = 1) | 146 | * 2 level page tables (SL = 1) |
@@ -150,7 +152,6 @@ | |||
150 | #else | 152 | #else |
151 | /* | 153 | /* |
152 | * Stage2 translation configuration: | 154 | * Stage2 translation configuration: |
153 | * 40bits output (PS = 2) | ||
154 | * 40bits input (T0SZ = 24) | 155 | * 40bits input (T0SZ = 24) |
155 | * 4kB pages (TG0 = 0) | 156 | * 4kB pages (TG0 = 0) |
156 | * 3 level page tables (SL = 1) | 157 | * 3 level page tables (SL = 1) |
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6458b5373142..bbfb600fa822 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
@@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) | |||
158 | #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) | 158 | #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) |
159 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) | 159 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) |
160 | 160 | ||
161 | #define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) | ||
162 | |||
161 | /* | 163 | /* |
162 | * If we are concatenating first level stage-2 page tables, we would have less | 164 | * If we are concatenating first level stage-2 page tables, we would have less |
163 | * than or equal to 16 pointers in the fake PGD, because that's what the | 165 | * than or equal to 16 pointers in the fake PGD, because that's what the |
@@ -171,43 +173,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) | |||
171 | #define KVM_PREALLOC_LEVEL (0) | 173 | #define KVM_PREALLOC_LEVEL (0) |
172 | #endif | 174 | #endif |
173 | 175 | ||
174 | /** | ||
175 | * kvm_prealloc_hwpgd - allocate inital table for VTTBR | ||
176 | * @kvm: The KVM struct pointer for the VM. | ||
177 | * @pgd: The kernel pseudo pgd | ||
178 | * | ||
179 | * When the kernel uses more levels of page tables than the guest, we allocate | ||
180 | * a fake PGD and pre-populate it to point to the next-level page table, which | ||
181 | * will be the real initial page table pointed to by the VTTBR. | ||
182 | * | ||
183 | * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and | ||
184 | * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we | ||
185 | * allocate 2 consecutive PUD pages. | ||
186 | */ | ||
187 | static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) | ||
188 | { | ||
189 | unsigned int i; | ||
190 | unsigned long hwpgd; | ||
191 | |||
192 | if (KVM_PREALLOC_LEVEL == 0) | ||
193 | return 0; | ||
194 | |||
195 | hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT); | ||
196 | if (!hwpgd) | ||
197 | return -ENOMEM; | ||
198 | |||
199 | for (i = 0; i < PTRS_PER_S2_PGD; i++) { | ||
200 | if (KVM_PREALLOC_LEVEL == 1) | ||
201 | pgd_populate(NULL, pgd + i, | ||
202 | (pud_t *)hwpgd + i * PTRS_PER_PUD); | ||
203 | else if (KVM_PREALLOC_LEVEL == 2) | ||
204 | pud_populate(NULL, pud_offset(pgd, 0) + i, | ||
205 | (pmd_t *)hwpgd + i * PTRS_PER_PMD); | ||
206 | } | ||
207 | |||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static inline void *kvm_get_hwpgd(struct kvm *kvm) | 176 | static inline void *kvm_get_hwpgd(struct kvm *kvm) |
212 | { | 177 | { |
213 | pgd_t *pgd = kvm->arch.pgd; | 178 | pgd_t *pgd = kvm->arch.pgd; |
@@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm) | |||
224 | return pmd_offset(pud, 0); | 189 | return pmd_offset(pud, 0); |
225 | } | 190 | } |
226 | 191 | ||
227 | static inline void kvm_free_hwpgd(struct kvm *kvm) | 192 | static inline unsigned int kvm_get_hwpgd_size(void) |
228 | { | 193 | { |
229 | if (KVM_PREALLOC_LEVEL > 0) { | 194 | if (KVM_PREALLOC_LEVEL > 0) |
230 | unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); | 195 | return PTRS_PER_S2_PGD * PAGE_SIZE; |
231 | free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); | 196 | return PTRS_PER_S2_PGD * sizeof(pgd_t); |
232 | } | ||
233 | } | 197 | } |
234 | 198 | ||
235 | static inline bool kvm_page_empty(void *ptr) | 199 | static inline bool kvm_page_empty(void *ptr) |
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index a9eee33dfa62..101a42bde728 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h | |||
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
151 | { | 151 | { |
152 | unsigned int cpu = smp_processor_id(); | 152 | unsigned int cpu = smp_processor_id(); |
153 | 153 | ||
154 | /* | ||
155 | * init_mm.pgd does not contain any user mappings and it is always | ||
156 | * active for kernel addresses in TTBR1. Just set the reserved TTBR0. | ||
157 | */ | ||
158 | if (next == &init_mm) { | ||
159 | cpu_set_reserved_ttbr0(); | ||
160 | return; | ||
161 | } | ||
162 | |||
154 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) | 163 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) |
155 | check_and_switch_context(next, tsk); | 164 | check_and_switch_context(next, tsk); |
156 | } | 165 | } |
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 09da25bc596f..4fde8c1df97f 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h | |||
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, | |||
204 | return ret; | 204 | return ret; |
205 | } | 205 | } |
206 | 206 | ||
207 | #define _percpu_read(pcp) \ | ||
208 | ({ \ | ||
209 | typeof(pcp) __retval; \ | ||
210 | preempt_disable(); \ | ||
211 | __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \ | ||
212 | sizeof(pcp)); \ | ||
213 | preempt_enable(); \ | ||
214 | __retval; \ | ||
215 | }) | ||
216 | |||
217 | #define _percpu_write(pcp, val) \ | ||
218 | do { \ | ||
219 | preempt_disable(); \ | ||
220 | __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \ | ||
221 | sizeof(pcp)); \ | ||
222 | preempt_enable(); \ | ||
223 | } while(0) \ | ||
224 | |||
225 | #define _pcp_protect(operation, pcp, val) \ | ||
226 | ({ \ | ||
227 | typeof(pcp) __retval; \ | ||
228 | preempt_disable(); \ | ||
229 | __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \ | ||
230 | (val), sizeof(pcp)); \ | ||
231 | preempt_enable(); \ | ||
232 | __retval; \ | ||
233 | }) | ||
234 | |||
207 | #define _percpu_add(pcp, val) \ | 235 | #define _percpu_add(pcp, val) \ |
208 | __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | 236 | _pcp_protect(__percpu_add, pcp, val) |
209 | 237 | ||
210 | #define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val)) | 238 | #define _percpu_add_return(pcp, val) _percpu_add(pcp, val) |
211 | 239 | ||
212 | #define _percpu_and(pcp, val) \ | 240 | #define _percpu_and(pcp, val) \ |
213 | __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | 241 | _pcp_protect(__percpu_and, pcp, val) |
214 | 242 | ||
215 | #define _percpu_or(pcp, val) \ | 243 | #define _percpu_or(pcp, val) \ |
216 | __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | 244 | _pcp_protect(__percpu_or, pcp, val) |
217 | |||
218 | #define _percpu_read(pcp) (typeof(pcp)) \ | ||
219 | (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp))) | ||
220 | |||
221 | #define _percpu_write(pcp, val) \ | ||
222 | __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)) | ||
223 | 245 | ||
224 | #define _percpu_xchg(pcp, val) (typeof(pcp)) \ | 246 | #define _percpu_xchg(pcp, val) (typeof(pcp)) \ |
225 | (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))) | 247 | _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)) |
226 | 248 | ||
227 | #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) | 249 | #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) |
228 | #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) | 250 | #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) |
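The new _pcp_protect() wrapper above closes a preemption race in the this_cpu_* accessors: raw_cpu_ptr() computes a per-CPU address, and if the task is preempted and migrated before the access completes it ends up modifying another CPU's copy. Disabling preemption around both the address computation and the access makes the operation safe from preemptible context. The same pattern open-coded, as a sketch (the per-CPU variable is hypothetical and the usual linux/percpu.h declarations are assumed):

    DEFINE_PER_CPU(unsigned long, example_counter);   /* illustrative variable */

    static void bump_example_counter(void)
    {
        unsigned long *p;

        preempt_disable();                    /* pin the task to this CPU */
        p = raw_cpu_ptr(&example_counter);    /* per-CPU address now stable */
        (*p)++;
        preempt_enable();
    }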
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 16449c535e50..800ec0e87ed9 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) | |||
460 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 460 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
461 | { | 461 | { |
462 | const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | | 462 | const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | |
463 | PTE_PROT_NONE | PTE_VALID | PTE_WRITE; | 463 | PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; |
464 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); | 464 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); |
465 | return pte; | 465 | return pte; |
466 | } | 466 | } |
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 9a8fd84f8fb2..941c375616e2 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h | |||
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); | |||
39 | 39 | ||
40 | #include <asm/memory.h> | 40 | #include <asm/memory.h> |
41 | 41 | ||
42 | #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) | 42 | #define cpu_switch_mm(pgd,mm) \ |
43 | do { \ | ||
44 | BUG_ON(pgd == swapper_pg_dir); \ | ||
45 | cpu_do_switch_mm(virt_to_phys(pgd),mm); \ | ||
46 | } while (0) | ||
43 | 47 | ||
44 | #define cpu_get_pgd() \ | 48 | #define cpu_get_pgd() \ |
45 | ({ \ | 49 | ({ \ |
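Together with the switch_mm() hunk above and the efi_set_pgd() change further down, the BUG_ON() enforces that TTBR0 is never pointed at swapper_pg_dir: contexts that need no user mappings (kernel threads running on init_mm, EFI runtime calls made from init_mm) install the reserved, empty tables via cpu_set_reserved_ttbr0() instead of switching to the swapper page tables.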
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index f9be30ea1cbd..20e9591a60cf 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h | |||
@@ -45,7 +45,8 @@ | |||
45 | #define STACK_TOP STACK_TOP_MAX | 45 | #define STACK_TOP STACK_TOP_MAX |
46 | #endif /* CONFIG_COMPAT */ | 46 | #endif /* CONFIG_COMPAT */ |
47 | 47 | ||
48 | #define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK | 48 | extern phys_addr_t arm64_dma_phys_limit; |
49 | #define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1) | ||
49 | #endif /* __KERNEL__ */ | 50 | #endif /* __KERNEL__ */ |
50 | 51 | ||
51 | struct debug_info { | 52 | struct debug_info { |
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index c028fe37456f..53d9c354219f 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb) | |||
48 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | 48 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, |
49 | unsigned long addr) | 49 | unsigned long addr) |
50 | { | 50 | { |
51 | __flush_tlb_pgtable(tlb->mm, addr); | ||
51 | pgtable_page_dtor(pte); | 52 | pgtable_page_dtor(pte); |
52 | tlb_remove_entry(tlb, pte); | 53 | tlb_remove_entry(tlb, pte); |
53 | } | 54 | } |
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
56 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | 57 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, |
57 | unsigned long addr) | 58 | unsigned long addr) |
58 | { | 59 | { |
60 | __flush_tlb_pgtable(tlb->mm, addr); | ||
59 | tlb_remove_entry(tlb, virt_to_page(pmdp)); | 61 | tlb_remove_entry(tlb, virt_to_page(pmdp)); |
60 | } | 62 | } |
61 | #endif | 63 | #endif |
@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | |||
64 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, | 66 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, |
65 | unsigned long addr) | 67 | unsigned long addr) |
66 | { | 68 | { |
69 | __flush_tlb_pgtable(tlb->mm, addr); | ||
67 | tlb_remove_entry(tlb, virt_to_page(pudp)); | 70 | tlb_remove_entry(tlb, virt_to_page(pudp)); |
68 | } | 71 | } |
69 | #endif | 72 | #endif |
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 73f0ce570fb3..c3bb05b98616 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h | |||
@@ -24,11 +24,6 @@ | |||
24 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
25 | #include <asm/cputype.h> | 25 | #include <asm/cputype.h> |
26 | 26 | ||
27 | extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); | ||
28 | extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long); | ||
29 | |||
30 | extern struct cpu_tlb_fns cpu_tlb; | ||
31 | |||
32 | /* | 27 | /* |
33 | * TLB Management | 28 | * TLB Management |
34 | * ============== | 29 | * ============== |
@@ -149,6 +144,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end | |||
149 | } | 144 | } |
150 | 145 | ||
151 | /* | 146 | /* |
147 | * Used to invalidate the TLB (walk caches) corresponding to intermediate page | ||
148 | * table levels (pgd/pud/pmd). | ||
149 | */ | ||
150 | static inline void __flush_tlb_pgtable(struct mm_struct *mm, | ||
151 | unsigned long uaddr) | ||
152 | { | ||
153 | unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48); | ||
154 | |||
155 | dsb(ishst); | ||
156 | asm("tlbi vae1is, %0" : : "r" (addr)); | ||
157 | dsb(ish); | ||
158 | } | ||
159 | /* | ||
152 | * On AArch64, the cache coherency is handled via the set_pte_at() function. | 160 | * On AArch64, the cache coherency is handled via the set_pte_at() function. |
153 | */ | 161 | */ |
154 | static inline void update_mmu_cache(struct vm_area_struct *vma, | 162 | static inline void update_mmu_cache(struct vm_area_struct *vma, |
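__flush_tlb_pgtable() is the helper used by the __pte/__pmd/__pud_free_tlb() hooks in the previous hunk: before a page-table page goes back to the allocator, any walk-cache entries that still point at it are invalidated so the hardware walker cannot follow a stale table pointer. The TLBI VAE1IS operand carries the page-shifted address in the low bits and the ASID in bits [63:48]; a worked example with illustrative values:

    /* uaddr = 0x0000ffffb7f2a000, ASID = 0x2a:
     *   uaddr >> 12          = 0x0000000ffffb7f2a
     *   (u64)ASID << 48      = 0x002a000000000000
     *   TLBI operand (OR)    = 0x002a000ffffb7f2a
     * dsb(ishst) orders the page-table update before the TLBI and
     * dsb(ish) waits for the invalidation to complete on all CPUs in
     * the inner-shareable domain before the page is freed. */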
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index bef04afd6031..5ee07eee80c2 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile | |||
@@ -15,8 +15,9 @@ CFLAGS_REMOVE_return_address.o = -pg | |||
15 | arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ | 15 | arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ |
16 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ | 16 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ |
17 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ | 17 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ |
18 | hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \ | 18 | hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ |
19 | cpuinfo.o cpu_errata.o alternative.o cacheinfo.o | 19 | return_address.o cpuinfo.o cpu_errata.o \ |
20 | alternative.o cacheinfo.o | ||
20 | 21 | ||
21 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ | 22 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ |
22 | sys_compat.o entry32.o \ | 23 | sys_compat.o entry32.o \ |
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index b42c7b480e1e..ab21e0d58278 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c | |||
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init); | |||
337 | 337 | ||
338 | static void efi_set_pgd(struct mm_struct *mm) | 338 | static void efi_set_pgd(struct mm_struct *mm) |
339 | { | 339 | { |
340 | cpu_switch_mm(mm->pgd, mm); | 340 | if (mm == &init_mm) |
341 | cpu_set_reserved_ttbr0(); | ||
342 | else | ||
343 | cpu_switch_mm(mm->pgd, mm); | ||
344 | |||
341 | flush_tlb_all(); | 345 | flush_tlb_all(); |
342 | if (icache_is_aivivt()) | 346 | if (icache_is_aivivt()) |
343 | __flush_icache_all(); | 347 | __flush_icache_all(); |
@@ -354,3 +358,12 @@ void efi_virtmap_unload(void) | |||
354 | efi_set_pgd(current->active_mm); | 358 | efi_set_pgd(current->active_mm); |
355 | preempt_enable(); | 359 | preempt_enable(); |
356 | } | 360 | } |
361 | |||
362 | /* | ||
363 | * UpdateCapsule() depends on the system being shutdown via | ||
364 | * ResetSystem(). | ||
365 | */ | ||
366 | bool efi_poweroff_required(void) | ||
367 | { | ||
368 | return efi_enabled(EFI_RUNTIME_SERVICES); | ||
369 | } | ||
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index cf8556ae09d0..c851be795080 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c | |||
@@ -156,7 +156,7 @@ static int ftrace_modify_graph_caller(bool enable) | |||
156 | 156 | ||
157 | branch = aarch64_insn_gen_branch_imm(pc, | 157 | branch = aarch64_insn_gen_branch_imm(pc, |
158 | (unsigned long)ftrace_graph_caller, | 158 | (unsigned long)ftrace_graph_caller, |
159 | AARCH64_INSN_BRANCH_LINK); | 159 | AARCH64_INSN_BRANCH_NOLINK); |
160 | nop = aarch64_insn_gen_nop(); | 160 | nop = aarch64_insn_gen_nop(); |
161 | 161 | ||
162 | if (enable) | 162 | if (enable) |
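The patched slot in the ftrace entry code expects a plain branch to ftrace_graph_caller; generating a branch-with-link there would overwrite the link register that ftrace_graph_caller relies on to record and redirect the traced function's return address, so the instruction is now built with AARCH64_INSN_BRANCH_NOLINK.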
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 8ce88e08c030..07f930540f4a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -585,8 +585,8 @@ ENDPROC(set_cpu_boot_mode_flag) | |||
585 | * zeroing of .bss would clobber it. | 585 | * zeroing of .bss would clobber it. |
586 | */ | 586 | */ |
587 | .pushsection .data..cacheline_aligned | 587 | .pushsection .data..cacheline_aligned |
588 | ENTRY(__boot_cpu_mode) | ||
589 | .align L1_CACHE_SHIFT | 588 | .align L1_CACHE_SHIFT |
589 | ENTRY(__boot_cpu_mode) | ||
590 | .long BOOT_CPU_MODE_EL2 | 590 | .long BOOT_CPU_MODE_EL2 |
591 | .long 0 | 591 | .long 0 |
592 | .popsection | 592 | .popsection |
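Placing .align before the ENTRY() label matters here: with the label emitted first, the alignment directive could insert padding between __boot_cpu_mode and its two .long values, so accesses through the symbol would hit the padding rather than the cache-line-aligned data. Swapping the two lines keeps the symbol and the data at the same, aligned address.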
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 27d4864577e5..c8eca88f12e6 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c | |||
@@ -87,8 +87,10 @@ static void __kprobes *patch_map(void *addr, int fixmap) | |||
87 | 87 | ||
88 | if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) | 88 | if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) |
89 | page = vmalloc_to_page(addr); | 89 | page = vmalloc_to_page(addr); |
90 | else | 90 | else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA)) |
91 | page = virt_to_page(addr); | 91 | page = virt_to_page(addr); |
92 | else | ||
93 | return addr; | ||
92 | 94 | ||
93 | BUG_ON(!page); | 95 | BUG_ON(!page); |
94 | set_fixmap(fixmap, page_to_phys(page)); | 96 | set_fixmap(fixmap, page_to_phys(page)); |
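patch_map() now distinguishes three cases instead of two: module text with CONFIG_DEBUG_SET_MODULE_RONX maps through vmalloc_to_page(), core kernel text with CONFIG_DEBUG_RODATA maps through virt_to_page(), and everything else is already writable so the address is returned as-is. The old else branch also applied virt_to_page() to module (vmalloc) addresses when RONX was disabled, which yields the wrong page.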
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index fde9923af859..c6b1f3b96f45 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <stdarg.h> | 21 | #include <stdarg.h> |
22 | 22 | ||
23 | #include <linux/compat.h> | 23 | #include <linux/compat.h> |
24 | #include <linux/efi.h> | ||
24 | #include <linux/export.h> | 25 | #include <linux/export.h> |
25 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
26 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
@@ -150,6 +151,13 @@ void machine_restart(char *cmd) | |||
150 | local_irq_disable(); | 151 | local_irq_disable(); |
151 | smp_send_stop(); | 152 | smp_send_stop(); |
152 | 153 | ||
154 | /* | ||
155 | * UpdateCapsule() depends on the system being reset via | ||
156 | * ResetSystem(). | ||
157 | */ | ||
158 | if (efi_enabled(EFI_RUNTIME_SERVICES)) | ||
159 | efi_reboot(reboot_mode, NULL); | ||
160 | |||
153 | /* Now call the architecture specific reboot code. */ | 161 | /* Now call the architecture specific reboot code. */ |
154 | if (arm_pm_restart) | 162 | if (arm_pm_restart) |
155 | arm_pm_restart(reboot_mode, cmd); | 163 | arm_pm_restart(reboot_mode, cmd); |
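This pairs with the efi_poweroff_required() hook added to efi.c above: when EFI runtime services are available, reboot goes through efi_reboot() and power-off is likewise left to the firmware, because UpdateCapsule() requires the system to go down via ResetSystem() for a pending capsule update to be processed.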
diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S new file mode 100644 index 000000000000..cf83e61cd3b5 --- /dev/null +++ b/arch/arm64/kernel/psci-call.S | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2015 ARM Limited | ||
12 | * | ||
13 | * Author: Will Deacon <will.deacon@arm.com> | ||
14 | */ | ||
15 | |||
16 | #include <linux/linkage.h> | ||
17 | |||
18 | /* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ | ||
19 | ENTRY(__invoke_psci_fn_hvc) | ||
20 | hvc #0 | ||
21 | ret | ||
22 | ENDPROC(__invoke_psci_fn_hvc) | ||
23 | |||
24 | /* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ | ||
25 | ENTRY(__invoke_psci_fn_smc) | ||
26 | smc #0 | ||
27 | ret | ||
28 | ENDPROC(__invoke_psci_fn_smc) | ||
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 3425f311c49e..9b8a70ae64a1 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c | |||
@@ -57,6 +57,9 @@ static struct psci_operations psci_ops; | |||
57 | static int (*invoke_psci_fn)(u64, u64, u64, u64); | 57 | static int (*invoke_psci_fn)(u64, u64, u64, u64); |
58 | typedef int (*psci_initcall_t)(const struct device_node *); | 58 | typedef int (*psci_initcall_t)(const struct device_node *); |
59 | 59 | ||
60 | asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64); | ||
61 | asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64); | ||
62 | |||
60 | enum psci_function { | 63 | enum psci_function { |
61 | PSCI_FN_CPU_SUSPEND, | 64 | PSCI_FN_CPU_SUSPEND, |
62 | PSCI_FN_CPU_ON, | 65 | PSCI_FN_CPU_ON, |
@@ -109,40 +112,6 @@ static void psci_power_state_unpack(u32 power_state, | |||
109 | PSCI_0_2_POWER_STATE_AFFL_SHIFT; | 112 | PSCI_0_2_POWER_STATE_AFFL_SHIFT; |
110 | } | 113 | } |
111 | 114 | ||
112 | /* | ||
113 | * The following two functions are invoked via the invoke_psci_fn pointer | ||
114 | * and will not be inlined, allowing us to piggyback on the AAPCS. | ||
115 | */ | ||
116 | static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, | ||
117 | u64 arg2) | ||
118 | { | ||
119 | asm volatile( | ||
120 | __asmeq("%0", "x0") | ||
121 | __asmeq("%1", "x1") | ||
122 | __asmeq("%2", "x2") | ||
123 | __asmeq("%3", "x3") | ||
124 | "hvc #0\n" | ||
125 | : "+r" (function_id) | ||
126 | : "r" (arg0), "r" (arg1), "r" (arg2)); | ||
127 | |||
128 | return function_id; | ||
129 | } | ||
130 | |||
131 | static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, | ||
132 | u64 arg2) | ||
133 | { | ||
134 | asm volatile( | ||
135 | __asmeq("%0", "x0") | ||
136 | __asmeq("%1", "x1") | ||
137 | __asmeq("%2", "x2") | ||
138 | __asmeq("%3", "x3") | ||
139 | "smc #0\n" | ||
140 | : "+r" (function_id) | ||
141 | : "r" (arg0), "r" (arg1), "r" (arg2)); | ||
142 | |||
143 | return function_id; | ||
144 | } | ||
145 | |||
146 | static int psci_get_version(void) | 115 | static int psci_get_version(void) |
147 | { | 116 | { |
148 | int err; | 117 | int err; |
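Moving the HVC/SMC conduits out of inline asm and into psci-call.S (added above) lets the procedure call standard do the register marshalling: by the time the two-instruction trampolines run, the function id is already in x0 and the arguments in x1-x3, with the result returned in x0, instead of relying on noinline plus __asmeq checks around an asm block the compiler could still rearrange. The call sites are unchanged; roughly (illustrative, the function id macro is from uapi/linux/psci.h):

    /* probe time: select the conduit described by the device tree */
    invoke_psci_fn = __invoke_psci_fn_smc;    /* or __invoke_psci_fn_hvc */

    /* later: a PSCI call, id in x0, arguments in x1-x3 */
    invoke_psci_fn(PSCI_0_2_FN_CPU_OFF, 0, 0, 0);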
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index c20a300e2213..d26fcd4cd6e6 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) | |||
154 | case __SI_TIMER: | 154 | case __SI_TIMER: |
155 | err |= __put_user(from->si_tid, &to->si_tid); | 155 | err |= __put_user(from->si_tid, &to->si_tid); |
156 | err |= __put_user(from->si_overrun, &to->si_overrun); | 156 | err |= __put_user(from->si_overrun, &to->si_overrun); |
157 | err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, | 157 | err |= __put_user(from->si_int, &to->si_int); |
158 | &to->si_ptr); | ||
159 | break; | 158 | break; |
160 | case __SI_POLL: | 159 | case __SI_POLL: |
161 | err |= __put_user(from->si_band, &to->si_band); | 160 | err |= __put_user(from->si_band, &to->si_band); |
@@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) | |||
184 | case __SI_MESGQ: /* But this is */ | 183 | case __SI_MESGQ: /* But this is */ |
185 | err |= __put_user(from->si_pid, &to->si_pid); | 184 | err |= __put_user(from->si_pid, &to->si_pid); |
186 | err |= __put_user(from->si_uid, &to->si_uid); | 185 | err |= __put_user(from->si_uid, &to->si_uid); |
187 | err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); | 186 | err |= __put_user(from->si_int, &to->si_int); |
188 | break; | 187 | break; |
189 | case __SI_SYS: | 188 | case __SI_SYS: |
190 | err |= __put_user((compat_uptr_t)(unsigned long) | 189 | err |= __put_user((compat_uptr_t)(unsigned long) |
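In compat_siginfo_t the si_int and si_ptr fields overlap in a union; copying the value by truncating si_ptr to a 32-bit pointer picks the wrong half of the 64-bit union on big-endian kernels, so the queued integer sent with the signal was lost. Copying si_int directly is correct for both endiannesses.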
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index fe652ffd34c2..efa79e8d4196 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S | |||
@@ -174,8 +174,6 @@ ENDPROC(__kernel_clock_gettime) | |||
174 | /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */ | 174 | /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */ |
175 | ENTRY(__kernel_clock_getres) | 175 | ENTRY(__kernel_clock_getres) |
176 | .cfi_startproc | 176 | .cfi_startproc |
177 | cbz w1, 3f | ||
178 | |||
179 | cmp w0, #CLOCK_REALTIME | 177 | cmp w0, #CLOCK_REALTIME |
180 | ccmp w0, #CLOCK_MONOTONIC, #0x4, ne | 178 | ccmp w0, #CLOCK_MONOTONIC, #0x4, ne |
181 | b.ne 1f | 179 | b.ne 1f |
@@ -188,6 +186,7 @@ ENTRY(__kernel_clock_getres) | |||
188 | b.ne 4f | 186 | b.ne 4f |
189 | ldr x2, 6f | 187 | ldr x2, 6f |
190 | 2: | 188 | 2: |
189 | cbz w1, 3f | ||
191 | stp xzr, x2, [x1] | 190 | stp xzr, x2, [x1] |
192 | 191 | ||
193 | 3: /* res == NULL. */ | 192 | 3: /* res == NULL. */ |
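Previously the vDSO clock_getres() took the res == NULL shortcut before validating the clock id, so any clock id returned success as long as res was NULL. With the cbz moved after the validation, an invalid id is rejected even for a NULL res pointer. Observable from userspace, assuming the libc routes clock_getres() through the vDSO:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec ts;

        /* Bogus clock id with a NULL res pointer: now fails with
         * EINVAL instead of silently returning 0. */
        if (clock_getres((clockid_t)12345, NULL) != 0)
            perror("clock_getres(bogus id)");

        /* A valid clock still reports its resolution. */
        if (clock_getres(CLOCK_MONOTONIC, &ts) == 0)
            printf("monotonic resolution: %ld ns\n", ts.tv_nsec);

        return 0;
    }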
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 0a24b9b8c698..ef7d112f5ce0 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p) | |||
51 | } | 51 | } |
52 | early_param("coherent_pool", early_coherent_pool); | 52 | early_param("coherent_pool", early_coherent_pool); |
53 | 53 | ||
54 | static void *__alloc_from_pool(size_t size, struct page **ret_page) | 54 | static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) |
55 | { | 55 | { |
56 | unsigned long val; | 56 | unsigned long val; |
57 | void *ptr = NULL; | 57 | void *ptr = NULL; |
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) | |||
67 | 67 | ||
68 | *ret_page = phys_to_page(phys); | 68 | *ret_page = phys_to_page(phys); |
69 | ptr = (void *)val; | 69 | ptr = (void *)val; |
70 | if (flags & __GFP_ZERO) | ||
71 | memset(ptr, 0, size); | ||
70 | } | 72 | } |
71 | 73 | ||
72 | return ptr; | 74 | return ptr; |
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, | |||
101 | flags |= GFP_DMA; | 103 | flags |= GFP_DMA; |
102 | if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { | 104 | if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { |
103 | struct page *page; | 105 | struct page *page; |
106 | void *addr; | ||
104 | 107 | ||
105 | size = PAGE_ALIGN(size); | 108 | size = PAGE_ALIGN(size); |
106 | page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, | 109 | page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, |
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, | |||
109 | return NULL; | 112 | return NULL; |
110 | 113 | ||
111 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); | 114 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); |
112 | return page_address(page); | 115 | addr = page_address(page); |
116 | if (flags & __GFP_ZERO) | ||
117 | memset(addr, 0, size); | ||
118 | return addr; | ||
113 | } else { | 119 | } else { |
114 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); | 120 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); |
115 | } | 121 | } |
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size, | |||
146 | 152 | ||
147 | if (!coherent && !(flags & __GFP_WAIT)) { | 153 | if (!coherent && !(flags & __GFP_WAIT)) { |
148 | struct page *page = NULL; | 154 | struct page *page = NULL; |
149 | void *addr = __alloc_from_pool(size, &page); | 155 | void *addr = __alloc_from_pool(size, &page, flags); |
150 | 156 | ||
151 | if (addr) | 157 | if (addr) |
152 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); | 158 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); |
@@ -348,8 +354,6 @@ static struct dma_map_ops swiotlb_dma_ops = { | |||
348 | .mapping_error = swiotlb_dma_mapping_error, | 354 | .mapping_error = swiotlb_dma_mapping_error, |
349 | }; | 355 | }; |
350 | 356 | ||
351 | extern int swiotlb_late_init_with_default_size(size_t default_size); | ||
352 | |||
353 | static int __init atomic_pool_init(void) | 357 | static int __init atomic_pool_init(void) |
354 | { | 358 | { |
355 | pgprot_t prot = __pgprot(PROT_NORMAL_NC); | 359 | pgprot_t prot = __pgprot(PROT_NORMAL_NC); |
@@ -411,21 +415,13 @@ out: | |||
411 | return -ENOMEM; | 415 | return -ENOMEM; |
412 | } | 416 | } |
413 | 417 | ||
414 | static int __init swiotlb_late_init(void) | 418 | static int __init arm64_dma_init(void) |
415 | { | 419 | { |
416 | size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); | 420 | int ret; |
417 | 421 | ||
418 | dma_ops = &swiotlb_dma_ops; | 422 | dma_ops = &swiotlb_dma_ops; |
419 | 423 | ||
420 | return swiotlb_late_init_with_default_size(swiotlb_size); | 424 | ret = atomic_pool_init(); |
421 | } | ||
422 | |||
423 | static int __init arm64_dma_init(void) | ||
424 | { | ||
425 | int ret = 0; | ||
426 | |||
427 | ret |= swiotlb_late_init(); | ||
428 | ret |= atomic_pool_init(); | ||
429 | 425 | ||
430 | return ret; | 426 | return ret; |
431 | } | 427 | } |
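Neither the atomic pool (__alloc_from_pool) nor CMA (dma_alloc_from_contiguous) zeroes memory on its own, so __GFP_ZERO is now honoured explicitly with a memset() in both paths. The swiotlb late initialisation is dropped here as well, in favour of the early swiotlb_init(1) added to mem_init() below. From a driver's point of view the guarantee looks like this (sketch, the helper name is illustrative):

    #include <linux/dma-mapping.h>

    /* Returns a coherent buffer that is cleared regardless of which
     * backend (atomic pool, CMA or swiotlb) satisfied the request. */
    static void *example_alloc_cleared(struct device *dev, size_t size,
                                       dma_addr_t *handle)
    {
        return dma_alloc_coherent(dev, size, handle,
                                  GFP_ATOMIC | __GFP_ZERO);
    }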
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 71145f952070..ae85da6307bb 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/dma-mapping.h> | 33 | #include <linux/dma-mapping.h> |
34 | #include <linux/dma-contiguous.h> | 34 | #include <linux/dma-contiguous.h> |
35 | #include <linux/efi.h> | 35 | #include <linux/efi.h> |
36 | #include <linux/swiotlb.h> | ||
36 | 37 | ||
37 | #include <asm/fixmap.h> | 38 | #include <asm/fixmap.h> |
38 | #include <asm/memory.h> | 39 | #include <asm/memory.h> |
@@ -45,6 +46,7 @@ | |||
45 | #include "mm.h" | 46 | #include "mm.h" |
46 | 47 | ||
47 | phys_addr_t memstart_addr __read_mostly = 0; | 48 | phys_addr_t memstart_addr __read_mostly = 0; |
49 | phys_addr_t arm64_dma_phys_limit __read_mostly; | ||
48 | 50 | ||
49 | #ifdef CONFIG_BLK_DEV_INITRD | 51 | #ifdef CONFIG_BLK_DEV_INITRD |
50 | static int __init early_initrd(char *p) | 52 | static int __init early_initrd(char *p) |
@@ -85,7 +87,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) | |||
85 | 87 | ||
86 | /* 4GB maximum for 32-bit only capable devices */ | 88 | /* 4GB maximum for 32-bit only capable devices */ |
87 | if (IS_ENABLED(CONFIG_ZONE_DMA)) { | 89 | if (IS_ENABLED(CONFIG_ZONE_DMA)) { |
88 | max_dma = PFN_DOWN(max_zone_dma_phys()); | 90 | max_dma = PFN_DOWN(arm64_dma_phys_limit); |
89 | zone_size[ZONE_DMA] = max_dma - min; | 91 | zone_size[ZONE_DMA] = max_dma - min; |
90 | } | 92 | } |
91 | zone_size[ZONE_NORMAL] = max - max_dma; | 93 | zone_size[ZONE_NORMAL] = max - max_dma; |
@@ -156,8 +158,6 @@ early_param("mem", early_mem); | |||
156 | 158 | ||
157 | void __init arm64_memblock_init(void) | 159 | void __init arm64_memblock_init(void) |
158 | { | 160 | { |
159 | phys_addr_t dma_phys_limit = 0; | ||
160 | |||
161 | memblock_enforce_memory_limit(memory_limit); | 161 | memblock_enforce_memory_limit(memory_limit); |
162 | 162 | ||
163 | /* | 163 | /* |
@@ -174,8 +174,10 @@ void __init arm64_memblock_init(void) | |||
174 | 174 | ||
175 | /* 4GB maximum for 32-bit only capable devices */ | 175 | /* 4GB maximum for 32-bit only capable devices */ |
176 | if (IS_ENABLED(CONFIG_ZONE_DMA)) | 176 | if (IS_ENABLED(CONFIG_ZONE_DMA)) |
177 | dma_phys_limit = max_zone_dma_phys(); | 177 | arm64_dma_phys_limit = max_zone_dma_phys(); |
178 | dma_contiguous_reserve(dma_phys_limit); | 178 | else |
179 | arm64_dma_phys_limit = PHYS_MASK + 1; | ||
180 | dma_contiguous_reserve(arm64_dma_phys_limit); | ||
179 | 181 | ||
180 | memblock_allow_resize(); | 182 | memblock_allow_resize(); |
181 | memblock_dump_all(); | 183 | memblock_dump_all(); |
@@ -276,6 +278,8 @@ static void __init free_unused_memmap(void) | |||
276 | */ | 278 | */ |
277 | void __init mem_init(void) | 279 | void __init mem_init(void) |
278 | { | 280 | { |
281 | swiotlb_init(1); | ||
282 | |||
279 | set_max_mapnr(pfn_to_page(max_pfn) - mem_map); | 283 | set_max_mapnr(pfn_to_page(max_pfn) - mem_map); |
280 | 284 | ||
281 | #ifndef CONFIG_SPARSEMEM_VMEMMAP | 285 | #ifndef CONFIG_SPARSEMEM_VMEMMAP |
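arm64_dma_phys_limit becomes a real global so that ARCH_LOW_ADDRESS_LIMIT (see the processor.h hunk above) can cap low-memory allocations at what 32-bit-DMA-only devices can actually address. The fallback preserves the old behaviour: with CONFIG_ZONE_DMA disabled the limit is PHYS_MASK + 1, so ARCH_LOW_ADDRESS_LIMIT evaluates to (PHYS_MASK + 1) - 1 = PHYS_MASK, exactly the previous constant. mem_init() also now sets up the swiotlb bounce buffers early via swiotlb_init(1), matching the removal of the late init from dma-mapping.c.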
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index bb0ea94c4ba1..1d3ec3ddd84b 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c | |||
@@ -51,7 +51,10 @@ static int change_memory_common(unsigned long addr, int numpages, | |||
51 | WARN_ON_ONCE(1); | 51 | WARN_ON_ONCE(1); |
52 | } | 52 | } |
53 | 53 | ||
54 | if (!is_module_address(start) || !is_module_address(end - 1)) | 54 | if (start < MODULES_VADDR || start >= MODULES_END) |
55 | return -EINVAL; | ||
56 | |||
57 | if (end < MODULES_VADDR || end >= MODULES_END) | ||
55 | return -EINVAL; | 58 | return -EINVAL; |
56 | 59 | ||
57 | data.set_mask = set_mask; | 60 | data.set_mask = set_mask; |
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h index 78d4483ba40c..ec4db6df5e0d 100644 --- a/arch/c6x/include/asm/pgtable.h +++ b/arch/c6x/include/asm/pgtable.h | |||
@@ -67,6 +67,11 @@ extern unsigned long empty_zero_page; | |||
67 | */ | 67 | */ |
68 | #define pgtable_cache_init() do { } while (0) | 68 | #define pgtable_cache_init() do { } while (0) |
69 | 69 | ||
70 | /* | ||
71 | * c6x is !MMU, so define the simplest implementation | ||
72 | */ | ||
73 | #define pgprot_writecombine pgprot_noncached | ||
74 | |||
70 | #include <asm-generic/pgtable.h> | 75 | #include <asm-generic/pgtable.h> |
71 | 76 | ||
72 | #endif /* _ASM_C6X_PGTABLE_H */ | 77 | #endif /* _ASM_C6X_PGTABLE_H */ |
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h index 93bcf2abd1a1..07d7a7ef8bd5 100644 --- a/arch/frv/include/asm/pgtable.h +++ b/arch/frv/include/asm/pgtable.h | |||
@@ -123,12 +123,14 @@ extern unsigned long empty_zero_page; | |||
123 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) | 123 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) |
124 | #define PTRS_PER_PGD 64 | 124 | #define PTRS_PER_PGD 64 |
125 | 125 | ||
126 | #define __PAGETABLE_PUD_FOLDED | ||
126 | #define PUD_SHIFT 26 | 127 | #define PUD_SHIFT 26 |
127 | #define PTRS_PER_PUD 1 | 128 | #define PTRS_PER_PUD 1 |
128 | #define PUD_SIZE (1UL << PUD_SHIFT) | 129 | #define PUD_SIZE (1UL << PUD_SHIFT) |
129 | #define PUD_MASK (~(PUD_SIZE - 1)) | 130 | #define PUD_MASK (~(PUD_SIZE - 1)) |
130 | #define PUE_SIZE 256 | 131 | #define PUE_SIZE 256 |
131 | 132 | ||
133 | #define __PAGETABLE_PMD_FOLDED | ||
132 | #define PMD_SHIFT 26 | 134 | #define PMD_SHIFT 26 |
133 | #define PMD_SIZE (1UL << PMD_SHIFT) | 135 | #define PMD_SIZE (1UL << PMD_SHIFT) |
134 | #define PMD_MASK (~(PMD_SIZE - 1)) | 136 | #define PMD_MASK (~(PMD_SIZE - 1)) |
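This hunk and the m32r, m68k and mn10300 ones below declare which page-table levels those architectures fold away (__PAGETABLE_PUD_FOLDED / __PAGETABLE_PMD_FOLDED), letting generic mm code distinguish a real level from a folded one, for example when counting page-table pages per mm. The pattern in the generic headers is roughly this (illustrative sketch of the mm_inc_nr_pmds() style helpers):

    #ifndef __PAGETABLE_PMD_FOLDED
    static inline void mm_inc_nr_pmds(struct mm_struct *mm)
    {
        atomic_long_inc(&mm->nr_pmds);    /* a real PMD page was allocated */
    }
    #else
    static inline void mm_inc_nr_pmds(struct mm_struct *mm) { }  /* folded: nothing to count */
    #endif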
diff --git a/arch/m32r/include/asm/pgtable-2level.h b/arch/m32r/include/asm/pgtable-2level.h index 8fd8ee70266a..421e6ba3a173 100644 --- a/arch/m32r/include/asm/pgtable-2level.h +++ b/arch/m32r/include/asm/pgtable-2level.h | |||
@@ -13,6 +13,7 @@ | |||
13 | * the M32R is two-level, so we don't really have any | 13 | * the M32R is two-level, so we don't really have any |
14 | * PMD directory physically. | 14 | * PMD directory physically. |
15 | */ | 15 | */ |
16 | #define __PAGETABLE_PMD_FOLDED | ||
16 | #define PMD_SHIFT 22 | 17 | #define PMD_SHIFT 22 |
17 | #define PTRS_PER_PMD 1 | 18 | #define PTRS_PER_PMD 1 |
18 | 19 | ||
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index 28a145bfbb71..35ed4a9981ae 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h | |||
@@ -54,10 +54,12 @@ | |||
54 | */ | 54 | */ |
55 | #ifdef CONFIG_SUN3 | 55 | #ifdef CONFIG_SUN3 |
56 | #define PTRS_PER_PTE 16 | 56 | #define PTRS_PER_PTE 16 |
57 | #define __PAGETABLE_PMD_FOLDED | ||
57 | #define PTRS_PER_PMD 1 | 58 | #define PTRS_PER_PMD 1 |
58 | #define PTRS_PER_PGD 2048 | 59 | #define PTRS_PER_PGD 2048 |
59 | #elif defined(CONFIG_COLDFIRE) | 60 | #elif defined(CONFIG_COLDFIRE) |
60 | #define PTRS_PER_PTE 512 | 61 | #define PTRS_PER_PTE 512 |
62 | #define __PAGETABLE_PMD_FOLDED | ||
61 | #define PTRS_PER_PMD 1 | 63 | #define PTRS_PER_PMD 1 |
62 | #define PTRS_PER_PGD 1024 | 64 | #define PTRS_PER_PGD 1024 |
63 | #else | 65 | #else |
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h index 9359e5048442..d5779b0ec573 100644 --- a/arch/metag/include/asm/io.h +++ b/arch/metag/include/asm/io.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_METAG_IO_H | 2 | #define _ASM_METAG_IO_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/pgtable-bits.h> | ||
5 | 6 | ||
6 | #define IO_SPACE_LIMIT 0 | 7 | #define IO_SPACE_LIMIT 0 |
7 | 8 | ||
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h new file mode 100644 index 000000000000..25ba6729f496 --- /dev/null +++ b/arch/metag/include/asm/pgtable-bits.h | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * Meta page table definitions. | ||
3 | */ | ||
4 | |||
5 | #ifndef _METAG_PGTABLE_BITS_H | ||
6 | #define _METAG_PGTABLE_BITS_H | ||
7 | |||
8 | #include <asm/metag_mem.h> | ||
9 | |||
10 | /* | ||
11 | * Definitions for MMU descriptors | ||
12 | * | ||
13 | * These are the hardware bits in the MMCU pte entries. | ||
14 | * Derived from the Meta toolkit headers. | ||
15 | */ | ||
16 | #define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT | ||
17 | #define _PAGE_WRITE MMCU_ENTRY_WR_BIT | ||
18 | #define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT | ||
19 | /* Write combine bit - this can cause writes to occur out of order */ | ||
20 | #define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT | ||
21 | /* Sys coherent bit - this bit is never used by Linux */ | ||
22 | #define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT | ||
23 | #define _PAGE_ALWAYS_ZERO_1 0x020 | ||
24 | #define _PAGE_CACHE_CTRL0 0x040 | ||
25 | #define _PAGE_CACHE_CTRL1 0x080 | ||
26 | #define _PAGE_ALWAYS_ZERO_2 0x100 | ||
27 | #define _PAGE_ALWAYS_ZERO_3 0x200 | ||
28 | #define _PAGE_ALWAYS_ZERO_4 0x400 | ||
29 | #define _PAGE_ALWAYS_ZERO_5 0x800 | ||
30 | |||
31 | /* These are software bits that we stuff into the gaps in the hardware | ||
32 | * pte entries that are not used. Note, these DO get stored in the actual | ||
33 | * hardware, but the hardware just does not use them. | ||
34 | */ | ||
35 | #define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1 | ||
36 | #define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2 | ||
37 | |||
38 | /* Pages owned, and protected by, the kernel. */ | ||
39 | #define _PAGE_KERNEL _PAGE_PRIV | ||
40 | |||
41 | /* No cacheing of this page */ | ||
42 | #define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S) | ||
43 | /* burst cacheing - good for data streaming */ | ||
44 | #define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S) | ||
45 | /* One cache way per thread */ | ||
46 | #define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S) | ||
47 | /* Full on cacheing */ | ||
48 | #define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S) | ||
49 | |||
50 | #define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE) | ||
51 | |||
52 | /* which bits are used for cache control ... */ | ||
53 | #define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \ | ||
54 | _PAGE_WR_COMBINE) | ||
55 | |||
56 | /* This is a mask of the bits that pte_modify is allowed to change. */ | ||
57 | #define _PAGE_CHG_MASK (PAGE_MASK) | ||
58 | |||
59 | #define _PAGE_SZ_SHIFT 1 | ||
60 | #define _PAGE_SZ_4K (0x0) | ||
61 | #define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT) | ||
62 | #define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT) | ||
63 | #define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT) | ||
64 | #define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT) | ||
65 | #define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT) | ||
66 | #define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT) | ||
67 | #define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT) | ||
68 | #define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT) | ||
69 | #define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT) | ||
70 | #define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT) | ||
71 | #define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT) | ||
72 | |||
73 | #if defined(CONFIG_PAGE_SIZE_4K) | ||
74 | #define _PAGE_SZ (_PAGE_SZ_4K) | ||
75 | #elif defined(CONFIG_PAGE_SIZE_8K) | ||
76 | #define _PAGE_SZ (_PAGE_SZ_8K) | ||
77 | #elif defined(CONFIG_PAGE_SIZE_16K) | ||
78 | #define _PAGE_SZ (_PAGE_SZ_16K) | ||
79 | #endif | ||
80 | #define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT) | ||
81 | |||
82 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_8K) | ||
83 | # define _PAGE_SZHUGE (_PAGE_SZ_8K) | ||
84 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K) | ||
85 | # define _PAGE_SZHUGE (_PAGE_SZ_16K) | ||
86 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K) | ||
87 | # define _PAGE_SZHUGE (_PAGE_SZ_32K) | ||
88 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
89 | # define _PAGE_SZHUGE (_PAGE_SZ_64K) | ||
90 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K) | ||
91 | # define _PAGE_SZHUGE (_PAGE_SZ_128K) | ||
92 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) | ||
93 | # define _PAGE_SZHUGE (_PAGE_SZ_256K) | ||
94 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | ||
95 | # define _PAGE_SZHUGE (_PAGE_SZ_512K) | ||
96 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M) | ||
97 | # define _PAGE_SZHUGE (_PAGE_SZ_1M) | ||
98 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M) | ||
99 | # define _PAGE_SZHUGE (_PAGE_SZ_2M) | ||
100 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M) | ||
101 | # define _PAGE_SZHUGE (_PAGE_SZ_4M) | ||
102 | #endif | ||
103 | |||
104 | #endif /* _METAG_PGTABLE_BITS_H */ | ||
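Splitting the MMU descriptor bits into asm/pgtable-bits.h lets asm/io.h (see the hunk above) use the _PAGE_* cache-control definitions without pulling in the whole of asm/pgtable.h and the asm-generic page-table machinery it includes; the definitions themselves are moved verbatim, as the removal from pgtable.h below shows.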
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h index d0604c0a8702..ffa3a3a2ecad 100644 --- a/arch/metag/include/asm/pgtable.h +++ b/arch/metag/include/asm/pgtable.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #ifndef _METAG_PGTABLE_H | 5 | #ifndef _METAG_PGTABLE_H |
6 | #define _METAG_PGTABLE_H | 6 | #define _METAG_PGTABLE_H |
7 | 7 | ||
8 | #include <asm/pgtable-bits.h> | ||
8 | #include <asm-generic/pgtable-nopmd.h> | 9 | #include <asm-generic/pgtable-nopmd.h> |
9 | 10 | ||
10 | /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ | 11 | /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ |
@@ -21,100 +22,6 @@ | |||
21 | #endif | 22 | #endif |
22 | 23 | ||
23 | /* | 24 | /* |
24 | * Definitions for MMU descriptors | ||
25 | * | ||
26 | * These are the hardware bits in the MMCU pte entries. | ||
27 | * Derived from the Meta toolkit headers. | ||
28 | */ | ||
29 | #define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT | ||
30 | #define _PAGE_WRITE MMCU_ENTRY_WR_BIT | ||
31 | #define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT | ||
32 | /* Write combine bit - this can cause writes to occur out of order */ | ||
33 | #define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT | ||
34 | /* Sys coherent bit - this bit is never used by Linux */ | ||
35 | #define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT | ||
36 | #define _PAGE_ALWAYS_ZERO_1 0x020 | ||
37 | #define _PAGE_CACHE_CTRL0 0x040 | ||
38 | #define _PAGE_CACHE_CTRL1 0x080 | ||
39 | #define _PAGE_ALWAYS_ZERO_2 0x100 | ||
40 | #define _PAGE_ALWAYS_ZERO_3 0x200 | ||
41 | #define _PAGE_ALWAYS_ZERO_4 0x400 | ||
42 | #define _PAGE_ALWAYS_ZERO_5 0x800 | ||
43 | |||
44 | /* These are software bits that we stuff into the gaps in the hardware | ||
45 | * pte entries that are not used. Note, these DO get stored in the actual | ||
46 | * hardware, but the hardware just does not use them. | ||
47 | */ | ||
48 | #define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1 | ||
49 | #define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2 | ||
50 | |||
51 | /* Pages owned, and protected by, the kernel. */ | ||
52 | #define _PAGE_KERNEL _PAGE_PRIV | ||
53 | |||
54 | /* No cacheing of this page */ | ||
55 | #define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S) | ||
56 | /* burst cacheing - good for data streaming */ | ||
57 | #define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S) | ||
58 | /* One cache way per thread */ | ||
59 | #define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S) | ||
60 | /* Full on cacheing */ | ||
61 | #define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S) | ||
62 | |||
63 | #define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE) | ||
64 | |||
65 | /* which bits are used for cache control ... */ | ||
66 | #define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \ | ||
67 | _PAGE_WR_COMBINE) | ||
68 | |||
69 | /* This is a mask of the bits that pte_modify is allowed to change. */ | ||
70 | #define _PAGE_CHG_MASK (PAGE_MASK) | ||
71 | |||
72 | #define _PAGE_SZ_SHIFT 1 | ||
73 | #define _PAGE_SZ_4K (0x0) | ||
74 | #define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT) | ||
75 | #define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT) | ||
76 | #define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT) | ||
77 | #define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT) | ||
78 | #define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT) | ||
79 | #define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT) | ||
80 | #define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT) | ||
81 | #define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT) | ||
82 | #define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT) | ||
83 | #define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT) | ||
84 | #define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT) | ||
85 | |||
86 | #if defined(CONFIG_PAGE_SIZE_4K) | ||
87 | #define _PAGE_SZ (_PAGE_SZ_4K) | ||
88 | #elif defined(CONFIG_PAGE_SIZE_8K) | ||
89 | #define _PAGE_SZ (_PAGE_SZ_8K) | ||
90 | #elif defined(CONFIG_PAGE_SIZE_16K) | ||
91 | #define _PAGE_SZ (_PAGE_SZ_16K) | ||
92 | #endif | ||
93 | #define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT) | ||
94 | |||
95 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_8K) | ||
96 | # define _PAGE_SZHUGE (_PAGE_SZ_8K) | ||
97 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K) | ||
98 | # define _PAGE_SZHUGE (_PAGE_SZ_16K) | ||
99 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K) | ||
100 | # define _PAGE_SZHUGE (_PAGE_SZ_32K) | ||
101 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
102 | # define _PAGE_SZHUGE (_PAGE_SZ_64K) | ||
103 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K) | ||
104 | # define _PAGE_SZHUGE (_PAGE_SZ_128K) | ||
105 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) | ||
106 | # define _PAGE_SZHUGE (_PAGE_SZ_256K) | ||
107 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | ||
108 | # define _PAGE_SZHUGE (_PAGE_SZ_512K) | ||
109 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M) | ||
110 | # define _PAGE_SZHUGE (_PAGE_SZ_1M) | ||
111 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M) | ||
112 | # define _PAGE_SZHUGE (_PAGE_SZ_2M) | ||
113 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M) | ||
114 | # define _PAGE_SZHUGE (_PAGE_SZ_4M) | ||
115 | #endif | ||
116 | |||
117 | /* | ||
118 | * The Linux memory management assumes a three-level page table setup. On | 25 | * The Linux memory management assumes a three-level page table setup. On |
119 | * Meta, we use that, but "fold" the mid level into the top-level page | 26 | * Meta, we use that, but "fold" the mid level into the top-level page |
120 | * table. | 27 | * table. |
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index 881071c07942..13272fd5a5ba 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h | |||
@@ -149,8 +149,8 @@ extern void exit_thread(void); | |||
149 | 149 | ||
150 | unsigned long get_wchan(struct task_struct *p); | 150 | unsigned long get_wchan(struct task_struct *p); |
151 | 151 | ||
152 | #define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC) | 152 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC) |
153 | #define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0) | 153 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0) |
154 | 154 | ||
155 | #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) | 155 | #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) |
156 | 156 | ||
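KSTK_EIP()/KSTK_ESP() are meant to report a task's user-mode PC and stack pointer (they feed things like /proc/<pid>/stat and the [stack] heuristics); thread.kernel_context describes the saved kernel-mode context, so the macros now read from task_pt_regs(), which points at the user register frame.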
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index 0536bc021cc6..ef548510b951 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S | |||
@@ -348,8 +348,9 @@ C_ENTRY(_user_exception): | |||
348 | * The LP register should point to the location where the called function | 348 | * The LP register should point to the location where the called function |
349 | * should return. [note that MAKE_SYS_CALL uses label 1] */ | 349 | * should return. [note that MAKE_SYS_CALL uses label 1] */ |
350 | /* See if the system call number is valid */ | 350 | /* See if the system call number is valid */ |
351 | blti r12, 5f | ||
351 | addi r11, r12, -__NR_syscalls; | 352 | addi r11, r12, -__NR_syscalls; |
352 | bgei r11,5f; | 353 | bgei r11, 5f; |
353 | /* Figure out which function to use for this system call. */ | 354 | /* Figure out which function to use for this system call. */ |
354 | /* Note Microblaze barrel shift is optional, so don't rely on it */ | 355 | /* Note Microblaze barrel shift is optional, so don't rely on it */ |
355 | add r12, r12, r12; /* convert num -> ptr */ | 356 | add r12, r12, r12; /* convert num -> ptr */ |
@@ -375,7 +376,7 @@ C_ENTRY(_user_exception): | |||
375 | 376 | ||
376 | /* The syscall number is invalid, return an error. */ | 377 | /* The syscall number is invalid, return an error. */ |
377 | 5: | 378 | 5: |
378 | rtsd r15, 8; /* looks like a normal subroutine return */ | 379 | braid ret_from_trap |
379 | addi r3, r0, -ENOSYS; | 380 | addi r3, r0, -ENOSYS; |
380 | 381 | ||
381 | /* Entry point used to return from a syscall/trap */ | 382 | /* Entry point used to return from a syscall/trap */ |
@@ -411,7 +412,7 @@ C_ENTRY(ret_from_trap): | |||
411 | bri 1b | 412 | bri 1b |
412 | 413 | ||
413 | /* Maybe handle a signal */ | 414 | /* Maybe handle a signal */ |
414 | 5: | 415 | 5: |
415 | andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; | 416 | andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
416 | beqi r11, 4f; /* Signals to handle, handle them */ | 417 | beqi r11, 4f; /* Signals to handle, handle them */ |
417 | 418 | ||
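The syscall number in r12 is signed, and only the upper bound used to be checked, so a negative number would index ahead of the syscall table; blti adds the missing lower-bound test. The invalid-number path now also exits through ret_from_trap rather than returning directly, so the normal trap-exit work still runs with -ENOSYS in r3. The validation, expressed in C (sketch only, the dispatcher below is hypothetical):

    static long dispatch_syscall(long nr)
    {
        if (nr < 0 || nr >= __NR_syscalls)
            return -ENOSYS;               /* delivered via the usual trap exit */
        return call_syscall_handler(nr);  /* hypothetical table dispatch */
    }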
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index bbcd82242059..b6beb0e07b1b 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c | |||
@@ -216,6 +216,7 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, | |||
216 | if (idx > current_cpu_data.tlbsize) { | 216 | if (idx > current_cpu_data.tlbsize) { |
217 | kvm_err("%s: Invalid Index: %d\n", __func__, idx); | 217 | kvm_err("%s: Invalid Index: %d\n", __func__, idx); |
218 | kvm_mips_dump_host_tlbs(); | 218 | kvm_mips_dump_host_tlbs(); |
219 | local_irq_restore(flags); | ||
219 | return -1; | 220 | return -1; |
220 | } | 221 | } |
221 | 222 | ||
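The out-of-range-index error path returned while interrupts were still disabled by the earlier local_irq_save(); the added local_irq_restore() keeps the save/restore pairing intact on every exit. The general shape of the fix, as a sketch (the two helpers are hypothetical):

    static int guarded_update(void)
    {
        unsigned long flags;
        int ret = 0;

        local_irq_save(flags);
        if (index_out_of_range())     /* hypothetical validity check */
            ret = -1;
        else
            write_tlb_entry();        /* hypothetical hardware update */
        local_irq_restore(flags);     /* runs on both paths */

        return ret;
    }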
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h index c1388d40663b..bd6437f67dc0 100644 --- a/arch/mips/kvm/trace.h +++ b/arch/mips/kvm/trace.h | |||
@@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit, | |||
24 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), | 24 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), |
25 | TP_ARGS(vcpu, reason), | 25 | TP_ARGS(vcpu, reason), |
26 | TP_STRUCT__entry( | 26 | TP_STRUCT__entry( |
27 | __field(struct kvm_vcpu *, vcpu) | 27 | __field(unsigned long, pc) |
28 | __field(unsigned int, reason) | 28 | __field(unsigned int, reason) |
29 | ), | 29 | ), |
30 | 30 | ||
31 | TP_fast_assign( | 31 | TP_fast_assign( |
32 | __entry->vcpu = vcpu; | 32 | __entry->pc = vcpu->arch.pc; |
33 | __entry->reason = reason; | 33 | __entry->reason = reason; |
34 | ), | 34 | ), |
35 | 35 | ||
36 | TP_printk("[%s]PC: 0x%08lx", | 36 | TP_printk("[%s]PC: 0x%08lx", |
37 | kvm_mips_exit_types_str[__entry->reason], | 37 | kvm_mips_exit_types_str[__entry->reason], |
38 | __entry->vcpu->arch.pc) | 38 | __entry->pc) |
39 | ); | 39 | ); |
40 | 40 | ||
41 | #endif /* _TRACE_KVM_H */ | 41 | #endif /* _TRACE_KVM_H */ |
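Tracepoints should copy whatever they will print into the ring buffer at TP_fast_assign time; stashing the vcpu pointer and dereferencing it in TP_printk meant the PC was read much later, possibly after the vCPU state had changed or been freed. Recording vcpu->arch.pc directly makes each trace record self-contained.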
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h index afab728ab65e..96d3f9deb59c 100644 --- a/arch/mn10300/include/asm/pgtable.h +++ b/arch/mn10300/include/asm/pgtable.h | |||
@@ -56,7 +56,9 @@ extern void paging_init(void); | |||
56 | #define PGDIR_SHIFT 22 | 56 | #define PGDIR_SHIFT 22 |
57 | #define PTRS_PER_PGD 1024 | 57 | #define PTRS_PER_PGD 1024 |
58 | #define PTRS_PER_PUD 1 /* we don't really have any PUD physically */ | 58 | #define PTRS_PER_PUD 1 /* we don't really have any PUD physically */ |
59 | #define __PAGETABLE_PUD_FOLDED | ||
59 | #define PTRS_PER_PMD 1 /* we don't really have any PMD physically */ | 60 | #define PTRS_PER_PMD 1 /* we don't really have any PMD physically */ |
61 | #define __PAGETABLE_PMD_FOLDED | ||
60 | #define PTRS_PER_PTE 1024 | 62 | #define PTRS_PER_PTE 1024 |
61 | 63 | ||
62 | #define PGD_SIZE PAGE_SIZE | 64 | #define PGD_SIZE PAGE_SIZE |
diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h index 20fb1cf2dab6..642462144872 100644 --- a/arch/nios2/include/asm/ptrace.h +++ b/arch/nios2/include/asm/ptrace.h | |||
@@ -15,7 +15,54 @@ | |||
15 | 15 | ||
16 | #include <uapi/asm/ptrace.h> | 16 | #include <uapi/asm/ptrace.h> |
17 | 17 | ||
18 | /* This struct defines the way the registers are stored on the | ||
19 | stack during a system call. */ | ||
20 | |||
18 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
22 | struct pt_regs { | ||
23 | unsigned long r8; /* r8-r15 Caller-saved GP registers */ | ||
24 | unsigned long r9; | ||
25 | unsigned long r10; | ||
26 | unsigned long r11; | ||
27 | unsigned long r12; | ||
28 | unsigned long r13; | ||
29 | unsigned long r14; | ||
30 | unsigned long r15; | ||
31 | unsigned long r1; /* Assembler temporary */ | ||
32 | unsigned long r2; /* Retval LS 32bits */ | ||
33 | unsigned long r3; /* Retval MS 32bits */ | ||
34 | unsigned long r4; /* r4-r7 Register arguments */ | ||
35 | unsigned long r5; | ||
36 | unsigned long r6; | ||
37 | unsigned long r7; | ||
38 | unsigned long orig_r2; /* Copy of r2 ?? */ | ||
39 | unsigned long ra; /* Return address */ | ||
40 | unsigned long fp; /* Frame pointer */ | ||
41 | unsigned long sp; /* Stack pointer */ | ||
42 | unsigned long gp; /* Global pointer */ | ||
43 | unsigned long estatus; | ||
44 | unsigned long ea; /* Exception return address (pc) */ | ||
45 | unsigned long orig_r7; | ||
46 | }; | ||
47 | |||
48 | /* | ||
49 | * This is the extended stack used by signal handlers and the context | ||
50 | * switcher: it's pushed after the normal "struct pt_regs". | ||
51 | */ | ||
52 | struct switch_stack { | ||
53 | unsigned long r16; /* r16-r23 Callee-saved GP registers */ | ||
54 | unsigned long r17; | ||
55 | unsigned long r18; | ||
56 | unsigned long r19; | ||
57 | unsigned long r20; | ||
58 | unsigned long r21; | ||
59 | unsigned long r22; | ||
60 | unsigned long r23; | ||
61 | unsigned long fp; | ||
62 | unsigned long gp; | ||
63 | unsigned long ra; | ||
64 | }; | ||
65 | |||
19 | #define user_mode(regs) (((regs)->estatus & ESTATUS_EU)) | 66 | #define user_mode(regs) (((regs)->estatus & ESTATUS_EU)) |
20 | 67 | ||
21 | #define instruction_pointer(regs) ((regs)->ra) | 68 | #define instruction_pointer(regs) ((regs)->ra) |
diff --git a/arch/nios2/include/asm/thread_info.h b/arch/nios2/include/asm/thread_info.h index 1f266575beb5..a16e55cbd8ad 100644 --- a/arch/nios2/include/asm/thread_info.h +++ b/arch/nios2/include/asm/thread_info.h | |||
@@ -47,7 +47,6 @@ struct thread_info { | |||
47 | 0-0x7FFFFFFF for user-thread | 47 | 0-0x7FFFFFFF for user-thread |
48 | 0-0xFFFFFFFF for kernel-thread | 48 | 0-0xFFFFFFFF for kernel-thread |
49 | */ | 49 | */ |
50 | struct restart_block restart_block; | ||
51 | struct pt_regs *regs; | 50 | struct pt_regs *regs; |
52 | }; | 51 | }; |
53 | 52 | ||
@@ -64,9 +63,6 @@ struct thread_info { | |||
64 | .cpu = 0, \ | 63 | .cpu = 0, \ |
65 | .preempt_count = INIT_PREEMPT_COUNT, \ | 64 | .preempt_count = INIT_PREEMPT_COUNT, \ |
66 | .addr_limit = KERNEL_DS, \ | 65 | .addr_limit = KERNEL_DS, \ |
67 | .restart_block = { \ | ||
68 | .fn = do_no_restart_syscall, \ | ||
69 | }, \ | ||
70 | } | 66 | } |
71 | 67 | ||
72 | #define init_thread_info (init_thread_union.thread_info) | 68 | #define init_thread_info (init_thread_union.thread_info) |
diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h deleted file mode 100644 index 2c87614b0f6e..000000000000 --- a/arch/nios2/include/asm/ucontext.h +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch> | ||
3 | * Copyright (C) 2004 Microtronix Datacom Ltd | ||
4 | * | ||
5 | * This file is subject to the terms and conditions of the GNU General Public | ||
6 | * License. See the file "COPYING" in the main directory of this archive | ||
7 | * for more details. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_NIOS2_UCONTEXT_H | ||
11 | #define _ASM_NIOS2_UCONTEXT_H | ||
12 | |||
13 | typedef int greg_t; | ||
14 | #define NGREG 32 | ||
15 | typedef greg_t gregset_t[NGREG]; | ||
16 | |||
17 | struct mcontext { | ||
18 | int version; | ||
19 | gregset_t gregs; | ||
20 | }; | ||
21 | |||
22 | #define MCONTEXT_VERSION 2 | ||
23 | |||
24 | struct ucontext { | ||
25 | unsigned long uc_flags; | ||
26 | struct ucontext *uc_link; | ||
27 | stack_t uc_stack; | ||
28 | struct mcontext uc_mcontext; | ||
29 | sigset_t uc_sigmask; /* mask last for extensibility */ | ||
30 | }; | ||
31 | |||
32 | #endif | ||
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild index 4f07ca3f8d10..e0bb972a50d7 100644 --- a/arch/nios2/include/uapi/asm/Kbuild +++ b/arch/nios2/include/uapi/asm/Kbuild | |||
@@ -1,4 +1,5 @@ | |||
1 | include include/uapi/asm-generic/Kbuild.asm | 1 | include include/uapi/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | header-y += elf.h | 3 | header-y += elf.h |
4 | header-y += ucontext.h | 4 | |
5 | generic-y += ucontext.h | ||
diff --git a/arch/nios2/include/uapi/asm/elf.h b/arch/nios2/include/uapi/asm/elf.h index a5b91ae5cf56..6f06d3b2949e 100644 --- a/arch/nios2/include/uapi/asm/elf.h +++ b/arch/nios2/include/uapi/asm/elf.h | |||
@@ -50,9 +50,7 @@ | |||
50 | 50 | ||
51 | typedef unsigned long elf_greg_t; | 51 | typedef unsigned long elf_greg_t; |
52 | 52 | ||
53 | #define ELF_NGREG \ | 53 | #define ELF_NGREG 49 |
54 | ((sizeof(struct pt_regs) + sizeof(struct switch_stack)) / \ | ||
55 | sizeof(elf_greg_t)) | ||
56 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | 54 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; |
57 | 55 | ||
58 | typedef unsigned long elf_fpregset_t; | 56 | typedef unsigned long elf_fpregset_t; |
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h index e83a7c9d1c36..eff00e67c0a2 100644 --- a/arch/nios2/include/uapi/asm/ptrace.h +++ b/arch/nios2/include/uapi/asm/ptrace.h | |||
@@ -60,60 +60,21 @@ | |||
60 | #define PTR_IPENDING 37 | 60 | #define PTR_IPENDING 37 |
61 | #define PTR_CPUID 38 | 61 | #define PTR_CPUID 38 |
62 | #define PTR_CTL6 39 | 62 | #define PTR_CTL6 39 |
63 | #define PTR_CTL7 40 | 63 | #define PTR_EXCEPTION 40 |
64 | #define PTR_PTEADDR 41 | 64 | #define PTR_PTEADDR 41 |
65 | #define PTR_TLBACC 42 | 65 | #define PTR_TLBACC 42 |
66 | #define PTR_TLBMISC 43 | 66 | #define PTR_TLBMISC 43 |
67 | #define PTR_ECCINJ 44 | ||
68 | #define PTR_BADADDR 45 | ||
69 | #define PTR_CONFIG 46 | ||
70 | #define PTR_MPUBASE 47 | ||
71 | #define PTR_MPUACC 48 | ||
67 | 72 | ||
68 | #define NUM_PTRACE_REG (PTR_TLBMISC + 1) | 73 | #define NUM_PTRACE_REG (PTR_MPUACC + 1) |
69 | 74 | ||
70 | /* this struct defines the way the registers are stored on the | 75 | /* User structures for general purpose registers. */ |
71 | stack during a system call. | 76 | struct user_pt_regs { |
72 | 77 | __u32 regs[49]; | |
73 | There is a fake_regs in setup.c that has to match pt_regs.*/ | ||
74 | |||
75 | struct pt_regs { | ||
76 | unsigned long r8; /* r8-r15 Caller-saved GP registers */ | ||
77 | unsigned long r9; | ||
78 | unsigned long r10; | ||
79 | unsigned long r11; | ||
80 | unsigned long r12; | ||
81 | unsigned long r13; | ||
82 | unsigned long r14; | ||
83 | unsigned long r15; | ||
84 | unsigned long r1; /* Assembler temporary */ | ||
85 | unsigned long r2; /* Retval LS 32bits */ | ||
86 | unsigned long r3; /* Retval MS 32bits */ | ||
87 | unsigned long r4; /* r4-r7 Register arguments */ | ||
88 | unsigned long r5; | ||
89 | unsigned long r6; | ||
90 | unsigned long r7; | ||
91 | unsigned long orig_r2; /* Copy of r2 ?? */ | ||
92 | unsigned long ra; /* Return address */ | ||
93 | unsigned long fp; /* Frame pointer */ | ||
94 | unsigned long sp; /* Stack pointer */ | ||
95 | unsigned long gp; /* Global pointer */ | ||
96 | unsigned long estatus; | ||
97 | unsigned long ea; /* Exception return address (pc) */ | ||
98 | unsigned long orig_r7; | ||
99 | }; | ||
100 | |||
101 | /* | ||
102 | * This is the extended stack used by signal handlers and the context | ||
103 | * switcher: it's pushed after the normal "struct pt_regs". | ||
104 | */ | ||
105 | struct switch_stack { | ||
106 | unsigned long r16; /* r16-r23 Callee-saved GP registers */ | ||
107 | unsigned long r17; | ||
108 | unsigned long r18; | ||
109 | unsigned long r19; | ||
110 | unsigned long r20; | ||
111 | unsigned long r21; | ||
112 | unsigned long r22; | ||
113 | unsigned long r23; | ||
114 | unsigned long fp; | ||
115 | unsigned long gp; | ||
116 | unsigned long ra; | ||
117 | }; | 78 | }; |
118 | 79 | ||
119 | #endif /* __ASSEMBLY__ */ | 80 | #endif /* __ASSEMBLY__ */ |
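
The hunk above removes the kernel-internal pt_regs/switch_stack layout from the nios2 UAPI header and exposes a fixed 49-slot user_pt_regs instead, matching the new ELF_NGREG value of 49 in elf.h. A minimal userspace sketch of how a debugger might read that register block, assuming the target kernel exports it through the generic PTRACE_GETREGSET / NT_PRSTATUS regset interface; apart from the 49-word layout itself, every name here is illustrative.

/*
 * Illustrative only: dump the general-purpose register set of a stopped
 * tracee.  Assumes PTRACE_GETREGSET / NT_PRSTATUS is available for the
 * target; the struct mirrors the new 49 x 32-bit UAPI layout.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct user_pt_regs {
	uint32_t regs[49];
};

static int dump_gregs(pid_t pid)
{
	struct user_pt_regs gregs;
	struct iovec iov = {
		.iov_base = &gregs,
		.iov_len  = sizeof(gregs),
	};

	if (ptrace(PTRACE_GETREGSET, pid,
		   (void *)(unsigned long)NT_PRSTATUS, &iov) == -1) {
		perror("PTRACE_GETREGSET");
		return -1;
	}

	for (int i = 0; i < 49; i++)
		printf("reg[%2d] = 0x%08" PRIx32 "\n", i, gregs.regs[i]);
	return 0;
}
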
diff --git a/arch/nios2/include/uapi/asm/sigcontext.h b/arch/nios2/include/uapi/asm/sigcontext.h index 7b8bb41867d4..b67944a50927 100644 --- a/arch/nios2/include/uapi/asm/sigcontext.h +++ b/arch/nios2/include/uapi/asm/sigcontext.h | |||
@@ -15,14 +15,16 @@ | |||
15 | * details. | 15 | * details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #ifndef _ASM_NIOS2_SIGCONTEXT_H | 18 | #ifndef _UAPI__ASM_SIGCONTEXT_H |
19 | #define _ASM_NIOS2_SIGCONTEXT_H | 19 | #define _UAPI__ASM_SIGCONTEXT_H |
20 | 20 | ||
21 | #include <asm/ptrace.h> | 21 | #include <linux/types.h> |
22 | |||
23 | #define MCONTEXT_VERSION 2 | ||
22 | 24 | ||
23 | struct sigcontext { | 25 | struct sigcontext { |
24 | struct pt_regs regs; | 26 | int version; |
25 | unsigned long sc_mask; /* old sigmask */ | 27 | unsigned long gregs[32]; |
26 | }; | 28 | }; |
27 | 29 | ||
28 | #endif | 30 | #endif |
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S index 7729bd3f2e79..27b006c52e12 100644 --- a/arch/nios2/kernel/entry.S +++ b/arch/nios2/kernel/entry.S | |||
@@ -161,7 +161,7 @@ ENTRY(inthandler) | |||
161 | *********************************************************************** | 161 | *********************************************************************** |
162 | */ | 162 | */ |
163 | ENTRY(handle_trap) | 163 | ENTRY(handle_trap) |
164 | ldw r24, -4(ea) /* instruction that caused the exception */ | 164 | ldwio r24, -4(ea) /* instruction that caused the exception */ |
165 | srli r24, r24, 4 | 165 | srli r24, r24, 4 |
166 | andi r24, r24, 0x7c | 166 | andi r24, r24, 0x7c |
167 | movia r9,trap_table | 167 | movia r9,trap_table |
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index 2d0ea25be171..20662b0f6c9e 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c | |||
@@ -39,11 +39,11 @@ static inline int rt_restore_ucontext(struct pt_regs *regs, | |||
39 | struct ucontext *uc, int *pr2) | 39 | struct ucontext *uc, int *pr2) |
40 | { | 40 | { |
41 | int temp; | 41 | int temp; |
42 | greg_t *gregs = uc->uc_mcontext.gregs; | 42 | unsigned long *gregs = uc->uc_mcontext.gregs; |
43 | int err; | 43 | int err; |
44 | 44 | ||
45 | /* Always make any pending restarted system calls return -EINTR */ | 45 | /* Always make any pending restarted system calls return -EINTR */ |
46 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | 46 | current->restart_block.fn = do_no_restart_syscall; |
47 | 47 | ||
48 | err = __get_user(temp, &uc->uc_mcontext.version); | 48 | err = __get_user(temp, &uc->uc_mcontext.version); |
49 | if (temp != MCONTEXT_VERSION) | 49 | if (temp != MCONTEXT_VERSION) |
@@ -127,7 +127,7 @@ badframe: | |||
127 | static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) | 127 | static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) |
128 | { | 128 | { |
129 | struct switch_stack *sw = (struct switch_stack *)regs - 1; | 129 | struct switch_stack *sw = (struct switch_stack *)regs - 1; |
130 | greg_t *gregs = uc->uc_mcontext.gregs; | 130 | unsigned long *gregs = uc->uc_mcontext.gregs; |
131 | int err = 0; | 131 | int err = 0; |
132 | 132 | ||
133 | err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); | 133 | err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); |
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c index 2ae482b42669..796642932e2e 100644 --- a/arch/nios2/mm/cacheflush.c +++ b/arch/nios2/mm/cacheflush.c | |||
@@ -23,9 +23,6 @@ static void __flush_dcache(unsigned long start, unsigned long end) | |||
23 | end += (cpuinfo.dcache_line_size - 1); | 23 | end += (cpuinfo.dcache_line_size - 1); |
24 | end &= ~(cpuinfo.dcache_line_size - 1); | 24 | end &= ~(cpuinfo.dcache_line_size - 1); |
25 | 25 | ||
26 | if (end > start + cpuinfo.dcache_size) | ||
27 | end = start + cpuinfo.dcache_size; | ||
28 | |||
29 | for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) { | 26 | for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) { |
30 | __asm__ __volatile__ (" flushda 0(%0)\n" | 27 | __asm__ __volatile__ (" flushda 0(%0)\n" |
31 | : /* Outputs */ | 28 | : /* Outputs */ |
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 0d231adfe576..0c9b6afe69e9 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c | |||
@@ -126,7 +126,6 @@ good_area: | |||
126 | break; | 126 | break; |
127 | } | 127 | } |
128 | 128 | ||
129 | survive: | ||
130 | /* | 129 | /* |
131 | * If for any reason at all we couldn't handle the fault, | 130 | * If for any reason at all we couldn't handle the fault, |
132 | * make sure we exit gracefully rather than endlessly redo | 131 | * make sure we exit gracefully rather than endlessly redo |
@@ -220,11 +219,6 @@ no_context: | |||
220 | */ | 219 | */ |
221 | out_of_memory: | 220 | out_of_memory: |
222 | up_read(&mm->mmap_sem); | 221 | up_read(&mm->mmap_sem); |
223 | if (is_global_init(tsk)) { | ||
224 | yield(); | ||
225 | down_read(&mm->mmap_sem); | ||
226 | goto survive; | ||
227 | } | ||
228 | if (!user_mode(regs)) | 222 | if (!user_mode(regs)) |
229 | goto no_context; | 223 | goto no_context; |
230 | pagefault_out_of_memory(); | 224 | pagefault_out_of_memory(); |
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index f213f5b4c423..d17437238a2c 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h | |||
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
26 | 26 | ||
27 | if (likely(pgd != NULL)) { | 27 | if (likely(pgd != NULL)) { |
28 | memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); | 28 | memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); |
29 | #ifdef CONFIG_64BIT | 29 | #if PT_NLEVELS == 3 |
30 | actual_pgd += PTRS_PER_PGD; | 30 | actual_pgd += PTRS_PER_PGD; |
31 | /* Populate first pmd with allocated memory. We mark it | 31 | /* Populate first pmd with allocated memory. We mark it |
32 | * with PxD_FLAG_ATTACHED as a signal to the system that this | 32 | * with PxD_FLAG_ATTACHED as a signal to the system that this |
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
45 | 45 | ||
46 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | 46 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
47 | { | 47 | { |
48 | #ifdef CONFIG_64BIT | 48 | #if PT_NLEVELS == 3 |
49 | pgd -= PTRS_PER_PGD; | 49 | pgd -= PTRS_PER_PGD; |
50 | #endif | 50 | #endif |
51 | free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); | 51 | free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); |
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | |||
72 | 72 | ||
73 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 73 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
74 | { | 74 | { |
75 | #ifdef CONFIG_64BIT | ||
76 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | 75 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) |
77 | /* This is the permanent pmd attached to the pgd; | 76 | /* |
78 | * cannot free it */ | 77 | * This is the permanent pmd attached to the pgd; |
78 | * cannot free it. | ||
79 | * Increment the counter to compensate for the decrement | ||
80 | * done by generic mm code. | ||
81 | */ | ||
82 | mm_inc_nr_pmds(mm); | ||
79 | return; | 83 | return; |
80 | #endif | ||
81 | free_pages((unsigned long)pmd, PMD_ORDER); | 84 | free_pages((unsigned long)pmd, PMD_ORDER); |
82 | } | 85 | } |
83 | 86 | ||
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | |||
99 | static inline void | 102 | static inline void |
100 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) | 103 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) |
101 | { | 104 | { |
102 | #ifdef CONFIG_64BIT | 105 | #if PT_NLEVELS == 3 |
103 | /* preserve the gateway marker if this is the beginning of | 106 | /* preserve the gateway marker if this is the beginning of |
104 | * the permanent pmd */ | 107 | * the permanent pmd */ |
105 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | 108 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) |
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 8c966b2270aa..15207b9362bf 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h | |||
@@ -96,6 +96,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); | |||
96 | #if PT_NLEVELS == 3 | 96 | #if PT_NLEVELS == 3 |
97 | #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) | 97 | #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) |
98 | #else | 98 | #else |
99 | #define __PAGETABLE_PMD_FOLDED | ||
99 | #define BITS_PER_PMD 0 | 100 | #define BITS_PER_PMD 0 |
100 | #endif | 101 | #endif |
101 | #define PTRS_PER_PMD (1UL << BITS_PER_PMD) | 102 | #define PTRS_PER_PMD (1UL << BITS_PER_PMD) |
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 5a8997d63899..8eefb12d1d33 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -55,8 +55,8 @@ | |||
55 | #define ENTRY_COMP(_name_) .word sys_##_name_ | 55 | #define ENTRY_COMP(_name_) .word sys_##_name_ |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | ENTRY_SAME(restart_syscall) /* 0 */ | 58 | 90: ENTRY_SAME(restart_syscall) /* 0 */ |
59 | ENTRY_SAME(exit) | 59 | 91: ENTRY_SAME(exit) |
60 | ENTRY_SAME(fork_wrapper) | 60 | ENTRY_SAME(fork_wrapper) |
61 | ENTRY_SAME(read) | 61 | ENTRY_SAME(read) |
62 | ENTRY_SAME(write) | 62 | ENTRY_SAME(write) |
@@ -439,7 +439,10 @@ | |||
439 | ENTRY_SAME(bpf) | 439 | ENTRY_SAME(bpf) |
440 | ENTRY_COMP(execveat) | 440 | ENTRY_COMP(execveat) |
441 | 441 | ||
442 | /* Nothing yet */ | 442 | |
443 | .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) | ||
444 | .error "size of syscall table does not fit value of __NR_Linux_syscalls" | ||
445 | .endif | ||
443 | 446 | ||
444 | #undef ENTRY_SAME | 447 | #undef ENTRY_SAME |
445 | #undef ENTRY_DIFF | 448 | #undef ENTRY_DIFF |
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h index 2bf8e9307be9..4c8ad592ae33 100644 --- a/arch/powerpc/include/asm/cputhreads.h +++ b/arch/powerpc/include/asm/cputhreads.h | |||
@@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads) | |||
55 | 55 | ||
56 | static inline int cpu_nr_cores(void) | 56 | static inline int cpu_nr_cores(void) |
57 | { | 57 | { |
58 | return NR_CPUS >> threads_shift; | 58 | return nr_cpu_ids >> threads_shift; |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline cpumask_t cpu_online_cores_map(void) | 61 | static inline cpumask_t cpu_online_cores_map(void) |
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 9cfa3706a1b8..f1ea5972f6ec 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h | |||
@@ -113,6 +113,7 @@ extern void iommu_register_group(struct iommu_table *tbl, | |||
113 | int pci_domain_number, unsigned long pe_num); | 113 | int pci_domain_number, unsigned long pe_num); |
114 | extern int iommu_add_device(struct device *dev); | 114 | extern int iommu_add_device(struct device *dev); |
115 | extern void iommu_del_device(struct device *dev); | 115 | extern void iommu_del_device(struct device *dev); |
116 | extern int __init tce_iommu_bus_notifier_init(void); | ||
116 | #else | 117 | #else |
117 | static inline void iommu_register_group(struct iommu_table *tbl, | 118 | static inline void iommu_register_group(struct iommu_table *tbl, |
118 | int pci_domain_number, | 119 | int pci_domain_number, |
@@ -128,6 +129,11 @@ static inline int iommu_add_device(struct device *dev) | |||
128 | static inline void iommu_del_device(struct device *dev) | 129 | static inline void iommu_del_device(struct device *dev) |
129 | { | 130 | { |
130 | } | 131 | } |
132 | |||
133 | static inline int __init tce_iommu_bus_notifier_init(void) | ||
134 | { | ||
135 | return 0; | ||
136 | } | ||
131 | #endif /* !CONFIG_IOMMU_API */ | 137 | #endif /* !CONFIG_IOMMU_API */ |
132 | 138 | ||
133 | static inline void set_iommu_table_base_and_group(struct device *dev, | 139 | static inline void set_iommu_table_base_and_group(struct device *dev, |
diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h new file mode 100644 index 000000000000..744fd54de374 --- /dev/null +++ b/arch/powerpc/include/asm/irq_work.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _ASM_POWERPC_IRQ_WORK_H | ||
2 | #define _ASM_POWERPC_IRQ_WORK_H | ||
3 | |||
4 | static inline bool arch_irq_work_has_interrupt(void) | ||
5 | { | ||
6 | return true; | ||
7 | } | ||
8 | |||
9 | #endif /* _ASM_POWERPC_IRQ_WORK_H */ | ||
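
The new header tells the generic irq_work layer that powerpc can raise a self-interrupt for queued work, so deferred callbacks run almost immediately instead of waiting for the next timer tick. A kernel-side sketch of how such work is typically queued from a context that cannot sleep; the function and variable names below are made up for illustration.

/*
 * Sketch only: with arch_irq_work_has_interrupt() returning true,
 * irq_work_queue() raises an interrupt and deferred_report() runs in
 * hard-irq context shortly afterwards.  Names are illustrative.
 */
#include <linux/irq_work.h>
#include <linux/printk.h>

static struct irq_work report_work;

static void deferred_report(struct irq_work *work)
{
	pr_info("irq_work ran in interrupt context\n");
}

static void setup_report_work(void)
{
	init_irq_work(&report_work, deferred_report);
}

static void hot_path_cannot_sleep(void)
{
	/* Safe from NMI or other atomic contexts that cannot take locks. */
	irq_work_queue(&report_work);
}
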
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 03cd858a401c..4cbe23af400a 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
@@ -153,6 +153,7 @@ | |||
153 | #define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff | 153 | #define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff |
154 | #define PPC_INST_MFTMR 0x7c0002dc | 154 | #define PPC_INST_MFTMR 0x7c0002dc |
155 | #define PPC_INST_MSGSND 0x7c00019c | 155 | #define PPC_INST_MSGSND 0x7c00019c |
156 | #define PPC_INST_MSGCLR 0x7c0001dc | ||
156 | #define PPC_INST_MSGSNDP 0x7c00011c | 157 | #define PPC_INST_MSGSNDP 0x7c00011c |
157 | #define PPC_INST_MTTMR 0x7c0003dc | 158 | #define PPC_INST_MTTMR 0x7c0003dc |
158 | #define PPC_INST_NOP 0x60000000 | 159 | #define PPC_INST_NOP 0x60000000 |
@@ -309,6 +310,8 @@ | |||
309 | ___PPC_RB(b) | __PPC_EH(eh)) | 310 | ___PPC_RB(b) | __PPC_EH(eh)) |
310 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ | 311 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ |
311 | ___PPC_RB(b)) | 312 | ___PPC_RB(b)) |
313 | #define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \ | ||
314 | ___PPC_RB(b)) | ||
312 | #define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ | 315 | #define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ |
313 | ___PPC_RB(b)) | 316 | ___PPC_RB(b)) |
314 | #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ | 317 | #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 1c874fb533bb..af56b5c6c81a 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -608,13 +608,16 @@ | |||
608 | #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ | 608 | #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ |
609 | #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ | 609 | #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ |
610 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ | 610 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ |
611 | #define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */ | ||
611 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ | 612 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ |
612 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ | 613 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ |
613 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ | 614 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ |
614 | #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ | 615 | #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ |
615 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ | 616 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ |
617 | #define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */ | ||
616 | #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ | 618 | #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ |
617 | #define SRR1_WAKERESET 0x00100000 /* System reset */ | 619 | #define SRR1_WAKERESET 0x00100000 /* System reset */ |
620 | #define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */ | ||
618 | #define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ | 621 | #define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ |
619 | #define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, | 622 | #define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, |
620 | * may not be recoverable */ | 623 | * may not be recoverable */ |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index f337666768a7..f83046878336 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
437 | .machine_check_early = __machine_check_early_realmode_p8, | 437 | .machine_check_early = __machine_check_early_realmode_p8, |
438 | .platform = "power8", | 438 | .platform = "power8", |
439 | }, | 439 | }, |
440 | { /* Power8NVL */ | ||
441 | .pvr_mask = 0xffff0000, | ||
442 | .pvr_value = 0x004c0000, | ||
443 | .cpu_name = "POWER8NVL (raw)", | ||
444 | .cpu_features = CPU_FTRS_POWER8, | ||
445 | .cpu_user_features = COMMON_USER_POWER8, | ||
446 | .cpu_user_features2 = COMMON_USER2_POWER8, | ||
447 | .mmu_features = MMU_FTRS_POWER8, | ||
448 | .icache_bsize = 128, | ||
449 | .dcache_bsize = 128, | ||
450 | .num_pmcs = 6, | ||
451 | .pmc_type = PPC_PMC_IBM, | ||
452 | .oprofile_cpu_type = "ppc64/power8", | ||
453 | .oprofile_type = PPC_OPROFILE_INVALID, | ||
454 | .cpu_setup = __setup_cpu_power8, | ||
455 | .cpu_restore = __restore_cpu_power8, | ||
456 | .flush_tlb = __flush_tlb_power8, | ||
457 | .machine_check_early = __machine_check_early_realmode_p8, | ||
458 | .platform = "power8", | ||
459 | }, | ||
440 | { /* Power8 DD1: Does not support doorbell IPIs */ | 460 | { /* Power8 DD1: Does not support doorbell IPIs */ |
441 | .pvr_mask = 0xffffff00, | 461 | .pvr_mask = 0xffffff00, |
442 | .pvr_value = 0x004d0100, | 462 | .pvr_value = 0x004d0100, |
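
The new table entry is picked up by PVR matching: the cpu_spec array is scanned in order and the first entry whose masked PVR equals pvr_value wins, so the 0xffff0000 / 0x004c0000 pair catches any PVR of the form 0x004cxxxx, while narrower masks such as the 0xffffff00 DD1 entry must sit ahead of broader catch-all entries. A self-contained sketch of that first-match-wins rule, with a simplified table type standing in for struct cpu_spec.

/*
 * Illustrative only: first-match-wins PVR lookup.  The entries below are a
 * simplified stand-in for the real cpu_specs table.
 */
#include <stdio.h>

struct pvr_entry {
	unsigned int pvr_mask;
	unsigned int pvr_value;
	const char  *cpu_name;
};

static const struct pvr_entry table[] = {
	{ 0xffff0000, 0x004c0000, "POWER8NVL (raw)" },
	{ 0xffffff00, 0x004d0100, "POWER8 DD1" },
	{ 0xffff0000, 0x004d0000, "POWER8 (raw)" },
};

static const char *identify_cpu_name(unsigned int pvr)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if ((pvr & table[i].pvr_mask) == table[i].pvr_value)
			return table[i].cpu_name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", identify_cpu_name(0x004c0100));	/* -> POWER8NVL (raw) */
	return 0;
}
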
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index f4217819cc31..2128f3a96c32 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <asm/dbell.h> | 18 | #include <asm/dbell.h> |
19 | #include <asm/irq_regs.h> | 19 | #include <asm/irq_regs.h> |
20 | #include <asm/kvm_ppc.h> | ||
20 | 21 | ||
21 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
22 | void doorbell_setup_this_cpu(void) | 23 | void doorbell_setup_this_cpu(void) |
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs) | |||
41 | 42 | ||
42 | may_hard_irq_enable(); | 43 | may_hard_irq_enable(); |
43 | 44 | ||
45 | kvmppc_set_host_ipi(smp_processor_id(), 0); | ||
44 | __this_cpu_inc(irq_stat.doorbell_irqs); | 46 | __this_cpu_inc(irq_stat.doorbell_irqs); |
45 | 47 | ||
46 | smp_ipi_demux(); | 48 | smp_ipi_demux(); |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index c2df8150bd7a..9519e6bdc6d7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -1408,7 +1408,7 @@ machine_check_handle_early: | |||
1408 | bne 9f /* continue in V mode if we are. */ | 1408 | bne 9f /* continue in V mode if we are. */ |
1409 | 1409 | ||
1410 | 5: | 1410 | 5: |
1411 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 1411 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER |
1412 | /* | 1412 | /* |
1413 | * We are coming from kernel context. Check if we are coming from | 1413 | * We are coming from kernel context. Check if we are coming from |
1414 | * guest. if yes, then we can continue. We will fall through | 1414 | * guest. if yes, then we can continue. We will fall through |
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 5d3968c4d799..b054f33ab1fb 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -1175,4 +1175,30 @@ void iommu_del_device(struct device *dev) | |||
1175 | } | 1175 | } |
1176 | EXPORT_SYMBOL_GPL(iommu_del_device); | 1176 | EXPORT_SYMBOL_GPL(iommu_del_device); |
1177 | 1177 | ||
1178 | static int tce_iommu_bus_notifier(struct notifier_block *nb, | ||
1179 | unsigned long action, void *data) | ||
1180 | { | ||
1181 | struct device *dev = data; | ||
1182 | |||
1183 | switch (action) { | ||
1184 | case BUS_NOTIFY_ADD_DEVICE: | ||
1185 | return iommu_add_device(dev); | ||
1186 | case BUS_NOTIFY_DEL_DEVICE: | ||
1187 | if (dev->iommu_group) | ||
1188 | iommu_del_device(dev); | ||
1189 | return 0; | ||
1190 | default: | ||
1191 | return 0; | ||
1192 | } | ||
1193 | } | ||
1194 | |||
1195 | static struct notifier_block tce_iommu_bus_nb = { | ||
1196 | .notifier_call = tce_iommu_bus_notifier, | ||
1197 | }; | ||
1198 | |||
1199 | int __init tce_iommu_bus_notifier_init(void) | ||
1200 | { | ||
1201 | bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); | ||
1202 | return 0; | ||
1203 | } | ||
1178 | #endif /* CONFIG_IOMMU_API */ | 1204 | #endif /* CONFIG_IOMMU_API */ |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 6e19afa35a15..ec9ec2058d2d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -541,8 +541,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
541 | if (smp_ops->give_timebase) | 541 | if (smp_ops->give_timebase) |
542 | smp_ops->give_timebase(); | 542 | smp_ops->give_timebase(); |
543 | 543 | ||
544 | /* Wait until cpu puts itself in the online map */ | 544 | /* Wait until cpu puts itself in the online & active maps */ |
545 | while (!cpu_online(cpu)) | 545 | while (!cpu_online(cpu) || !cpu_active(cpu)) |
546 | cpu_relax(); | 546 | cpu_relax(); |
547 | 547 | ||
548 | return 0; | 548 | return 0; |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index de4018a1bc4b..de747563d29d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) | |||
636 | spin_lock(&vcpu->arch.vpa_update_lock); | 636 | spin_lock(&vcpu->arch.vpa_update_lock); |
637 | lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; | 637 | lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; |
638 | if (lppaca) | 638 | if (lppaca) |
639 | yield_count = lppaca->yield_count; | 639 | yield_count = be32_to_cpu(lppaca->yield_count); |
640 | spin_unlock(&vcpu->arch.vpa_update_lock); | 640 | spin_unlock(&vcpu->arch.vpa_update_lock); |
641 | return yield_count; | 641 | return yield_count; |
642 | } | 642 | } |
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, | |||
942 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | 942 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, |
943 | bool preserve_top32) | 943 | bool preserve_top32) |
944 | { | 944 | { |
945 | struct kvm *kvm = vcpu->kvm; | ||
945 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 946 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
946 | u64 mask; | 947 | u64 mask; |
947 | 948 | ||
949 | mutex_lock(&kvm->lock); | ||
948 | spin_lock(&vc->lock); | 950 | spin_lock(&vc->lock); |
949 | /* | 951 | /* |
950 | * If ILE (interrupt little-endian) has changed, update the | 952 | * If ILE (interrupt little-endian) has changed, update the |
951 | * MSR_LE bit in the intr_msr for each vcpu in this vcore. | 953 | * MSR_LE bit in the intr_msr for each vcpu in this vcore. |
952 | */ | 954 | */ |
953 | if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { | 955 | if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { |
954 | struct kvm *kvm = vcpu->kvm; | ||
955 | struct kvm_vcpu *vcpu; | 956 | struct kvm_vcpu *vcpu; |
956 | int i; | 957 | int i; |
957 | 958 | ||
958 | mutex_lock(&kvm->lock); | ||
959 | kvm_for_each_vcpu(i, vcpu, kvm) { | 959 | kvm_for_each_vcpu(i, vcpu, kvm) { |
960 | if (vcpu->arch.vcore != vc) | 960 | if (vcpu->arch.vcore != vc) |
961 | continue; | 961 | continue; |
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | |||
964 | else | 964 | else |
965 | vcpu->arch.intr_msr &= ~MSR_LE; | 965 | vcpu->arch.intr_msr &= ~MSR_LE; |
966 | } | 966 | } |
967 | mutex_unlock(&kvm->lock); | ||
968 | } | 967 | } |
969 | 968 | ||
970 | /* | 969 | /* |
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | |||
981 | mask &= 0xFFFFFFFF; | 980 | mask &= 0xFFFFFFFF; |
982 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); | 981 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); |
983 | spin_unlock(&vc->lock); | 982 | spin_unlock(&vc->lock); |
983 | mutex_unlock(&kvm->lock); | ||
984 | } | 984 | } |
985 | 985 | ||
986 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | 986 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bb94e6f20c81..6cbf1630cb70 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | |||
1005 | /* Save HEIR (HV emulation assist reg) in emul_inst | 1005 | /* Save HEIR (HV emulation assist reg) in emul_inst |
1006 | if this is an HEI (HV emulation interrupt, e40) */ | 1006 | if this is an HEI (HV emulation interrupt, e40) */ |
1007 | li r3,KVM_INST_FETCH_FAILED | 1007 | li r3,KVM_INST_FETCH_FAILED |
1008 | stw r3,VCPU_LAST_INST(r9) | ||
1008 | cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST | 1009 | cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST |
1009 | bne 11f | 1010 | bne 11f |
1010 | mfspr r3,SPRN_HEIR | 1011 | mfspr r3,SPRN_HEIR |
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index e69142f4af08..54323d6b5166 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -836,30 +836,4 @@ void __init pnv_pci_init(void) | |||
836 | #endif | 836 | #endif |
837 | } | 837 | } |
838 | 838 | ||
839 | static int tce_iommu_bus_notifier(struct notifier_block *nb, | ||
840 | unsigned long action, void *data) | ||
841 | { | ||
842 | struct device *dev = data; | ||
843 | |||
844 | switch (action) { | ||
845 | case BUS_NOTIFY_ADD_DEVICE: | ||
846 | return iommu_add_device(dev); | ||
847 | case BUS_NOTIFY_DEL_DEVICE: | ||
848 | if (dev->iommu_group) | ||
849 | iommu_del_device(dev); | ||
850 | return 0; | ||
851 | default: | ||
852 | return 0; | ||
853 | } | ||
854 | } | ||
855 | |||
856 | static struct notifier_block tce_iommu_bus_nb = { | ||
857 | .notifier_call = tce_iommu_bus_notifier, | ||
858 | }; | ||
859 | |||
860 | static int __init tce_iommu_bus_notifier_init(void) | ||
861 | { | ||
862 | bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); | ||
863 | return 0; | ||
864 | } | ||
865 | machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init); | 839 | machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init); |
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index fc34025ef822..38a45088f633 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <asm/runlatch.h> | 33 | #include <asm/runlatch.h> |
34 | #include <asm/code-patching.h> | 34 | #include <asm/code-patching.h> |
35 | #include <asm/dbell.h> | 35 | #include <asm/dbell.h> |
36 | #include <asm/kvm_ppc.h> | ||
37 | #include <asm/ppc-opcode.h> | ||
36 | 38 | ||
37 | #include "powernv.h" | 39 | #include "powernv.h" |
38 | 40 | ||
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void) | |||
149 | static void pnv_smp_cpu_kill_self(void) | 151 | static void pnv_smp_cpu_kill_self(void) |
150 | { | 152 | { |
151 | unsigned int cpu; | 153 | unsigned int cpu; |
152 | unsigned long srr1; | 154 | unsigned long srr1, wmask; |
153 | u32 idle_states; | 155 | u32 idle_states; |
154 | 156 | ||
155 | /* Standard hot unplug procedure */ | 157 | /* Standard hot unplug procedure */ |
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void) | |||
161 | generic_set_cpu_dead(cpu); | 163 | generic_set_cpu_dead(cpu); |
162 | smp_wmb(); | 164 | smp_wmb(); |
163 | 165 | ||
166 | wmask = SRR1_WAKEMASK; | ||
167 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
168 | wmask = SRR1_WAKEMASK_P8; | ||
169 | |||
164 | idle_states = pnv_get_supported_cpuidle_states(); | 170 | idle_states = pnv_get_supported_cpuidle_states(); |
165 | /* We don't want to take decrementer interrupts while we are offline, | 171 | /* We don't want to take decrementer interrupts while we are offline, |
166 | * so clear LPCR:PECE1. We keep PECE2 enabled. | 172 | * so clear LPCR:PECE1. We keep PECE2 enabled. |
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void) | |||
191 | * having finished executing in a KVM guest, then srr1 | 197 | * having finished executing in a KVM guest, then srr1 |
192 | * contains 0. | 198 | * contains 0. |
193 | */ | 199 | */ |
194 | if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) { | 200 | if ((srr1 & wmask) == SRR1_WAKEEE) { |
195 | icp_native_flush_interrupt(); | 201 | icp_native_flush_interrupt(); |
196 | local_paca->irq_happened &= PACA_IRQ_HARD_DIS; | 202 | local_paca->irq_happened &= PACA_IRQ_HARD_DIS; |
197 | smp_mb(); | 203 | smp_mb(); |
204 | } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { | ||
205 | unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); | ||
206 | asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); | ||
207 | kvmppc_set_host_ipi(cpu, 0); | ||
198 | } | 208 | } |
199 | 209 | ||
200 | if (cpu_core_split_required()) | 210 | if (cpu_core_split_required()) |
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 1d3d52dc3ff3..7803a19adb31 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
@@ -1340,3 +1340,5 @@ static int __init disable_multitce(char *str) | |||
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | __setup("multitce=", disable_multitce); | 1342 | __setup("multitce=", disable_multitce); |
1343 | |||
1344 | machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init); | ||
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 90cf3dcbd9f2..8f35d525cede 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c | |||
@@ -25,10 +25,10 @@ | |||
25 | static struct kobject *mobility_kobj; | 25 | static struct kobject *mobility_kobj; |
26 | 26 | ||
27 | struct update_props_workarea { | 27 | struct update_props_workarea { |
28 | u32 phandle; | 28 | __be32 phandle; |
29 | u32 state; | 29 | __be32 state; |
30 | u64 reserved; | 30 | __be64 reserved; |
31 | u32 nprops; | 31 | __be32 nprops; |
32 | } __packed; | 32 | } __packed; |
33 | 33 | ||
34 | #define NODE_ACTION_MASK 0xff000000 | 34 | #define NODE_ACTION_MASK 0xff000000 |
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope) | |||
54 | return rc; | 54 | return rc; |
55 | } | 55 | } |
56 | 56 | ||
57 | static int delete_dt_node(u32 phandle) | 57 | static int delete_dt_node(__be32 phandle) |
58 | { | 58 | { |
59 | struct device_node *dn; | 59 | struct device_node *dn; |
60 | 60 | ||
61 | dn = of_find_node_by_phandle(phandle); | 61 | dn = of_find_node_by_phandle(be32_to_cpu(phandle)); |
62 | if (!dn) | 62 | if (!dn) |
63 | return -ENOENT; | 63 | return -ENOENT; |
64 | 64 | ||
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop, | |||
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int update_dt_node(u32 phandle, s32 scope) | 130 | static int update_dt_node(__be32 phandle, s32 scope) |
131 | { | 131 | { |
132 | struct update_props_workarea *upwa; | 132 | struct update_props_workarea *upwa; |
133 | struct device_node *dn; | 133 | struct device_node *dn; |
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
136 | char *prop_data; | 136 | char *prop_data; |
137 | char *rtas_buf; | 137 | char *rtas_buf; |
138 | int update_properties_token; | 138 | int update_properties_token; |
139 | u32 nprops; | ||
139 | u32 vd; | 140 | u32 vd; |
140 | 141 | ||
141 | update_properties_token = rtas_token("ibm,update-properties"); | 142 | update_properties_token = rtas_token("ibm,update-properties"); |
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
146 | if (!rtas_buf) | 147 | if (!rtas_buf) |
147 | return -ENOMEM; | 148 | return -ENOMEM; |
148 | 149 | ||
149 | dn = of_find_node_by_phandle(phandle); | 150 | dn = of_find_node_by_phandle(be32_to_cpu(phandle)); |
150 | if (!dn) { | 151 | if (!dn) { |
151 | kfree(rtas_buf); | 152 | kfree(rtas_buf); |
152 | return -ENOENT; | 153 | return -ENOENT; |
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
162 | break; | 163 | break; |
163 | 164 | ||
164 | prop_data = rtas_buf + sizeof(*upwa); | 165 | prop_data = rtas_buf + sizeof(*upwa); |
166 | nprops = be32_to_cpu(upwa->nprops); | ||
165 | 167 | ||
166 | /* On the first call to ibm,update-properties for a node the | 168 | /* On the first call to ibm,update-properties for a node the |
167 | * the first property value descriptor contains an empty | 169 | * the first property value descriptor contains an empty |
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
170 | */ | 172 | */ |
171 | if (*prop_data == 0) { | 173 | if (*prop_data == 0) { |
172 | prop_data++; | 174 | prop_data++; |
173 | vd = *(u32 *)prop_data; | 175 | vd = be32_to_cpu(*(__be32 *)prop_data); |
174 | prop_data += vd + sizeof(vd); | 176 | prop_data += vd + sizeof(vd); |
175 | upwa->nprops--; | 177 | nprops--; |
176 | } | 178 | } |
177 | 179 | ||
178 | for (i = 0; i < upwa->nprops; i++) { | 180 | for (i = 0; i < nprops; i++) { |
179 | char *prop_name; | 181 | char *prop_name; |
180 | 182 | ||
181 | prop_name = prop_data; | 183 | prop_name = prop_data; |
182 | prop_data += strlen(prop_name) + 1; | 184 | prop_data += strlen(prop_name) + 1; |
183 | vd = *(u32 *)prop_data; | 185 | vd = be32_to_cpu(*(__be32 *)prop_data); |
184 | prop_data += sizeof(vd); | 186 | prop_data += sizeof(vd); |
185 | 187 | ||
186 | switch (vd) { | 188 | switch (vd) { |
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
212 | return 0; | 214 | return 0; |
213 | } | 215 | } |
214 | 216 | ||
215 | static int add_dt_node(u32 parent_phandle, u32 drc_index) | 217 | static int add_dt_node(__be32 parent_phandle, __be32 drc_index) |
216 | { | 218 | { |
217 | struct device_node *dn; | 219 | struct device_node *dn; |
218 | struct device_node *parent_dn; | 220 | struct device_node *parent_dn; |
219 | int rc; | 221 | int rc; |
220 | 222 | ||
221 | parent_dn = of_find_node_by_phandle(parent_phandle); | 223 | parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle)); |
222 | if (!parent_dn) | 224 | if (!parent_dn) |
223 | return -ENOENT; | 225 | return -ENOENT; |
224 | 226 | ||
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index) | |||
237 | int pseries_devicetree_update(s32 scope) | 239 | int pseries_devicetree_update(s32 scope) |
238 | { | 240 | { |
239 | char *rtas_buf; | 241 | char *rtas_buf; |
240 | u32 *data; | 242 | __be32 *data; |
241 | int update_nodes_token; | 243 | int update_nodes_token; |
242 | int rc; | 244 | int rc; |
243 | 245 | ||
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope) | |||
254 | if (rc && rc != 1) | 256 | if (rc && rc != 1) |
255 | break; | 257 | break; |
256 | 258 | ||
257 | data = (u32 *)rtas_buf + 4; | 259 | data = (__be32 *)rtas_buf + 4; |
258 | while (*data & NODE_ACTION_MASK) { | 260 | while (be32_to_cpu(*data) & NODE_ACTION_MASK) { |
259 | int i; | 261 | int i; |
260 | u32 action = *data & NODE_ACTION_MASK; | 262 | u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK; |
261 | int node_count = *data & NODE_COUNT_MASK; | 263 | u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK; |
262 | 264 | ||
263 | data++; | 265 | data++; |
264 | 266 | ||
265 | for (i = 0; i < node_count; i++) { | 267 | for (i = 0; i < node_count; i++) { |
266 | u32 phandle = *data++; | 268 | __be32 phandle = *data++; |
267 | u32 drc_index; | 269 | __be32 drc_index; |
268 | 270 | ||
269 | switch (action) { | 271 | switch (action) { |
270 | case DELETE_DT_NODE: | 272 | case DELETE_DT_NODE: |
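
The mobility.c changes are all one idea: the work areas returned by ibm,update-nodes and ibm,update-properties are big-endian, so the fields are retyped as __be32/__be64 and converted with be32_to_cpu() before being masked or used as counters. A kernel-style sketch of that pattern, assuming the NODE_ACTION_MASK/NODE_COUNT_MASK layout used above; the helper name is made up.

/*
 * Sketch of the endian-safe parsing pattern adopted above: treat the raw
 * firmware word as __be32, convert once with be32_to_cpu(), then do all
 * masking on the native-endian value.  Helper name is illustrative.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

#define NODE_ACTION_MASK	0xff000000
#define NODE_COUNT_MASK		0x00ffffff

static void parse_node_header(const __be32 *data, u32 *action, u32 *count)
{
	u32 word = be32_to_cpu(*data);

	*action = word & NODE_ACTION_MASK;
	*count  = word & NODE_COUNT_MASK;
}
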
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index c9df40b5c0ac..c9c875d9ed31 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h | |||
@@ -211,7 +211,7 @@ do { \ | |||
211 | 211 | ||
212 | extern unsigned long mmap_rnd_mask; | 212 | extern unsigned long mmap_rnd_mask; |
213 | 213 | ||
214 | #define STACK_RND_MASK (mmap_rnd_mask) | 214 | #define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask) |
215 | 215 | ||
216 | #define ARCH_DLINFO \ | 216 | #define ARCH_DLINFO \ |
217 | do { \ | 217 | do { \ |
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index d84559e31f32..f407bbf5ee94 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -515,15 +515,15 @@ struct s390_io_adapter { | |||
515 | #define S390_ARCH_FAC_MASK_SIZE_U64 \ | 515 | #define S390_ARCH_FAC_MASK_SIZE_U64 \ |
516 | (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) | 516 | (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) |
517 | 517 | ||
518 | struct s390_model_fac { | 518 | struct kvm_s390_fac { |
519 | /* facilities used in SIE context */ | 519 | /* facility list requested by guest */ |
520 | __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64]; | 520 | __u64 list[S390_ARCH_FAC_LIST_SIZE_U64]; |
521 | /* subset enabled by kvm */ | 521 | /* facility mask supported by kvm & hosting machine */ |
522 | __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64]; | 522 | __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64]; |
523 | }; | 523 | }; |
524 | 524 | ||
525 | struct kvm_s390_cpu_model { | 525 | struct kvm_s390_cpu_model { |
526 | struct s390_model_fac *fac; | 526 | struct kvm_s390_fac *fac; |
527 | struct cpuid cpu_id; | 527 | struct cpuid cpu_id; |
528 | unsigned short ibc; | 528 | unsigned short ibc; |
529 | }; | 529 | }; |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index f49b71954654..8fb3802f8fad 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
62 | { | 62 | { |
63 | int cpu = smp_processor_id(); | 63 | int cpu = smp_processor_id(); |
64 | 64 | ||
65 | S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); | ||
65 | if (prev == next) | 66 | if (prev == next) |
66 | return; | 67 | return; |
67 | if (MACHINE_HAS_TLB_LC) | 68 | if (MACHINE_HAS_TLB_LC) |
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
73 | atomic_dec(&prev->context.attach_count); | 74 | atomic_dec(&prev->context.attach_count); |
74 | if (MACHINE_HAS_TLB_LC) | 75 | if (MACHINE_HAS_TLB_LC) |
75 | cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); | 76 | cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); |
76 | S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); | ||
77 | } | 77 | } |
78 | 78 | ||
79 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch | 79 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 7b2ac6e44166..53eacbd4f09b 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end | |||
37 | #endif | 37 | #endif |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline void clear_page(void *page) | 40 | #define clear_page(page) memset((page), 0, PAGE_SIZE) |
41 | { | ||
42 | register unsigned long reg1 asm ("1") = 0; | ||
43 | register void *reg2 asm ("2") = page; | ||
44 | register unsigned long reg3 asm ("3") = 4096; | ||
45 | asm volatile( | ||
46 | " mvcl 2,0" | ||
47 | : "+d" (reg2), "+d" (reg3) : "d" (reg1) | ||
48 | : "memory", "cc"); | ||
49 | } | ||
50 | 41 | ||
51 | /* | 42 | /* |
52 | * copy_page uses the mvcl instruction with 0xb0 padding byte in order to | 43 | * copy_page uses the mvcl instruction with 0xb0 padding byte in order to |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index fbb5ee3ae57c..e08ec38f8c6e 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -91,7 +91,9 @@ extern unsigned long zero_page_mask; | |||
91 | */ | 91 | */ |
92 | #define PTRS_PER_PTE 256 | 92 | #define PTRS_PER_PTE 256 |
93 | #ifndef CONFIG_64BIT | 93 | #ifndef CONFIG_64BIT |
94 | #define __PAGETABLE_PUD_FOLDED | ||
94 | #define PTRS_PER_PMD 1 | 95 | #define PTRS_PER_PMD 1 |
96 | #define __PAGETABLE_PMD_FOLDED | ||
95 | #define PTRS_PER_PUD 1 | 97 | #define PTRS_PER_PUD 1 |
96 | #else /* CONFIG_64BIT */ | 98 | #else /* CONFIG_64BIT */ |
97 | #define PTRS_PER_PMD 2048 | 99 | #define PTRS_PER_PMD 2048 |
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 82c19899574f..6c79f1b44fe7 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -57,6 +57,44 @@ | |||
57 | 57 | ||
58 | unsigned long ftrace_plt; | 58 | unsigned long ftrace_plt; |
59 | 59 | ||
60 | static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn) | ||
61 | { | ||
62 | #ifdef CC_USING_HOTPATCH | ||
63 | /* brcl 0,0 */ | ||
64 | insn->opc = 0xc004; | ||
65 | insn->disp = 0; | ||
66 | #else | ||
67 | /* stg r14,8(r15) */ | ||
68 | insn->opc = 0xe3e0; | ||
69 | insn->disp = 0xf0080024; | ||
70 | #endif | ||
71 | } | ||
72 | |||
73 | static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn) | ||
74 | { | ||
75 | #ifdef CONFIG_KPROBES | ||
76 | if (insn->opc == BREAKPOINT_INSTRUCTION) | ||
77 | return 1; | ||
78 | #endif | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn) | ||
83 | { | ||
84 | #ifdef CONFIG_KPROBES | ||
85 | insn->opc = BREAKPOINT_INSTRUCTION; | ||
86 | insn->disp = KPROBE_ON_FTRACE_NOP; | ||
87 | #endif | ||
88 | } | ||
89 | |||
90 | static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn) | ||
91 | { | ||
92 | #ifdef CONFIG_KPROBES | ||
93 | insn->opc = BREAKPOINT_INSTRUCTION; | ||
94 | insn->disp = KPROBE_ON_FTRACE_CALL; | ||
95 | #endif | ||
96 | } | ||
97 | |||
60 | int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, | 98 | int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, |
61 | unsigned long addr) | 99 | unsigned long addr) |
62 | { | 100 | { |
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | |||
72 | return -EFAULT; | 110 | return -EFAULT; |
73 | if (addr == MCOUNT_ADDR) { | 111 | if (addr == MCOUNT_ADDR) { |
74 | /* Initial code replacement */ | 112 | /* Initial code replacement */ |
75 | #ifdef CC_USING_HOTPATCH | 113 | ftrace_generate_orig_insn(&orig); |
76 | /* We expect to see brcl 0,0 */ | ||
77 | ftrace_generate_nop_insn(&orig); | ||
78 | #else | ||
79 | /* We expect to see stg r14,8(r15) */ | ||
80 | orig.opc = 0xe3e0; | ||
81 | orig.disp = 0xf0080024; | ||
82 | #endif | ||
83 | ftrace_generate_nop_insn(&new); | 114 | ftrace_generate_nop_insn(&new); |
84 | } else if (old.opc == BREAKPOINT_INSTRUCTION) { | 115 | } else if (is_kprobe_on_ftrace(&old)) { |
85 | /* | 116 | /* |
86 | * If we find a breakpoint instruction, a kprobe has been | 117 | * If we find a breakpoint instruction, a kprobe has been |
87 | * placed at the beginning of the function. We write the | 118 | * placed at the beginning of the function. We write the |
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | |||
89 | * bytes of the original instruction so that the kprobes | 120 | * bytes of the original instruction so that the kprobes |
90 | * handler can execute a nop, if it reaches this breakpoint. | 121 | * handler can execute a nop, if it reaches this breakpoint. |
91 | */ | 122 | */ |
92 | new.opc = orig.opc = BREAKPOINT_INSTRUCTION; | 123 | ftrace_generate_kprobe_call_insn(&orig); |
93 | orig.disp = KPROBE_ON_FTRACE_CALL; | 124 | ftrace_generate_kprobe_nop_insn(&new); |
94 | new.disp = KPROBE_ON_FTRACE_NOP; | ||
95 | } else { | 125 | } else { |
96 | /* Replace ftrace call with a nop. */ | 126 | /* Replace ftrace call with a nop. */ |
97 | ftrace_generate_call_insn(&orig, rec->ip); | 127 | ftrace_generate_call_insn(&orig, rec->ip); |
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
111 | 141 | ||
112 | if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) | 142 | if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) |
113 | return -EFAULT; | 143 | return -EFAULT; |
114 | if (old.opc == BREAKPOINT_INSTRUCTION) { | 144 | if (is_kprobe_on_ftrace(&old)) { |
115 | /* | 145 | /* |
116 | * If we find a breakpoint instruction, a kprobe has been | 146 | * If we find a breakpoint instruction, a kprobe has been |
117 | * placed at the beginning of the function. We write the | 147 | * placed at the beginning of the function. We write the |
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
119 | * bytes of the original instruction so that the kprobes | 149 | * bytes of the original instruction so that the kprobes |
120 | * handler can execute a brasl if it reaches this breakpoint. | 150 | * handler can execute a brasl if it reaches this breakpoint. |
121 | */ | 151 | */ |
122 | new.opc = orig.opc = BREAKPOINT_INSTRUCTION; | 152 | ftrace_generate_kprobe_nop_insn(&orig); |
123 | orig.disp = KPROBE_ON_FTRACE_NOP; | 153 | ftrace_generate_kprobe_call_insn(&new); |
124 | new.disp = KPROBE_ON_FTRACE_CALL; | ||
125 | } else { | 154 | } else { |
126 | /* Replace nop with an ftrace call. */ | 155 | /* Replace nop with an ftrace call. */ |
127 | ftrace_generate_nop_insn(&orig); | 156 | ftrace_generate_nop_insn(&orig); |
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index cb2d51e779df..830066f936c8 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c | |||
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn) | |||
36 | insn->offset = (entry->target - entry->code) >> 1; | 36 | insn->offset = (entry->target - entry->code) >> 1; |
37 | } | 37 | } |
38 | 38 | ||
39 | static void jump_label_bug(struct jump_entry *entry, struct insn *insn) | 39 | static void jump_label_bug(struct jump_entry *entry, struct insn *expected, |
40 | struct insn *new) | ||
40 | { | 41 | { |
41 | unsigned char *ipc = (unsigned char *)entry->code; | 42 | unsigned char *ipc = (unsigned char *)entry->code; |
42 | unsigned char *ipe = (unsigned char *)insn; | 43 | unsigned char *ipe = (unsigned char *)expected; |
44 | unsigned char *ipn = (unsigned char *)new; | ||
43 | 45 | ||
44 | pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); | 46 | pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); |
45 | pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", | 47 | pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", |
46 | ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); | 48 | ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); |
47 | pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", | 49 | pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", |
48 | ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); | 50 | ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); |
51 | pr_emerg("New: %02x %02x %02x %02x %02x %02x\n", | ||
52 | ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]); | ||
49 | panic("Corrupted kernel text"); | 53 | panic("Corrupted kernel text"); |
50 | } | 54 | } |
51 | 55 | ||
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry, | |||
69 | } | 73 | } |
70 | if (init) { | 74 | if (init) { |
71 | if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) | 75 | if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) |
72 | jump_label_bug(entry, &old); | 76 | jump_label_bug(entry, &orignop, &new); |
73 | } else { | 77 | } else { |
74 | if (memcmp((void *)entry->code, &old, sizeof(old))) | 78 | if (memcmp((void *)entry->code, &old, sizeof(old))) |
75 | jump_label_bug(entry, &old); | 79 | jump_label_bug(entry, &old, &new); |
76 | } | 80 | } |
77 | probe_kernel_write((void *)entry->code, &new, sizeof(new)); | 81 | probe_kernel_write((void *)entry->code, &new, sizeof(new)); |
78 | } | 82 | } |
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 36154a2f1814..2ca95862e336 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
436 | const Elf_Shdr *sechdrs, | 436 | const Elf_Shdr *sechdrs, |
437 | struct module *me) | 437 | struct module *me) |
438 | { | 438 | { |
439 | jump_label_apply_nops(me); | ||
439 | vfree(me->arch.syminfo); | 440 | vfree(me->arch.syminfo); |
440 | me->arch.syminfo = NULL; | 441 | me->arch.syminfo = NULL; |
441 | return 0; | 442 | return 0; |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index c3f8d157cb0d..e6a1578fc000 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); | |||
1415 | 1415 | ||
1416 | static struct attribute *cpumsf_pmu_events_attr[] = { | 1416 | static struct attribute *cpumsf_pmu_events_attr[] = { |
1417 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), | 1417 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), |
1418 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), | 1418 | NULL, |
1419 | NULL, | 1419 | NULL, |
1420 | }; | 1420 | }; |
1421 | 1421 | ||
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void) | |||
1606 | return -EINVAL; | 1606 | return -EINVAL; |
1607 | } | 1607 | } |
1608 | 1608 | ||
1609 | if (si.ad) | 1609 | if (si.ad) { |
1610 | sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); | 1610 | sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); |
1611 | cpumsf_pmu_events_attr[1] = | ||
1612 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG); | ||
1613 | } | ||
1611 | 1614 | ||
1612 | sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); | 1615 | sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); |
1613 | if (!sfdbg) | 1616 | if (!sfdbg) |
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 26108232fcaa..dc488e13b7e3 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c | |||
@@ -18,7 +18,7 @@ | |||
18 | 18 | ||
19 | static DEFINE_PER_CPU(struct cpuid, cpu_id); | 19 | static DEFINE_PER_CPU(struct cpuid, cpu_id); |
20 | 20 | ||
21 | void cpu_relax(void) | 21 | void notrace cpu_relax(void) |
22 | { | 22 | { |
23 | if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) | 23 | if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) |
24 | asm volatile("diag 0,0,0x44"); | 24 | asm volatile("diag 0,0,0x44"); |
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 6b09fdffbd2f..ca6294645dd3 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -177,6 +177,17 @@ restart_entry: | |||
177 | lhi %r1,1 | 177 | lhi %r1,1 |
178 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE | 178 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE |
179 | sam64 | 179 | sam64 |
180 | #ifdef CONFIG_SMP | ||
181 | larl %r1,smp_cpu_mt_shift | ||
182 | icm %r1,15,0(%r1) | ||
183 | jz smt_done | ||
184 | llgfr %r1,%r1 | ||
185 | smt_loop: | ||
186 | sigp %r1,%r0,SIGP_SET_MULTI_THREADING | ||
187 | brc 8,smt_done /* accepted */ | ||
188 | brc 2,smt_loop /* busy, try again */ | ||
189 | smt_done: | ||
190 | #endif | ||
180 | larl %r1,.Lnew_pgm_check_psw | 191 | larl %r1,.Lnew_pgm_check_psw |
181 | lpswe 0(%r1) | 192 | lpswe 0(%r1) |
182 | pgm_check_entry: | 193 | pgm_check_entry: |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0c3623927563..19e17bd7aec0 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -165,7 +165,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
165 | case KVM_CAP_ONE_REG: | 165 | case KVM_CAP_ONE_REG: |
166 | case KVM_CAP_ENABLE_CAP: | 166 | case KVM_CAP_ENABLE_CAP: |
167 | case KVM_CAP_S390_CSS_SUPPORT: | 167 | case KVM_CAP_S390_CSS_SUPPORT: |
168 | case KVM_CAP_IRQFD: | ||
169 | case KVM_CAP_IOEVENTFD: | 168 | case KVM_CAP_IOEVENTFD: |
170 | case KVM_CAP_DEVICE_CTRL: | 169 | case KVM_CAP_DEVICE_CTRL: |
171 | case KVM_CAP_ENABLE_CAP_VM: | 170 | case KVM_CAP_ENABLE_CAP_VM: |
@@ -522,7 +521,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) | |||
522 | memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, | 521 | memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, |
523 | sizeof(struct cpuid)); | 522 | sizeof(struct cpuid)); |
524 | kvm->arch.model.ibc = proc->ibc; | 523 | kvm->arch.model.ibc = proc->ibc; |
525 | memcpy(kvm->arch.model.fac->kvm, proc->fac_list, | 524 | memcpy(kvm->arch.model.fac->list, proc->fac_list, |
526 | S390_ARCH_FAC_LIST_SIZE_BYTE); | 525 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
527 | } else | 526 | } else |
528 | ret = -EFAULT; | 527 | ret = -EFAULT; |
@@ -556,7 +555,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) | |||
556 | } | 555 | } |
557 | memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); | 556 | memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); |
558 | proc->ibc = kvm->arch.model.ibc; | 557 | proc->ibc = kvm->arch.model.ibc; |
559 | memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE); | 558 | memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); |
560 | if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) | 559 | if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) |
561 | ret = -EFAULT; | 560 | ret = -EFAULT; |
562 | kfree(proc); | 561 | kfree(proc); |
@@ -576,10 +575,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) | |||
576 | } | 575 | } |
577 | get_cpu_id((struct cpuid *) &mach->cpuid); | 576 | get_cpu_id((struct cpuid *) &mach->cpuid); |
578 | mach->ibc = sclp_get_ibc(); | 577 | mach->ibc = sclp_get_ibc(); |
579 | memcpy(&mach->fac_mask, kvm_s390_fac_list_mask, | 578 | memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, |
580 | kvm_s390_fac_list_mask_size() * sizeof(u64)); | 579 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
581 | memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, | 580 | memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, |
582 | S390_ARCH_FAC_LIST_SIZE_U64); | 581 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
583 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) | 582 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) |
584 | ret = -EFAULT; | 583 | ret = -EFAULT; |
585 | kfree(mach); | 584 | kfree(mach); |
@@ -778,15 +777,18 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
778 | static int kvm_s390_query_ap_config(u8 *config) | 777 | static int kvm_s390_query_ap_config(u8 *config) |
779 | { | 778 | { |
780 | u32 fcn_code = 0x04000000UL; | 779 | u32 fcn_code = 0x04000000UL; |
781 | u32 cc; | 780 | u32 cc = 0; |
782 | 781 | ||
782 | memset(config, 0, 128); | ||
783 | asm volatile( | 783 | asm volatile( |
784 | "lgr 0,%1\n" | 784 | "lgr 0,%1\n" |
785 | "lgr 2,%2\n" | 785 | "lgr 2,%2\n" |
786 | ".long 0xb2af0000\n" /* PQAP(QCI) */ | 786 | ".long 0xb2af0000\n" /* PQAP(QCI) */ |
787 | "ipm %0\n" | 787 | "0: ipm %0\n" |
788 | "srl %0,28\n" | 788 | "srl %0,28\n" |
789 | : "=r" (cc) | 789 | "1:\n" |
790 | EX_TABLE(0b, 1b) | ||
791 | : "+r" (cc) | ||
790 | : "r" (fcn_code), "r" (config) | 792 | : "r" (fcn_code), "r" (config) |
791 | : "cc", "0", "2", "memory" | 793 | : "cc", "0", "2", "memory" |
792 | ); | 794 | ); |
@@ -839,9 +841,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm) | |||
839 | 841 | ||
840 | kvm_s390_set_crycb_format(kvm); | 842 | kvm_s390_set_crycb_format(kvm); |
841 | 843 | ||
842 | /* Disable AES/DEA protected key functions by default */ | 844 | /* Enable AES/DEA protected key functions by default */ |
843 | kvm->arch.crypto.aes_kw = 0; | 845 | kvm->arch.crypto.aes_kw = 1; |
844 | kvm->arch.crypto.dea_kw = 0; | 846 | kvm->arch.crypto.dea_kw = 1; |
847 | get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, | ||
848 | sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); | ||
849 | get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, | ||
850 | sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); | ||
845 | 851 | ||
846 | return 0; | 852 | return 0; |
847 | } | 853 | } |
@@ -886,40 +892,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
886 | /* | 892 | /* |
887 | * The architectural maximum amount of facilities is 16 kbit. To store | 893 | * The architectural maximum amount of facilities is 16 kbit. To store |
888 | * this amount, 2 kbyte of memory is required. Thus we need a full | 894 | * this amount, 2 kbyte of memory is required. Thus we need a full |
889 | * page to hold the active copy (arch.model.fac->sie) and the current | 895 | * page to hold the guest facility list (arch.model.fac->list) and the |
890 | * facilities set (arch.model.fac->kvm). Its address size has to be | 896 | * facility mask (arch.model.fac->mask). Its address size has to be |
891 | * 31 bits and word aligned. | 897 | * 31 bits and word aligned. |
892 | */ | 898 | */ |
893 | kvm->arch.model.fac = | 899 | kvm->arch.model.fac = |
894 | (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 900 | (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
895 | if (!kvm->arch.model.fac) | 901 | if (!kvm->arch.model.fac) |
896 | goto out_nofac; | 902 | goto out_nofac; |
897 | 903 | ||
898 | memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, | 904 | /* Populate the facility mask initially. */ |
899 | S390_ARCH_FAC_LIST_SIZE_U64); | 905 | memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, |
900 | 906 | S390_ARCH_FAC_LIST_SIZE_BYTE); | |
901 | /* | ||
902 | * If this KVM host runs *not* in a LPAR, relax the facility bits | ||
903 | * of the kvm facility mask by all missing facilities. This will allow | ||
904 | * to determine the right CPU model by means of the remaining facilities. | ||
905 | * Live guest migration must prohibit the migration of KVMs running in | ||
906 | * a LPAR to non LPAR hosts. | ||
907 | */ | ||
908 | if (!MACHINE_IS_LPAR) | ||
909 | for (i = 0; i < kvm_s390_fac_list_mask_size(); i++) | ||
910 | kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i]; | ||
911 | |||
912 | /* | ||
913 | * Apply the kvm facility mask to limit the kvm supported/tolerated | ||
914 | * facility list. | ||
915 | */ | ||
916 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { | 907 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { |
917 | if (i < kvm_s390_fac_list_mask_size()) | 908 | if (i < kvm_s390_fac_list_mask_size()) |
918 | kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; | 909 | kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; |
919 | else | 910 | else |
920 | kvm->arch.model.fac->kvm[i] = 0UL; | 911 | kvm->arch.model.fac->mask[i] = 0UL; |
921 | } | 912 | } |
922 | 913 | ||
914 | /* Populate the facility list initially. */ | ||
915 | memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, | ||
916 | S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
917 | |||
923 | kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); | 918 | kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); |
924 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; | 919 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; |
925 | 920 | ||
@@ -1165,8 +1160,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1165 | 1160 | ||
1166 | mutex_lock(&vcpu->kvm->lock); | 1161 | mutex_lock(&vcpu->kvm->lock); |
1167 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; | 1162 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; |
1168 | memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm, | ||
1169 | S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
1170 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; | 1163 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; |
1171 | mutex_unlock(&vcpu->kvm->lock); | 1164 | mutex_unlock(&vcpu->kvm->lock); |
1172 | 1165 | ||
@@ -1212,7 +1205,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1212 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; | 1205 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; |
1213 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); | 1206 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); |
1214 | } | 1207 | } |
1215 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie; | 1208 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list; |
1216 | 1209 | ||
1217 | spin_lock_init(&vcpu->arch.local_int.lock); | 1210 | spin_lock_init(&vcpu->arch.local_int.lock); |
1218 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; | 1211 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 985c2114d7ef..c34109aa552d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) | |||
128 | /* test availability of facility in a kvm instance */ | 128 | /* test availability of facility in a kvm instance */ |
129 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) | 129 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) |
130 | { | 130 | { |
131 | return __test_facility(nr, kvm->arch.model.fac->kvm); | 131 | return __test_facility(nr, kvm->arch.model.fac->mask) && |
132 | __test_facility(nr, kvm->arch.model.fac->list); | ||
132 | } | 133 | } |
133 | 134 | ||
134 | /* are cpu states controlled by user space */ | 135 | /* are cpu states controlled by user space */ |
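For context on the two hunks above: the facility data now lives in a single zeroed page split into a host-side mask (what the host and KVM tolerate) and the list actually presented to the guest, and a facility only counts as available when both have the bit set. A minimal standalone sketch of that test, assuming byte-wise access and the architecture's MSB-first bit numbering; the helper names are invented for illustration and are not the kernel's __test_facility():

    /* Sketch only: s390 numbers facility bits from the most significant
     * bit of byte 0, so the test works on bytes rather than native longs. */
    static inline int fac_bit_set(unsigned long nr, const unsigned char *fac)
    {
            return (fac[nr >> 3] >> (7 - (nr & 7))) & 1;
    }

    /* A facility is usable by the guest only if mask and list agree. */
    static inline int fac_available(const unsigned char *mask,
                                    const unsigned char *list,
                                    unsigned long nr)
    {
            return fac_bit_set(nr, mask) && fac_bit_set(nr, list);
    }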
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index bdd9b5b17e03..351116939ea2 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) | |||
348 | * We need to shift the lower 32 facility bits (bit 0-31) from a u64 | 348 | * We need to shift the lower 32 facility bits (bit 0-31) from a u64 |
349 | * into a u32 memory representation. They will remain bits 0-31. | 349 | * into a u32 memory representation. They will remain bits 0-31. |
350 | */ | 350 | */ |
351 | fac = *vcpu->kvm->arch.model.fac->sie >> 32; | 351 | fac = *vcpu->kvm->arch.model.fac->list >> 32; |
352 | rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), | 352 | rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), |
353 | &fac, sizeof(fac)); | 353 | &fac, sizeof(fac)); |
354 | if (rc) | 354 | if (rc) |
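The comment in this hunk leans on the same bit-numbering convention: facility bit 0 is the most significant bit of the first doubleword, so shifting the u64 right by 32 yields a u32 in which bits 0-31 keep their numbers. A tiny sketch with a made-up value, not taken from the patch:

    #include <stdint.h>

    /* Illustration of the ">> 32" above: the upper half of STFLE word 0
     * (IBM bits 0-31) becomes the whole 32-bit lowcore field. */
    static uint32_t stfl_from_stfle(uint64_t fac_dword)
    {
            return fac_dword >> 32;   /* e.g. 0xc0ffee12xxxxxxxx -> 0xc0ffee12 */
    }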
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 753a56731951..f0b85443e060 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, | |||
287 | addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); | 287 | addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); |
288 | return (void __iomem *) addr + offset; | 288 | return (void __iomem *) addr + offset; |
289 | } | 289 | } |
290 | EXPORT_SYMBOL_GPL(pci_iomap_range); | 290 | EXPORT_SYMBOL(pci_iomap_range); |
291 | 291 | ||
292 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 292 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
293 | { | 293 | { |
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) | |||
309 | } | 309 | } |
310 | spin_unlock(&zpci_iomap_lock); | 310 | spin_unlock(&zpci_iomap_lock); |
311 | } | 311 | } |
312 | EXPORT_SYMBOL_GPL(pci_iounmap); | 312 | EXPORT_SYMBOL(pci_iounmap); |
313 | 313 | ||
314 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, | 314 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, |
315 | int size, u32 *val) | 315 | int size, u32 *val) |
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) | |||
483 | airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); | 483 | airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); |
484 | } | 484 | } |
485 | 485 | ||
486 | static void zpci_map_resources(struct zpci_dev *zdev) | 486 | static void zpci_map_resources(struct pci_dev *pdev) |
487 | { | 487 | { |
488 | struct pci_dev *pdev = zdev->pdev; | ||
489 | resource_size_t len; | 488 | resource_size_t len; |
490 | int i; | 489 | int i; |
491 | 490 | ||
@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev) | |||
499 | } | 498 | } |
500 | } | 499 | } |
501 | 500 | ||
502 | static void zpci_unmap_resources(struct zpci_dev *zdev) | 501 | static void zpci_unmap_resources(struct pci_dev *pdev) |
503 | { | 502 | { |
504 | struct pci_dev *pdev = zdev->pdev; | ||
505 | resource_size_t len; | 503 | resource_size_t len; |
506 | int i; | 504 | int i; |
507 | 505 | ||
@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev) | |||
651 | 649 | ||
652 | zdev->pdev = pdev; | 650 | zdev->pdev = pdev; |
653 | pdev->dev.groups = zpci_attr_groups; | 651 | pdev->dev.groups = zpci_attr_groups; |
654 | zpci_map_resources(zdev); | 652 | zpci_map_resources(pdev); |
655 | 653 | ||
656 | for (i = 0; i < PCI_BAR_COUNT; i++) { | 654 | for (i = 0; i < PCI_BAR_COUNT; i++) { |
657 | res = &pdev->resource[i]; | 655 | res = &pdev->resource[i]; |
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev) | |||
663 | return 0; | 661 | return 0; |
664 | } | 662 | } |
665 | 663 | ||
664 | void pcibios_release_device(struct pci_dev *pdev) | ||
665 | { | ||
666 | zpci_unmap_resources(pdev); | ||
667 | } | ||
668 | |||
666 | int pcibios_enable_device(struct pci_dev *pdev, int mask) | 669 | int pcibios_enable_device(struct pci_dev *pdev, int mask) |
667 | { | 670 | { |
668 | struct zpci_dev *zdev = get_zdev(pdev); | 671 | struct zpci_dev *zdev = get_zdev(pdev); |
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) | |||
670 | zdev->pdev = pdev; | 673 | zdev->pdev = pdev; |
671 | zpci_debug_init_device(zdev); | 674 | zpci_debug_init_device(zdev); |
672 | zpci_fmb_enable_device(zdev); | 675 | zpci_fmb_enable_device(zdev); |
673 | zpci_map_resources(zdev); | ||
674 | 676 | ||
675 | return pci_enable_resources(pdev, mask); | 677 | return pci_enable_resources(pdev, mask); |
676 | } | 678 | } |
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev) | |||
679 | { | 681 | { |
680 | struct zpci_dev *zdev = get_zdev(pdev); | 682 | struct zpci_dev *zdev = get_zdev(pdev); |
681 | 683 | ||
682 | zpci_unmap_resources(zdev); | ||
683 | zpci_fmb_disable_device(zdev); | 684 | zpci_fmb_disable_device(zdev); |
684 | zpci_debug_exit_device(zdev); | 685 | zpci_debug_exit_device(zdev); |
685 | zdev->pdev = NULL; | 686 | zdev->pdev = NULL; |
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev) | |||
688 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 689 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
689 | static int zpci_restore(struct device *dev) | 690 | static int zpci_restore(struct device *dev) |
690 | { | 691 | { |
691 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 692 | struct pci_dev *pdev = to_pci_dev(dev); |
693 | struct zpci_dev *zdev = get_zdev(pdev); | ||
692 | int ret = 0; | 694 | int ret = 0; |
693 | 695 | ||
694 | if (zdev->state != ZPCI_FN_STATE_ONLINE) | 696 | if (zdev->state != ZPCI_FN_STATE_ONLINE) |
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev) | |||
698 | if (ret) | 700 | if (ret) |
699 | goto out; | 701 | goto out; |
700 | 702 | ||
701 | zpci_map_resources(zdev); | 703 | zpci_map_resources(pdev); |
702 | zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, | 704 | zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, |
703 | zdev->start_dma + zdev->iommu_size - 1, | 705 | zdev->start_dma + zdev->iommu_size - 1, |
704 | (u64) zdev->dma_table); | 706 | (u64) zdev->dma_table); |
@@ -709,12 +711,14 @@ out: | |||
709 | 711 | ||
710 | static int zpci_freeze(struct device *dev) | 712 | static int zpci_freeze(struct device *dev) |
711 | { | 713 | { |
712 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 714 | struct pci_dev *pdev = to_pci_dev(dev); |
715 | struct zpci_dev *zdev = get_zdev(pdev); | ||
713 | 716 | ||
714 | if (zdev->state != ZPCI_FN_STATE_ONLINE) | 717 | if (zdev->state != ZPCI_FN_STATE_ONLINE) |
715 | return 0; | 718 | return 0; |
716 | 719 | ||
717 | zpci_unregister_ioat(zdev, 0); | 720 | zpci_unregister_ioat(zdev, 0); |
721 | zpci_unmap_resources(pdev); | ||
718 | return clp_disable_fh(zdev); | 722 | return clp_disable_fh(zdev); |
719 | } | 723 | } |
720 | 724 | ||
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 8aa271b3d1ad..b1bb2b72302c 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c | |||
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, | |||
64 | if (copy_from_user(buf, user_buffer, length)) | 64 | if (copy_from_user(buf, user_buffer, length)) |
65 | goto out; | 65 | goto out; |
66 | 66 | ||
67 | memcpy_toio(io_addr, buf, length); | 67 | ret = zpci_memcpy_toio(io_addr, buf, length); |
68 | ret = 0; | ||
69 | out: | 68 | out: |
70 | if (buf != local_buf) | 69 | if (buf != local_buf) |
71 | kfree(buf); | 70 | kfree(buf); |
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, | |||
98 | goto out; | 97 | goto out; |
99 | io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); | 98 | io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); |
100 | 99 | ||
101 | ret = -EFAULT; | 100 | if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) { |
102 | if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) | 101 | ret = -EFAULT; |
103 | goto out; | 102 | goto out; |
104 | 103 | } | |
105 | memcpy_fromio(buf, io_addr, length); | 104 | ret = zpci_memcpy_fromio(buf, io_addr, length); |
106 | 105 | if (ret) | |
107 | if (copy_to_user(user_buffer, buf, length)) | ||
108 | goto out; | 106 | goto out; |
107 | if (copy_to_user(user_buffer, buf, length)) | ||
108 | ret = -EFAULT; | ||
109 | 109 | ||
110 | ret = 0; | ||
111 | out: | 110 | out: |
112 | if (buf != local_buf) | 111 | if (buf != local_buf) |
113 | kfree(buf); | 112 | kfree(buf); |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 96ac69c5eba0..efb00ec75805 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -86,6 +86,9 @@ config ARCH_DEFCONFIG | |||
86 | default "arch/sparc/configs/sparc32_defconfig" if SPARC32 | 86 | default "arch/sparc/configs/sparc32_defconfig" if SPARC32 |
87 | default "arch/sparc/configs/sparc64_defconfig" if SPARC64 | 87 | default "arch/sparc/configs/sparc64_defconfig" if SPARC64 |
88 | 88 | ||
89 | config ARCH_PROC_KCORE_TEXT | ||
90 | def_bool y | ||
91 | |||
89 | config IOMMU_HELPER | 92 | config IOMMU_HELPER |
90 | bool | 93 | bool |
91 | default y if SPARC64 | 94 | default y if SPARC64 |
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 4f6725ff4c33..f5b6537306f0 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h | |||
@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, | |||
2957 | unsigned long reg_val); | 2957 | unsigned long reg_val); |
2958 | #endif | 2958 | #endif |
2959 | 2959 | ||
2960 | |||
2961 | #define HV_FAST_M7_GET_PERFREG 0x43 | ||
2962 | #define HV_FAST_M7_SET_PERFREG 0x44 | ||
2963 | |||
2964 | #ifndef __ASSEMBLY__ | ||
2965 | unsigned long sun4v_m7_get_perfreg(unsigned long reg_num, | ||
2966 | unsigned long *reg_val); | ||
2967 | unsigned long sun4v_m7_set_perfreg(unsigned long reg_num, | ||
2968 | unsigned long reg_val); | ||
2969 | #endif | ||
2970 | |||
2960 | /* Function numbers for HV_CORE_TRAP. */ | 2971 | /* Function numbers for HV_CORE_TRAP. */ |
2961 | #define HV_CORE_SET_VER 0x00 | 2972 | #define HV_CORE_SET_VER 0x00 |
2962 | #define HV_CORE_PUTCHAR 0x01 | 2973 | #define HV_CORE_PUTCHAR 0x01 |
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, | |||
2981 | #define HV_GRP_SDIO 0x0108 | 2992 | #define HV_GRP_SDIO 0x0108 |
2982 | #define HV_GRP_SDIO_ERR 0x0109 | 2993 | #define HV_GRP_SDIO_ERR 0x0109 |
2983 | #define HV_GRP_REBOOT_DATA 0x0110 | 2994 | #define HV_GRP_REBOOT_DATA 0x0110 |
2995 | #define HV_GRP_M7_PERF 0x0114 | ||
2984 | #define HV_GRP_NIAG_PERF 0x0200 | 2996 | #define HV_GRP_NIAG_PERF 0x0200 |
2985 | #define HV_GRP_FIRE_PERF 0x0201 | 2997 | #define HV_GRP_FIRE_PERF 0x0201 |
2986 | #define HV_GRP_N2_CPU 0x0202 | 2998 | #define HV_GRP_N2_CPU 0x0202 |
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index 9b672be70dda..50d4840d9aeb 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h | |||
@@ -407,16 +407,16 @@ static inline void iounmap(volatile void __iomem *addr) | |||
407 | { | 407 | { |
408 | } | 408 | } |
409 | 409 | ||
410 | #define ioread8(X) readb(X) | 410 | #define ioread8 readb |
411 | #define ioread16(X) readw(X) | 411 | #define ioread16 readw |
412 | #define ioread16be(X) __raw_readw(X) | 412 | #define ioread16be __raw_readw |
413 | #define ioread32(X) readl(X) | 413 | #define ioread32 readl |
414 | #define ioread32be(X) __raw_readl(X) | 414 | #define ioread32be __raw_readl |
415 | #define iowrite8(val,X) writeb(val,X) | 415 | #define iowrite8 writeb |
416 | #define iowrite16(val,X) writew(val,X) | 416 | #define iowrite16 writew |
417 | #define iowrite16be(val,X) __raw_writew(val,X) | 417 | #define iowrite16be __raw_writew |
418 | #define iowrite32(val,X) writel(val,X) | 418 | #define iowrite32 writel |
419 | #define iowrite32be(val,X) __raw_writel(val,X) | 419 | #define iowrite32be __raw_writel |
420 | 420 | ||
421 | /* Create a virtual mapping cookie for an IO port range */ | 421 | /* Create a virtual mapping cookie for an IO port range */ |
422 | void __iomem *ioport_map(unsigned long port, unsigned int nr); | 422 | void __iomem *ioport_map(unsigned long port, unsigned int nr); |
diff --git a/arch/sparc/include/asm/starfire.h b/arch/sparc/include/asm/starfire.h index c100dc27a0a9..176fa0ad19f1 100644 --- a/arch/sparc/include/asm/starfire.h +++ b/arch/sparc/include/asm/starfire.h | |||
@@ -12,7 +12,6 @@ | |||
12 | extern int this_is_starfire; | 12 | extern int this_is_starfire; |
13 | 13 | ||
14 | void check_if_starfire(void); | 14 | void check_if_starfire(void); |
15 | int starfire_hard_smp_processor_id(void); | ||
16 | void starfire_hookup(int); | 15 | void starfire_hookup(int); |
17 | unsigned int starfire_translate(unsigned long imap, unsigned int upaid); | 16 | unsigned int starfire_translate(unsigned long imap, unsigned int upaid); |
18 | 17 | ||
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index 88d322b67fac..07cc49e541f4 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h | |||
@@ -98,11 +98,7 @@ void sun4v_do_mna(struct pt_regs *regs, | |||
98 | void do_privop(struct pt_regs *regs); | 98 | void do_privop(struct pt_regs *regs); |
99 | void do_privact(struct pt_regs *regs); | 99 | void do_privact(struct pt_regs *regs); |
100 | void do_cee(struct pt_regs *regs); | 100 | void do_cee(struct pt_regs *regs); |
101 | void do_cee_tl1(struct pt_regs *regs); | ||
102 | void do_dae_tl1(struct pt_regs *regs); | ||
103 | void do_iae_tl1(struct pt_regs *regs); | ||
104 | void do_div0_tl1(struct pt_regs *regs); | 101 | void do_div0_tl1(struct pt_regs *regs); |
105 | void do_fpdis_tl1(struct pt_regs *regs); | ||
106 | void do_fpieee_tl1(struct pt_regs *regs); | 102 | void do_fpieee_tl1(struct pt_regs *regs); |
107 | void do_fpother_tl1(struct pt_regs *regs); | 103 | void do_fpother_tl1(struct pt_regs *regs); |
108 | void do_ill_tl1(struct pt_regs *regs); | 104 | void do_ill_tl1(struct pt_regs *regs); |
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 5c55145bfbf0..662500fa555f 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c | |||
@@ -48,6 +48,7 @@ static struct api_info api_table[] = { | |||
48 | { .group = HV_GRP_VT_CPU, }, | 48 | { .group = HV_GRP_VT_CPU, }, |
49 | { .group = HV_GRP_T5_CPU, }, | 49 | { .group = HV_GRP_T5_CPU, }, |
50 | { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, | 50 | { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, |
51 | { .group = HV_GRP_M7_PERF, }, | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | static DEFINE_SPINLOCK(hvapi_lock); | 54 | static DEFINE_SPINLOCK(hvapi_lock); |
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index caedf8320416..afbaba52d2f1 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S | |||
@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg) | |||
837 | retl | 837 | retl |
838 | nop | 838 | nop |
839 | ENDPROC(sun4v_t5_set_perfreg) | 839 | ENDPROC(sun4v_t5_set_perfreg) |
840 | |||
841 | ENTRY(sun4v_m7_get_perfreg) | ||
842 | mov %o1, %o4 | ||
843 | mov HV_FAST_M7_GET_PERFREG, %o5 | ||
844 | ta HV_FAST_TRAP | ||
845 | stx %o1, [%o4] | ||
846 | retl | ||
847 | nop | ||
848 | ENDPROC(sun4v_m7_get_perfreg) | ||
849 | |||
850 | ENTRY(sun4v_m7_set_perfreg) | ||
851 | mov HV_FAST_M7_SET_PERFREG, %o5 | ||
852 | ta HV_FAST_TRAP | ||
853 | retl | ||
854 | nop | ||
855 | ENDPROC(sun4v_m7_set_perfreg) | ||
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 9ce5afe167ff..b36365f49478 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c | |||
@@ -639,10 +639,7 @@ static void pci_claim_bus_resources(struct pci_bus *bus) | |||
639 | (unsigned long long)r->end, | 639 | (unsigned long long)r->end, |
640 | (unsigned int)r->flags); | 640 | (unsigned int)r->flags); |
641 | 641 | ||
642 | if (pci_claim_resource(dev, i) == 0) | 642 | pci_claim_resource(dev, i); |
643 | continue; | ||
644 | |||
645 | pci_claim_bridge_resource(dev, i); | ||
646 | } | 643 | } |
647 | } | 644 | } |
648 | 645 | ||
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 7e967c8018c8..eb978c77c76a 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c | |||
@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = { | |||
217 | .pcr_nmi_disable = PCR_N4_PICNPT, | 217 | .pcr_nmi_disable = PCR_N4_PICNPT, |
218 | }; | 218 | }; |
219 | 219 | ||
220 | static u64 m7_pcr_read(unsigned long reg_num) | ||
221 | { | ||
222 | unsigned long val; | ||
223 | |||
224 | (void) sun4v_m7_get_perfreg(reg_num, &val); | ||
225 | |||
226 | return val; | ||
227 | } | ||
228 | |||
229 | static void m7_pcr_write(unsigned long reg_num, u64 val) | ||
230 | { | ||
231 | (void) sun4v_m7_set_perfreg(reg_num, val); | ||
232 | } | ||
233 | |||
234 | static const struct pcr_ops m7_pcr_ops = { | ||
235 | .read_pcr = m7_pcr_read, | ||
236 | .write_pcr = m7_pcr_write, | ||
237 | .read_pic = n4_pic_read, | ||
238 | .write_pic = n4_pic_write, | ||
239 | .nmi_picl_value = n4_picl_value, | ||
240 | .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | | ||
241 | PCR_N4_UTRACE | PCR_N4_TOE | | ||
242 | (26 << PCR_N4_SL_SHIFT)), | ||
243 | .pcr_nmi_disable = PCR_N4_PICNPT, | ||
244 | }; | ||
220 | 245 | ||
221 | static unsigned long perf_hsvc_group; | 246 | static unsigned long perf_hsvc_group; |
222 | static unsigned long perf_hsvc_major; | 247 | static unsigned long perf_hsvc_major; |
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void) | |||
248 | perf_hsvc_group = HV_GRP_T5_CPU; | 273 | perf_hsvc_group = HV_GRP_T5_CPU; |
249 | break; | 274 | break; |
250 | 275 | ||
276 | case SUN4V_CHIP_SPARC_M7: | ||
277 | perf_hsvc_group = HV_GRP_M7_PERF; | ||
278 | break; | ||
279 | |||
251 | default: | 280 | default: |
252 | return -ENODEV; | 281 | return -ENODEV; |
253 | } | 282 | } |
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void) | |||
293 | pcr_ops = &n5_pcr_ops; | 322 | pcr_ops = &n5_pcr_ops; |
294 | break; | 323 | break; |
295 | 324 | ||
325 | case SUN4V_CHIP_SPARC_M7: | ||
326 | pcr_ops = &m7_pcr_ops; | ||
327 | break; | ||
328 | |||
296 | default: | 329 | default: |
297 | ret = -ENODEV; | 330 | ret = -ENODEV; |
298 | break; | 331 | break; |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 46a5e4508752..86eebfa3b158 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = { | |||
792 | .num_pic_regs = 4, | 792 | .num_pic_regs = 4, |
793 | }; | 793 | }; |
794 | 794 | ||
795 | static void sparc_m7_write_pmc(int idx, u64 val) | ||
796 | { | ||
797 | u64 pcr; | ||
798 | |||
799 | pcr = pcr_ops->read_pcr(idx); | ||
800 | /* ensure ov and ntc are reset */ | ||
801 | pcr &= ~(PCR_N4_OV | PCR_N4_NTC); | ||
802 | |||
803 | pcr_ops->write_pic(idx, val & 0xffffffff); | ||
804 | |||
805 | pcr_ops->write_pcr(idx, pcr); | ||
806 | } | ||
807 | |||
808 | static const struct sparc_pmu sparc_m7_pmu = { | ||
809 | .event_map = niagara4_event_map, | ||
810 | .cache_map = &niagara4_cache_map, | ||
811 | .max_events = ARRAY_SIZE(niagara4_perfmon_event_map), | ||
812 | .read_pmc = sparc_vt_read_pmc, | ||
813 | .write_pmc = sparc_m7_write_pmc, | ||
814 | .upper_shift = 5, | ||
815 | .lower_shift = 5, | ||
816 | .event_mask = 0x7ff, | ||
817 | .user_bit = PCR_N4_UTRACE, | ||
818 | .priv_bit = PCR_N4_STRACE, | ||
819 | |||
820 | /* We explicitly don't support hypervisor tracing. */ | ||
821 | .hv_bit = 0, | ||
822 | |||
823 | .irq_bit = PCR_N4_TOE, | ||
824 | .upper_nop = 0, | ||
825 | .lower_nop = 0, | ||
826 | .flags = 0, | ||
827 | .max_hw_events = 4, | ||
828 | .num_pcrs = 4, | ||
829 | .num_pic_regs = 4, | ||
830 | }; | ||
795 | static const struct sparc_pmu *sparc_pmu __read_mostly; | 831 | static const struct sparc_pmu *sparc_pmu __read_mostly; |
796 | 832 | ||
797 | static u64 event_encoding(u64 event_id, int idx) | 833 | static u64 event_encoding(u64 event_id, int idx) |
@@ -960,6 +996,8 @@ out: | |||
960 | cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; | 996 | cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; |
961 | } | 997 | } |
962 | 998 | ||
999 | static void sparc_pmu_start(struct perf_event *event, int flags); | ||
1000 | |||
963 | /* On this PMU each PIC has its own PCR control register. */ | 1001 | /* On this PMU each PIC has its own PCR control register. */ |
964 | static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) | 1002 | static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) |
965 | { | 1003 | { |
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) | |||
972 | struct perf_event *cp = cpuc->event[i]; | 1010 | struct perf_event *cp = cpuc->event[i]; |
973 | struct hw_perf_event *hwc = &cp->hw; | 1011 | struct hw_perf_event *hwc = &cp->hw; |
974 | int idx = hwc->idx; | 1012 | int idx = hwc->idx; |
975 | u64 enc; | ||
976 | 1013 | ||
977 | if (cpuc->current_idx[i] != PIC_NO_INDEX) | 1014 | if (cpuc->current_idx[i] != PIC_NO_INDEX) |
978 | continue; | 1015 | continue; |
979 | 1016 | ||
980 | sparc_perf_event_set_period(cp, hwc, idx); | ||
981 | cpuc->current_idx[i] = idx; | 1017 | cpuc->current_idx[i] = idx; |
982 | 1018 | ||
983 | enc = perf_event_get_enc(cpuc->events[i]); | 1019 | sparc_pmu_start(cp, PERF_EF_RELOAD); |
984 | cpuc->pcr[idx] &= ~mask_for_index(idx); | ||
985 | if (hwc->state & PERF_HES_STOPPED) | ||
986 | cpuc->pcr[idx] |= nop_for_index(idx); | ||
987 | else | ||
988 | cpuc->pcr[idx] |= event_encoding(enc, idx); | ||
989 | } | 1020 | } |
990 | out: | 1021 | out: |
991 | for (i = 0; i < cpuc->n_events; i++) { | 1022 | for (i = 0; i < cpuc->n_events; i++) { |
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) | |||
1101 | int i; | 1132 | int i; |
1102 | 1133 | ||
1103 | local_irq_save(flags); | 1134 | local_irq_save(flags); |
1104 | perf_pmu_disable(event->pmu); | ||
1105 | 1135 | ||
1106 | for (i = 0; i < cpuc->n_events; i++) { | 1136 | for (i = 0; i < cpuc->n_events; i++) { |
1107 | if (event == cpuc->event[i]) { | 1137 | if (event == cpuc->event[i]) { |
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) | |||
1127 | } | 1157 | } |
1128 | } | 1158 | } |
1129 | 1159 | ||
1130 | perf_pmu_enable(event->pmu); | ||
1131 | local_irq_restore(flags); | 1160 | local_irq_restore(flags); |
1132 | } | 1161 | } |
1133 | 1162 | ||
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags) | |||
1361 | unsigned long flags; | 1390 | unsigned long flags; |
1362 | 1391 | ||
1363 | local_irq_save(flags); | 1392 | local_irq_save(flags); |
1364 | perf_pmu_disable(event->pmu); | ||
1365 | 1393 | ||
1366 | n0 = cpuc->n_events; | 1394 | n0 = cpuc->n_events; |
1367 | if (n0 >= sparc_pmu->max_hw_events) | 1395 | if (n0 >= sparc_pmu->max_hw_events) |
@@ -1394,7 +1422,6 @@ nocheck: | |||
1394 | 1422 | ||
1395 | ret = 0; | 1423 | ret = 0; |
1396 | out: | 1424 | out: |
1397 | perf_pmu_enable(event->pmu); | ||
1398 | local_irq_restore(flags); | 1425 | local_irq_restore(flags); |
1399 | return ret; | 1426 | return ret; |
1400 | } | 1427 | } |
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void) | |||
1667 | sparc_pmu = &niagara4_pmu; | 1694 | sparc_pmu = &niagara4_pmu; |
1668 | return true; | 1695 | return true; |
1669 | } | 1696 | } |
1697 | if (!strcmp(sparc_pmu_type, "sparc-m7")) { | ||
1698 | sparc_pmu = &sparc_m7_pmu; | ||
1699 | return true; | ||
1700 | } | ||
1670 | return false; | 1701 | return false; |
1671 | } | 1702 | } |
1672 | 1703 | ||
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 0be7bf978cb1..46a59643bb1c 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self) | |||
287 | printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", | 287 | printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", |
288 | gp->tpc, gp->o7, gp->i7, gp->rpc); | 288 | gp->tpc, gp->o7, gp->i7, gp->rpc); |
289 | } | 289 | } |
290 | |||
291 | touch_nmi_watchdog(); | ||
290 | } | 292 | } |
291 | 293 | ||
292 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); | 294 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); |
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void) | |||
362 | (cpu == this_cpu ? '*' : ' '), cpu, | 364 | (cpu == this_cpu ? '*' : ' '), cpu, |
363 | pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], | 365 | pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], |
364 | pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); | 366 | pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); |
367 | |||
368 | touch_nmi_watchdog(); | ||
365 | } | 369 | } |
366 | 370 | ||
367 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); | 371 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index da6f1a7fc4db..61139d9924ca 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -1406,11 +1406,32 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) | |||
1406 | scheduler_ipi(); | 1406 | scheduler_ipi(); |
1407 | } | 1407 | } |
1408 | 1408 | ||
1409 | /* This is a nop because we capture all other cpus | 1409 | static void stop_this_cpu(void *dummy) |
1410 | * anyways when making the PROM active. | 1410 | { |
1411 | */ | 1411 | prom_stopself(); |
1412 | } | ||
1413 | |||
1412 | void smp_send_stop(void) | 1414 | void smp_send_stop(void) |
1413 | { | 1415 | { |
1416 | int cpu; | ||
1417 | |||
1418 | if (tlb_type == hypervisor) { | ||
1419 | for_each_online_cpu(cpu) { | ||
1420 | if (cpu == smp_processor_id()) | ||
1421 | continue; | ||
1422 | #ifdef CONFIG_SUN_LDOMS | ||
1423 | if (ldom_domaining_enabled) { | ||
1424 | unsigned long hv_err; | ||
1425 | hv_err = sun4v_cpu_stop(cpu); | ||
1426 | if (hv_err) | ||
1427 | printk(KERN_ERR "sun4v_cpu_stop() " | ||
1428 | "failed err=%lu\n", hv_err); | ||
1429 | } else | ||
1430 | #endif | ||
1431 | prom_stopcpu_cpuid(cpu); | ||
1432 | } | ||
1433 | } else | ||
1434 | smp_call_function(stop_this_cpu, NULL, 0); | ||
1414 | } | 1435 | } |
1415 | 1436 | ||
1416 | /** | 1437 | /** |
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c index 82281a566bb8..167fdfd9c837 100644 --- a/arch/sparc/kernel/starfire.c +++ b/arch/sparc/kernel/starfire.c | |||
@@ -28,11 +28,6 @@ void check_if_starfire(void) | |||
28 | this_is_starfire = 1; | 28 | this_is_starfire = 1; |
29 | } | 29 | } |
30 | 30 | ||
31 | int starfire_hard_smp_processor_id(void) | ||
32 | { | ||
33 | return upa_readl(0x1fff40000d0UL); | ||
34 | } | ||
35 | |||
36 | /* | 31 | /* |
37 | * Each Starfire board has 32 registers which perform translation | 32 | * Each Starfire board has 32 registers which perform translation |
38 | * and delivery of traditional interrupt packets into the extended | 33 | * and delivery of traditional interrupt packets into the extended |
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index c85403d0496c..30e7ddb27a3a 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c | |||
@@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second | |||
333 | long err; | 333 | long err; |
334 | 334 | ||
335 | /* No need for backward compatibility. We can start fresh... */ | 335 | /* No need for backward compatibility. We can start fresh... */ |
336 | if (call <= SEMCTL) { | 336 | if (call <= SEMTIMEDOP) { |
337 | switch (call) { | 337 | switch (call) { |
338 | case SEMOP: | 338 | case SEMOP: |
339 | err = sys_semtimedop(first, ptr, | 339 | err = sys_semtimedop(first, ptr, |
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index a27651e866e7..0e699745d643 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c | |||
@@ -2427,6 +2427,8 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) | |||
2427 | } | 2427 | } |
2428 | user_instruction_dump ((unsigned int __user *) regs->tpc); | 2428 | user_instruction_dump ((unsigned int __user *) regs->tpc); |
2429 | } | 2429 | } |
2430 | if (panic_on_oops) | ||
2431 | panic("Fatal exception"); | ||
2430 | if (regs->tstate & TSTATE_PRIV) | 2432 | if (regs->tstate & TSTATE_PRIV) |
2431 | do_exit(SIGKILL); | 2433 | do_exit(SIGKILL); |
2432 | do_exit(SIGSEGV); | 2434 | do_exit(SIGSEGV); |
@@ -2564,27 +2566,6 @@ void do_cee(struct pt_regs *regs) | |||
2564 | die_if_kernel("TL0: Cache Error Exception", regs); | 2566 | die_if_kernel("TL0: Cache Error Exception", regs); |
2565 | } | 2567 | } |
2566 | 2568 | ||
2567 | void do_cee_tl1(struct pt_regs *regs) | ||
2568 | { | ||
2569 | exception_enter(); | ||
2570 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
2571 | die_if_kernel("TL1: Cache Error Exception", regs); | ||
2572 | } | ||
2573 | |||
2574 | void do_dae_tl1(struct pt_regs *regs) | ||
2575 | { | ||
2576 | exception_enter(); | ||
2577 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
2578 | die_if_kernel("TL1: Data Access Exception", regs); | ||
2579 | } | ||
2580 | |||
2581 | void do_iae_tl1(struct pt_regs *regs) | ||
2582 | { | ||
2583 | exception_enter(); | ||
2584 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
2585 | die_if_kernel("TL1: Instruction Access Exception", regs); | ||
2586 | } | ||
2587 | |||
2588 | void do_div0_tl1(struct pt_regs *regs) | 2569 | void do_div0_tl1(struct pt_regs *regs) |
2589 | { | 2570 | { |
2590 | exception_enter(); | 2571 | exception_enter(); |
@@ -2592,13 +2573,6 @@ void do_div0_tl1(struct pt_regs *regs) | |||
2592 | die_if_kernel("TL1: DIV0 Exception", regs); | 2573 | die_if_kernel("TL1: DIV0 Exception", regs); |
2593 | } | 2574 | } |
2594 | 2575 | ||
2595 | void do_fpdis_tl1(struct pt_regs *regs) | ||
2596 | { | ||
2597 | exception_enter(); | ||
2598 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
2599 | die_if_kernel("TL1: FPU Disabled", regs); | ||
2600 | } | ||
2601 | |||
2602 | void do_fpieee_tl1(struct pt_regs *regs) | 2576 | void do_fpieee_tl1(struct pt_regs *regs) |
2603 | { | 2577 | { |
2604 | exception_enter(); | 2578 | exception_enter(); |
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S index b7f6334e159f..857ad4f8905f 100644 --- a/arch/sparc/lib/memmove.S +++ b/arch/sparc/lib/memmove.S | |||
@@ -8,9 +8,11 @@ | |||
8 | 8 | ||
9 | .text | 9 | .text |
10 | ENTRY(memmove) /* o0=dst o1=src o2=len */ | 10 | ENTRY(memmove) /* o0=dst o1=src o2=len */ |
11 | mov %o0, %g1 | 11 | brz,pn %o2, 99f |
12 | mov %o0, %g1 | ||
13 | |||
12 | cmp %o0, %o1 | 14 | cmp %o0, %o1 |
13 | bleu,pt %xcc, memcpy | 15 | bleu,pt %xcc, 2f |
14 | add %o1, %o2, %g7 | 16 | add %o1, %o2, %g7 |
15 | cmp %g7, %o0 | 17 | cmp %g7, %o0 |
16 | bleu,pt %xcc, memcpy | 18 | bleu,pt %xcc, memcpy |
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */ | |||
24 | stb %g7, [%o0] | 26 | stb %g7, [%o0] |
25 | bne,pt %icc, 1b | 27 | bne,pt %icc, 1b |
26 | sub %o0, 1, %o0 | 28 | sub %o0, 1, %o0 |
27 | 29 | 99: | |
28 | retl | 30 | retl |
29 | mov %g1, %o0 | 31 | mov %g1, %o0 |
32 | |||
33 | /* We can't just call memcpy for these memmove cases. On some | ||
34 | * chips the memcpy uses cache initializing stores and when dst | ||
35 | * and src are close enough, those can clobber the source data | ||
36 | * before we've loaded it in. | ||
37 | */ | ||
38 | 2: or %o0, %o1, %g7 | ||
39 | or %o2, %g7, %g7 | ||
40 | andcc %g7, 0x7, %g0 | ||
41 | bne,pn %xcc, 4f | ||
42 | nop | ||
43 | |||
44 | 3: ldx [%o1], %g7 | ||
45 | add %o1, 8, %o1 | ||
46 | subcc %o2, 8, %o2 | ||
47 | add %o0, 8, %o0 | ||
48 | bne,pt %icc, 3b | ||
49 | stx %g7, [%o0 - 0x8] | ||
50 | ba,a,pt %xcc, 99b | ||
51 | |||
52 | 4: ldub [%o1], %g7 | ||
53 | add %o1, 1, %o1 | ||
54 | subcc %o2, 1, %o2 | ||
55 | add %o0, 1, %o0 | ||
56 | bne,pt %icc, 4b | ||
57 | stb %g7, [%o0 - 0x1] | ||
58 | ba,a,pt %xcc, 99b | ||
30 | ENDPROC(memmove) | 59 | ENDPROC(memmove) |
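The comment added in this hunk carries the reasoning: on some chips memcpy uses cache-initializing stores, so when destination and source are close enough an apparently safe copy through memcpy can clobber source data before it has been read, and memmove has to do the copy itself with plain loads and stores. A rough C illustration of the overlap rule, assuming nothing about the sparc assembly above (the function name is invented):

    #include <stddef.h>

    /* Illustrative only: copy with plain per-byte loads/stores, picking the
     * direction that reads each source byte before anything can overwrite it. */
    static void *overlap_safe_move(void *dst, const void *src, size_t n)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            if (d <= s) {
                    while (n--)          /* forward: writes trail the reads */
                            *d++ = *s++;
            } else {
                    while (n--)          /* backward: start from the tail */
                            d[n] = s[n];
            }
            return dst;
    }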
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3ea267c53320..4ca0d6ba5ec8 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -2820,7 +2820,7 @@ static int __init report_memory(void) | |||
2820 | 2820 | ||
2821 | return 0; | 2821 | return 0; |
2822 | } | 2822 | } |
2823 | device_initcall(report_memory); | 2823 | arch_initcall(report_memory); |
2824 | 2824 | ||
2825 | #ifdef CONFIG_SMP | 2825 | #ifdef CONFIG_SMP |
2826 | #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range | 2826 | #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c2fb8a87dccb..b7d31ca55187 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -499,6 +499,7 @@ config X86_INTEL_QUARK | |||
499 | depends on X86_IO_APIC | 499 | depends on X86_IO_APIC |
500 | select IOSF_MBI | 500 | select IOSF_MBI |
501 | select INTEL_IMR | 501 | select INTEL_IMR |
502 | select COMMON_CLK | ||
502 | ---help--- | 503 | ---help--- |
503 | Select to include support for Quark X1000 SoC. | 504 | Select to include support for Quark X1000 SoC. |
504 | Say Y here if you have a Quark based system such as the Arduino | 505 | Say Y here if you have a Quark based system such as the Arduino |
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index 7083c16cccba..bb1376381985 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c | |||
@@ -14,13 +14,6 @@ | |||
14 | static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" | 14 | static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" |
15 | LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; | 15 | LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; |
16 | 16 | ||
17 | struct kaslr_setup_data { | ||
18 | __u64 next; | ||
19 | __u32 type; | ||
20 | __u32 len; | ||
21 | __u8 data[1]; | ||
22 | } kaslr_setup_data; | ||
23 | |||
24 | #define I8254_PORT_CONTROL 0x43 | 17 | #define I8254_PORT_CONTROL 0x43 |
25 | #define I8254_PORT_COUNTER0 0x40 | 18 | #define I8254_PORT_COUNTER0 0x40 |
26 | #define I8254_CMD_READBACK 0xC0 | 19 | #define I8254_CMD_READBACK 0xC0 |
@@ -302,29 +295,7 @@ static unsigned long find_random_addr(unsigned long minimum, | |||
302 | return slots_fetch_random(); | 295 | return slots_fetch_random(); |
303 | } | 296 | } |
304 | 297 | ||
305 | static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled) | 298 | unsigned char *choose_kernel_location(unsigned char *input, |
306 | { | ||
307 | struct setup_data *data; | ||
308 | |||
309 | kaslr_setup_data.type = SETUP_KASLR; | ||
310 | kaslr_setup_data.len = 1; | ||
311 | kaslr_setup_data.next = 0; | ||
312 | kaslr_setup_data.data[0] = enabled; | ||
313 | |||
314 | data = (struct setup_data *)(unsigned long)params->hdr.setup_data; | ||
315 | |||
316 | while (data && data->next) | ||
317 | data = (struct setup_data *)(unsigned long)data->next; | ||
318 | |||
319 | if (data) | ||
320 | data->next = (unsigned long)&kaslr_setup_data; | ||
321 | else | ||
322 | params->hdr.setup_data = (unsigned long)&kaslr_setup_data; | ||
323 | |||
324 | } | ||
325 | |||
326 | unsigned char *choose_kernel_location(struct boot_params *params, | ||
327 | unsigned char *input, | ||
328 | unsigned long input_size, | 299 | unsigned long input_size, |
329 | unsigned char *output, | 300 | unsigned char *output, |
330 | unsigned long output_size) | 301 | unsigned long output_size) |
@@ -335,17 +306,14 @@ unsigned char *choose_kernel_location(struct boot_params *params, | |||
335 | #ifdef CONFIG_HIBERNATION | 306 | #ifdef CONFIG_HIBERNATION |
336 | if (!cmdline_find_option_bool("kaslr")) { | 307 | if (!cmdline_find_option_bool("kaslr")) { |
337 | debug_putstr("KASLR disabled by default...\n"); | 308 | debug_putstr("KASLR disabled by default...\n"); |
338 | add_kaslr_setup_data(params, 0); | ||
339 | goto out; | 309 | goto out; |
340 | } | 310 | } |
341 | #else | 311 | #else |
342 | if (cmdline_find_option_bool("nokaslr")) { | 312 | if (cmdline_find_option_bool("nokaslr")) { |
343 | debug_putstr("KASLR disabled by cmdline...\n"); | 313 | debug_putstr("KASLR disabled by cmdline...\n"); |
344 | add_kaslr_setup_data(params, 0); | ||
345 | goto out; | 314 | goto out; |
346 | } | 315 | } |
347 | #endif | 316 | #endif |
348 | add_kaslr_setup_data(params, 1); | ||
349 | 317 | ||
350 | /* Record the various known unsafe memory ranges. */ | 318 | /* Record the various known unsafe memory ranges. */ |
351 | mem_avoid_init((unsigned long)input, input_size, | 319 | mem_avoid_init((unsigned long)input, input_size, |
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 5903089c818f..a950864a64da 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c | |||
@@ -401,8 +401,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, | |||
401 | * the entire decompressed kernel plus relocation table, or the | 401 | * the entire decompressed kernel plus relocation table, or the |
402 | * entire decompressed kernel plus .bss and .brk sections. | 402 | * entire decompressed kernel plus .bss and .brk sections. |
403 | */ | 403 | */ |
404 | output = choose_kernel_location(real_mode, input_data, input_len, | 404 | output = choose_kernel_location(input_data, input_len, output, |
405 | output, | ||
406 | output_len > run_size ? output_len | 405 | output_len > run_size ? output_len |
407 | : run_size); | 406 | : run_size); |
408 | 407 | ||
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index ee3576b2666b..04477d68403f 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h | |||
@@ -57,8 +57,7 @@ int cmdline_find_option_bool(const char *option); | |||
57 | 57 | ||
58 | #if CONFIG_RANDOMIZE_BASE | 58 | #if CONFIG_RANDOMIZE_BASE |
59 | /* aslr.c */ | 59 | /* aslr.c */ |
60 | unsigned char *choose_kernel_location(struct boot_params *params, | 60 | unsigned char *choose_kernel_location(unsigned char *input, |
61 | unsigned char *input, | ||
62 | unsigned long input_size, | 61 | unsigned long input_size, |
63 | unsigned char *output, | 62 | unsigned char *output, |
64 | unsigned long output_size); | 63 | unsigned long output_size); |
@@ -66,8 +65,7 @@ unsigned char *choose_kernel_location(struct boot_params *params, | |||
66 | bool has_cpuflag(int flag); | 65 | bool has_cpuflag(int flag); |
67 | #else | 66 | #else |
68 | static inline | 67 | static inline |
69 | unsigned char *choose_kernel_location(struct boot_params *params, | 68 | unsigned char *choose_kernel_location(unsigned char *input, |
70 | unsigned char *input, | ||
71 | unsigned long input_size, | 69 | unsigned long input_size, |
72 | unsigned char *output, | 70 | unsigned char *output, |
73 | unsigned long output_size) | 71 | unsigned long output_size) |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 947c6bf52c33..54f60ab41c63 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -1155,7 +1155,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
1155 | src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); | 1155 | src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); |
1156 | if (!src) | 1156 | if (!src) |
1157 | return -ENOMEM; | 1157 | return -ENOMEM; |
1158 | assoc = (src + req->cryptlen + auth_tag_len); | 1158 | assoc = (src + req->cryptlen); |
1159 | scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); | 1159 | scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); |
1160 | scatterwalk_map_and_copy(assoc, req->assoc, 0, | 1160 | scatterwalk_map_and_copy(assoc, req->assoc, 0, |
1161 | req->assoclen, 0); | 1161 | req->assoclen, 0); |
@@ -1180,7 +1180,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
1180 | scatterwalk_done(&src_sg_walk, 0, 0); | 1180 | scatterwalk_done(&src_sg_walk, 0, 0); |
1181 | scatterwalk_done(&assoc_sg_walk, 0, 0); | 1181 | scatterwalk_done(&assoc_sg_walk, 0, 0); |
1182 | } else { | 1182 | } else { |
1183 | scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1); | 1183 | scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1); |
1184 | kfree(src); | 1184 | kfree(src); |
1185 | } | 1185 | } |
1186 | return retval; | 1186 | return retval; |
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 0dbc08282291..72ba21a8b5fc 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h | |||
@@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk) | |||
370 | preempt_disable(); | 370 | preempt_disable(); |
371 | tsk->thread.fpu_counter = 0; | 371 | tsk->thread.fpu_counter = 0; |
372 | __drop_fpu(tsk); | 372 | __drop_fpu(tsk); |
373 | clear_used_math(); | 373 | clear_stopped_child_used_math(tsk); |
374 | preempt_enable(); | 374 | preempt_enable(); |
375 | } | 375 | } |
376 | 376 | ||
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 95e11f79f123..f97fbe3abb67 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h | |||
@@ -51,8 +51,6 @@ extern int devmem_is_allowed(unsigned long pagenr); | |||
51 | extern unsigned long max_low_pfn_mapped; | 51 | extern unsigned long max_low_pfn_mapped; |
52 | extern unsigned long max_pfn_mapped; | 52 | extern unsigned long max_pfn_mapped; |
53 | 53 | ||
54 | extern bool kaslr_enabled; | ||
55 | |||
56 | static inline phys_addr_t get_max_mapped(void) | 54 | static inline phys_addr_t get_max_mapped(void) |
57 | { | 55 | { |
58 | return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; | 56 | return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; |
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index fa1195dae425..164e3f8d3c3d 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h | |||
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock; | |||
93 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); | 93 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); |
94 | extern void (*pcibios_disable_irq)(struct pci_dev *dev); | 94 | extern void (*pcibios_disable_irq)(struct pci_dev *dev); |
95 | 95 | ||
96 | extern bool mp_should_keep_irq(struct device *dev); | ||
97 | |||
96 | struct pci_raw_ops { | 98 | struct pci_raw_ops { |
97 | int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, | 99 | int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, |
98 | int reg, int len, u32 *val); | 100 | int reg, int len, u32 *val); |
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 12a26b979bf1..f2f9b39b274a 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -231,6 +231,6 @@ __copy_from_user_inatomic_nocache(void *dst, const void __user *src, | |||
231 | } | 231 | } |
232 | 232 | ||
233 | unsigned long | 233 | unsigned long |
234 | copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); | 234 | copy_user_handle_tail(char *to, char *from, unsigned len); |
235 | 235 | ||
236 | #endif /* _ASM_X86_UACCESS_64_H */ | 236 | #endif /* _ASM_X86_UACCESS_64_H */ |
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 5fa9770035dc..c9a6d68b8d62 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h | |||
@@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask) | |||
82 | if (boot_cpu_has(X86_FEATURE_XSAVES)) | 82 | if (boot_cpu_has(X86_FEATURE_XSAVES)) |
83 | asm volatile("1:"XSAVES"\n\t" | 83 | asm volatile("1:"XSAVES"\n\t" |
84 | "2:\n\t" | 84 | "2:\n\t" |
85 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | 85 | xstate_fault |
86 | : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
86 | : "memory"); | 87 | : "memory"); |
87 | else | 88 | else |
88 | asm volatile("1:"XSAVE"\n\t" | 89 | asm volatile("1:"XSAVE"\n\t" |
89 | "2:\n\t" | 90 | "2:\n\t" |
90 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | 91 | xstate_fault |
92 | : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
91 | : "memory"); | 93 | : "memory"); |
92 | |||
93 | asm volatile(xstate_fault | ||
94 | : "0" (0) | ||
95 | : "memory"); | ||
96 | |||
97 | return err; | 94 | return err; |
98 | } | 95 | } |
99 | 96 | ||
@@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask) | |||
112 | if (boot_cpu_has(X86_FEATURE_XSAVES)) | 109 | if (boot_cpu_has(X86_FEATURE_XSAVES)) |
113 | asm volatile("1:"XRSTORS"\n\t" | 110 | asm volatile("1:"XRSTORS"\n\t" |
114 | "2:\n\t" | 111 | "2:\n\t" |
115 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | 112 | xstate_fault |
113 | : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
116 | : "memory"); | 114 | : "memory"); |
117 | else | 115 | else |
118 | asm volatile("1:"XRSTOR"\n\t" | 116 | asm volatile("1:"XRSTOR"\n\t" |
119 | "2:\n\t" | 117 | "2:\n\t" |
120 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | 118 | xstate_fault |
119 | : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
121 | : "memory"); | 120 | : "memory"); |
122 | |||
123 | asm volatile(xstate_fault | ||
124 | : "0" (0) | ||
125 | : "memory"); | ||
126 | |||
127 | return err; | 121 | return err; |
128 | } | 122 | } |
129 | 123 | ||
@@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask) | |||
149 | */ | 143 | */ |
150 | alternative_input_2( | 144 | alternative_input_2( |
151 | "1:"XSAVE, | 145 | "1:"XSAVE, |
152 | "1:"XSAVEOPT, | 146 | XSAVEOPT, |
153 | X86_FEATURE_XSAVEOPT, | 147 | X86_FEATURE_XSAVEOPT, |
154 | "1:"XSAVES, | 148 | XSAVES, |
155 | X86_FEATURE_XSAVES, | 149 | X86_FEATURE_XSAVES, |
156 | [fx] "D" (fx), "a" (lmask), "d" (hmask) : | 150 | [fx] "D" (fx), "a" (lmask), "d" (hmask) : |
157 | "memory"); | 151 | "memory"); |
@@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask) | |||
178 | */ | 172 | */ |
179 | alternative_input( | 173 | alternative_input( |
180 | "1: " XRSTOR, | 174 | "1: " XRSTOR, |
181 | "1: " XRSTORS, | 175 | XRSTORS, |
182 | X86_FEATURE_XSAVES, | 176 | X86_FEATURE_XSAVES, |
183 | "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | 177 | "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) |
184 | : "memory"); | 178 | : "memory"); |
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 44e6dd7e36a2..225b0988043a 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h | |||
@@ -7,7 +7,6 @@ | |||
7 | #define SETUP_DTB 2 | 7 | #define SETUP_DTB 2 |
8 | #define SETUP_PCI 3 | 8 | #define SETUP_PCI 3 |
9 | #define SETUP_EFI 4 | 9 | #define SETUP_EFI 4 |
10 | #define SETUP_KASLR 5 | ||
11 | 10 | ||
12 | /* ram_size flags */ | 11 | /* ram_size flags */ |
13 | #define RAMDISK_IMAGE_START_MASK 0x07FF | 12 | #define RAMDISK_IMAGE_START_MASK 0x07FF |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3d525c6124f6..803b684676ff 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -1338,6 +1338,26 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) | |||
1338 | } | 1338 | } |
1339 | 1339 | ||
1340 | /* | 1340 | /* |
1341 | * ACPI offers an alternative platform interface model that removes | ||
1342 | * ACPI hardware requirements for platforms that do not implement | ||
1343 | * the PC Architecture. | ||
1344 | * | ||
1345 | * We initialize the Hardware-reduced ACPI model here: | ||
1346 | */ | ||
1347 | static void __init acpi_reduced_hw_init(void) | ||
1348 | { | ||
1349 | if (acpi_gbl_reduced_hardware) { | ||
1350 | /* | ||
1351 | * Override x86_init functions and bypass legacy pic | ||
1352 | * in Hardware-reduced ACPI mode | ||
1353 | */ | ||
1354 | x86_init.timers.timer_init = x86_init_noop; | ||
1355 | x86_init.irqs.pre_vector_init = x86_init_noop; | ||
1356 | legacy_pic = &null_legacy_pic; | ||
1357 | } | ||
1358 | } | ||
1359 | |||
1360 | /* | ||
1341 | * If your system is blacklisted here, but you find that acpi=force | 1361 | * If your system is blacklisted here, but you find that acpi=force |
1342 | * works for you, please contact linux-acpi@vger.kernel.org | 1362 | * works for you, please contact linux-acpi@vger.kernel.org |
1343 | */ | 1363 | */ |
@@ -1536,6 +1556,11 @@ int __init early_acpi_boot_init(void) | |||
1536 | */ | 1556 | */ |
1537 | early_acpi_process_madt(); | 1557 | early_acpi_process_madt(); |
1538 | 1558 | ||
1559 | /* | ||
1560 | * Hardware-reduced ACPI mode initialization: | ||
1561 | */ | ||
1562 | acpi_reduced_hw_init(); | ||
1563 | |||
1539 | return 0; | 1564 | return 0; |
1540 | } | 1565 | } |
1541 | 1566 | ||
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index c2fd21fed002..017149cded07 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c | |||
@@ -37,10 +37,12 @@ static const struct apic apic_numachip; | |||
37 | static unsigned int get_apic_id(unsigned long x) | 37 | static unsigned int get_apic_id(unsigned long x) |
38 | { | 38 | { |
39 | unsigned long value; | 39 | unsigned long value; |
40 | unsigned int id; | 40 | unsigned int id = (x >> 24) & 0xff; |
41 | 41 | ||
42 | rdmsrl(MSR_FAM10H_NODE_ID, value); | 42 | if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { |
43 | id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U); | 43 | rdmsrl(MSR_FAM10H_NODE_ID, value); |
44 | id |= (value << 2) & 0xff00; | ||
45 | } | ||
44 | 46 | ||
45 | return id; | 47 | return id; |
46 | } | 48 | } |
@@ -155,10 +157,18 @@ static int __init numachip_probe(void) | |||
155 | 157 | ||
156 | static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) | 158 | static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) |
157 | { | 159 | { |
158 | if (c->phys_proc_id != node) { | 160 | u64 val; |
159 | c->phys_proc_id = node; | 161 | u32 nodes = 1; |
160 | per_cpu(cpu_llc_id, smp_processor_id()) = node; | 162 | |
163 | this_cpu_write(cpu_llc_id, node); | ||
164 | |||
165 | /* Account for nodes per socket in multi-core-module processors */ | ||
166 | if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { | ||
167 | rdmsrl(MSR_FAM10H_NODE_ID, val); | ||
168 | nodes = ((val >> 3) & 7) + 1; | ||
161 | } | 169 | } |
170 | |||
171 | c->phys_proc_id = node / nodes; | ||
162 | } | 172 | } |
163 | 173 | ||
164 | static int __init numachip_system_init(void) | 174 | static int __init numachip_system_init(void) |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b5c8ff5e9dfc..2346c95c6ab1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -1396,6 +1396,12 @@ void cpu_init(void) | |||
1396 | 1396 | ||
1397 | wait_for_master_cpu(cpu); | 1397 | wait_for_master_cpu(cpu); |
1398 | 1398 | ||
1399 | /* | ||
1400 | * Initialize the CR4 shadow before doing anything that could | ||
1401 | * try to read it. | ||
1402 | */ | ||
1403 | cr4_init_shadow(); | ||
1404 | |||
1399 | show_ucode_info_early(); | 1405 | show_ucode_info_early(); |
1400 | 1406 | ||
1401 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | 1407 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 94d7dcb12145..50163fa9034f 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -565,8 +565,8 @@ static const struct _tlb_table intel_tlb_table[] = { | |||
565 | { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, | 565 | { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, |
566 | { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, | 566 | { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, |
567 | { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, | 567 | { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, |
568 | { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set ssociative" }, | 568 | { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" }, |
569 | { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set ssociative" }, | 569 | { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" }, |
570 | { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, | 570 | { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, |
571 | { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, | 571 | { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, |
572 | { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, | 572 | { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 498b6d967138..258990688a5e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = { | |||
212 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 212 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ |
213 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | 213 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ |
214 | /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | 214 | /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ |
215 | INTEL_EVENT_CONSTRAINT(0x08a3, 0x4), | 215 | INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), |
216 | /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ | 216 | /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ |
217 | INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4), | 217 | INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), |
218 | /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ | 218 | /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ |
219 | INTEL_EVENT_CONSTRAINT(0x04a3, 0xf), | 219 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), |
220 | EVENT_CONSTRAINT_END | 220 | EVENT_CONSTRAINT_END |
221 | }; | 221 | }; |
222 | 222 | ||
@@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event | |||
1649 | if (c) | 1649 | if (c) |
1650 | return c; | 1650 | return c; |
1651 | 1651 | ||
1652 | c = intel_pebs_constraints(event); | 1652 | c = intel_shared_regs_constraints(cpuc, event); |
1653 | if (c) | 1653 | if (c) |
1654 | return c; | 1654 | return c; |
1655 | 1655 | ||
1656 | c = intel_shared_regs_constraints(cpuc, event); | 1656 | c = intel_pebs_constraints(event); |
1657 | if (c) | 1657 | if (c) |
1658 | return c; | 1658 | return c; |
1659 | 1659 | ||
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 000d4199b03e..31e2d5bf3e38 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -982,6 +982,9 @@ ENTRY(xen_hypervisor_callback) | |||
982 | ENTRY(xen_do_upcall) | 982 | ENTRY(xen_do_upcall) |
983 | 1: mov %esp, %eax | 983 | 1: mov %esp, %eax |
984 | call xen_evtchn_do_upcall | 984 | call xen_evtchn_do_upcall |
985 | #ifndef CONFIG_PREEMPT | ||
986 | call xen_maybe_preempt_hcall | ||
987 | #endif | ||
985 | jmp ret_from_intr | 988 | jmp ret_from_intr |
986 | CFI_ENDPROC | 989 | CFI_ENDPROC |
987 | ENDPROC(xen_hypervisor_callback) | 990 | ENDPROC(xen_hypervisor_callback) |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index db13655c3a2a..f0095a76c182 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -269,11 +269,14 @@ ENTRY(ret_from_fork) | |||
269 | testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? | 269 | testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? |
270 | jz 1f | 270 | jz 1f |
271 | 271 | ||
272 | testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET | 272 | /* |
273 | jnz int_ret_from_sys_call | 273 | * By the time we get here, we have no idea whether our pt_regs, |
274 | 274 | * ti flags, and ti status came from the 64-bit SYSCALL fast path, | |
275 | RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET | 275 | * the slow path, or one of the ia32entry paths. |
276 | jmp ret_from_sys_call # go to the SYSRET fastpath | 276 | * Use int_ret_from_sys_call to return, since it can safely handle |
277 | * all of the above. | ||
278 | */ | ||
279 | jmp int_ret_from_sys_call | ||
277 | 280 | ||
278 | 1: | 281 | 1: |
279 | subq $REST_SKIP, %rsp # leave space for volatiles | 282 | subq $REST_SKIP, %rsp # leave space for volatiles |
@@ -361,12 +364,21 @@ system_call_fastpath: | |||
361 | * Has incomplete stack frame and undefined top of stack. | 364 | * Has incomplete stack frame and undefined top of stack. |
362 | */ | 365 | */ |
363 | ret_from_sys_call: | 366 | ret_from_sys_call: |
364 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | ||
365 | jnz int_ret_from_sys_call_fixup /* Go to the slow path */ | ||
366 | |||
367 | LOCKDEP_SYS_EXIT | 367 | LOCKDEP_SYS_EXIT |
368 | DISABLE_INTERRUPTS(CLBR_NONE) | 368 | DISABLE_INTERRUPTS(CLBR_NONE) |
369 | TRACE_IRQS_OFF | 369 | TRACE_IRQS_OFF |
370 | |||
371 | /* | ||
372 | * We must check ti flags with interrupts (or at least preemption) | ||
373 | * off because we must *never* return to userspace without | ||
374 | * processing exit work that is enqueued if we're preempted here. | ||
375 | * In particular, returning to userspace with any of the one-shot | ||
376 | * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is | ||
377 | * very bad. | ||
378 | */ | ||
379 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | ||
380 | jnz int_ret_from_sys_call_fixup /* Go to the slow path */ | ||
381 | |||
370 | CFI_REMEMBER_STATE | 382 | CFI_REMEMBER_STATE |
371 | /* | 383 | /* |
372 | * sysretq will re-enable interrupts: | 384 | * sysretq will re-enable interrupts: |
@@ -383,7 +395,7 @@ ret_from_sys_call: | |||
383 | 395 | ||
384 | int_ret_from_sys_call_fixup: | 396 | int_ret_from_sys_call_fixup: |
385 | FIXUP_TOP_OF_STACK %r11, -ARGOFFSET | 397 | FIXUP_TOP_OF_STACK %r11, -ARGOFFSET |
386 | jmp int_ret_from_sys_call | 398 | jmp int_ret_from_sys_call_irqs_off |
387 | 399 | ||
388 | /* Do syscall tracing */ | 400 | /* Do syscall tracing */ |
389 | tracesys: | 401 | tracesys: |
@@ -429,6 +441,7 @@ tracesys_phase2: | |||
429 | GLOBAL(int_ret_from_sys_call) | 441 | GLOBAL(int_ret_from_sys_call) |
430 | DISABLE_INTERRUPTS(CLBR_NONE) | 442 | DISABLE_INTERRUPTS(CLBR_NONE) |
431 | TRACE_IRQS_OFF | 443 | TRACE_IRQS_OFF |
444 | int_ret_from_sys_call_irqs_off: | ||
432 | movl $_TIF_ALLWORK_MASK,%edi | 445 | movl $_TIF_ALLWORK_MASK,%edi |
433 | /* edi: mask to check */ | 446 | /* edi: mask to check */ |
434 | GLOBAL(int_with_check) | 447 | GLOBAL(int_with_check) |
@@ -786,7 +799,21 @@ retint_swapgs: /* return to user-space */ | |||
786 | cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */ | 799 | cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */ |
787 | jne opportunistic_sysret_failed | 800 | jne opportunistic_sysret_failed |
788 | 801 | ||
789 | testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */ | 802 | /* |
803 | * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET, | ||
804 | * restoring TF results in a trap from userspace immediately after | ||
805 | * SYSRET. This would cause an infinite loop whenever #DB happens | ||
806 | * with register state that satisfies the opportunistic SYSRET | ||
807 | * conditions. For example, single-stepping this user code: | ||
808 | * | ||
809 | * movq $stuck_here,%rcx | ||
810 | * pushfq | ||
811 | * popq %r11 | ||
812 | * stuck_here: | ||
813 | * | ||
814 | * would never get past 'stuck_here'. | ||
815 | */ | ||
816 | testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 | ||
790 | jnz opportunistic_sysret_failed | 817 | jnz opportunistic_sysret_failed |
791 | 818 | ||
792 | /* nothing to check for RSP */ | 819 | /* nothing to check for RSP */ |
@@ -1208,6 +1235,9 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) | |||
1208 | popq %rsp | 1235 | popq %rsp |
1209 | CFI_DEF_CFA_REGISTER rsp | 1236 | CFI_DEF_CFA_REGISTER rsp |
1210 | decl PER_CPU_VAR(irq_count) | 1237 | decl PER_CPU_VAR(irq_count) |
1238 | #ifndef CONFIG_PREEMPT | ||
1239 | call xen_maybe_preempt_hcall | ||
1240 | #endif | ||
1211 | jmp error_exit | 1241 | jmp error_exit |
1212 | CFI_ENDPROC | 1242 | CFI_ENDPROC |
1213 | END(xen_do_hypervisor_callback) | 1243 | END(xen_do_hypervisor_callback) |
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 7ec1d5f8d283..25ecd56cefa8 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = | |||
72 | { "bx", 8, offsetof(struct pt_regs, bx) }, | 72 | { "bx", 8, offsetof(struct pt_regs, bx) }, |
73 | { "cx", 8, offsetof(struct pt_regs, cx) }, | 73 | { "cx", 8, offsetof(struct pt_regs, cx) }, |
74 | { "dx", 8, offsetof(struct pt_regs, dx) }, | 74 | { "dx", 8, offsetof(struct pt_regs, dx) }, |
75 | { "si", 8, offsetof(struct pt_regs, dx) }, | 75 | { "si", 8, offsetof(struct pt_regs, si) }, |
76 | { "di", 8, offsetof(struct pt_regs, di) }, | 76 | { "di", 8, offsetof(struct pt_regs, di) }, |
77 | { "bp", 8, offsetof(struct pt_regs, bp) }, | 77 | { "bp", 8, offsetof(struct pt_regs, bp) }, |
78 | { "sp", 8, offsetof(struct pt_regs, sp) }, | 78 | { "sp", 8, offsetof(struct pt_regs, sp) }, |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 6a1146ea4d4d..4e3d5a9621fe 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -223,27 +223,48 @@ static unsigned long | |||
223 | __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) | 223 | __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) |
224 | { | 224 | { |
225 | struct kprobe *kp; | 225 | struct kprobe *kp; |
226 | unsigned long faddr; | ||
226 | 227 | ||
227 | kp = get_kprobe((void *)addr); | 228 | kp = get_kprobe((void *)addr); |
228 | /* There is no probe, return original address */ | 229 | faddr = ftrace_location(addr); |
229 | if (!kp) | 230 | /* |
231 | * Addresses inside the ftrace location are refused by | ||
232 | * arch_check_ftrace_location(). Something went terribly wrong | ||
233 | * if such an address is checked here. | ||
234 | */ | ||
235 | if (WARN_ON(faddr && faddr != addr)) | ||
236 | return 0UL; | ||
237 | /* | ||
238 | * Use the current code if it is not modified by Kprobe | ||
239 | * and it cannot be modified by ftrace. | ||
240 | */ | ||
241 | if (!kp && !faddr) | ||
230 | return addr; | 242 | return addr; |
231 | 243 | ||
232 | /* | 244 | /* |
233 | * Basically, kp->ainsn.insn has an original instruction. | 245 | * Basically, kp->ainsn.insn has an original instruction. |
234 | * However, RIP-relative instruction can not do single-stepping | 246 | * However, RIP-relative instruction can not do single-stepping |
235 | * at different place, __copy_instruction() tweaks the displacement of | 247 | * at different place, __copy_instruction() tweaks the displacement of |
236 | * that instruction. In that case, we can't recover the instruction | 248 | * that instruction. In that case, we can't recover the instruction |
237 | * from the kp->ainsn.insn. | 249 | * from the kp->ainsn.insn. |
238 | * | 250 | * |
239 | * On the other hand, kp->opcode has a copy of the first byte of | 251 | * On the other hand, in case of a normal Kprobe, kp->opcode has a copy |
240 | * the probed instruction, which is overwritten by int3. And | 252 | * of the first byte of the probed instruction, which is overwritten |
241 | * the instruction at kp->addr is not modified by kprobes except | 253 | * by int3. And the instruction at kp->addr is not modified by kprobes |
242 | * for the first byte, we can recover the original instruction | 254 | * except for the first byte, we can recover the original instruction |
243 | * from it and kp->opcode. | 255 | * from it and kp->opcode. |
256 | * | ||
257 | * In case of Kprobes using ftrace, we do not have a copy of | ||
258 | * the original instruction. In fact, the ftrace location might | ||
259 | * be modified at anytime and even could be in an inconsistent state. | ||
260 | * Fortunately, we know that the original code is the ideal 5-byte | ||
261 | * long NOP. | ||
244 | */ | 262 | */ |
245 | memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | 263 | memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); |
246 | buf[0] = kp->opcode; | 264 | if (faddr) |
265 | memcpy(buf, ideal_nops[NOP_ATOMIC5], 5); | ||
266 | else | ||
267 | buf[0] = kp->opcode; | ||
247 | return (unsigned long)buf; | 268 | return (unsigned long)buf; |
248 | } | 269 | } |
249 | 270 | ||
@@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) | |||
251 | * Recover the probed instruction at addr for further analysis. | 272 | * Recover the probed instruction at addr for further analysis. |
252 | * Caller must lock kprobes by kprobe_mutex, or disable preemption | 273 | * Caller must lock kprobes by kprobe_mutex, or disable preemption |
253 | * for preventing to release referencing kprobes. | 274 | * for preventing to release referencing kprobes. |
275 | * Returns zero if the instruction cannot be recovered. | ||
254 | */ | 276 | */ |
255 | unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) | 277 | unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) |
256 | { | 278 | { |
@@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr) | |||
285 | * normally used, we just go through if there is no kprobe. | 307 | * normally used, we just go through if there is no kprobe. |
286 | */ | 308 | */ |
287 | __addr = recover_probed_instruction(buf, addr); | 309 | __addr = recover_probed_instruction(buf, addr); |
310 | if (!__addr) | ||
311 | return 0; | ||
288 | kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE); | 312 | kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE); |
289 | insn_get_length(&insn); | 313 | insn_get_length(&insn); |
290 | 314 | ||
@@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src) | |||
333 | unsigned long recovered_insn = | 357 | unsigned long recovered_insn = |
334 | recover_probed_instruction(buf, (unsigned long)src); | 358 | recover_probed_instruction(buf, (unsigned long)src); |
335 | 359 | ||
360 | if (!recovered_insn) | ||
361 | return 0; | ||
336 | kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); | 362 | kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); |
337 | insn_get_length(&insn); | 363 | insn_get_length(&insn); |
338 | /* Another subsystem puts a breakpoint, failed to recover */ | 364 | /* Another subsystem puts a breakpoint, failed to recover */ |
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 0dd8d089c315..7b3b9d15c47a 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c | |||
@@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr) | |||
259 | */ | 259 | */ |
260 | return 0; | 260 | return 0; |
261 | recovered_insn = recover_probed_instruction(buf, addr); | 261 | recovered_insn = recover_probed_instruction(buf, addr); |
262 | if (!recovered_insn) | ||
263 | return 0; | ||
262 | kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); | 264 | kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); |
263 | insn_get_length(&insn); | 265 | insn_get_length(&insn); |
264 | /* Another subsystem puts a breakpoint */ | 266 | /* Another subsystem puts a breakpoint */ |
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 9bbb9b35c144..d1ac80b72c72 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c | |||
@@ -47,13 +47,21 @@ do { \ | |||
47 | 47 | ||
48 | #ifdef CONFIG_RANDOMIZE_BASE | 48 | #ifdef CONFIG_RANDOMIZE_BASE |
49 | static unsigned long module_load_offset; | 49 | static unsigned long module_load_offset; |
50 | static int randomize_modules = 1; | ||
50 | 51 | ||
51 | /* Mutex protects the module_load_offset. */ | 52 | /* Mutex protects the module_load_offset. */ |
52 | static DEFINE_MUTEX(module_kaslr_mutex); | 53 | static DEFINE_MUTEX(module_kaslr_mutex); |
53 | 54 | ||
55 | static int __init parse_nokaslr(char *p) | ||
56 | { | ||
57 | randomize_modules = 0; | ||
58 | return 0; | ||
59 | } | ||
60 | early_param("nokaslr", parse_nokaslr); | ||
61 | |||
54 | static unsigned long int get_module_load_offset(void) | 62 | static unsigned long int get_module_load_offset(void) |
55 | { | 63 | { |
56 | if (kaslr_enabled) { | 64 | if (randomize_modules) { |
57 | mutex_lock(&module_kaslr_mutex); | 65 | mutex_lock(&module_kaslr_mutex); |
58 | /* | 66 | /* |
59 | * Calculate the module_load_offset the first time this | 67 | * Calculate the module_load_offset the first time this |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index bae6c609888e..86db4bcd7ce5 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
183 | }, | 183 | }, |
184 | }, | 184 | }, |
185 | 185 | ||
186 | /* ASRock */ | ||
187 | { /* Handle problems with rebooting on ASRock Q1900DC-ITX */ | ||
188 | .callback = set_pci_reboot, | ||
189 | .ident = "ASRock Q1900DC-ITX", | ||
190 | .matches = { | ||
191 | DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"), | ||
192 | DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"), | ||
193 | }, | ||
194 | }, | ||
195 | |||
186 | /* ASUS */ | 196 | /* ASUS */ |
187 | { /* Handle problems with rebooting on ASUS P4S800 */ | 197 | { /* Handle problems with rebooting on ASUS P4S800 */ |
188 | .callback = set_bios_reboot, | 198 | .callback = set_bios_reboot, |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 98dc9317286e..0a2421cca01f 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -122,8 +122,6 @@ | |||
122 | unsigned long max_low_pfn_mapped; | 122 | unsigned long max_low_pfn_mapped; |
123 | unsigned long max_pfn_mapped; | 123 | unsigned long max_pfn_mapped; |
124 | 124 | ||
125 | bool __read_mostly kaslr_enabled = false; | ||
126 | |||
127 | #ifdef CONFIG_DMI | 125 | #ifdef CONFIG_DMI |
128 | RESERVE_BRK(dmi_alloc, 65536); | 126 | RESERVE_BRK(dmi_alloc, 65536); |
129 | #endif | 127 | #endif |
@@ -427,11 +425,6 @@ static void __init reserve_initrd(void) | |||
427 | } | 425 | } |
428 | #endif /* CONFIG_BLK_DEV_INITRD */ | 426 | #endif /* CONFIG_BLK_DEV_INITRD */ |
429 | 427 | ||
430 | static void __init parse_kaslr_setup(u64 pa_data, u32 data_len) | ||
431 | { | ||
432 | kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data)); | ||
433 | } | ||
434 | |||
435 | static void __init parse_setup_data(void) | 428 | static void __init parse_setup_data(void) |
436 | { | 429 | { |
437 | struct setup_data *data; | 430 | struct setup_data *data; |
@@ -457,9 +450,6 @@ static void __init parse_setup_data(void) | |||
457 | case SETUP_EFI: | 450 | case SETUP_EFI: |
458 | parse_efi_setup(pa_data, data_len); | 451 | parse_efi_setup(pa_data, data_len); |
459 | break; | 452 | break; |
460 | case SETUP_KASLR: | ||
461 | parse_kaslr_setup(pa_data, data_len); | ||
462 | break; | ||
463 | default: | 453 | default: |
464 | break; | 454 | break; |
465 | } | 455 | } |
@@ -842,14 +832,10 @@ static void __init trim_low_memory_range(void) | |||
842 | static int | 832 | static int |
843 | dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) | 833 | dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) |
844 | { | 834 | { |
845 | if (kaslr_enabled) | 835 | pr_emerg("Kernel Offset: 0x%lx from 0x%lx " |
846 | pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n", | 836 | "(relocation range: 0x%lx-0x%lx)\n", |
847 | (unsigned long)&_text - __START_KERNEL, | 837 | (unsigned long)&_text - __START_KERNEL, __START_KERNEL, |
848 | __START_KERNEL, | 838 | __START_KERNEL_map, MODULES_VADDR-1); |
849 | __START_KERNEL_map, | ||
850 | MODULES_VADDR-1); | ||
851 | else | ||
852 | pr_emerg("Kernel Offset: disabled\n"); | ||
853 | 839 | ||
854 | return 0; | 840 | return 0; |
855 | } | 841 | } |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 9d2073e2ecc9..4ff5d162ff9f 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) | |||
384 | goto exit; | 384 | goto exit; |
385 | conditional_sti(regs); | 385 | conditional_sti(regs); |
386 | 386 | ||
387 | if (!user_mode(regs)) | 387 | if (!user_mode_vm(regs)) |
388 | die("bounds", regs, error_code); | 388 | die("bounds", regs, error_code); |
389 | 389 | ||
390 | if (!cpu_feature_enabled(X86_FEATURE_MPX)) { | 390 | if (!cpu_feature_enabled(X86_FEATURE_MPX)) { |
@@ -637,7 +637,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) | |||
637 | * then it's very likely the result of an icebp/int01 trap. | 637 | * then it's very likely the result of an icebp/int01 trap. |
638 | * User wants a sigtrap for that. | 638 | * User wants a sigtrap for that. |
639 | */ | 639 | */ |
640 | if (!dr6 && user_mode(regs)) | 640 | if (!dr6 && user_mode_vm(regs)) |
641 | user_icebp = 1; | 641 | user_icebp = 1; |
642 | 642 | ||
643 | /* Catch kmemcheck conditions first of all! */ | 643 | /* Catch kmemcheck conditions first of all! */ |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 34f66e58a896..cdc6cf903078 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -379,7 +379,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) | |||
379 | * thread's fpu state, reconstruct fxstate from the fsave | 379 | * thread's fpu state, reconstruct fxstate from the fsave |
380 | * header. Sanitize the copied state etc. | 380 | * header. Sanitize the copied state etc. |
381 | */ | 381 | */ |
382 | struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; | 382 | struct fpu *fpu = &tsk->thread.fpu; |
383 | struct user_i387_ia32_struct env; | 383 | struct user_i387_ia32_struct env; |
384 | int err = 0; | 384 | int err = 0; |
385 | 385 | ||
@@ -393,14 +393,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) | |||
393 | */ | 393 | */ |
394 | drop_fpu(tsk); | 394 | drop_fpu(tsk); |
395 | 395 | ||
396 | if (__copy_from_user(xsave, buf_fx, state_size) || | 396 | if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) || |
397 | __copy_from_user(&env, buf, sizeof(env))) { | 397 | __copy_from_user(&env, buf, sizeof(env))) { |
398 | fpu_finit(fpu); | ||
398 | err = -1; | 399 | err = -1; |
399 | } else { | 400 | } else { |
400 | sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); | 401 | sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); |
401 | set_used_math(); | ||
402 | } | 402 | } |
403 | 403 | ||
404 | set_used_math(); | ||
404 | if (use_eager_fpu()) { | 405 | if (use_eager_fpu()) { |
405 | preempt_disable(); | 406 | preempt_disable(); |
406 | math_state_restore(); | 407 | math_state_restore(); |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index e0b794a84c35..106c01557f2b 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -4950,7 +4950,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
4950 | goto done; | 4950 | goto done; |
4951 | } | 4951 | } |
4952 | } | 4952 | } |
4953 | ctxt->dst.orig_val = ctxt->dst.val; | 4953 | /* Copy full 64-bit value for CMPXCHG8B. */ |
4954 | ctxt->dst.orig_val64 = ctxt->dst.val64; | ||
4954 | 4955 | ||
4955 | special_insn: | 4956 | special_insn: |
4956 | 4957 | ||
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index cc31f7c06d3d..9541ba34126b 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
@@ -507,6 +507,7 @@ static int picdev_read(struct kvm_pic *s, | |||
507 | return -EOPNOTSUPP; | 507 | return -EOPNOTSUPP; |
508 | 508 | ||
509 | if (len != 1) { | 509 | if (len != 1) { |
510 | memset(val, 0, len); | ||
510 | pr_pic_unimpl("non byte read\n"); | 511 | pr_pic_unimpl("non byte read\n"); |
511 | return 0; | 512 | return 0; |
512 | } | 513 | } |
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index b1947e0f3e10..46d4449772bc 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c | |||
@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, | |||
422 | struct kvm_ioapic *ioapic, int vector, int trigger_mode) | 422 | struct kvm_ioapic *ioapic, int vector, int trigger_mode) |
423 | { | 423 | { |
424 | int i; | 424 | int i; |
425 | struct kvm_lapic *apic = vcpu->arch.apic; | ||
425 | 426 | ||
426 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { | 427 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { |
427 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; | 428 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; |
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, | |||
443 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); | 444 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); |
444 | spin_lock(&ioapic->lock); | 445 | spin_lock(&ioapic->lock); |
445 | 446 | ||
446 | if (trigger_mode != IOAPIC_LEVEL_TRIG) | 447 | if (trigger_mode != IOAPIC_LEVEL_TRIG || |
448 | kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) | ||
447 | continue; | 449 | continue; |
448 | 450 | ||
449 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); | 451 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index e55b5fc344eb..4ee827d7bf36 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) | |||
833 | 833 | ||
834 | static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) | 834 | static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) |
835 | { | 835 | { |
836 | if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && | 836 | if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { |
837 | kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { | ||
838 | int trigger_mode; | 837 | int trigger_mode; |
839 | if (apic_test_vector(vector, apic->regs + APIC_TMR)) | 838 | if (apic_test_vector(vector, apic->regs + APIC_TMR)) |
840 | trigger_mode = IOAPIC_LEVEL_TRIG; | 839 | trigger_mode = IOAPIC_LEVEL_TRIG; |
@@ -1572,7 +1571,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) | |||
1572 | apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); | 1571 | apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); |
1573 | } | 1572 | } |
1574 | apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm); | 1573 | apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm); |
1575 | apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm); | 1574 | apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0; |
1576 | apic->highest_isr_cache = -1; | 1575 | apic->highest_isr_cache = -1; |
1577 | update_divide_count(apic); | 1576 | update_divide_count(apic); |
1578 | atomic_set(&apic->lapic_timer.pending, 0); | 1577 | atomic_set(&apic->lapic_timer.pending, 0); |
@@ -1782,7 +1781,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, | |||
1782 | update_divide_count(apic); | 1781 | update_divide_count(apic); |
1783 | start_apic_timer(apic); | 1782 | start_apic_timer(apic); |
1784 | apic->irr_pending = true; | 1783 | apic->irr_pending = true; |
1785 | apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ? | 1784 | apic->isr_count = kvm_x86_ops->hwapic_isr_update ? |
1786 | 1 : count_vectors(apic->regs + APIC_ISR); | 1785 | 1 : count_vectors(apic->regs + APIC_ISR); |
1787 | apic->highest_isr_cache = -1; | 1786 | apic->highest_isr_cache = -1; |
1788 | if (kvm_x86_ops->hwapic_irr_update) | 1787 | if (kvm_x86_ops->hwapic_irr_update) |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d319e0c24758..cc618c882f90 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -3649,11 +3649,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) | |||
3649 | return; | 3649 | return; |
3650 | } | 3650 | } |
3651 | 3651 | ||
3652 | static void svm_hwapic_isr_update(struct kvm *kvm, int isr) | ||
3653 | { | ||
3654 | return; | ||
3655 | } | ||
3656 | |||
3657 | static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu) | 3652 | static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu) |
3658 | { | 3653 | { |
3659 | return; | 3654 | return; |
@@ -4403,7 +4398,6 @@ static struct kvm_x86_ops svm_x86_ops = { | |||
4403 | .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, | 4398 | .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, |
4404 | .vm_has_apicv = svm_vm_has_apicv, | 4399 | .vm_has_apicv = svm_vm_has_apicv, |
4405 | .load_eoi_exitmap = svm_load_eoi_exitmap, | 4400 | .load_eoi_exitmap = svm_load_eoi_exitmap, |
4406 | .hwapic_isr_update = svm_hwapic_isr_update, | ||
4407 | .sync_pir_to_irr = svm_sync_pir_to_irr, | 4401 | .sync_pir_to_irr = svm_sync_pir_to_irr, |
4408 | 4402 | ||
4409 | .set_tss_addr = svm_set_tss_addr, | 4403 | .set_tss_addr = svm_set_tss_addr, |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 14c1a18d206a..ae4f6d35d19c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -2168,7 +2168,10 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) | |||
2168 | { | 2168 | { |
2169 | unsigned long *msr_bitmap; | 2169 | unsigned long *msr_bitmap; |
2170 | 2170 | ||
2171 | if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) { | 2171 | if (is_guest_mode(vcpu)) |
2172 | msr_bitmap = vmx_msr_bitmap_nested; | ||
2173 | else if (irqchip_in_kernel(vcpu->kvm) && | ||
2174 | apic_x2apic_mode(vcpu->arch.apic)) { | ||
2172 | if (is_long_mode(vcpu)) | 2175 | if (is_long_mode(vcpu)) |
2173 | msr_bitmap = vmx_msr_bitmap_longmode_x2apic; | 2176 | msr_bitmap = vmx_msr_bitmap_longmode_x2apic; |
2174 | else | 2177 | else |
@@ -2476,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) | |||
2476 | if (enable_ept) { | 2479 | if (enable_ept) { |
2477 | /* nested EPT: emulate EPT also to L1 */ | 2480 | /* nested EPT: emulate EPT also to L1 */ |
2478 | vmx->nested.nested_vmx_secondary_ctls_high |= | 2481 | vmx->nested.nested_vmx_secondary_ctls_high |= |
2479 | SECONDARY_EXEC_ENABLE_EPT | | 2482 | SECONDARY_EXEC_ENABLE_EPT; |
2480 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | ||
2481 | vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | | 2483 | vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | |
2482 | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | | 2484 | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | |
2483 | VMX_EPT_INVEPT_BIT; | 2485 | VMX_EPT_INVEPT_BIT; |
@@ -2491,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) | |||
2491 | } else | 2493 | } else |
2492 | vmx->nested.nested_vmx_ept_caps = 0; | 2494 | vmx->nested.nested_vmx_ept_caps = 0; |
2493 | 2495 | ||
2496 | if (enable_unrestricted_guest) | ||
2497 | vmx->nested.nested_vmx_secondary_ctls_high |= | ||
2498 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | ||
2499 | |||
2494 | /* miscellaneous data */ | 2500 | /* miscellaneous data */ |
2495 | rdmsr(MSR_IA32_VMX_MISC, | 2501 | rdmsr(MSR_IA32_VMX_MISC, |
2496 | vmx->nested.nested_vmx_misc_low, | 2502 | vmx->nested.nested_vmx_misc_low, |
@@ -4367,6 +4373,18 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) | |||
4367 | return 0; | 4373 | return 0; |
4368 | } | 4374 | } |
4369 | 4375 | ||
4376 | static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu) | ||
4377 | { | ||
4378 | #ifdef CONFIG_SMP | ||
4379 | if (vcpu->mode == IN_GUEST_MODE) { | ||
4380 | apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), | ||
4381 | POSTED_INTR_VECTOR); | ||
4382 | return true; | ||
4383 | } | ||
4384 | #endif | ||
4385 | return false; | ||
4386 | } | ||
4387 | |||
4370 | static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, | 4388 | static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, |
4371 | int vector) | 4389 | int vector) |
4372 | { | 4390 | { |
@@ -4375,9 +4393,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, | |||
4375 | if (is_guest_mode(vcpu) && | 4393 | if (is_guest_mode(vcpu) && |
4376 | vector == vmx->nested.posted_intr_nv) { | 4394 | vector == vmx->nested.posted_intr_nv) { |
4377 | /* the PIR and ON have been set by L1. */ | 4395 | /* the PIR and ON have been set by L1. */ |
4378 | if (vcpu->mode == IN_GUEST_MODE) | 4396 | kvm_vcpu_trigger_posted_interrupt(vcpu); |
4379 | apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), | ||
4380 | POSTED_INTR_VECTOR); | ||
4381 | /* | 4397 | /* |
4382 | * If a posted intr is not recognized by hardware, | 4398 | * If a posted intr is not recognized by hardware, |
4383 | * we will accomplish it in the next vmentry. | 4399 | * we will accomplish it in the next vmentry. |
@@ -4409,12 +4425,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) | |||
4409 | 4425 | ||
4410 | r = pi_test_and_set_on(&vmx->pi_desc); | 4426 | r = pi_test_and_set_on(&vmx->pi_desc); |
4411 | kvm_make_request(KVM_REQ_EVENT, vcpu); | 4427 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
4412 | #ifdef CONFIG_SMP | 4428 | if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu)) |
4413 | if (!r && (vcpu->mode == IN_GUEST_MODE)) | ||
4414 | apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), | ||
4415 | POSTED_INTR_VECTOR); | ||
4416 | else | ||
4417 | #endif | ||
4418 | kvm_vcpu_kick(vcpu); | 4429 | kvm_vcpu_kick(vcpu); |
4419 | } | 4430 | } |
4420 | 4431 | ||
@@ -9213,9 +9224,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
9213 | } | 9224 | } |
9214 | 9225 | ||
9215 | if (cpu_has_vmx_msr_bitmap() && | 9226 | if (cpu_has_vmx_msr_bitmap() && |
9216 | exec_control & CPU_BASED_USE_MSR_BITMAPS && | 9227 | exec_control & CPU_BASED_USE_MSR_BITMAPS) { |
9217 | nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) { | 9228 | nested_vmx_merge_msr_bitmap(vcpu, vmcs12); |
9218 | vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested)); | 9229 | /* MSR_BITMAP will be set by following vmx_set_efer. */ |
9219 | } else | 9230 | } else |
9220 | exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; | 9231 | exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; |
9221 | 9232 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bd7a70be41b3..32bf19ef3115 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -2744,7 +2744,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
2744 | case KVM_CAP_USER_NMI: | 2744 | case KVM_CAP_USER_NMI: |
2745 | case KVM_CAP_REINJECT_CONTROL: | 2745 | case KVM_CAP_REINJECT_CONTROL: |
2746 | case KVM_CAP_IRQ_INJECT_STATUS: | 2746 | case KVM_CAP_IRQ_INJECT_STATUS: |
2747 | case KVM_CAP_IRQFD: | ||
2748 | case KVM_CAP_IOEVENTFD: | 2747 | case KVM_CAP_IOEVENTFD: |
2749 | case KVM_CAP_IOEVENTFD_NO_LENGTH: | 2748 | case KVM_CAP_IOEVENTFD_NO_LENGTH: |
2750 | case KVM_CAP_PIT2: | 2749 | case KVM_CAP_PIT2: |
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig index 4a0890f815c4..08f41caada45 100644 --- a/arch/x86/lguest/Kconfig +++ b/arch/x86/lguest/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config LGUEST_GUEST | 1 | config LGUEST_GUEST |
2 | bool "Lguest guest support" | 2 | bool "Lguest guest support" |
3 | depends on X86_32 && PARAVIRT | 3 | depends on X86_32 && PARAVIRT && PCI |
4 | select TTY | 4 | select TTY |
5 | select VIRTUALIZATION | 5 | select VIRTUALIZATION |
6 | select VIRTIO | 6 | select VIRTIO |
@@ -8,7 +8,7 @@ config LGUEST_GUEST | |||
8 | help | 8 | help |
9 | Lguest is a tiny in-kernel hypervisor. Selecting this will | 9 | Lguest is a tiny in-kernel hypervisor. Selecting this will |
10 | allow your kernel to boot under lguest. This option will increase | 10 | allow your kernel to boot under lguest. This option will increase |
11 | your kernel size by about 6k. If in doubt, say N. | 11 | your kernel size by about 10k. If in doubt, say N. |
12 | 12 | ||
13 | If you say Y here, make sure you say Y (or M) to the virtio block | 13 | If you say Y here, make sure you say Y (or M) to the virtio block |
14 | and net drivers which lguest needs. | 14 | and net drivers which lguest needs. |
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index c905e89e19fe..1f33b3d1fd68 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c | |||
@@ -69,21 +69,20 @@ EXPORT_SYMBOL(copy_in_user); | |||
69 | * it is not necessary to optimize tail handling. | 69 | * it is not necessary to optimize tail handling. |
70 | */ | 70 | */ |
71 | __visible unsigned long | 71 | __visible unsigned long |
72 | copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) | 72 | copy_user_handle_tail(char *to, char *from, unsigned len) |
73 | { | 73 | { |
74 | char c; | ||
75 | unsigned zero_len; | ||
76 | |||
77 | for (; len; --len, to++) { | 74 | for (; len; --len, to++) { |
75 | char c; | ||
76 | |||
78 | if (__get_user_nocheck(c, from++, sizeof(char))) | 77 | if (__get_user_nocheck(c, from++, sizeof(char))) |
79 | break; | 78 | break; |
80 | if (__put_user_nocheck(c, to, sizeof(char))) | 79 | if (__put_user_nocheck(c, to, sizeof(char))) |
81 | break; | 80 | break; |
82 | } | 81 | } |
83 | |||
84 | for (c = 0, zero_len = len; zerorest && zero_len; --zero_len) | ||
85 | if (__put_user_nocheck(c, to++, sizeof(char))) | ||
86 | break; | ||
87 | clac(); | 82 | clac(); |
83 | |||
84 | /* If the destination is a kernel buffer, we always clear the end */ | ||
85 | if ((unsigned long)to >= TASK_SIZE_MAX) | ||
86 | memset(to, 0, len); | ||
88 | return len; | 87 | return len; |
89 | } | 88 | } |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 6ac273832f28..e4695985f9de 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -331,7 +331,7 @@ static void probe_pci_root_info(struct pci_root_info *info, | |||
331 | struct list_head *list) | 331 | struct list_head *list) |
332 | { | 332 | { |
333 | int ret; | 333 | int ret; |
334 | struct resource_entry *entry; | 334 | struct resource_entry *entry, *tmp; |
335 | 335 | ||
336 | sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum); | 336 | sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum); |
337 | info->bridge = device; | 337 | info->bridge = device; |
@@ -345,8 +345,13 @@ static void probe_pci_root_info(struct pci_root_info *info, | |||
345 | dev_dbg(&device->dev, | 345 | dev_dbg(&device->dev, |
346 | "no IO and memory resources present in _CRS\n"); | 346 | "no IO and memory resources present in _CRS\n"); |
347 | else | 347 | else |
348 | resource_list_for_each_entry(entry, list) | 348 | resource_list_for_each_entry_safe(entry, tmp, list) { |
349 | entry->res->name = info->name; | 349 | if ((entry->res->flags & IORESOURCE_WINDOW) == 0 || |
350 | (entry->res->flags & IORESOURCE_DISABLED)) | ||
351 | resource_list_destroy_entry(entry); | ||
352 | else | ||
353 | entry->res->name = info->name; | ||
354 | } | ||
350 | } | 355 | } |
351 | 356 | ||
352 | struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) | 357 | struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) |
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 3d2612b68694..2fb384724ebb 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -513,31 +513,6 @@ void __init pcibios_set_cache_line_size(void) | |||
513 | } | 513 | } |
514 | } | 514 | } |
515 | 515 | ||
516 | /* | ||
517 | * Some device drivers assume dev->irq won't change after calling | ||
518 | * pci_disable_device(). So delay releasing of IRQ resource to driver | ||
519 | * unbinding time. Otherwise it will break PM subsystem and drivers | ||
520 | * like xen-pciback etc. | ||
521 | */ | ||
522 | static int pci_irq_notifier(struct notifier_block *nb, unsigned long action, | ||
523 | void *data) | ||
524 | { | ||
525 | struct pci_dev *dev = to_pci_dev(data); | ||
526 | |||
527 | if (action != BUS_NOTIFY_UNBOUND_DRIVER) | ||
528 | return NOTIFY_DONE; | ||
529 | |||
530 | if (pcibios_disable_irq) | ||
531 | pcibios_disable_irq(dev); | ||
532 | |||
533 | return NOTIFY_OK; | ||
534 | } | ||
535 | |||
536 | static struct notifier_block pci_irq_nb = { | ||
537 | .notifier_call = pci_irq_notifier, | ||
538 | .priority = INT_MIN, | ||
539 | }; | ||
540 | |||
541 | int __init pcibios_init(void) | 516 | int __init pcibios_init(void) |
542 | { | 517 | { |
543 | if (!raw_pci_ops) { | 518 | if (!raw_pci_ops) { |
@@ -550,9 +525,6 @@ int __init pcibios_init(void) | |||
550 | 525 | ||
551 | if (pci_bf_sort >= pci_force_bf) | 526 | if (pci_bf_sort >= pci_force_bf) |
552 | pci_sort_breadthfirst(); | 527 | pci_sort_breadthfirst(); |
553 | |||
554 | bus_register_notifier(&pci_bus_type, &pci_irq_nb); | ||
555 | |||
556 | return 0; | 528 | return 0; |
557 | } | 529 | } |
558 | 530 | ||
@@ -711,6 +683,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) | |||
711 | return 0; | 683 | return 0; |
712 | } | 684 | } |
713 | 685 | ||
686 | void pcibios_disable_device (struct pci_dev *dev) | ||
687 | { | ||
688 | if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) | ||
689 | pcibios_disable_irq(dev); | ||
690 | } | ||
691 | |||
714 | int pci_ext_cfg_avail(void) | 692 | int pci_ext_cfg_avail(void) |
715 | { | 693 | { |
716 | if (raw_pci_ext_ops) | 694 | if (raw_pci_ext_ops) |
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index efb849323c74..852aa4c92da0 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c | |||
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) | |||
234 | 234 | ||
235 | static void intel_mid_pci_irq_disable(struct pci_dev *dev) | 235 | static void intel_mid_pci_irq_disable(struct pci_dev *dev) |
236 | { | 236 | { |
237 | if (dev->irq_managed && dev->irq > 0) { | 237 | if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed && |
238 | dev->irq > 0) { | ||
238 | mp_unmap_irq(dev->irq); | 239 | mp_unmap_irq(dev->irq); |
239 | dev->irq_managed = 0; | 240 | dev->irq_managed = 0; |
240 | dev->irq = 0; | ||
241 | } | 241 | } |
242 | } | 242 | } |
243 | 243 | ||
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index e71b3dbd87b8..5dc6ca5e1741 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -1256,9 +1256,22 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1256 | return 0; | 1256 | return 0; |
1257 | } | 1257 | } |
1258 | 1258 | ||
1259 | bool mp_should_keep_irq(struct device *dev) | ||
1260 | { | ||
1261 | if (dev->power.is_prepared) | ||
1262 | return true; | ||
1263 | #ifdef CONFIG_PM | ||
1264 | if (dev->power.runtime_status == RPM_SUSPENDING) | ||
1265 | return true; | ||
1266 | #endif | ||
1267 | |||
1268 | return false; | ||
1269 | } | ||
1270 | |||
1259 | static void pirq_disable_irq(struct pci_dev *dev) | 1271 | static void pirq_disable_irq(struct pci_dev *dev) |
1260 | { | 1272 | { |
1261 | if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) { | 1273 | if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && |
1274 | dev->irq_managed && dev->irq) { | ||
1262 | mp_unmap_irq(dev->irq); | 1275 | mp_unmap_irq(dev->irq); |
1263 | dev->irq = 0; | 1276 | dev->irq = 0; |
1264 | dev->irq_managed = 0; | 1277 | dev->irq_managed = 0; |
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 1bbedc4b0f88..3005f0c89f2e 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c | |||
@@ -130,7 +130,7 @@ static void intel_mid_arch_setup(void) | |||
130 | intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip](); | 130 | intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip](); |
131 | else { | 131 | else { |
132 | intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL](); | 132 | intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL](); |
133 | pr_info("ARCH: Uknown SoC, assuming PENWELL!\n"); | 133 | pr_info("ARCH: Unknown SoC, assuming PENWELL!\n"); |
134 | } | 134 | } |
135 | 135 | ||
136 | out: | 136 | out: |
diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S index 31776d0efc8c..d7ec4e251c0a 100644 --- a/arch/x86/vdso/vdso32/sigreturn.S +++ b/arch/x86/vdso/vdso32/sigreturn.S | |||
@@ -17,6 +17,7 @@ | |||
17 | .text | 17 | .text |
18 | .globl __kernel_sigreturn | 18 | .globl __kernel_sigreturn |
19 | .type __kernel_sigreturn,@function | 19 | .type __kernel_sigreturn,@function |
20 | nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ | ||
20 | ALIGN | 21 | ALIGN |
21 | __kernel_sigreturn: | 22 | __kernel_sigreturn: |
22 | .LSTART_sigreturn: | 23 | .LSTART_sigreturn: |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index bd8b8459c3d0..5240f563076d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1070,6 +1070,23 @@ static inline void xen_write_cr8(unsigned long val) | |||
1070 | BUG_ON(val); | 1070 | BUG_ON(val); |
1071 | } | 1071 | } |
1072 | #endif | 1072 | #endif |
1073 | |||
1074 | static u64 xen_read_msr_safe(unsigned int msr, int *err) | ||
1075 | { | ||
1076 | u64 val; | ||
1077 | |||
1078 | val = native_read_msr_safe(msr, err); | ||
1079 | switch (msr) { | ||
1080 | case MSR_IA32_APICBASE: | ||
1081 | #ifdef CONFIG_X86_X2APIC | ||
1082 | if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) | ||
1083 | #endif | ||
1084 | val &= ~X2APIC_ENABLE; | ||
1085 | break; | ||
1086 | } | ||
1087 | return val; | ||
1088 | } | ||
1089 | |||
1073 | static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) | 1090 | static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) |
1074 | { | 1091 | { |
1075 | int ret; | 1092 | int ret; |
@@ -1240,7 +1257,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { | |||
1240 | 1257 | ||
1241 | .wbinvd = native_wbinvd, | 1258 | .wbinvd = native_wbinvd, |
1242 | 1259 | ||
1243 | .read_msr = native_read_msr_safe, | 1260 | .read_msr = xen_read_msr_safe, |
1244 | .write_msr = xen_write_msr_safe, | 1261 | .write_msr = xen_write_msr_safe, |
1245 | 1262 | ||
1246 | .read_tsc = native_read_tsc, | 1263 | .read_tsc = native_read_tsc, |
@@ -1741,6 +1758,7 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1741 | #ifdef CONFIG_X86_32 | 1758 | #ifdef CONFIG_X86_32 |
1742 | i386_start_kernel(); | 1759 | i386_start_kernel(); |
1743 | #else | 1760 | #else |
1761 | cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */ | ||
1744 | x86_64_start_reservations((char *)__pa_symbol(&boot_params)); | 1762 | x86_64_start_reservations((char *)__pa_symbol(&boot_params)); |
1745 | #endif | 1763 | #endif |
1746 | } | 1764 | } |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 740ae3026a14..b47124d4cd67 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size); | |||
91 | unsigned long xen_max_p2m_pfn __read_mostly; | 91 | unsigned long xen_max_p2m_pfn __read_mostly; |
92 | EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); | 92 | EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); |
93 | 93 | ||
94 | #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT | ||
95 | #define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT | ||
96 | #else | ||
97 | #define P2M_LIMIT 0 | ||
98 | #endif | ||
99 | |||
94 | static DEFINE_SPINLOCK(p2m_update_lock); | 100 | static DEFINE_SPINLOCK(p2m_update_lock); |
95 | 101 | ||
96 | static unsigned long *p2m_mid_missing_mfn; | 102 | static unsigned long *p2m_mid_missing_mfn; |
@@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m) | |||
385 | void __init xen_vmalloc_p2m_tree(void) | 391 | void __init xen_vmalloc_p2m_tree(void) |
386 | { | 392 | { |
387 | static struct vm_struct vm; | 393 | static struct vm_struct vm; |
394 | unsigned long p2m_limit; | ||
388 | 395 | ||
396 | p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; | ||
389 | vm.flags = VM_ALLOC; | 397 | vm.flags = VM_ALLOC; |
390 | vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn, | 398 | vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), |
391 | PMD_SIZE * PMDS_PER_MID_PAGE); | 399 | PMD_SIZE * PMDS_PER_MID_PAGE); |
392 | vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE); | 400 | vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE); |
393 | pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size); | 401 | pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size); |
@@ -563,7 +571,7 @@ static bool alloc_p2m(unsigned long pfn) | |||
563 | if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) | 571 | if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) |
564 | p2m_init(p2m); | 572 | p2m_init(p2m); |
565 | else | 573 | else |
566 | p2m_init_identity(p2m, pfn); | 574 | p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1)); |
567 | 575 | ||
568 | spin_lock_irqsave(&p2m_update_lock, flags); | 576 | spin_lock_irqsave(&p2m_update_lock, flags); |
569 | 577 | ||
diff --git a/block/blk-merge.c b/block/blk-merge.c index fc1ff3b1ea1f..fd3fee81c23c 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) | |||
592 | if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { | 592 | if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { |
593 | struct bio_vec *bprev; | 593 | struct bio_vec *bprev; |
594 | 594 | ||
595 | bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1]; | 595 | bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1]; |
596 | if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) | 596 | if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) |
597 | return false; | 597 | return false; |
598 | } | 598 | } |
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index d53a764b05ea..be3290cc0644 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c | |||
@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data, | |||
278 | /* | 278 | /* |
279 | * We're out of tags on this hardware queue, kick any | 279 | * We're out of tags on this hardware queue, kick any |
280 | * pending IO submits before going to sleep waiting for | 280 | * pending IO submits before going to sleep waiting for |
281 | * some to complete. | 281 | * some to complete. Note that hctx can be NULL here for |
282 | * reserved tag allocation. | ||
282 | */ | 283 | */ |
283 | blk_mq_run_hw_queue(hctx, false); | 284 | if (hctx) |
285 | blk_mq_run_hw_queue(hctx, false); | ||
284 | 286 | ||
285 | /* | 287 | /* |
286 | * Retry tag allocation after running the hardware queue, | 288 | * Retry tag allocation after running the hardware queue, |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 4f4bea21052e..33c428530193 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -1457,7 +1457,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, | |||
1457 | 1457 | ||
1458 | do { | 1458 | do { |
1459 | page = alloc_pages_node(set->numa_node, | 1459 | page = alloc_pages_node(set->numa_node, |
1460 | GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, | 1460 | GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, |
1461 | this_order); | 1461 | this_order); |
1462 | if (page) | 1462 | if (page) |
1463 | break; | 1463 | break; |
@@ -1479,8 +1479,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, | |||
1479 | left -= to_do * rq_size; | 1479 | left -= to_do * rq_size; |
1480 | for (j = 0; j < to_do; j++) { | 1480 | for (j = 0; j < to_do; j++) { |
1481 | tags->rqs[i] = p; | 1481 | tags->rqs[i] = p; |
1482 | tags->rqs[i]->atomic_flags = 0; | ||
1483 | tags->rqs[i]->cmd_flags = 0; | ||
1484 | if (set->ops->init_request) { | 1482 | if (set->ops->init_request) { |
1485 | if (set->ops->init_request(set->driver_data, | 1483 | if (set->ops->init_request(set->driver_data, |
1486 | tags->rqs[i], hctx_idx, i, | 1484 | tags->rqs[i], hctx_idx, i, |
@@ -1938,7 +1936,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) | |||
1938 | */ | 1936 | */ |
1939 | if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, | 1937 | if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, |
1940 | PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) | 1938 | PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) |
1941 | goto err_map; | 1939 | goto err_mq_usage; |
1942 | 1940 | ||
1943 | setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); | 1941 | setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); |
1944 | blk_queue_rq_timeout(q, 30000); | 1942 | blk_queue_rq_timeout(q, 30000); |
@@ -1981,7 +1979,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) | |||
1981 | blk_mq_init_cpu_queues(q, set->nr_hw_queues); | 1979 | blk_mq_init_cpu_queues(q, set->nr_hw_queues); |
1982 | 1980 | ||
1983 | if (blk_mq_init_hw_queues(q, set)) | 1981 | if (blk_mq_init_hw_queues(q, set)) |
1984 | goto err_hw; | 1982 | goto err_mq_usage; |
1985 | 1983 | ||
1986 | mutex_lock(&all_q_mutex); | 1984 | mutex_lock(&all_q_mutex); |
1987 | list_add_tail(&q->all_q_node, &all_q_list); | 1985 | list_add_tail(&q->all_q_node, &all_q_list); |
@@ -1993,7 +1991,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) | |||
1993 | 1991 | ||
1994 | return q; | 1992 | return q; |
1995 | 1993 | ||
1996 | err_hw: | 1994 | err_mq_usage: |
1997 | blk_cleanup_queue(q); | 1995 | blk_cleanup_queue(q); |
1998 | err_hctxs: | 1996 | err_hctxs: |
1999 | kfree(map); | 1997 | kfree(map); |
diff --git a/block/blk-settings.c b/block/blk-settings.c index 6ed2cbe5e8c9..12600bfffca9 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | |||
585 | b->physical_block_size); | 585 | b->physical_block_size); |
586 | 586 | ||
587 | t->io_min = max(t->io_min, b->io_min); | 587 | t->io_min = max(t->io_min, b->io_min); |
588 | t->io_opt = lcm(t->io_opt, b->io_opt); | 588 | t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); |
589 | 589 | ||
590 | t->cluster &= b->cluster; | 590 | t->cluster &= b->cluster; |
591 | t->discard_zeroes_data &= b->discard_zeroes_data; | 591 | t->discard_zeroes_data &= b->discard_zeroes_data; |
@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | |||
616 | b->raid_partial_stripes_expensive); | 616 | b->raid_partial_stripes_expensive); |
617 | 617 | ||
618 | /* Find lowest common alignment_offset */ | 618 | /* Find lowest common alignment_offset */ |
619 | t->alignment_offset = lcm(t->alignment_offset, alignment) | 619 | t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment) |
620 | % max(t->physical_block_size, t->io_min); | 620 | % max(t->physical_block_size, t->io_min); |
621 | 621 | ||
622 | /* Verify that new alignment_offset is on a logical block boundary */ | 622 | /* Verify that new alignment_offset is on a logical block boundary */ |
@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | |||
643 | b->max_discard_sectors); | 643 | b->max_discard_sectors); |
644 | t->discard_granularity = max(t->discard_granularity, | 644 | t->discard_granularity = max(t->discard_granularity, |
645 | b->discard_granularity); | 645 | b->discard_granularity); |
646 | t->discard_alignment = lcm(t->discard_alignment, alignment) % | 646 | t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) % |
647 | t->discard_granularity; | 647 | t->discard_granularity; |
648 | } | 648 | } |
649 | 649 | ||
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 657964e8ab7e..37fb19047603 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
@@ -65,6 +65,7 @@ struct lpss_private_data; | |||
65 | 65 | ||
66 | struct lpss_device_desc { | 66 | struct lpss_device_desc { |
67 | unsigned int flags; | 67 | unsigned int flags; |
68 | const char *clk_con_id; | ||
68 | unsigned int prv_offset; | 69 | unsigned int prv_offset; |
69 | size_t prv_size_override; | 70 | size_t prv_size_override; |
70 | void (*setup)(struct lpss_private_data *pdata); | 71 | void (*setup)(struct lpss_private_data *pdata); |
@@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = { | |||
140 | 141 | ||
141 | static struct lpss_device_desc lpt_uart_dev_desc = { | 142 | static struct lpss_device_desc lpt_uart_dev_desc = { |
142 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, | 143 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, |
144 | .clk_con_id = "baudclk", | ||
143 | .prv_offset = 0x800, | 145 | .prv_offset = 0x800, |
144 | .setup = lpss_uart_setup, | 146 | .setup = lpss_uart_setup, |
145 | }; | 147 | }; |
@@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = { | |||
156 | 158 | ||
157 | static struct lpss_device_desc byt_uart_dev_desc = { | 159 | static struct lpss_device_desc byt_uart_dev_desc = { |
158 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, | 160 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, |
161 | .clk_con_id = "baudclk", | ||
159 | .prv_offset = 0x800, | 162 | .prv_offset = 0x800, |
160 | .setup = lpss_uart_setup, | 163 | .setup = lpss_uart_setup, |
161 | }; | 164 | }; |
@@ -313,7 +316,7 @@ out: | |||
313 | return PTR_ERR(clk); | 316 | return PTR_ERR(clk); |
314 | 317 | ||
315 | pdata->clk = clk; | 318 | pdata->clk = clk; |
316 | clk_register_clkdev(clk, NULL, devname); | 319 | clk_register_clkdev(clk, dev_desc->clk_con_id, devname); |
317 | return 0; | 320 | return 0; |
318 | } | 321 | } |
319 | 322 | ||
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index e7f718d6918a..b1def411c0b8 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -485,6 +485,14 @@ void acpi_pci_irq_disable(struct pci_dev *dev) | |||
485 | if (!pin || !dev->irq_managed || dev->irq <= 0) | 485 | if (!pin || !dev->irq_managed || dev->irq <= 0) |
486 | return; | 486 | return; |
487 | 487 | ||
488 | /* Keep IOAPIC pin configuration when suspending */ | ||
489 | if (dev->dev.power.is_prepared) | ||
490 | return; | ||
491 | #ifdef CONFIG_PM | ||
492 | if (dev->dev.power.runtime_status == RPM_SUSPENDING) | ||
493 | return; | ||
494 | #endif | ||
495 | |||
488 | entry = acpi_pci_irq_lookup(dev, pin); | 496 | entry = acpi_pci_irq_lookup(dev, pin); |
489 | if (!entry) | 497 | if (!entry) |
490 | return; | 498 | return; |
@@ -505,6 +513,5 @@ void acpi_pci_irq_disable(struct pci_dev *dev) | |||
505 | if (gsi >= 0) { | 513 | if (gsi >= 0) { |
506 | acpi_unregister_gsi(gsi); | 514 | acpi_unregister_gsi(gsi); |
507 | dev->irq_managed = 0; | 515 | dev->irq_managed = 0; |
508 | dev->irq = 0; | ||
509 | } | 516 | } |
510 | } | 517 | } |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index c6bb9f1257c9..f98db0b50551 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -922,7 +922,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) | |||
922 | return -EINVAL; | 922 | return -EINVAL; |
923 | 923 | ||
924 | drv->safe_state_index = -1; | 924 | drv->safe_state_index = -1; |
925 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { | 925 | for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) { |
926 | drv->states[i].name[0] = '\0'; | 926 | drv->states[i].name[0] = '\0'; |
927 | drv->states[i].desc[0] = '\0'; | 927 | drv->states[i].desc[0] = '\0'; |
928 | } | 928 | } |
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index c723668e3e27..5589a6e2a023 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c | |||
@@ -42,8 +42,10 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io) | |||
42 | * CHECKME: len might be required to check versus a minimum | 42 | * CHECKME: len might be required to check versus a minimum |
43 | * length as well. 1 for io is fine, but for memory it does | 43 | * length as well. 1 for io is fine, but for memory it does |
44 | * not make any sense at all. | 44 | * not make any sense at all. |
45 | * Note: some BIOSes report incorrect length for ACPI address space | ||
46 | * descriptor, so remove check of 'reslen == len' to avoid regression. | ||
45 | */ | 47 | */ |
46 | if (len && reslen && reslen == len && start <= end) | 48 | if (len && reslen && start <= end) |
47 | return true; | 49 | return true; |
48 | 50 | ||
49 | pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n", | 51 | pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n", |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index debd30917010..26eb70c8f518 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -2110,7 +2110,8 @@ static int __init intel_opregion_present(void) | |||
2110 | 2110 | ||
2111 | int acpi_video_register(void) | 2111 | int acpi_video_register(void) |
2112 | { | 2112 | { |
2113 | int result = 0; | 2113 | int ret; |
2114 | |||
2114 | if (register_count) { | 2115 | if (register_count) { |
2115 | /* | 2116 | /* |
2116 | * if the function of acpi_video_register is already called, | 2117 | * if the function of acpi_video_register is already called, |
@@ -2122,9 +2123,9 @@ int acpi_video_register(void) | |||
2122 | mutex_init(&video_list_lock); | 2123 | mutex_init(&video_list_lock); |
2123 | INIT_LIST_HEAD(&video_bus_head); | 2124 | INIT_LIST_HEAD(&video_bus_head); |
2124 | 2125 | ||
2125 | result = acpi_bus_register_driver(&acpi_video_bus); | 2126 | ret = acpi_bus_register_driver(&acpi_video_bus); |
2126 | if (result < 0) | 2127 | if (ret) |
2127 | return -ENODEV; | 2128 | return ret; |
2128 | 2129 | ||
2129 | /* | 2130 | /* |
2130 | * When the acpi_video_bus is loaded successfully, increase | 2131 | * When the acpi_video_bus is loaded successfully, increase |
@@ -2176,6 +2177,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight); | |||
2176 | 2177 | ||
2177 | static int __init acpi_video_init(void) | 2178 | static int __init acpi_video_init(void) |
2178 | { | 2179 | { |
2180 | /* | ||
2181 | * Let the module load even if ACPI is disabled (e.g. due to | ||
2182 | * a broken BIOS) so that i915.ko can still be loaded on such | ||
2183 | * old systems without an AcpiOpRegion. | ||
2184 | * | ||
2185 | * acpi_video_register() will report -ENODEV later as well due | ||
2186 | * to acpi_disabled when i915.ko tries to register itself afterwards. | ||
2187 | */ | ||
2188 | if (acpi_disabled) | ||
2189 | return 0; | ||
2190 | |||
2179 | dmi_check_system(video_dmi_table); | 2191 | dmi_check_system(video_dmi_table); |
2180 | 2192 | ||
2181 | if (intel_opregion_present()) | 2193 | if (intel_opregion_present()) |
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 33b09b6568a4..6607f3c6ace1 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -551,7 +551,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, | |||
551 | { | 551 | { |
552 | void *page_addr; | 552 | void *page_addr; |
553 | unsigned long user_page_addr; | 553 | unsigned long user_page_addr; |
554 | struct vm_struct tmp_area; | ||
555 | struct page **page; | 554 | struct page **page; |
556 | struct mm_struct *mm; | 555 | struct mm_struct *mm; |
557 | 556 | ||
@@ -600,10 +599,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, | |||
600 | proc->pid, page_addr); | 599 | proc->pid, page_addr); |
601 | goto err_alloc_page_failed; | 600 | goto err_alloc_page_failed; |
602 | } | 601 | } |
603 | tmp_area.addr = page_addr; | 602 | ret = map_kernel_range_noflush((unsigned long)page_addr, |
604 | tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */; | 603 | PAGE_SIZE, PAGE_KERNEL, page); |
605 | ret = map_vm_area(&tmp_area, PAGE_KERNEL, page); | 604 | flush_cache_vmap((unsigned long)page_addr, |
606 | if (ret) { | 605 | (unsigned long)page_addr + PAGE_SIZE); |
606 | if (ret != 1) { | ||
607 | pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", | 607 | pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", |
608 | proc->pid, page_addr); | 608 | proc->pid, page_addr); |
609 | goto err_map_kernel_failed; | 609 | goto err_map_kernel_failed; |
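Editor's note: the binder hunk above stops building a throwaway struct vm_struct and maps the single page directly, adding the flush_cache_vmap() that map_vm_area() used to perform internally. The success test changes shape because map_kernel_range_noflush() appears to report the number of pages it mapped rather than 0 on success (treat that as an assumption about this kernel version), so mapping one page should return exactly 1. A hypothetical wrapper showing the convention:

	#include <linux/vmalloc.h>
	#include <linux/mm.h>
	#include <asm/cacheflush.h>

	/* hypothetical helper, not part of the patch */
	static int binder_map_one_page(unsigned long addr, struct page **page)
	{
		int mapped = map_kernel_range_noflush(addr, PAGE_SIZE,
						      PAGE_KERNEL, page);

		flush_cache_vmap(addr, addr + PAGE_SIZE);
		return mapped == 1 ? 0 : -ENOMEM;	/* anything other than one page is a failure */
	}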
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 4c35f0822d06..23dac3babfe3 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4204 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, | 4204 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, |
4205 | 4205 | ||
4206 | /* devices that don't properly handle queued TRIM commands */ | 4206 | /* devices that don't properly handle queued TRIM commands */ |
4207 | { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4207 | { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
4208 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
4209 | { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | ||
4210 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
4211 | { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | ||
4212 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
4213 | { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | ||
4214 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
4215 | { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | ||
4216 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
4217 | { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | ||
4208 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4218 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4209 | { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | ||
4210 | 4219 | ||
4211 | /* | 4220 | /* |
4212 | * As defined, the DRAT (Deterministic Read After Trim) and RZAT | 4221 | * As defined, the DRAT (Deterministic Read After Trim) and RZAT |
@@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4226 | */ | 4235 | */ |
4227 | { "INTEL*SSDSC2MH*", NULL, 0, }, | 4236 | { "INTEL*SSDSC2MH*", NULL, 0, }, |
4228 | 4237 | ||
4238 | { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
4239 | { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
4229 | { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4240 | { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4230 | { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4241 | { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4231 | { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4242 | { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
@@ -4737,7 +4748,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) | |||
4737 | return NULL; | 4748 | return NULL; |
4738 | 4749 | ||
4739 | /* libsas case */ | 4750 | /* libsas case */ |
4740 | if (!ap->scsi_host) { | 4751 | if (ap->flags & ATA_FLAG_SAS_HOST) { |
4741 | tag = ata_sas_allocate_tag(ap); | 4752 | tag = ata_sas_allocate_tag(ap); |
4742 | if (tag < 0) | 4753 | if (tag < 0) |
4743 | return NULL; | 4754 | return NULL; |
@@ -4776,7 +4787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) | |||
4776 | tag = qc->tag; | 4787 | tag = qc->tag; |
4777 | if (likely(ata_tag_valid(tag))) { | 4788 | if (likely(ata_tag_valid(tag))) { |
4778 | qc->tag = ATA_TAG_POISON; | 4789 | qc->tag = ATA_TAG_POISON; |
4779 | if (!ap->scsi_host) | 4790 | if (ap->flags & ATA_FLAG_SAS_HOST) |
4780 | ata_sas_free_tag(tag, ap); | 4791 | ata_sas_free_tag(tag, ap); |
4781 | } | 4792 | } |
4782 | } | 4793 | } |
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index f9054cd36a72..5389579c5120 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -869,6 +869,8 @@ try_offline_again: | |||
869 | */ | 869 | */ |
870 | ata_msleep(ap, 1); | 870 | ata_msleep(ap, 1); |
871 | 871 | ||
872 | sata_set_spd(link); | ||
873 | |||
872 | /* | 874 | /* |
873 | * Now, bring the host controller online again, this can take time | 875 | * Now, bring the host controller online again, this can take time |
874 | * as PHY reset and communication establishment, 1st D2H FIS and | 876 | * as PHY reset and communication establishment, 1st D2H FIS and |
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index ba4abbe4693c..45937f88e77c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -2242,7 +2242,7 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev) | |||
2242 | } | 2242 | } |
2243 | 2243 | ||
2244 | static int pm_genpd_summary_one(struct seq_file *s, | 2244 | static int pm_genpd_summary_one(struct seq_file *s, |
2245 | struct generic_pm_domain *gpd) | 2245 | struct generic_pm_domain *genpd) |
2246 | { | 2246 | { |
2247 | static const char * const status_lookup[] = { | 2247 | static const char * const status_lookup[] = { |
2248 | [GPD_STATE_ACTIVE] = "on", | 2248 | [GPD_STATE_ACTIVE] = "on", |
@@ -2256,26 +2256,26 @@ static int pm_genpd_summary_one(struct seq_file *s, | |||
2256 | struct gpd_link *link; | 2256 | struct gpd_link *link; |
2257 | int ret; | 2257 | int ret; |
2258 | 2258 | ||
2259 | ret = mutex_lock_interruptible(&gpd->lock); | 2259 | ret = mutex_lock_interruptible(&genpd->lock); |
2260 | if (ret) | 2260 | if (ret) |
2261 | return -ERESTARTSYS; | 2261 | return -ERESTARTSYS; |
2262 | 2262 | ||
2263 | if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup))) | 2263 | if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) |
2264 | goto exit; | 2264 | goto exit; |
2265 | seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]); | 2265 | seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); |
2266 | 2266 | ||
2267 | /* | 2267 | /* |
2268 | * Modifications on the list require holding locks on both | 2268 | * Modifications on the list require holding locks on both |
2269 | * master and slave, so we are safe. | 2269 | * master and slave, so we are safe. |
2270 | * Also gpd->name is immutable. | 2270 | * Also genpd->name is immutable. |
2271 | */ | 2271 | */ |
2272 | list_for_each_entry(link, &gpd->master_links, master_node) { | 2272 | list_for_each_entry(link, &genpd->master_links, master_node) { |
2273 | seq_printf(s, "%s", link->slave->name); | 2273 | seq_printf(s, "%s", link->slave->name); |
2274 | if (!list_is_last(&link->master_node, &gpd->master_links)) | 2274 | if (!list_is_last(&link->master_node, &genpd->master_links)) |
2275 | seq_puts(s, ", "); | 2275 | seq_puts(s, ", "); |
2276 | } | 2276 | } |
2277 | 2277 | ||
2278 | list_for_each_entry(pm_data, &gpd->dev_list, list_node) { | 2278 | list_for_each_entry(pm_data, &genpd->dev_list, list_node) { |
2279 | kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL); | 2279 | kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL); |
2280 | if (kobj_path == NULL) | 2280 | if (kobj_path == NULL) |
2281 | continue; | 2281 | continue; |
@@ -2287,14 +2287,14 @@ static int pm_genpd_summary_one(struct seq_file *s, | |||
2287 | 2287 | ||
2288 | seq_puts(s, "\n"); | 2288 | seq_puts(s, "\n"); |
2289 | exit: | 2289 | exit: |
2290 | mutex_unlock(&gpd->lock); | 2290 | mutex_unlock(&genpd->lock); |
2291 | 2291 | ||
2292 | return 0; | 2292 | return 0; |
2293 | } | 2293 | } |
2294 | 2294 | ||
2295 | static int pm_genpd_summary_show(struct seq_file *s, void *data) | 2295 | static int pm_genpd_summary_show(struct seq_file *s, void *data) |
2296 | { | 2296 | { |
2297 | struct generic_pm_domain *gpd; | 2297 | struct generic_pm_domain *genpd; |
2298 | int ret = 0; | 2298 | int ret = 0; |
2299 | 2299 | ||
2300 | seq_puts(s, " domain status slaves\n"); | 2300 | seq_puts(s, " domain status slaves\n"); |
@@ -2305,8 +2305,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) | |||
2305 | if (ret) | 2305 | if (ret) |
2306 | return -ERESTARTSYS; | 2306 | return -ERESTARTSYS; |
2307 | 2307 | ||
2308 | list_for_each_entry(gpd, &gpd_list, gpd_list_node) { | 2308 | list_for_each_entry(genpd, &gpd_list, gpd_list_node) { |
2309 | ret = pm_genpd_summary_one(s, gpd); | 2309 | ret = pm_genpd_summary_one(s, genpd); |
2310 | if (ret) | 2310 | if (ret) |
2311 | break; | 2311 | break; |
2312 | } | 2312 | } |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index c2744b30d5d9..aab7158d2afe 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -730,6 +730,7 @@ void pm_system_wakeup(void) | |||
730 | pm_abort_suspend = true; | 730 | pm_abort_suspend = true; |
731 | freeze_wake(); | 731 | freeze_wake(); |
732 | } | 732 | } |
733 | EXPORT_SYMBOL_GPL(pm_system_wakeup); | ||
733 | 734 | ||
734 | void pm_wakeup_clear(void) | 735 | void pm_wakeup_clear(void) |
735 | { | 736 | { |
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index beb8b27d4621..a13587b5c2be 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h | |||
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops; | |||
243 | extern struct regcache_ops regcache_lzo_ops; | 243 | extern struct regcache_ops regcache_lzo_ops; |
244 | extern struct regcache_ops regcache_flat_ops; | 244 | extern struct regcache_ops regcache_flat_ops; |
245 | 245 | ||
246 | static inline const char *regmap_name(const struct regmap *map) | ||
247 | { | ||
248 | if (map->dev) | ||
249 | return dev_name(map->dev); | ||
250 | |||
251 | return map->name; | ||
252 | } | ||
253 | |||
246 | #endif | 254 | #endif |
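Editor's note: regmap_name() gives the tracing and logging paths a printable identifier even for maps with no struct device behind them (the no-bus/syscon case), falling back to map->name. The regcache.c and regmap.c hunks further down switch the trace calls from map->dev to the map itself, presumably so the trace events can use this helper. A hypothetical caller, only to show the intent:

	#include <linux/printk.h>
	#include "internal.h"

	/* hypothetical debug helper, not part of the patch */
	static void regmap_debug_write(struct regmap *map, unsigned int reg,
				       unsigned int val)
	{
		pr_debug("%s: %#x <= %#x\n", regmap_name(map), reg, val);
	}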
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index d453a2c98ad0..81751a49d8bf 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c | |||
@@ -307,7 +307,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, | |||
307 | if (pos == 0) { | 307 | if (pos == 0) { |
308 | memmove(blk + offset * map->cache_word_size, | 308 | memmove(blk + offset * map->cache_word_size, |
309 | blk, rbnode->blklen * map->cache_word_size); | 309 | blk, rbnode->blklen * map->cache_word_size); |
310 | bitmap_shift_right(present, present, offset, blklen); | 310 | bitmap_shift_left(present, present, offset, blklen); |
311 | } | 311 | } |
312 | 312 | ||
313 | /* update the rbnode block, its size and the base register */ | 313 | /* update the rbnode block, its size and the base register */ |
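Editor's note: the one-line rbtree fix above corrects the direction of the bitmap shift. When a register below the block's current base is inserted (pos == 0), the existing cache entries are memmove()d to higher offsets, so their bits in the "present" bitmap must also move to higher bit positions; bitmap_shift_left() does that, whereas the old bitmap_shift_right() moved them the wrong way and made cached registers look absent. A small illustration of the semantics, assuming the usual bitmap.h behaviour:

	#include <linux/bitmap.h>

	static void present_shift_example(void)
	{
		DECLARE_BITMAP(present, 8);

		bitmap_zero(present, 8);
		__set_bit(0, present);		/* old slot 0 is cached */
		__set_bit(2, present);		/* old slot 2 is cached */
		/* two new registers are inserted in front of the block (pos == 0) */
		bitmap_shift_left(present, present, 2, 8);
		/* bits 2 and 4 are now set, matching the data moved up by memmove() */
	}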
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index f373c35f9e1d..87db9893b463 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c | |||
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map, | |||
218 | ret = map->cache_ops->read(map, reg, value); | 218 | ret = map->cache_ops->read(map, reg, value); |
219 | 219 | ||
220 | if (ret == 0) | 220 | if (ret == 0) |
221 | trace_regmap_reg_read_cache(map->dev, reg, *value); | 221 | trace_regmap_reg_read_cache(map, reg, *value); |
222 | 222 | ||
223 | return ret; | 223 | return ret; |
224 | } | 224 | } |
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map) | |||
311 | dev_dbg(map->dev, "Syncing %s cache\n", | 311 | dev_dbg(map->dev, "Syncing %s cache\n", |
312 | map->cache_ops->name); | 312 | map->cache_ops->name); |
313 | name = map->cache_ops->name; | 313 | name = map->cache_ops->name; |
314 | trace_regcache_sync(map->dev, name, "start"); | 314 | trace_regcache_sync(map, name, "start"); |
315 | 315 | ||
316 | if (!map->cache_dirty) | 316 | if (!map->cache_dirty) |
317 | goto out; | 317 | goto out; |
@@ -346,7 +346,7 @@ out: | |||
346 | 346 | ||
347 | regmap_async_complete(map); | 347 | regmap_async_complete(map); |
348 | 348 | ||
349 | trace_regcache_sync(map->dev, name, "stop"); | 349 | trace_regcache_sync(map, name, "stop"); |
350 | 350 | ||
351 | return ret; | 351 | return ret; |
352 | } | 352 | } |
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min, | |||
381 | name = map->cache_ops->name; | 381 | name = map->cache_ops->name; |
382 | dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); | 382 | dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); |
383 | 383 | ||
384 | trace_regcache_sync(map->dev, name, "start region"); | 384 | trace_regcache_sync(map, name, "start region"); |
385 | 385 | ||
386 | if (!map->cache_dirty) | 386 | if (!map->cache_dirty) |
387 | goto out; | 387 | goto out; |
@@ -401,7 +401,7 @@ out: | |||
401 | 401 | ||
402 | regmap_async_complete(map); | 402 | regmap_async_complete(map); |
403 | 403 | ||
404 | trace_regcache_sync(map->dev, name, "stop region"); | 404 | trace_regcache_sync(map, name, "stop region"); |
405 | 405 | ||
406 | return ret; | 406 | return ret; |
407 | } | 407 | } |
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min, | |||
428 | 428 | ||
429 | map->lock(map->lock_arg); | 429 | map->lock(map->lock_arg); |
430 | 430 | ||
431 | trace_regcache_drop_region(map->dev, min, max); | 431 | trace_regcache_drop_region(map, min, max); |
432 | 432 | ||
433 | ret = map->cache_ops->drop(map, min, max); | 433 | ret = map->cache_ops->drop(map, min, max); |
434 | 434 | ||
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable) | |||
455 | map->lock(map->lock_arg); | 455 | map->lock(map->lock_arg); |
456 | WARN_ON(map->cache_bypass && enable); | 456 | WARN_ON(map->cache_bypass && enable); |
457 | map->cache_only = enable; | 457 | map->cache_only = enable; |
458 | trace_regmap_cache_only(map->dev, enable); | 458 | trace_regmap_cache_only(map, enable); |
459 | map->unlock(map->lock_arg); | 459 | map->unlock(map->lock_arg); |
460 | } | 460 | } |
461 | EXPORT_SYMBOL_GPL(regcache_cache_only); | 461 | EXPORT_SYMBOL_GPL(regcache_cache_only); |
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable) | |||
493 | map->lock(map->lock_arg); | 493 | map->lock(map->lock_arg); |
494 | WARN_ON(map->cache_only && enable); | 494 | WARN_ON(map->cache_only && enable); |
495 | map->cache_bypass = enable; | 495 | map->cache_bypass = enable; |
496 | trace_regmap_cache_bypass(map->dev, enable); | 496 | trace_regmap_cache_bypass(map, enable); |
497 | map->unlock(map->lock_arg); | 497 | map->unlock(map->lock_arg); |
498 | } | 498 | } |
499 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); | 499 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); |
@@ -608,7 +608,8 @@ static int regcache_sync_block_single(struct regmap *map, void *block, | |||
608 | for (i = start; i < end; i++) { | 608 | for (i = start; i < end; i++) { |
609 | regtmp = block_base + (i * map->reg_stride); | 609 | regtmp = block_base + (i * map->reg_stride); |
610 | 610 | ||
611 | if (!regcache_reg_present(cache_present, i)) | 611 | if (!regcache_reg_present(cache_present, i) || |
612 | !regmap_writeable(map, regtmp)) | ||
612 | continue; | 613 | continue; |
613 | 614 | ||
614 | val = regcache_get_val(map, block, i); | 615 | val = regcache_get_val(map, block, i); |
@@ -677,7 +678,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block, | |||
677 | for (i = start; i < end; i++) { | 678 | for (i = start; i < end; i++) { |
678 | regtmp = block_base + (i * map->reg_stride); | 679 | regtmp = block_base + (i * map->reg_stride); |
679 | 680 | ||
680 | if (!regcache_reg_present(cache_present, i)) { | 681 | if (!regcache_reg_present(cache_present, i) || |
682 | !regmap_writeable(map, regtmp)) { | ||
681 | ret = regcache_sync_block_raw_flush(map, &data, | 683 | ret = regcache_sync_block_raw_flush(map, &data, |
682 | base, regtmp); | 684 | base, regtmp); |
683 | if (ret != 0) | 685 | if (ret != 0) |
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 6299a50a5960..a6c3f75b4b01 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c | |||
@@ -499,7 +499,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, | |||
499 | goto err_alloc; | 499 | goto err_alloc; |
500 | } | 500 | } |
501 | 501 | ||
502 | ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags, | 502 | ret = request_threaded_irq(irq, NULL, regmap_irq_thread, |
503 | irq_flags | IRQF_ONESHOT, | ||
503 | chip->name, d); | 504 | chip->name, d); |
504 | if (ret != 0) { | 505 | if (ret != 0) { |
505 | dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", | 506 | dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", |
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index f99b098ddabf..dbfe6a69c3da 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
1281 | if (map->async && map->bus->async_write) { | 1281 | if (map->async && map->bus->async_write) { |
1282 | struct regmap_async *async; | 1282 | struct regmap_async *async; |
1283 | 1283 | ||
1284 | trace_regmap_async_write_start(map->dev, reg, val_len); | 1284 | trace_regmap_async_write_start(map, reg, val_len); |
1285 | 1285 | ||
1286 | spin_lock_irqsave(&map->async_lock, flags); | 1286 | spin_lock_irqsave(&map->async_lock, flags); |
1287 | async = list_first_entry_or_null(&map->async_free, | 1287 | async = list_first_entry_or_null(&map->async_free, |
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
1339 | return ret; | 1339 | return ret; |
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | trace_regmap_hw_write_start(map->dev, reg, | 1342 | trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes); |
1343 | val_len / map->format.val_bytes); | ||
1344 | 1343 | ||
1345 | /* If we're doing a single register write we can probably just | 1344 | /* If we're doing a single register write we can probably just |
1346 | * send the work_buf directly, otherwise try to do a gather | 1345 | * send the work_buf directly, otherwise try to do a gather |
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
1372 | kfree(buf); | 1371 | kfree(buf); |
1373 | } | 1372 | } |
1374 | 1373 | ||
1375 | trace_regmap_hw_write_done(map->dev, reg, | 1374 | trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); |
1376 | val_len / map->format.val_bytes); | ||
1377 | 1375 | ||
1378 | return ret; | 1376 | return ret; |
1379 | } | 1377 | } |
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg, | |||
1407 | 1405 | ||
1408 | map->format.format_write(map, reg, val); | 1406 | map->format.format_write(map, reg, val); |
1409 | 1407 | ||
1410 | trace_regmap_hw_write_start(map->dev, reg, 1); | 1408 | trace_regmap_hw_write_start(map, reg, 1); |
1411 | 1409 | ||
1412 | ret = map->bus->write(map->bus_context, map->work_buf, | 1410 | ret = map->bus->write(map->bus_context, map->work_buf, |
1413 | map->format.buf_size); | 1411 | map->format.buf_size); |
1414 | 1412 | ||
1415 | trace_regmap_hw_write_done(map->dev, reg, 1); | 1413 | trace_regmap_hw_write_done(map, reg, 1); |
1416 | 1414 | ||
1417 | return ret; | 1415 | return ret; |
1418 | } | 1416 | } |
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg, | |||
1470 | dev_info(map->dev, "%x <= %x\n", reg, val); | 1468 | dev_info(map->dev, "%x <= %x\n", reg, val); |
1471 | #endif | 1469 | #endif |
1472 | 1470 | ||
1473 | trace_regmap_reg_write(map->dev, reg, val); | 1471 | trace_regmap_reg_write(map, reg, val); |
1474 | 1472 | ||
1475 | return map->reg_write(context, reg, val); | 1473 | return map->reg_write(context, reg, val); |
1476 | } | 1474 | } |
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, | |||
1773 | for (i = 0; i < num_regs; i++) { | 1771 | for (i = 0; i < num_regs; i++) { |
1774 | int reg = regs[i].reg; | 1772 | int reg = regs[i].reg; |
1775 | int val = regs[i].def; | 1773 | int val = regs[i].def; |
1776 | trace_regmap_hw_write_start(map->dev, reg, 1); | 1774 | trace_regmap_hw_write_start(map, reg, 1); |
1777 | map->format.format_reg(u8, reg, map->reg_shift); | 1775 | map->format.format_reg(u8, reg, map->reg_shift); |
1778 | u8 += reg_bytes + pad_bytes; | 1776 | u8 += reg_bytes + pad_bytes; |
1779 | map->format.format_val(u8, val, 0); | 1777 | map->format.format_val(u8, val, 0); |
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, | |||
1788 | 1786 | ||
1789 | for (i = 0; i < num_regs; i++) { | 1787 | for (i = 0; i < num_regs; i++) { |
1790 | int reg = regs[i].reg; | 1788 | int reg = regs[i].reg; |
1791 | trace_regmap_hw_write_done(map->dev, reg, 1); | 1789 | trace_regmap_hw_write_done(map, reg, 1); |
1792 | } | 1790 | } |
1793 | return ret; | 1791 | return ret; |
1794 | } | 1792 | } |
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, | |||
2059 | */ | 2057 | */ |
2060 | u8[0] |= map->read_flag_mask; | 2058 | u8[0] |= map->read_flag_mask; |
2061 | 2059 | ||
2062 | trace_regmap_hw_read_start(map->dev, reg, | 2060 | trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); |
2063 | val_len / map->format.val_bytes); | ||
2064 | 2061 | ||
2065 | ret = map->bus->read(map->bus_context, map->work_buf, | 2062 | ret = map->bus->read(map->bus_context, map->work_buf, |
2066 | map->format.reg_bytes + map->format.pad_bytes, | 2063 | map->format.reg_bytes + map->format.pad_bytes, |
2067 | val, val_len); | 2064 | val, val_len); |
2068 | 2065 | ||
2069 | trace_regmap_hw_read_done(map->dev, reg, | 2066 | trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); |
2070 | val_len / map->format.val_bytes); | ||
2071 | 2067 | ||
2072 | return ret; | 2068 | return ret; |
2073 | } | 2069 | } |
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg, | |||
2123 | dev_info(map->dev, "%x => %x\n", reg, *val); | 2119 | dev_info(map->dev, "%x => %x\n", reg, *val); |
2124 | #endif | 2120 | #endif |
2125 | 2121 | ||
2126 | trace_regmap_reg_read(map->dev, reg, *val); | 2122 | trace_regmap_reg_read(map, reg, *val); |
2127 | 2123 | ||
2128 | if (!map->cache_bypass) | 2124 | if (!map->cache_bypass) |
2129 | regcache_write(map, reg, *val); | 2125 | regcache_write(map, reg, *val); |
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret) | |||
2480 | struct regmap *map = async->map; | 2476 | struct regmap *map = async->map; |
2481 | bool wake; | 2477 | bool wake; |
2482 | 2478 | ||
2483 | trace_regmap_async_io_complete(map->dev); | 2479 | trace_regmap_async_io_complete(map); |
2484 | 2480 | ||
2485 | spin_lock(&map->async_lock); | 2481 | spin_lock(&map->async_lock); |
2486 | list_move(&async->list, &map->async_free); | 2482 | list_move(&async->list, &map->async_free); |
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map) | |||
2525 | if (!map->bus || !map->bus->async_write) | 2521 | if (!map->bus || !map->bus->async_write) |
2526 | return 0; | 2522 | return 0; |
2527 | 2523 | ||
2528 | trace_regmap_async_complete_start(map->dev); | 2524 | trace_regmap_async_complete_start(map); |
2529 | 2525 | ||
2530 | wait_event(map->async_waitq, regmap_async_is_done(map)); | 2526 | wait_event(map->async_waitq, regmap_async_is_done(map)); |
2531 | 2527 | ||
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map) | |||
2534 | map->async_ret = 0; | 2530 | map->async_ret = 0; |
2535 | spin_unlock_irqrestore(&map->async_lock, flags); | 2531 | spin_unlock_irqrestore(&map->async_lock, flags); |
2536 | 2532 | ||
2537 | trace_regmap_async_complete_done(map->dev); | 2533 | trace_regmap_async_complete_done(map); |
2538 | 2534 | ||
2539 | return ret; | 2535 | return ret; |
2540 | } | 2536 | } |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 4bc2a5cb9935..a98c41f72c63 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -803,10 +803,6 @@ static int __init nbd_init(void) | |||
803 | return -EINVAL; | 803 | return -EINVAL; |
804 | } | 804 | } |
805 | 805 | ||
806 | nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); | ||
807 | if (!nbd_dev) | ||
808 | return -ENOMEM; | ||
809 | |||
810 | part_shift = 0; | 806 | part_shift = 0; |
811 | if (max_part > 0) { | 807 | if (max_part > 0) { |
812 | part_shift = fls(max_part); | 808 | part_shift = fls(max_part); |
@@ -828,6 +824,10 @@ static int __init nbd_init(void) | |||
828 | if (nbds_max > 1UL << (MINORBITS - part_shift)) | 824 | if (nbds_max > 1UL << (MINORBITS - part_shift)) |
829 | return -EINVAL; | 825 | return -EINVAL; |
830 | 826 | ||
827 | nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); | ||
828 | if (!nbd_dev) | ||
829 | return -ENOMEM; | ||
830 | |||
831 | for (i = 0; i < nbds_max; i++) { | 831 | for (i = 0; i < nbds_max; i++) { |
832 | struct gendisk *disk = alloc_disk(1 << part_shift); | 832 | struct gendisk *disk = alloc_disk(1 << part_shift); |
833 | if (!disk) | 833 | if (!disk) |
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b64bccbb78c9..e23be20a3417 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c | |||
@@ -482,6 +482,7 @@ static int nvme_error_status(u16 status) | |||
482 | } | 482 | } |
483 | } | 483 | } |
484 | 484 | ||
485 | #ifdef CONFIG_BLK_DEV_INTEGRITY | ||
485 | static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) | 486 | static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) |
486 | { | 487 | { |
487 | if (be32_to_cpu(pi->ref_tag) == v) | 488 | if (be32_to_cpu(pi->ref_tag) == v) |
@@ -538,6 +539,58 @@ static void nvme_dif_remap(struct request *req, | |||
538 | kunmap_atomic(pmap); | 539 | kunmap_atomic(pmap); |
539 | } | 540 | } |
540 | 541 | ||
542 | static int nvme_noop_verify(struct blk_integrity_iter *iter) | ||
543 | { | ||
544 | return 0; | ||
545 | } | ||
546 | |||
547 | static int nvme_noop_generate(struct blk_integrity_iter *iter) | ||
548 | { | ||
549 | return 0; | ||
550 | } | ||
551 | |||
552 | struct blk_integrity nvme_meta_noop = { | ||
553 | .name = "NVME_META_NOOP", | ||
554 | .generate_fn = nvme_noop_generate, | ||
555 | .verify_fn = nvme_noop_verify, | ||
556 | }; | ||
557 | |||
558 | static void nvme_init_integrity(struct nvme_ns *ns) | ||
559 | { | ||
560 | struct blk_integrity integrity; | ||
561 | |||
562 | switch (ns->pi_type) { | ||
563 | case NVME_NS_DPS_PI_TYPE3: | ||
564 | integrity = t10_pi_type3_crc; | ||
565 | break; | ||
566 | case NVME_NS_DPS_PI_TYPE1: | ||
567 | case NVME_NS_DPS_PI_TYPE2: | ||
568 | integrity = t10_pi_type1_crc; | ||
569 | break; | ||
570 | default: | ||
571 | integrity = nvme_meta_noop; | ||
572 | break; | ||
573 | } | ||
574 | integrity.tuple_size = ns->ms; | ||
575 | blk_integrity_register(ns->disk, &integrity); | ||
576 | blk_queue_max_integrity_segments(ns->queue, 1); | ||
577 | } | ||
578 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | ||
579 | static void nvme_dif_remap(struct request *req, | ||
580 | void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi)) | ||
581 | { | ||
582 | } | ||
583 | static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) | ||
584 | { | ||
585 | } | ||
586 | static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) | ||
587 | { | ||
588 | } | ||
589 | static void nvme_init_integrity(struct nvme_ns *ns) | ||
590 | { | ||
591 | } | ||
592 | #endif | ||
593 | |||
541 | static void req_completion(struct nvme_queue *nvmeq, void *ctx, | 594 | static void req_completion(struct nvme_queue *nvmeq, void *ctx, |
542 | struct nvme_completion *cqe) | 595 | struct nvme_completion *cqe) |
543 | { | 596 | { |
@@ -1959,43 +2012,6 @@ static void nvme_config_discard(struct nvme_ns *ns) | |||
1959 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); | 2012 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); |
1960 | } | 2013 | } |
1961 | 2014 | ||
1962 | static int nvme_noop_verify(struct blk_integrity_iter *iter) | ||
1963 | { | ||
1964 | return 0; | ||
1965 | } | ||
1966 | |||
1967 | static int nvme_noop_generate(struct blk_integrity_iter *iter) | ||
1968 | { | ||
1969 | return 0; | ||
1970 | } | ||
1971 | |||
1972 | struct blk_integrity nvme_meta_noop = { | ||
1973 | .name = "NVME_META_NOOP", | ||
1974 | .generate_fn = nvme_noop_generate, | ||
1975 | .verify_fn = nvme_noop_verify, | ||
1976 | }; | ||
1977 | |||
1978 | static void nvme_init_integrity(struct nvme_ns *ns) | ||
1979 | { | ||
1980 | struct blk_integrity integrity; | ||
1981 | |||
1982 | switch (ns->pi_type) { | ||
1983 | case NVME_NS_DPS_PI_TYPE3: | ||
1984 | integrity = t10_pi_type3_crc; | ||
1985 | break; | ||
1986 | case NVME_NS_DPS_PI_TYPE1: | ||
1987 | case NVME_NS_DPS_PI_TYPE2: | ||
1988 | integrity = t10_pi_type1_crc; | ||
1989 | break; | ||
1990 | default: | ||
1991 | integrity = nvme_meta_noop; | ||
1992 | break; | ||
1993 | } | ||
1994 | integrity.tuple_size = ns->ms; | ||
1995 | blk_integrity_register(ns->disk, &integrity); | ||
1996 | blk_queue_max_integrity_segments(ns->queue, 1); | ||
1997 | } | ||
1998 | |||
1999 | static int nvme_revalidate_disk(struct gendisk *disk) | 2015 | static int nvme_revalidate_disk(struct gendisk *disk) |
2000 | { | 2016 | { |
2001 | struct nvme_ns *ns = disk->private_data; | 2017 | struct nvme_ns *ns = disk->private_data; |
@@ -2036,7 +2052,8 @@ static int nvme_revalidate_disk(struct gendisk *disk) | |||
2036 | pi_type = ns->ms == sizeof(struct t10_pi_tuple) ? | 2052 | pi_type = ns->ms == sizeof(struct t10_pi_tuple) ? |
2037 | id->dps & NVME_NS_DPS_PI_MASK : 0; | 2053 | id->dps & NVME_NS_DPS_PI_MASK : 0; |
2038 | 2054 | ||
2039 | if (disk->integrity && (ns->pi_type != pi_type || ns->ms != old_ms || | 2055 | if (blk_get_integrity(disk) && (ns->pi_type != pi_type || |
2056 | ns->ms != old_ms || | ||
2040 | bs != queue_logical_block_size(disk->queue) || | 2057 | bs != queue_logical_block_size(disk->queue) || |
2041 | (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT))) | 2058 | (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT))) |
2042 | blk_integrity_unregister(disk); | 2059 | blk_integrity_unregister(disk); |
@@ -2044,11 +2061,11 @@ static int nvme_revalidate_disk(struct gendisk *disk) | |||
2044 | ns->pi_type = pi_type; | 2061 | ns->pi_type = pi_type; |
2045 | blk_queue_logical_block_size(ns->queue, bs); | 2062 | blk_queue_logical_block_size(ns->queue, bs); |
2046 | 2063 | ||
2047 | if (ns->ms && !disk->integrity && (disk->flags & GENHD_FL_UP) && | 2064 | if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) && |
2048 | !(id->flbas & NVME_NS_FLBAS_META_EXT)) | 2065 | !(id->flbas & NVME_NS_FLBAS_META_EXT)) |
2049 | nvme_init_integrity(ns); | 2066 | nvme_init_integrity(ns); |
2050 | 2067 | ||
2051 | if (id->ncap == 0 || (ns->ms && !disk->integrity)) | 2068 | if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk))) |
2052 | set_capacity(disk, 0); | 2069 | set_capacity(disk, 0); |
2053 | else | 2070 | else |
2054 | set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); | 2071 | set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); |
@@ -2652,7 +2669,7 @@ static void nvme_dev_remove(struct nvme_dev *dev) | |||
2652 | 2669 | ||
2653 | list_for_each_entry(ns, &dev->namespaces, list) { | 2670 | list_for_each_entry(ns, &dev->namespaces, list) { |
2654 | if (ns->disk->flags & GENHD_FL_UP) { | 2671 | if (ns->disk->flags & GENHD_FL_UP) { |
2655 | if (ns->disk->integrity) | 2672 | if (blk_get_integrity(ns->disk)) |
2656 | blk_integrity_unregister(ns->disk); | 2673 | blk_integrity_unregister(ns->disk); |
2657 | del_gendisk(ns->disk); | 2674 | del_gendisk(ns->disk); |
2658 | } | 2675 | } |
@@ -2986,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2986 | } | 3003 | } |
2987 | get_device(dev->device); | 3004 | get_device(dev->device); |
2988 | 3005 | ||
3006 | INIT_LIST_HEAD(&dev->node); | ||
2989 | INIT_WORK(&dev->probe_work, nvme_async_probe); | 3007 | INIT_WORK(&dev->probe_work, nvme_async_probe); |
2990 | schedule_work(&dev->probe_work); | 3008 | schedule_work(&dev->probe_work); |
2991 | return 0; | 3009 | return 0; |
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 8e233edd7a09..871bd3550cb0 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
@@ -528,7 +528,7 @@ out_cleanup: | |||
528 | static inline void update_used_max(struct zram *zram, | 528 | static inline void update_used_max(struct zram *zram, |
529 | const unsigned long pages) | 529 | const unsigned long pages) |
530 | { | 530 | { |
531 | int old_max, cur_max; | 531 | unsigned long old_max, cur_max; |
532 | 532 | ||
533 | old_max = atomic_long_read(&zram->stats.max_used_pages); | 533 | old_max = atomic_long_read(&zram->stats.max_used_pages); |
534 | 534 | ||
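Editor's note: the zram hunk widens the locals used with atomic_long_read()/atomic_long_cmpxchg(), which return long; truncating the page count into an int on a 64-bit kernel could lose high bits and make the retry comparison misbehave. A rough sketch of the corrected update loop (simplified; the driver's version differs in detail):

	/* sketch only */
	static inline void update_used_max_sketch(struct zram *zram,
						  const unsigned long pages)
	{
		unsigned long old_max, cur_max;

		old_max = atomic_long_read(&zram->stats.max_used_pages);
		do {
			cur_max = old_max;
			if (pages <= cur_max)
				return;		/* the recorded maximum already covers us */
			old_max = atomic_long_cmpxchg(&zram->stats.max_used_pages,
						      cur_max, pages);
		} while (old_max != cur_max);
	}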
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index b87688881143..8bfc4c2bba87 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = { | |||
272 | { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, | 272 | { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, |
273 | 273 | ||
274 | /* Intel Bluetooth devices */ | 274 | /* Intel Bluetooth devices */ |
275 | { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, | ||
275 | { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, | 276 | { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, |
276 | { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, | 277 | { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, |
277 | { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW }, | 278 | { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW }, |
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c index 79524ed2a3cb..8753b0f6a317 100644 --- a/drivers/char/ipmi/ipmi_powernv.c +++ b/drivers/char/ipmi/ipmi_powernv.c | |||
@@ -125,6 +125,7 @@ static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi) | |||
125 | spin_lock_irqsave(&smi->msg_lock, flags); | 125 | spin_lock_irqsave(&smi->msg_lock, flags); |
126 | 126 | ||
127 | if (!smi->cur_msg) { | 127 | if (!smi->cur_msg) { |
128 | spin_unlock_irqrestore(&smi->msg_lock, flags); | ||
128 | pr_warn("no current message?\n"); | 129 | pr_warn("no current message?\n"); |
129 | return 0; | 130 | return 0; |
130 | } | 131 | } |
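Editor's note: the one-liner above fixes a lock leak; the "!smi->cur_msg" early return used to leave msg_lock held with interrupts disabled. The shape being restored is the usual rule that every exit path balances the lock, roughly:

	spin_lock_irqsave(&smi->msg_lock, flags);
	if (!smi->cur_msg) {
		spin_unlock_irqrestore(&smi->msg_lock, flags);	/* drop the lock before bailing out */
		pr_warn("no current message?\n");
		return 0;
	}
	/* ... consume smi->cur_msg under the lock ... */
	spin_unlock_irqrestore(&smi->msg_lock, flags);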
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index f6646ed3047e..518585c1ce94 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -263,6 +263,11 @@ struct smi_info { | |||
263 | bool supports_event_msg_buff; | 263 | bool supports_event_msg_buff; |
264 | 264 | ||
265 | /* | 265 | /* |
266 | * Can we clear the global enables receive irq bit? | ||
267 | */ | ||
268 | bool cannot_clear_recv_irq_bit; | ||
269 | |||
270 | /* | ||
266 | * Did we get an attention that we did not handle? | 271 | * Did we get an attention that we did not handle? |
267 | */ | 272 | */ |
268 | bool got_attn; | 273 | bool got_attn; |
@@ -461,6 +466,9 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) | |||
461 | * allocate messages, we just leave them in the BMC and run the system | 466 | * allocate messages, we just leave them in the BMC and run the system |
462 | * polled until we can allocate some memory. Once we have some | 467 | * polled until we can allocate some memory. Once we have some |
463 | * memory, we will re-enable the interrupt. | 468 | * memory, we will re-enable the interrupt. |
469 | * | ||
470 | * Note that we cannot just use disable_irq(), since the interrupt may | ||
471 | * be shared. | ||
464 | */ | 472 | */ |
465 | static inline bool disable_si_irq(struct smi_info *smi_info) | 473 | static inline bool disable_si_irq(struct smi_info *smi_info) |
466 | { | 474 | { |
@@ -549,20 +557,15 @@ static u8 current_global_enables(struct smi_info *smi_info, u8 base, | |||
549 | 557 | ||
550 | if (smi_info->supports_event_msg_buff) | 558 | if (smi_info->supports_event_msg_buff) |
551 | enables |= IPMI_BMC_EVT_MSG_BUFF; | 559 | enables |= IPMI_BMC_EVT_MSG_BUFF; |
552 | else | ||
553 | enables &= ~IPMI_BMC_EVT_MSG_BUFF; | ||
554 | 560 | ||
555 | if (smi_info->irq && !smi_info->interrupt_disabled) | 561 | if ((smi_info->irq && !smi_info->interrupt_disabled) || |
562 | smi_info->cannot_clear_recv_irq_bit) | ||
556 | enables |= IPMI_BMC_RCV_MSG_INTR; | 563 | enables |= IPMI_BMC_RCV_MSG_INTR; |
557 | else | ||
558 | enables &= ~IPMI_BMC_RCV_MSG_INTR; | ||
559 | 564 | ||
560 | if (smi_info->supports_event_msg_buff && | 565 | if (smi_info->supports_event_msg_buff && |
561 | smi_info->irq && !smi_info->interrupt_disabled) | 566 | smi_info->irq && !smi_info->interrupt_disabled) |
562 | 567 | ||
563 | enables |= IPMI_BMC_EVT_MSG_INTR; | 568 | enables |= IPMI_BMC_EVT_MSG_INTR; |
564 | else | ||
565 | enables &= ~IPMI_BMC_EVT_MSG_INTR; | ||
566 | 569 | ||
567 | *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR); | 570 | *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR); |
568 | 571 | ||
@@ -2900,6 +2903,96 @@ static int try_get_dev_id(struct smi_info *smi_info) | |||
2900 | return rv; | 2903 | return rv; |
2901 | } | 2904 | } |
2902 | 2905 | ||
2906 | /* | ||
2907 | * Some BMCs do not support clearing the receive irq bit in the global | ||
2908 | * enables (even if they don't support interrupts on the BMC). Check | ||
2909 | * for this and handle it properly. | ||
2910 | */ | ||
2911 | static void check_clr_rcv_irq(struct smi_info *smi_info) | ||
2912 | { | ||
2913 | unsigned char msg[3]; | ||
2914 | unsigned char *resp; | ||
2915 | unsigned long resp_len; | ||
2916 | int rv; | ||
2917 | |||
2918 | resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); | ||
2919 | if (!resp) { | ||
2920 | printk(KERN_WARNING PFX "Out of memory allocating response for" | ||
2921 | " global enables command, cannot check recv irq bit" | ||
2922 | " handling.\n"); | ||
2923 | return; | ||
2924 | } | ||
2925 | |||
2926 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; | ||
2927 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; | ||
2928 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); | ||
2929 | |||
2930 | rv = wait_for_msg_done(smi_info); | ||
2931 | if (rv) { | ||
2932 | printk(KERN_WARNING PFX "Error getting response from get" | ||
2933 | " global enables command, cannot check recv irq bit" | ||
2934 | " handling.\n"); | ||
2935 | goto out; | ||
2936 | } | ||
2937 | |||
2938 | resp_len = smi_info->handlers->get_result(smi_info->si_sm, | ||
2939 | resp, IPMI_MAX_MSG_LENGTH); | ||
2940 | |||
2941 | if (resp_len < 4 || | ||
2942 | resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || | ||
2943 | resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || | ||
2944 | resp[2] != 0) { | ||
2945 | printk(KERN_WARNING PFX "Invalid return from get global" | ||
2946 | " enables command, cannot check recv irq bit" | ||
2947 | " handling.\n"); | ||
2948 | rv = -EINVAL; | ||
2949 | goto out; | ||
2950 | } | ||
2951 | |||
2952 | if ((resp[3] & IPMI_BMC_RCV_MSG_INTR) == 0) | ||
2953 | /* Already clear, should work ok. */ | ||
2954 | goto out; | ||
2955 | |||
2956 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; | ||
2957 | msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; | ||
2958 | msg[2] = resp[3] & ~IPMI_BMC_RCV_MSG_INTR; | ||
2959 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); | ||
2960 | |||
2961 | rv = wait_for_msg_done(smi_info); | ||
2962 | if (rv) { | ||
2963 | printk(KERN_WARNING PFX "Error getting response from set" | ||
2964 | " global enables command, cannot check recv irq bit" | ||
2965 | " handling.\n"); | ||
2966 | goto out; | ||
2967 | } | ||
2968 | |||
2969 | resp_len = smi_info->handlers->get_result(smi_info->si_sm, | ||
2970 | resp, IPMI_MAX_MSG_LENGTH); | ||
2971 | |||
2972 | if (resp_len < 3 || | ||
2973 | resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || | ||
2974 | resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { | ||
2975 | printk(KERN_WARNING PFX "Invalid return from get global" | ||
2976 | " enables command, cannot check recv irq bit" | ||
2977 | " handling.\n"); | ||
2978 | rv = -EINVAL; | ||
2979 | goto out; | ||
2980 | } | ||
2981 | |||
2982 | if (resp[2] != 0) { | ||
2983 | /* | ||
2984 | * An error when setting the event buffer bit means | ||
2985 | * clearing the bit is not supported. | ||
2986 | */ | ||
2987 | printk(KERN_WARNING PFX "The BMC does not support clearing" | ||
2988 | " the recv irq bit, compensating, but the BMC needs to" | ||
2989 | " be fixed.\n"); | ||
2990 | smi_info->cannot_clear_recv_irq_bit = true; | ||
2991 | } | ||
2992 | out: | ||
2993 | kfree(resp); | ||
2994 | } | ||
2995 | |||
2903 | static int try_enable_event_buffer(struct smi_info *smi_info) | 2996 | static int try_enable_event_buffer(struct smi_info *smi_info) |
2904 | { | 2997 | { |
2905 | unsigned char msg[3]; | 2998 | unsigned char msg[3]; |
@@ -3395,6 +3488,8 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3395 | goto out_err; | 3488 | goto out_err; |
3396 | } | 3489 | } |
3397 | 3490 | ||
3491 | check_clr_rcv_irq(new_smi); | ||
3492 | |||
3398 | setup_oem_data_handler(new_smi); | 3493 | setup_oem_data_handler(new_smi); |
3399 | setup_xaction_handlers(new_smi); | 3494 | setup_xaction_handlers(new_smi); |
3400 | 3495 | ||
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index f6e378dac5f5..f40e3bd2c69c 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c | |||
@@ -468,11 +468,13 @@ static int ipmi_ssif_thread(void *data) | |||
468 | int result; | 468 | int result; |
469 | 469 | ||
470 | /* Wait for something to do */ | 470 | /* Wait for something to do */ |
471 | wait_for_completion(&ssif_info->wake_thread); | 471 | result = wait_for_completion_interruptible( |
472 | init_completion(&ssif_info->wake_thread); | 472 | &ssif_info->wake_thread); |
473 | |||
474 | if (ssif_info->stopping) | 473 | if (ssif_info->stopping) |
475 | break; | 474 | break; |
475 | if (result == -ERESTARTSYS) | ||
476 | continue; | ||
477 | init_completion(&ssif_info->wake_thread); | ||
476 | 478 | ||
477 | if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) { | 479 | if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) { |
478 | result = i2c_smbus_write_block_data( | 480 | result = i2c_smbus_write_block_data( |
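Editor's note: switching the SSIF worker to wait_for_completion_interruptible() keeps an idle thread out of the uninterruptible state (so it no longer inflates load average), but the thread must then tolerate being woken by a signal with -ERESTARTSYS; the hunk handles that by checking the stop flag first and looping without touching the completion. A rough skeleton of the pattern, with hypothetical field and variable names:

	/* sketch only; ssif_info_sketch and its fields are hypothetical */
	static int ssif_thread_sketch(void *data)
	{
		struct ssif_info_sketch *info = data;

		while (!kthread_should_stop()) {
			int rc = wait_for_completion_interruptible(&info->wake_thread);

			if (info->stopping)
				break;
			if (rc == -ERESTARTSYS)
				continue;	/* woken by a signal, nothing was queued */
			reinit_completion(&info->wake_thread);
			/* ... carry out the queued SMBus read or write ... */
		}
		return 0;
	}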
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 1d278ccd751f..e096e9cddb40 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c | |||
@@ -140,24 +140,24 @@ static int tpm_dev_add_device(struct tpm_chip *chip) | |||
140 | { | 140 | { |
141 | int rc; | 141 | int rc; |
142 | 142 | ||
143 | rc = device_add(&chip->dev); | 143 | rc = cdev_add(&chip->cdev, chip->dev.devt, 1); |
144 | if (rc) { | 144 | if (rc) { |
145 | dev_err(&chip->dev, | 145 | dev_err(&chip->dev, |
146 | "unable to device_register() %s, major %d, minor %d, err=%d\n", | 146 | "unable to cdev_add() %s, major %d, minor %d, err=%d\n", |
147 | chip->devname, MAJOR(chip->dev.devt), | 147 | chip->devname, MAJOR(chip->dev.devt), |
148 | MINOR(chip->dev.devt), rc); | 148 | MINOR(chip->dev.devt), rc); |
149 | 149 | ||
150 | device_unregister(&chip->dev); | ||
150 | return rc; | 151 | return rc; |
151 | } | 152 | } |
152 | 153 | ||
153 | rc = cdev_add(&chip->cdev, chip->dev.devt, 1); | 154 | rc = device_add(&chip->dev); |
154 | if (rc) { | 155 | if (rc) { |
155 | dev_err(&chip->dev, | 156 | dev_err(&chip->dev, |
156 | "unable to cdev_add() %s, major %d, minor %d, err=%d\n", | 157 | "unable to device_register() %s, major %d, minor %d, err=%d\n", |
157 | chip->devname, MAJOR(chip->dev.devt), | 158 | chip->devname, MAJOR(chip->dev.devt), |
158 | MINOR(chip->dev.devt), rc); | 159 | MINOR(chip->dev.devt), rc); |
159 | 160 | ||
160 | device_unregister(&chip->dev); | ||
161 | return rc; | 161 | return rc; |
162 | } | 162 | } |
163 | 163 | ||
@@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip) | |||
174 | * tpm_chip_register() - create a character device for the TPM chip | 174 | * tpm_chip_register() - create a character device for the TPM chip |
175 | * @chip: TPM chip to use. | 175 | * @chip: TPM chip to use. |
176 | * | 176 | * |
177 | * Creates a character device for the TPM chip and adds sysfs interfaces for | 177 | * Creates a character device for the TPM chip and adds sysfs attributes for |
178 | * the device, PPI and TCPA. As the last step this function adds the | 178 | * the device. As the last step this function adds the chip to the list of TPM |
179 | * chip to the list of TPM chips available for use. | 179 | * chips available for in-kernel use. |
180 | * | 180 | * |
181 | * NOTE: This function should be only called after the chip initialization | 181 | * This function should be only called after the chip initialization is |
182 | * is complete. | 182 | * complete. |
183 | * | ||
184 | * Called from tpm_<specific>.c probe function only for devices | ||
185 | * the driver has determined it should claim. Prior to calling | ||
186 | * this function the specific probe function has called pci_enable_device | ||
187 | * upon errant exit from this function specific probe function should call | ||
188 | * pci_disable_device | ||
189 | */ | 183 | */ |
190 | int tpm_chip_register(struct tpm_chip *chip) | 184 | int tpm_chip_register(struct tpm_chip *chip) |
191 | { | 185 | { |
192 | int rc; | 186 | int rc; |
193 | 187 | ||
194 | rc = tpm_dev_add_device(chip); | ||
195 | if (rc) | ||
196 | return rc; | ||
197 | |||
198 | /* Populate sysfs for TPM1 devices. */ | 188 | /* Populate sysfs for TPM1 devices. */ |
199 | if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { | 189 | if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { |
200 | rc = tpm_sysfs_add_device(chip); | 190 | rc = tpm_sysfs_add_device(chip); |
@@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip) | |||
208 | chip->bios_dir = tpm_bios_log_setup(chip->devname); | 198 | chip->bios_dir = tpm_bios_log_setup(chip->devname); |
209 | } | 199 | } |
210 | 200 | ||
201 | rc = tpm_dev_add_device(chip); | ||
202 | if (rc) | ||
203 | return rc; | ||
204 | |||
211 | /* Make the chip available. */ | 205 | /* Make the chip available. */ |
212 | spin_lock(&driver_lock); | 206 | spin_lock(&driver_lock); |
213 | list_add_rcu(&chip->list, &tpm_chip_list); | 207 | list_add_rcu(&chip->list, &tpm_chip_list); |
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index b1e53e3aece5..42ffa5e7a1e0 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c | |||
@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
124 | { | 124 | { |
125 | struct ibmvtpm_dev *ibmvtpm; | 125 | struct ibmvtpm_dev *ibmvtpm; |
126 | struct ibmvtpm_crq crq; | 126 | struct ibmvtpm_crq crq; |
127 | u64 *word = (u64 *) &crq; | 127 | __be64 *word = (__be64 *)&crq; |
128 | int rc; | 128 | int rc; |
129 | 129 | ||
130 | ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); | 130 | ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); |
@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
145 | memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); | 145 | memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); |
146 | crq.valid = (u8)IBMVTPM_VALID_CMD; | 146 | crq.valid = (u8)IBMVTPM_VALID_CMD; |
147 | crq.msg = (u8)VTPM_TPM_COMMAND; | 147 | crq.msg = (u8)VTPM_TPM_COMMAND; |
148 | crq.len = (u16)count; | 148 | crq.len = cpu_to_be16(count); |
149 | crq.data = ibmvtpm->rtce_dma_handle; | 149 | crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); |
150 | 150 | ||
151 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]), | 151 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), |
152 | cpu_to_be64(word[1])); | 152 | be64_to_cpu(word[1])); |
153 | if (rc != H_SUCCESS) { | 153 | if (rc != H_SUCCESS) { |
154 | dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); | 154 | dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); |
155 | rc = 0; | 155 | rc = 0; |
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h index f595f14426bf..6af92890518f 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.h +++ b/drivers/char/tpm/tpm_ibmvtpm.h | |||
@@ -22,9 +22,9 @@ | |||
22 | struct ibmvtpm_crq { | 22 | struct ibmvtpm_crq { |
23 | u8 valid; | 23 | u8 valid; |
24 | u8 msg; | 24 | u8 msg; |
25 | u16 len; | 25 | __be16 len; |
26 | u32 data; | 26 | __be32 data; |
27 | u64 reserved; | 27 | __be64 reserved; |
28 | } __attribute__((packed, aligned(8))); | 28 | } __attribute__((packed, aligned(8))); |
29 | 29 | ||
30 | struct ibmvtpm_crq_queue { | 30 | struct ibmvtpm_crq_queue { |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index fae2dbbf5745..72d7028f779b 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -142,6 +142,7 @@ struct ports_device { | |||
142 | * notification | 142 | * notification |
143 | */ | 143 | */ |
144 | struct work_struct control_work; | 144 | struct work_struct control_work; |
145 | struct work_struct config_work; | ||
145 | 146 | ||
146 | struct list_head ports; | 147 | struct list_head ports; |
147 | 148 | ||
@@ -1837,10 +1838,21 @@ static void config_intr(struct virtio_device *vdev) | |||
1837 | 1838 | ||
1838 | portdev = vdev->priv; | 1839 | portdev = vdev->priv; |
1839 | 1840 | ||
1841 | if (!use_multiport(portdev)) | ||
1842 | schedule_work(&portdev->config_work); | ||
1843 | } | ||
1844 | |||
1845 | static void config_work_handler(struct work_struct *work) | ||
1846 | { | ||
1847 | struct ports_device *portdev; | ||
1848 | |||
1849 | portdev = container_of(work, struct ports_device, control_work); | ||
1840 | if (!use_multiport(portdev)) { | 1850 | if (!use_multiport(portdev)) { |
1851 | struct virtio_device *vdev; | ||
1841 | struct port *port; | 1852 | struct port *port; |
1842 | u16 rows, cols; | 1853 | u16 rows, cols; |
1843 | 1854 | ||
1855 | vdev = portdev->vdev; | ||
1844 | virtio_cread(vdev, struct virtio_console_config, cols, &cols); | 1856 | virtio_cread(vdev, struct virtio_console_config, cols, &cols); |
1845 | virtio_cread(vdev, struct virtio_console_config, rows, &rows); | 1857 | virtio_cread(vdev, struct virtio_console_config, rows, &rows); |
1846 | 1858 | ||
@@ -2040,12 +2052,14 @@ static int virtcons_probe(struct virtio_device *vdev) | |||
2040 | 2052 | ||
2041 | virtio_device_ready(portdev->vdev); | 2053 | virtio_device_ready(portdev->vdev); |
2042 | 2054 | ||
2055 | INIT_WORK(&portdev->config_work, &config_work_handler); | ||
2056 | INIT_WORK(&portdev->control_work, &control_work_handler); | ||
2057 | |||
2043 | if (multiport) { | 2058 | if (multiport) { |
2044 | unsigned int nr_added_bufs; | 2059 | unsigned int nr_added_bufs; |
2045 | 2060 | ||
2046 | spin_lock_init(&portdev->c_ivq_lock); | 2061 | spin_lock_init(&portdev->c_ivq_lock); |
2047 | spin_lock_init(&portdev->c_ovq_lock); | 2062 | spin_lock_init(&portdev->c_ovq_lock); |
2048 | INIT_WORK(&portdev->control_work, &control_work_handler); | ||
2049 | 2063 | ||
2050 | nr_added_bufs = fill_queue(portdev->c_ivq, | 2064 | nr_added_bufs = fill_queue(portdev->c_ivq, |
2051 | &portdev->c_ivq_lock); | 2065 | &portdev->c_ivq_lock); |
@@ -2113,6 +2127,8 @@ static void virtcons_remove(struct virtio_device *vdev) | |||
2113 | /* Finish up work that's lined up */ | 2127 | /* Finish up work that's lined up */ |
2114 | if (use_multiport(portdev)) | 2128 | if (use_multiport(portdev)) |
2115 | cancel_work_sync(&portdev->control_work); | 2129 | cancel_work_sync(&portdev->control_work); |
2130 | else | ||
2131 | cancel_work_sync(&portdev->config_work); | ||
2116 | 2132 | ||
2117 | list_for_each_entry_safe(port, port2, &portdev->ports, list) | 2133 | list_for_each_entry_safe(port, port2, &portdev->ports, list) |
2118 | unplug_port(port); | 2134 | unplug_port(port); |
@@ -2164,6 +2180,7 @@ static int virtcons_freeze(struct virtio_device *vdev) | |||
2164 | 2180 | ||
2165 | virtqueue_disable_cb(portdev->c_ivq); | 2181 | virtqueue_disable_cb(portdev->c_ivq); |
2166 | cancel_work_sync(&portdev->control_work); | 2182 | cancel_work_sync(&portdev->control_work); |
2183 | cancel_work_sync(&portdev->config_work); | ||
2167 | /* | 2184 | /* |
2168 | * Once more: if control_work_handler() was running, it would | 2185 | * Once more: if control_work_handler() was running, it would |
2169 | * enable the cb as the last step. | 2186 | * enable the cb as the last step. |
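Reading the virtio config space may sleep, so the config-change interrupt now only schedules a dedicated work item and the actual virtio_cread() calls run from the work handler in process context; the work is initialized before the device can raise interrupts and cancelled on remove and freeze. A hedged sketch of that defer-to-workqueue pattern, with placeholder names:

	/* Sketch of deferring slow work from an interrupt handler to a
	 * workqueue.  my_dev and the function names are illustrative only. */
	#include <linux/workqueue.h>
	#include <linux/interrupt.h>

	struct my_dev {
		struct work_struct config_work;
		/* ... device state ... */
	};

	static void my_config_work_handler(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev, config_work);

		/* Process context: safe to sleep, read config space, resize. */
		(void)dev;
	}

	static irqreturn_t my_config_irq(int irq, void *data)
	{
		struct my_dev *dev = data;

		/* Interrupt context: never sleep, just defer the real work. */
		schedule_work(&dev->config_work);
		return IRQ_HANDLED;
	}

	static void my_dev_init(struct my_dev *dev)
	{
		/* Initialize the work before the device can raise interrupts... */
		INIT_WORK(&dev->config_work, my_config_work_handler);
	}

	static void my_dev_teardown(struct my_dev *dev)
	{
		/* ...and make sure it is not running before freeing the device. */
		cancel_work_sync(&dev->config_work);
	}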
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index f07c8152e5cc..3f27d21fb729 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c | |||
@@ -89,12 +89,29 @@ static int pmc_irq_set_type(struct irq_data *d, unsigned type) | |||
89 | return 0; | 89 | return 0; |
90 | } | 90 | } |
91 | 91 | ||
92 | static void pmc_irq_suspend(struct irq_data *d) | ||
93 | { | ||
94 | struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); | ||
95 | |||
96 | pmc->imr = pmc_read(pmc, AT91_PMC_IMR); | ||
97 | pmc_write(pmc, AT91_PMC_IDR, pmc->imr); | ||
98 | } | ||
99 | |||
100 | static void pmc_irq_resume(struct irq_data *d) | ||
101 | { | ||
102 | struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); | ||
103 | |||
104 | pmc_write(pmc, AT91_PMC_IER, pmc->imr); | ||
105 | } | ||
106 | |||
92 | static struct irq_chip pmc_irq = { | 107 | static struct irq_chip pmc_irq = { |
93 | .name = "PMC", | 108 | .name = "PMC", |
94 | .irq_disable = pmc_irq_mask, | 109 | .irq_disable = pmc_irq_mask, |
95 | .irq_mask = pmc_irq_mask, | 110 | .irq_mask = pmc_irq_mask, |
96 | .irq_unmask = pmc_irq_unmask, | 111 | .irq_unmask = pmc_irq_unmask, |
97 | .irq_set_type = pmc_irq_set_type, | 112 | .irq_set_type = pmc_irq_set_type, |
113 | .irq_suspend = pmc_irq_suspend, | ||
114 | .irq_resume = pmc_irq_resume, | ||
98 | }; | 115 | }; |
99 | 116 | ||
100 | static struct lock_class_key pmc_lock_class; | 117 | static struct lock_class_key pmc_lock_class; |
@@ -224,7 +241,8 @@ static struct at91_pmc *__init at91_pmc_init(struct device_node *np, | |||
224 | goto out_free_pmc; | 241 | goto out_free_pmc; |
225 | 242 | ||
226 | pmc_write(pmc, AT91_PMC_IDR, 0xffffffff); | 243 | pmc_write(pmc, AT91_PMC_IDR, 0xffffffff); |
227 | if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc)) | 244 | if (request_irq(pmc->virq, pmc_irq_handler, |
245 | IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc)) | ||
228 | goto out_remove_irqdomain; | 246 | goto out_remove_irqdomain; |
229 | 247 | ||
230 | return pmc; | 248 | return pmc; |
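The PMC shares its interrupt line and is not itself a wakeup source, so the new irq_suspend callback saves IMR and masks everything, irq_resume restores the saved mask, and the line is requested with IRQF_COND_SUSPEND so the shared-interrupt core accepts a handler that is not expected to run while the system is suspended. A short sketch of the request side, with placeholder names:

	/* Sketch: requesting a shared IRQ whose handler does not need to run
	 * for wakeup interrupts during suspend.  my_irq_handler and my_data
	 * are placeholders. */
	#include <linux/interrupt.h>

	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int my_setup_irq(int virq, void *my_data)
	{
		/*
		 * IRQF_SHARED: the line is shared with other devices.
		 * IRQF_COND_SUSPEND: this handler need not see wakeup
		 * interrupts during suspend; the driver masks its own
		 * sources in its irq_suspend callback instead.
		 */
		return request_irq(virq, my_irq_handler,
				   IRQF_SHARED | IRQF_COND_SUSPEND,
				   "my-dev", my_data);
	}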
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 52d2041fa3f6..69abb08cf146 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h | |||
@@ -33,6 +33,7 @@ struct at91_pmc { | |||
33 | spinlock_t lock; | 33 | spinlock_t lock; |
34 | const struct at91_pmc_caps *caps; | 34 | const struct at91_pmc_caps *caps; |
35 | struct irq_domain *irqdomain; | 35 | struct irq_domain *irqdomain; |
36 | u32 imr; | ||
36 | }; | 37 | }; |
37 | 38 | ||
38 | static inline void pmc_lock(struct at91_pmc *pmc) | 39 | static inline void pmc_lock(struct at91_pmc *pmc) |
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index db7f8bce7467..25006a8bb8e6 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c | |||
@@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, | |||
144 | divider->flags); | 144 | divider->flags); |
145 | } | 145 | } |
146 | 146 | ||
147 | /* | ||
148 | * The reverse of DIV_ROUND_UP: The maximum number which | ||
149 | * divided by m is r | ||
150 | */ | ||
151 | #define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1) | ||
152 | |||
153 | static bool _is_valid_table_div(const struct clk_div_table *table, | 147 | static bool _is_valid_table_div(const struct clk_div_table *table, |
154 | unsigned int div) | 148 | unsigned int div) |
155 | { | 149 | { |
@@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table, | |||
225 | unsigned long parent_rate, unsigned long rate, | 219 | unsigned long parent_rate, unsigned long rate, |
226 | unsigned long flags) | 220 | unsigned long flags) |
227 | { | 221 | { |
228 | int up, down, div; | 222 | int up, down; |
223 | unsigned long up_rate, down_rate; | ||
229 | 224 | ||
230 | up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); | 225 | up = DIV_ROUND_UP(parent_rate, rate); |
226 | down = parent_rate / rate; | ||
231 | 227 | ||
232 | if (flags & CLK_DIVIDER_POWER_OF_TWO) { | 228 | if (flags & CLK_DIVIDER_POWER_OF_TWO) { |
233 | up = __roundup_pow_of_two(div); | 229 | up = __roundup_pow_of_two(up); |
234 | down = __rounddown_pow_of_two(div); | 230 | down = __rounddown_pow_of_two(down); |
235 | } else if (table) { | 231 | } else if (table) { |
236 | up = _round_up_table(table, div); | 232 | up = _round_up_table(table, up); |
237 | down = _round_down_table(table, div); | 233 | down = _round_down_table(table, down); |
238 | } | 234 | } |
239 | 235 | ||
240 | return (up - div) <= (div - down) ? up : down; | 236 | up_rate = DIV_ROUND_UP(parent_rate, up); |
237 | down_rate = DIV_ROUND_UP(parent_rate, down); | ||
238 | |||
239 | return (rate - up_rate) <= (down_rate - rate) ? up : down; | ||
241 | } | 240 | } |
242 | 241 | ||
243 | static int _div_round(const struct clk_div_table *table, | 242 | static int _div_round(const struct clk_div_table *table, |
@@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, | |||
313 | return i; | 312 | return i; |
314 | } | 313 | } |
315 | parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), | 314 | parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), |
316 | MULT_ROUND_UP(rate, i)); | 315 | rate * i); |
317 | now = DIV_ROUND_UP(parent_rate, i); | 316 | now = DIV_ROUND_UP(parent_rate, i); |
318 | if (_is_best_div(rate, now, best, flags)) { | 317 | if (_is_best_div(rate, now, best, flags)) { |
319 | bestdiv = i; | 318 | bestdiv = i; |
@@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, | |||
353 | bestdiv = readl(divider->reg) >> divider->shift; | 352 | bestdiv = readl(divider->reg) >> divider->shift; |
354 | bestdiv &= div_mask(divider->width); | 353 | bestdiv &= div_mask(divider->width); |
355 | bestdiv = _get_div(divider->table, bestdiv, divider->flags); | 354 | bestdiv = _get_div(divider->table, bestdiv, divider->flags); |
356 | return bestdiv; | 355 | return DIV_ROUND_UP(*prate, bestdiv); |
357 | } | 356 | } |
358 | 357 | ||
359 | return divider_round_rate(hw, rate, prate, divider->table, | 358 | return divider_round_rate(hw, rate, prate, divider->table, |
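The old MULT_ROUND_UP based selection compared divider values, but rate error is not symmetric in the divider: moving one step up or down changes the output rate by very different amounts, especially once the candidates are snapped to a power of two or a divider table. The rework therefore computes the rate each candidate divider would actually produce and picks the one whose rate is closest to the request. A standalone sketch of the new rule; the parent rate, request, and the divider table {2, 7} are made up:

	/* Sketch: compare the rates the candidate dividers actually produce,
	 * not the divider values themselves. */
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long parent = 1000000, rate = 300000;

		/* candidates just above and below the exact ratio, already
		 * snapped to the allowed table values 7 and 2 */
		unsigned long up = 7, down = 2;

		unsigned long up_rate = DIV_ROUND_UP(parent, up);     /* 142858 */
		unsigned long down_rate = DIV_ROUND_UP(parent, down); /* 500000 */

		/* pick the divider whose achievable rate is closest */
		unsigned long best = (rate - up_rate) <= (down_rate - rate) ? up : down;

		printf("best divider = %lu (rate %lu)\n",
		       best, DIV_ROUND_UP(parent, best));
		return 0;
	}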
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index eb0152961d3c..237f23f68bfc 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk) | |||
1350 | 1350 | ||
1351 | return rate; | 1351 | return rate; |
1352 | } | 1352 | } |
1353 | EXPORT_SYMBOL_GPL(clk_core_get_rate); | ||
1354 | 1353 | ||
1355 | /** | 1354 | /** |
1356 | * clk_get_rate - return the rate of clk | 1355 | * clk_get_rate - return the rate of clk |
@@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk) | |||
2171 | } | 2170 | } |
2172 | 2171 | ||
2173 | /** | 2172 | /** |
2173 | * clk_is_match - check if two clk's point to the same hardware clock | ||
2174 | * @p: clk compared against q | ||
2175 | * @q: clk compared against p | ||
2176 | * | ||
2177 | * Returns true if the two struct clk pointers both point to the same hardware | ||
2178 | * clock node. Put differently, returns true if struct clk *p and struct clk *q | ||
2179 | * share the same struct clk_core object. | ||
2180 | * | ||
2181 | * Returns false otherwise. Note that two NULL clks are treated as matching. | ||
2182 | */ | ||
2183 | bool clk_is_match(const struct clk *p, const struct clk *q) | ||
2184 | { | ||
2185 | /* trivial case: identical struct clk's or both NULL */ | ||
2186 | if (p == q) | ||
2187 | return true; | ||
2188 | |||
2189 | /* true if clk->core pointers match. Avoid derefing garbage */ | ||
2190 | if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) | ||
2191 | if (p->core == q->core) | ||
2192 | return true; | ||
2193 | |||
2194 | return false; | ||
2195 | } | ||
2196 | EXPORT_SYMBOL_GPL(clk_is_match); | ||
2197 | |||
2198 | /** | ||
2174 | * __clk_init - initialize the data structures in a struct clk | 2199 | * __clk_init - initialize the data structures in a struct clk |
2175 | * @dev: device initializing this clk, placeholder for now | 2200 | * @dev: device initializing this clk, placeholder for now |
2176 | * @clk: clk being initialized | 2201 | * @clk: clk being initialized |
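With per-user struct clk handles, two consumers of the same hardware clock get distinct pointers, so comparing them with == no longer tells you whether they refer to the same clock; clk_is_match() compares the underlying clk_core instead. A hedged usage sketch with placeholder device and clock names:

	/* Sketch: pointer equality no longer identifies a shared hardware
	 * clock; clk_is_match() does.  "bus"/"core" are placeholder names. */
	#include <linux/clk.h>
	#include <linux/device.h>

	static int my_check_same_parent(struct device *dev)
	{
		struct clk *a = devm_clk_get(dev, "bus");
		struct clk *b = devm_clk_get(dev, "core");

		if (IS_ERR(a) || IS_ERR(b))
			return -ENODEV;

		/* true when both handles resolve to the same struct clk_core */
		if (clk_is_match(a, b))
			dev_info(dev, "bus and core share one hardware clock\n");

		return 0;
	}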
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index b0b562b9ce0e..e60feffc10a1 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c | |||
@@ -48,6 +48,17 @@ static struct clk_pll pll3 = { | |||
48 | }, | 48 | }, |
49 | }; | 49 | }; |
50 | 50 | ||
51 | static struct clk_regmap pll4_vote = { | ||
52 | .enable_reg = 0x34c0, | ||
53 | .enable_mask = BIT(4), | ||
54 | .hw.init = &(struct clk_init_data){ | ||
55 | .name = "pll4_vote", | ||
56 | .parent_names = (const char *[]){ "pll4" }, | ||
57 | .num_parents = 1, | ||
58 | .ops = &clk_pll_vote_ops, | ||
59 | }, | ||
60 | }; | ||
61 | |||
51 | static struct clk_pll pll8 = { | 62 | static struct clk_pll pll8 = { |
52 | .l_reg = 0x3144, | 63 | .l_reg = 0x3144, |
53 | .m_reg = 0x3148, | 64 | .m_reg = 0x3148, |
@@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = { | |||
3023 | 3034 | ||
3024 | static struct clk_regmap *gcc_msm8960_clks[] = { | 3035 | static struct clk_regmap *gcc_msm8960_clks[] = { |
3025 | [PLL3] = &pll3.clkr, | 3036 | [PLL3] = &pll3.clkr, |
3037 | [PLL4_VOTE] = &pll4_vote, | ||
3026 | [PLL8] = &pll8.clkr, | 3038 | [PLL8] = &pll8.clkr, |
3027 | [PLL8_VOTE] = &pll8_vote, | 3039 | [PLL8_VOTE] = &pll8_vote, |
3028 | [PLL14] = &pll14.clkr, | 3040 | [PLL14] = &pll14.clkr, |
@@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = { | |||
3247 | 3259 | ||
3248 | static struct clk_regmap *gcc_apq8064_clks[] = { | 3260 | static struct clk_regmap *gcc_apq8064_clks[] = { |
3249 | [PLL3] = &pll3.clkr, | 3261 | [PLL3] = &pll3.clkr, |
3262 | [PLL4_VOTE] = &pll4_vote, | ||
3250 | [PLL8] = &pll8.clkr, | 3263 | [PLL8] = &pll8.clkr, |
3251 | [PLL8_VOTE] = &pll8_vote, | 3264 | [PLL8_VOTE] = &pll8_vote, |
3252 | [PLL14] = &pll14.clkr, | 3265 | [PLL14] = &pll14.clkr, |
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c index 121ffde25dc3..c9ff27b4648b 100644 --- a/drivers/clk/qcom/lcc-ipq806x.c +++ b/drivers/clk/qcom/lcc-ipq806x.c | |||
@@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = { | |||
462 | .remove = lcc_ipq806x_remove, | 462 | .remove = lcc_ipq806x_remove, |
463 | .driver = { | 463 | .driver = { |
464 | .name = "lcc-ipq806x", | 464 | .name = "lcc-ipq806x", |
465 | .owner = THIS_MODULE, | ||
466 | .of_match_table = lcc_ipq806x_match_table, | 465 | .of_match_table = lcc_ipq806x_match_table, |
467 | }, | 466 | }, |
468 | }; | 467 | }; |
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c index a75a408cfccd..e2c863295f00 100644 --- a/drivers/clk/qcom/lcc-msm8960.c +++ b/drivers/clk/qcom/lcc-msm8960.c | |||
@@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = { | |||
417 | .mnctr_en_bit = 8, | 417 | .mnctr_en_bit = 8, |
418 | .mnctr_reset_bit = 7, | 418 | .mnctr_reset_bit = 7, |
419 | .mnctr_mode_shift = 5, | 419 | .mnctr_mode_shift = 5, |
420 | .n_val_shift = 16, | 420 | .n_val_shift = 24, |
421 | .m_val_shift = 16, | 421 | .m_val_shift = 8, |
422 | .width = 8, | 422 | .width = 8, |
423 | }, | 423 | }, |
424 | .p = { | 424 | .p = { |
@@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev) | |||
547 | return PTR_ERR(regmap); | 547 | return PTR_ERR(regmap); |
548 | 548 | ||
549 | /* Use the correct frequency plan depending on speed of PLL4 */ | 549 | /* Use the correct frequency plan depending on speed of PLL4 */ |
550 | val = regmap_read(regmap, 0x4, &val); | 550 | regmap_read(regmap, 0x4, &val); |
551 | if (val == 0x12) { | 551 | if (val == 0x12) { |
552 | slimbus_src.freq_tbl = clk_tbl_aif_osr_492; | 552 | slimbus_src.freq_tbl = clk_tbl_aif_osr_492; |
553 | mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; | 553 | mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; |
@@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = { | |||
574 | .remove = lcc_msm8960_remove, | 574 | .remove = lcc_msm8960_remove, |
575 | .driver = { | 575 | .driver = { |
576 | .name = "lcc-msm8960", | 576 | .name = "lcc-msm8960", |
577 | .owner = THIS_MODULE, | ||
578 | .of_match_table = lcc_msm8960_match_table, | 577 | .of_match_table = lcc_msm8960_match_table, |
579 | }, | 578 | }, |
580 | }; | 579 | }; |
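Besides the corrected m/n field shifts, the probe fix above matters because regmap_read() returns an error code, not the register value: assigning its return value over val discarded the frequency-plan register that had just been read into val, so the PLL4-rate check could never match. A small sketch of the intended calling convention, with placeholder names:

	/* Sketch of the regmap_read() calling convention: the value comes
	 * back through the pointer argument, the return value is only an
	 * error code.  The register offset is a placeholder. */
	#include <linux/regmap.h>

	static int my_read_pll_mode(struct regmap *regmap, unsigned int *mode)
	{
		int ret;

		ret = regmap_read(regmap, 0x4, mode);
		if (ret)
			return ret;	/* I/O error, *mode is not valid */

		/* do NOT write the return value over *mode, e.g.
		 *   *mode = regmap_read(regmap, 0x4, mode);
		 * would overwrite the value just read. */
		return 0;
	}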
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 6ef89639a9f6..d21640634adf 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c | |||
@@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw) | |||
84 | struct fapll_data *fd = to_fapll(hw); | 84 | struct fapll_data *fd = to_fapll(hw); |
85 | u32 v = readl_relaxed(fd->base); | 85 | u32 v = readl_relaxed(fd->base); |
86 | 86 | ||
87 | v |= (1 << FAPLL_MAIN_PLLEN); | 87 | v |= FAPLL_MAIN_PLLEN; |
88 | writel_relaxed(v, fd->base); | 88 | writel_relaxed(v, fd->base); |
89 | 89 | ||
90 | return 0; | 90 | return 0; |
@@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw) | |||
95 | struct fapll_data *fd = to_fapll(hw); | 95 | struct fapll_data *fd = to_fapll(hw); |
96 | u32 v = readl_relaxed(fd->base); | 96 | u32 v = readl_relaxed(fd->base); |
97 | 97 | ||
98 | v &= ~(1 << FAPLL_MAIN_PLLEN); | 98 | v &= ~FAPLL_MAIN_PLLEN; |
99 | writel_relaxed(v, fd->base); | 99 | writel_relaxed(v, fd->base); |
100 | } | 100 | } |
101 | 101 | ||
@@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw) | |||
104 | struct fapll_data *fd = to_fapll(hw); | 104 | struct fapll_data *fd = to_fapll(hw); |
105 | u32 v = readl_relaxed(fd->base); | 105 | u32 v = readl_relaxed(fd->base); |
106 | 106 | ||
107 | return v & (1 << FAPLL_MAIN_PLLEN); | 107 | return v & FAPLL_MAIN_PLLEN; |
108 | } | 108 | } |
109 | 109 | ||
110 | static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, | 110 | static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, |
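FAPLL_MAIN_PLLEN is already a bit mask, so shifting 1 by it enabled and tested the wrong bit; the fix uses the mask directly. A tiny standalone sketch of that bug class, with an invented register layout:

	/* Sketch: if a #define is already a mask (BIT(n)), shifting it again
	 * touches the wrong bit.  MY_PLLEN and the values are invented. */
	#include <stdio.h>

	#define BIT(n)		(1U << (n))
	#define MY_PLLEN	BIT(3)		/* a mask, not a bit number */

	int main(void)
	{
		unsigned int v = 0;

		v |= MY_PLLEN;			/* correct: sets bit 3          */
		printf("correct: 0x%x\n", v);	/* 0x8                          */

		v = 0;
		v |= 1U << MY_PLLEN;		/* wrong: sets bit 8, not bit 3 */
		printf("wrong:   0x%x\n", v);	/* 0x100                        */
		return 0;
	}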
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 1c2506f68122..a0b036ccb118 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -63,6 +63,11 @@ config VT8500_TIMER | |||
63 | config CADENCE_TTC_TIMER | 63 | config CADENCE_TTC_TIMER |
64 | bool | 64 | bool |
65 | 65 | ||
66 | config ASM9260_TIMER | ||
67 | bool | ||
68 | select CLKSRC_MMIO | ||
69 | select CLKSRC_OF | ||
70 | |||
66 | config CLKSRC_NOMADIK_MTU | 71 | config CLKSRC_NOMADIK_MTU |
67 | bool | 72 | bool |
68 | depends on (ARCH_NOMADIK || ARCH_U8500) | 73 | depends on (ARCH_NOMADIK || ARCH_U8500) |
@@ -187,6 +192,7 @@ config SYS_SUPPORTS_EM_STI | |||
187 | config SH_TIMER_CMT | 192 | config SH_TIMER_CMT |
188 | bool "Renesas CMT timer driver" if COMPILE_TEST | 193 | bool "Renesas CMT timer driver" if COMPILE_TEST |
189 | depends on GENERIC_CLOCKEVENTS | 194 | depends on GENERIC_CLOCKEVENTS |
195 | depends on HAS_IOMEM | ||
190 | default SYS_SUPPORTS_SH_CMT | 196 | default SYS_SUPPORTS_SH_CMT |
191 | help | 197 | help |
192 | This enables build of a clocksource and clockevent driver for | 198 | This enables build of a clocksource and clockevent driver for |
@@ -196,6 +202,7 @@ config SH_TIMER_CMT | |||
196 | config SH_TIMER_MTU2 | 202 | config SH_TIMER_MTU2 |
197 | bool "Renesas MTU2 timer driver" if COMPILE_TEST | 203 | bool "Renesas MTU2 timer driver" if COMPILE_TEST |
198 | depends on GENERIC_CLOCKEVENTS | 204 | depends on GENERIC_CLOCKEVENTS |
205 | depends on HAS_IOMEM | ||
199 | default SYS_SUPPORTS_SH_MTU2 | 206 | default SYS_SUPPORTS_SH_MTU2 |
200 | help | 207 | help |
201 | This enables build of a clockevent driver for the Multi-Function | 208 | This enables build of a clockevent driver for the Multi-Function |
@@ -205,6 +212,7 @@ config SH_TIMER_MTU2 | |||
205 | config SH_TIMER_TMU | 212 | config SH_TIMER_TMU |
206 | bool "Renesas TMU timer driver" if COMPILE_TEST | 213 | bool "Renesas TMU timer driver" if COMPILE_TEST |
207 | depends on GENERIC_CLOCKEVENTS | 214 | depends on GENERIC_CLOCKEVENTS |
215 | depends on HAS_IOMEM | ||
208 | default SYS_SUPPORTS_SH_TMU | 216 | default SYS_SUPPORTS_SH_TMU |
209 | help | 217 | help |
210 | This enables build of a clocksource and clockevent driver for | 218 | This enables build of a clocksource and clockevent driver for |
@@ -245,15 +253,4 @@ config CLKSRC_PXA | |||
245 | help | 253 | help |
246 | This enables OST0 support available on PXA and SA-11x0 | 254 | This enables OST0 support available on PXA and SA-11x0 |
247 | platforms. | 255 | platforms. |
248 | |||
249 | config ASM9260_TIMER | ||
250 | bool "Alphascale ASM9260 timer driver" | ||
251 | depends on GENERIC_CLOCKEVENTS | ||
252 | select CLKSRC_MMIO | ||
253 | select CLKSRC_OF | ||
254 | default y if MACH_ASM9260 | ||
255 | help | ||
256 | This enables build of a clocksource and clockevent driver for | ||
257 | the 32-bit System Timer hardware available on a Alphascale ASM9260. | ||
258 | |||
259 | endmenu | 256 | endmenu |
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c index 32a3d25795d3..68ab42356d0e 100644 --- a/drivers/clocksource/mtk_timer.c +++ b/drivers/clocksource/mtk_timer.c | |||
@@ -224,6 +224,8 @@ static void __init mtk_timer_init(struct device_node *node) | |||
224 | } | 224 | } |
225 | rate = clk_get_rate(clk); | 225 | rate = clk_get_rate(clk); |
226 | 226 | ||
227 | mtk_timer_global_reset(evt); | ||
228 | |||
227 | if (request_irq(evt->dev.irq, mtk_timer_interrupt, | 229 | if (request_irq(evt->dev.irq, mtk_timer_interrupt, |
228 | IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { | 230 | IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { |
229 | pr_warn("failed to setup irq %d\n", evt->dev.irq); | 231 | pr_warn("failed to setup irq %d\n", evt->dev.irq); |
@@ -232,8 +234,6 @@ static void __init mtk_timer_init(struct device_node *node) | |||
232 | 234 | ||
233 | evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); | 235 | evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); |
234 | 236 | ||
235 | mtk_timer_global_reset(evt); | ||
236 | |||
237 | /* Configure clock source */ | 237 | /* Configure clock source */ |
238 | mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); | 238 | mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); |
239 | clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), | 239 | clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), |
@@ -241,10 +241,11 @@ static void __init mtk_timer_init(struct device_node *node) | |||
241 | 241 | ||
242 | /* Configure clock event */ | 242 | /* Configure clock event */ |
243 | mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT); | 243 | mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT); |
244 | mtk_timer_enable_irq(evt, GPT_CLK_EVT); | ||
245 | |||
246 | clockevents_config_and_register(&evt->dev, rate, 0x3, | 244 | clockevents_config_and_register(&evt->dev, rate, 0x3, |
247 | 0xffffffff); | 245 | 0xffffffff); |
246 | |||
247 | mtk_timer_enable_irq(evt, GPT_CLK_EVT); | ||
248 | |||
248 | return; | 249 | return; |
249 | 250 | ||
250 | err_clk_disable: | 251 | err_clk_disable: |
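The reordering here, and in the efm32 and sun5i hunks below, follows one rule: quiesce the hardware before its handler can be invoked, register the clockevent before anything can program it from the interrupt path, and unmask the timer interrupt only as the last step. A trivially runnable skeleton of that ordering; every name below is invented:

	#include <stdio.h>

	static void my_timer_hw_reset(void)      { puts("1. reset/quiesce hardware"); }
	static int  my_request_timer_irq(void)   { puts("2. request IRQ"); return 0; }
	static void my_register_clockevent(void) { puts("3. register clockevent"); }
	static void my_timer_unmask_irq(void)    { puts("4. unmask timer interrupt"); }

	static void my_timer_init(void)
	{
		my_timer_hw_reset();
		if (my_request_timer_irq())
			return;
		my_register_clockevent();
		my_timer_unmask_irq();	/* handler may fire from here on */
	}

	int main(void) { my_timer_init(); return 0; }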
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c index 941f3f344e08..d9438af2bbd6 100644 --- a/drivers/clocksource/pxa_timer.c +++ b/drivers/clocksource/pxa_timer.c | |||
@@ -163,7 +163,7 @@ static struct irqaction pxa_ost0_irq = { | |||
163 | .dev_id = &ckevt_pxa_osmr0, | 163 | .dev_id = &ckevt_pxa_osmr0, |
164 | }; | 164 | }; |
165 | 165 | ||
166 | static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate) | 166 | static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate) |
167 | { | 167 | { |
168 | timer_writel(0, OIER); | 168 | timer_writel(0, OIER); |
169 | timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); | 169 | timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); |
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c index bba62f9deefb..ec57ba2bbd87 100644 --- a/drivers/clocksource/time-efm32.c +++ b/drivers/clocksource/time-efm32.c | |||
@@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np) | |||
225 | clock_event_ddata.base = base; | 225 | clock_event_ddata.base = base; |
226 | clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); | 226 | clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); |
227 | 227 | ||
228 | setup_irq(irq, &efm32_clock_event_irq); | ||
229 | |||
230 | clockevents_config_and_register(&clock_event_ddata.evtdev, | 228 | clockevents_config_and_register(&clock_event_ddata.evtdev, |
231 | DIV_ROUND_CLOSEST(rate, 1024), | 229 | DIV_ROUND_CLOSEST(rate, 1024), |
232 | 0xf, 0xffff); | 230 | 0xf, 0xffff); |
233 | 231 | ||
232 | setup_irq(irq, &efm32_clock_event_irq); | ||
233 | |||
234 | return 0; | 234 | return 0; |
235 | 235 | ||
236 | err_get_irq: | 236 | err_get_irq: |
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 02268448dc85..58597fbcc046 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
18 | #include <linux/irqreturn.h> | 18 | #include <linux/irqreturn.h> |
19 | #include <linux/reset.h> | 19 | #include <linux/reset.h> |
20 | #include <linux/sched_clock.h> | ||
21 | #include <linux/of.h> | 20 | #include <linux/of.h> |
22 | #include <linux/of_address.h> | 21 | #include <linux/of_address.h> |
23 | #include <linux/of_irq.h> | 22 | #include <linux/of_irq.h> |
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = { | |||
137 | .dev_id = &sun5i_clockevent, | 136 | .dev_id = &sun5i_clockevent, |
138 | }; | 137 | }; |
139 | 138 | ||
140 | static u64 sun5i_timer_sched_read(void) | ||
141 | { | ||
142 | return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1)); | ||
143 | } | ||
144 | |||
145 | static void __init sun5i_timer_init(struct device_node *node) | 139 | static void __init sun5i_timer_init(struct device_node *node) |
146 | { | 140 | { |
147 | struct reset_control *rstc; | 141 | struct reset_control *rstc; |
@@ -172,16 +166,11 @@ static void __init sun5i_timer_init(struct device_node *node) | |||
172 | writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, | 166 | writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, |
173 | timer_base + TIMER_CTL_REG(1)); | 167 | timer_base + TIMER_CTL_REG(1)); |
174 | 168 | ||
175 | sched_clock_register(sun5i_timer_sched_read, 32, rate); | ||
176 | clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, | 169 | clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, |
177 | rate, 340, 32, clocksource_mmio_readl_down); | 170 | rate, 340, 32, clocksource_mmio_readl_down); |
178 | 171 | ||
179 | ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); | 172 | ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); |
180 | 173 | ||
181 | ret = setup_irq(irq, &sun5i_timer_irq); | ||
182 | if (ret) | ||
183 | pr_warn("failed to setup irq %d\n", irq); | ||
184 | |||
185 | /* Enable timer0 interrupt */ | 174 | /* Enable timer0 interrupt */ |
186 | val = readl(timer_base + TIMER_IRQ_EN_REG); | 175 | val = readl(timer_base + TIMER_IRQ_EN_REG); |
187 | writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); | 176 | writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); |
@@ -191,6 +180,10 @@ static void __init sun5i_timer_init(struct device_node *node) | |||
191 | 180 | ||
192 | clockevents_config_and_register(&sun5i_clockevent, rate, | 181 | clockevents_config_and_register(&sun5i_clockevent, rate, |
193 | TIMER_SYNC_TICKS, 0xffffffff); | 182 | TIMER_SYNC_TICKS, 0xffffffff); |
183 | |||
184 | ret = setup_irq(irq, &sun5i_timer_irq); | ||
185 | if (ret) | ||
186 | pr_warn("failed to setup irq %d\n", irq); | ||
194 | } | 187 | } |
195 | CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", | 188 | CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", |
196 | sun5i_timer_init); | 189 | sun5i_timer_init); |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 28e59a48b35f..8ae655c364f4 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1698,15 +1698,18 @@ void cpufreq_resume(void) | |||
1698 | || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) | 1698 | || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) |
1699 | pr_err("%s: Failed to start governor for policy: %p\n", | 1699 | pr_err("%s: Failed to start governor for policy: %p\n", |
1700 | __func__, policy); | 1700 | __func__, policy); |
1701 | |||
1702 | /* | ||
1703 | * schedule call cpufreq_update_policy() for boot CPU, i.e. last | ||
1704 | * policy in list. It will verify that the current freq is in | ||
1705 | * sync with what we believe it to be. | ||
1706 | */ | ||
1707 | if (list_is_last(&policy->policy_list, &cpufreq_policy_list)) | ||
1708 | schedule_work(&policy->update); | ||
1709 | } | 1701 | } |
1702 | |||
1703 | /* | ||
1704 | * schedule call cpufreq_update_policy() for first-online CPU, as that | ||
1705 | * wouldn't be hotplugged-out on suspend. It will verify that the | ||
1706 | * current freq is in sync with what we believe it to be. | ||
1707 | */ | ||
1708 | policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); | ||
1709 | if (WARN_ON(!policy)) | ||
1710 | return; | ||
1711 | |||
1712 | schedule_work(&policy->update); | ||
1710 | } | 1713 | } |
1711 | 1714 | ||
1712 | /** | 1715 | /** |
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 5e98c6b1f284..82d2fbb20f7e 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c | |||
@@ -159,7 +159,7 @@ static struct cpufreq_driver exynos_driver = { | |||
159 | 159 | ||
160 | static int exynos_cpufreq_probe(struct platform_device *pdev) | 160 | static int exynos_cpufreq_probe(struct platform_device *pdev) |
161 | { | 161 | { |
162 | struct device_node *cpus, *np; | 162 | struct device_node *cpu0; |
163 | int ret = -EINVAL; | 163 | int ret = -EINVAL; |
164 | 164 | ||
165 | exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL); | 165 | exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL); |
@@ -206,28 +206,19 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) | |||
206 | if (ret) | 206 | if (ret) |
207 | goto err_cpufreq_reg; | 207 | goto err_cpufreq_reg; |
208 | 208 | ||
209 | cpus = of_find_node_by_path("/cpus"); | 209 | cpu0 = of_get_cpu_node(0, NULL); |
210 | if (!cpus) { | 210 | if (!cpu0) { |
211 | pr_err("failed to find cpus node\n"); | 211 | pr_err("failed to find cpu0 node\n"); |
212 | return 0; | 212 | return 0; |
213 | } | 213 | } |
214 | 214 | ||
215 | np = of_get_next_child(cpus, NULL); | 215 | if (of_find_property(cpu0, "#cooling-cells", NULL)) { |
216 | if (!np) { | 216 | cdev = of_cpufreq_cooling_register(cpu0, |
217 | pr_err("failed to find cpus child node\n"); | ||
218 | of_node_put(cpus); | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | if (of_find_property(np, "#cooling-cells", NULL)) { | ||
223 | cdev = of_cpufreq_cooling_register(np, | ||
224 | cpu_present_mask); | 217 | cpu_present_mask); |
225 | if (IS_ERR(cdev)) | 218 | if (IS_ERR(cdev)) |
226 | pr_err("running cpufreq without cooling device: %ld\n", | 219 | pr_err("running cpufreq without cooling device: %ld\n", |
227 | PTR_ERR(cdev)); | 220 | PTR_ERR(cdev)); |
228 | } | 221 | } |
229 | of_node_put(np); | ||
230 | of_node_put(cpus); | ||
231 | 222 | ||
232 | return 0; | 223 | return 0; |
233 | 224 | ||
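Instead of opening /cpus and assuming its first child is CPU0, the probe now asks the OF core for the CPU0 node directly. A hedged sketch of that lookup, with an invented helper name:

	/* Sketch: look up the device-tree node for CPU0 directly rather than
	 * walking /cpus.  The helper name and error handling are illustrative. */
	#include <linux/of.h>

	static bool my_cpu0_has_cooling_cells(void)
	{
		struct device_node *cpu0 = of_get_cpu_node(0, NULL);
		bool ret;

		if (!cpu0)
			return false;

		ret = of_find_property(cpu0, "#cooling-cells", NULL) != NULL;
		of_node_put(cpu0);
		return ret;
	}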
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c index bee5df7794d3..7cb4b766cf94 100644 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <sysdev/fsl_soc.h> | 23 | #include <sysdev/fsl_soc.h> |
24 | 24 | ||
25 | #include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */ | ||
26 | |||
25 | /** | 27 | /** |
26 | * struct cpu_data - per CPU data struct | 28 | * struct cpu_data - per CPU data struct |
27 | * @parent: the parent node of cpu clock | 29 | * @parent: the parent node of cpu clock |
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c index 38e68618513a..980151f34707 100644 --- a/drivers/cpuidle/cpuidle-mvebu-v7.c +++ b/drivers/cpuidle/cpuidle-mvebu-v7.c | |||
@@ -37,11 +37,11 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev, | |||
37 | deepidle = true; | 37 | deepidle = true; |
38 | 38 | ||
39 | ret = mvebu_v7_cpu_suspend(deepidle); | 39 | ret = mvebu_v7_cpu_suspend(deepidle); |
40 | cpu_pm_exit(); | ||
41 | |||
40 | if (ret) | 42 | if (ret) |
41 | return ret; | 43 | return ret; |
42 | 44 | ||
43 | cpu_pm_exit(); | ||
44 | |||
45 | return index; | 45 | return index; |
46 | } | 46 | } |
47 | 47 | ||
@@ -50,17 +50,17 @@ static struct cpuidle_driver armadaxp_idle_driver = { | |||
50 | .states[0] = ARM_CPUIDLE_WFI_STATE, | 50 | .states[0] = ARM_CPUIDLE_WFI_STATE, |
51 | .states[1] = { | 51 | .states[1] = { |
52 | .enter = mvebu_v7_enter_idle, | 52 | .enter = mvebu_v7_enter_idle, |
53 | .exit_latency = 10, | 53 | .exit_latency = 100, |
54 | .power_usage = 50, | 54 | .power_usage = 50, |
55 | .target_residency = 100, | 55 | .target_residency = 1000, |
56 | .name = "MV CPU IDLE", | 56 | .name = "MV CPU IDLE", |
57 | .desc = "CPU power down", | 57 | .desc = "CPU power down", |
58 | }, | 58 | }, |
59 | .states[2] = { | 59 | .states[2] = { |
60 | .enter = mvebu_v7_enter_idle, | 60 | .enter = mvebu_v7_enter_idle, |
61 | .exit_latency = 100, | 61 | .exit_latency = 1000, |
62 | .power_usage = 5, | 62 | .power_usage = 5, |
63 | .target_residency = 1000, | 63 | .target_residency = 10000, |
64 | .flags = MVEBU_V7_FLAG_DEEP_IDLE, | 64 | .flags = MVEBU_V7_FLAG_DEEP_IDLE, |
65 | .name = "MV CPU DEEP IDLE", | 65 | .name = "MV CPU DEEP IDLE", |
66 | .desc = "CPU and L2 Fabric power down", | 66 | .desc = "CPU and L2 Fabric power down", |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 4d534582514e..7a73a279e179 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -44,6 +44,12 @@ void disable_cpuidle(void) | |||
44 | off = 1; | 44 | off = 1; |
45 | } | 45 | } |
46 | 46 | ||
47 | bool cpuidle_not_available(struct cpuidle_driver *drv, | ||
48 | struct cpuidle_device *dev) | ||
49 | { | ||
50 | return off || !initialized || !drv || !dev || !dev->enabled; | ||
51 | } | ||
52 | |||
47 | /** | 53 | /** |
48 | * cpuidle_play_dead - cpu off-lining | 54 | * cpuidle_play_dead - cpu off-lining |
49 | * | 55 | * |
@@ -66,14 +72,8 @@ int cpuidle_play_dead(void) | |||
66 | return -ENODEV; | 72 | return -ENODEV; |
67 | } | 73 | } |
68 | 74 | ||
69 | /** | 75 | static int find_deepest_state(struct cpuidle_driver *drv, |
70 | * cpuidle_find_deepest_state - Find deepest state meeting specific conditions. | 76 | struct cpuidle_device *dev, bool freeze) |
71 | * @drv: cpuidle driver for the given CPU. | ||
72 | * @dev: cpuidle device for the given CPU. | ||
73 | * @freeze: Whether or not the state should be suitable for suspend-to-idle. | ||
74 | */ | ||
75 | static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | ||
76 | struct cpuidle_device *dev, bool freeze) | ||
77 | { | 77 | { |
78 | unsigned int latency_req = 0; | 78 | unsigned int latency_req = 0; |
79 | int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1; | 79 | int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1; |
@@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | |||
92 | return ret; | 92 | return ret; |
93 | } | 93 | } |
94 | 94 | ||
95 | /** | ||
96 | * cpuidle_find_deepest_state - Find the deepest available idle state. | ||
97 | * @drv: cpuidle driver for the given CPU. | ||
98 | * @dev: cpuidle device for the given CPU. | ||
99 | */ | ||
100 | int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | ||
101 | struct cpuidle_device *dev) | ||
102 | { | ||
103 | return find_deepest_state(drv, dev, false); | ||
104 | } | ||
105 | |||
95 | static void enter_freeze_proper(struct cpuidle_driver *drv, | 106 | static void enter_freeze_proper(struct cpuidle_driver *drv, |
96 | struct cpuidle_device *dev, int index) | 107 | struct cpuidle_device *dev, int index) |
97 | { | 108 | { |
@@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, | |||
113 | 124 | ||
114 | /** | 125 | /** |
115 | * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. | 126 | * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. |
127 | * @drv: cpuidle driver for the given CPU. | ||
128 | * @dev: cpuidle device for the given CPU. | ||
116 | * | 129 | * |
117 | * If there are states with the ->enter_freeze callback, find the deepest of | 130 | * If there are states with the ->enter_freeze callback, find the deepest of |
118 | * them and enter it with frozen tick. Otherwise, find the deepest state | 131 | * them and enter it with frozen tick. |
119 | * available and enter it normally. | ||
120 | */ | 132 | */ |
121 | void cpuidle_enter_freeze(void) | 133 | int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
122 | { | 134 | { |
123 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | ||
124 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | ||
125 | int index; | 135 | int index; |
126 | 136 | ||
127 | /* | 137 | /* |
@@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void) | |||
129 | * that interrupts won't be enabled when it exits and allows the tick to | 139 | * that interrupts won't be enabled when it exits and allows the tick to |
130 | * be frozen safely. | 140 | * be frozen safely. |
131 | */ | 141 | */ |
132 | index = cpuidle_find_deepest_state(drv, dev, true); | 142 | index = find_deepest_state(drv, dev, true); |
133 | if (index >= 0) { | ||
134 | enter_freeze_proper(drv, dev, index); | ||
135 | return; | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * It is not safe to freeze the tick, find the deepest state available | ||
140 | * at all and try to enter it normally. | ||
141 | */ | ||
142 | index = cpuidle_find_deepest_state(drv, dev, false); | ||
143 | if (index >= 0) | 143 | if (index >= 0) |
144 | cpuidle_enter(drv, dev, index); | 144 | enter_freeze_proper(drv, dev, index); |
145 | else | ||
146 | arch_cpu_idle(); | ||
147 | 145 | ||
148 | /* Interrupts are enabled again here. */ | 146 | return index; |
149 | local_irq_disable(); | ||
150 | } | 147 | } |
151 | 148 | ||
152 | /** | 149 | /** |
@@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
205 | */ | 202 | */ |
206 | int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | 203 | int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
207 | { | 204 | { |
208 | if (off || !initialized) | ||
209 | return -ENODEV; | ||
210 | |||
211 | if (!drv || !dev || !dev->enabled) | ||
212 | return -EBUSY; | ||
213 | |||
214 | return cpuidle_curr_governor->select(drv, dev); | 205 | return cpuidle_curr_governor->select(drv, dev); |
215 | } | 206 | } |
216 | 207 | ||
@@ -339,9 +330,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
339 | if (!dev->registered) | 330 | if (!dev->registered) |
340 | return -EINVAL; | 331 | return -EINVAL; |
341 | 332 | ||
342 | if (!dev->state_count) | ||
343 | dev->state_count = drv->state_count; | ||
344 | |||
345 | ret = cpuidle_add_device_sysfs(dev); | 333 | ret = cpuidle_add_device_sysfs(dev); |
346 | if (ret) | 334 | if (ret) |
347 | return ret; | 335 | return ret; |
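The refactoring moves the availability check and the freeze/normal fallback decision out of the cpuidle core and into the caller: cpuidle_not_available() answers whether cpuidle can be used at all, cpuidle_enter_freeze() now only handles states with an ->enter_freeze callback and returns the index it entered (or a negative value), and cpuidle_find_deepest_state() gives the caller a normal state to fall back to. A hedged sketch of how a caller such as the generic idle loop could combine them; this illustrates the calling convention, not kernel/sched/idle.c itself:

	#include <linux/cpuidle.h>

	static void my_idle_enter(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev,
				  bool suspend_to_idle)
	{
		int index;

		if (cpuidle_not_available(drv, dev))
			return;		/* caller falls back to arch_cpu_idle() */

		if (suspend_to_idle) {
			index = cpuidle_enter_freeze(drv, dev);
			if (index >= 0)
				return;	/* entered a state with ->enter_freeze */
			/* no freeze-capable state: pick a normal one instead */
			index = cpuidle_find_deepest_state(drv, dev);
		} else {
			index = cpuidle_select(drv, dev);
		}

		if (index >= 0)
			cpuidle_enter(drv, dev, index);
	}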
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 97c5903b4606..832a2c3f01ff 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
@@ -401,7 +401,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) | |||
401 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); | 401 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); |
402 | 402 | ||
403 | /* state statistics */ | 403 | /* state statistics */ |
404 | for (i = 0; i < device->state_count; i++) { | 404 | for (i = 0; i < drv->state_count; i++) { |
405 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); | 405 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); |
406 | if (!kobj) | 406 | if (!kobj) |
407 | goto error_state; | 407 | goto error_state; |
@@ -433,9 +433,10 @@ error_state: | |||
433 | */ | 433 | */ |
434 | static void cpuidle_remove_state_sysfs(struct cpuidle_device *device) | 434 | static void cpuidle_remove_state_sysfs(struct cpuidle_device *device) |
435 | { | 435 | { |
436 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); | ||
436 | int i; | 437 | int i; |
437 | 438 | ||
438 | for (i = 0; i < device->state_count; i++) | 439 | for (i = 0; i < drv->state_count; i++) |
439 | cpuidle_free_state_kobj(device, i); | 440 | cpuidle_free_state_kobj(device, i); |
440 | } | 441 | } |
441 | 442 | ||
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c index e5541117b3e9..50ef8bd8708b 100644 --- a/drivers/dma-buf/fence.c +++ b/drivers/dma-buf/fence.c | |||
@@ -159,6 +159,9 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) | |||
159 | if (WARN_ON(timeout < 0)) | 159 | if (WARN_ON(timeout < 0)) |
160 | return -EINVAL; | 160 | return -EINVAL; |
161 | 161 | ||
162 | if (timeout == 0) | ||
163 | return fence_is_signaled(fence); | ||
164 | |||
162 | trace_fence_wait_start(fence); | 165 | trace_fence_wait_start(fence); |
163 | ret = fence->ops->wait(fence, intr, timeout); | 166 | ret = fence->ops->wait(fence, intr, timeout); |
164 | trace_fence_wait_end(fence); | 167 | trace_fence_wait_end(fence); |
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index 3c97c8fa8d02..39920d77f288 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c | |||
@@ -327,6 +327,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, | |||
327 | unsigned seq, shared_count, i = 0; | 327 | unsigned seq, shared_count, i = 0; |
328 | long ret = timeout; | 328 | long ret = timeout; |
329 | 329 | ||
330 | if (!timeout) | ||
331 | return reservation_object_test_signaled_rcu(obj, wait_all); | ||
332 | |||
330 | retry: | 333 | retry: |
331 | fence = NULL; | 334 | fence = NULL; |
332 | shared_count = 0; | 335 | shared_count = 0; |
@@ -402,8 +405,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence) | |||
402 | int ret = 1; | 405 | int ret = 1; |
403 | 406 | ||
404 | if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { | 407 | if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { |
405 | int ret; | ||
406 | |||
407 | fence = fence_get_rcu(lfence); | 408 | fence = fence_get_rcu(lfence); |
408 | if (!fence) | 409 | if (!fence) |
409 | return -1; | 410 | return -1; |
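Both wait paths now treat a zero timeout as a non-blocking poll and return the current signaled state instead of entering the wait machinery (the reservation hunk also drops a shadowed ret declaration). A one-function usage sketch of the poll semantics, with a placeholder helper name:

	/* Sketch: poll a fence without blocking by passing timeout == 0.
	 * my_fence_done is a placeholder. */
	#include <linux/fence.h>

	static bool my_fence_done(struct fence *f)
	{
		/* returns 0 if not yet signaled, 1 if signaled, <0 on error */
		return fence_wait_timeout(f, false, 0) > 0;
	}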
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a874b6ec6650..942ca541dcbd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -51,19 +51,6 @@ config INTEL_MIC_X100_DMA | |||
51 | OS and tools for MIC to use with this driver are available from | 51 | OS and tools for MIC to use with this driver are available from |
52 | <http://software.intel.com/en-us/mic-developer>. | 52 | <http://software.intel.com/en-us/mic-developer>. |
53 | 53 | ||
54 | config INTEL_MID_DMAC | ||
55 | tristate "Intel MID DMA support for Peripheral DMA controllers" | ||
56 | depends on PCI && X86 | ||
57 | select DMA_ENGINE | ||
58 | default n | ||
59 | help | ||
60 | Enable support for the Intel(R) MID DMA engine present | ||
61 | in Intel MID chipsets. | ||
62 | |||
63 | Say Y here if you have such a chipset. | ||
64 | |||
65 | If unsure, say N. | ||
66 | |||
67 | config ASYNC_TX_ENABLE_CHANNEL_SWITCH | 54 | config ASYNC_TX_ENABLE_CHANNEL_SWITCH |
68 | bool | 55 | bool |
69 | 56 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index f915f61ec574..539d4825bd76 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o | |||
6 | obj-$(CONFIG_DMA_ACPI) += acpi-dma.o | 6 | obj-$(CONFIG_DMA_ACPI) += acpi-dma.o |
7 | obj-$(CONFIG_DMA_OF) += of-dma.o | 7 | obj-$(CONFIG_DMA_OF) += of-dma.o |
8 | 8 | ||
9 | obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o | ||
10 | obj-$(CONFIG_DMATEST) += dmatest.o | 9 | obj-$(CONFIG_DMATEST) += dmatest.o |
11 | obj-$(CONFIG_INTEL_IOATDMA) += ioat/ | 10 | obj-$(CONFIG_INTEL_IOATDMA) += ioat/ |
12 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 11 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 4a5fd245014e..83aa55d6fa5d 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -97,6 +97,12 @@ | |||
97 | 97 | ||
98 | #define DRIVER_NAME "pl08xdmac" | 98 | #define DRIVER_NAME "pl08xdmac" |
99 | 99 | ||
100 | #define PL80X_DMA_BUSWIDTHS \ | ||
101 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | ||
102 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
103 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
104 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
105 | |||
100 | static struct amba_driver pl08x_amba_driver; | 106 | static struct amba_driver pl08x_amba_driver; |
101 | struct pl08x_driver_data; | 107 | struct pl08x_driver_data; |
102 | 108 | ||
@@ -2070,6 +2076,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2070 | pl08x->memcpy.device_pause = pl08x_pause; | 2076 | pl08x->memcpy.device_pause = pl08x_pause; |
2071 | pl08x->memcpy.device_resume = pl08x_resume; | 2077 | pl08x->memcpy.device_resume = pl08x_resume; |
2072 | pl08x->memcpy.device_terminate_all = pl08x_terminate_all; | 2078 | pl08x->memcpy.device_terminate_all = pl08x_terminate_all; |
2079 | pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS; | ||
2080 | pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS; | ||
2081 | pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM); | ||
2082 | pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
2073 | 2083 | ||
2074 | /* Initialize slave engine */ | 2084 | /* Initialize slave engine */ |
2075 | dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); | 2085 | dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); |
@@ -2086,6 +2096,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2086 | pl08x->slave.device_pause = pl08x_pause; | 2096 | pl08x->slave.device_pause = pl08x_pause; |
2087 | pl08x->slave.device_resume = pl08x_resume; | 2097 | pl08x->slave.device_resume = pl08x_resume; |
2088 | pl08x->slave.device_terminate_all = pl08x_terminate_all; | 2098 | pl08x->slave.device_terminate_all = pl08x_terminate_all; |
2099 | pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS; | ||
2100 | pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS; | ||
2101 | pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
2102 | pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
2089 | 2103 | ||
2090 | /* Get the platform data */ | 2104 | /* Get the platform data */ |
2091 | pl08x->pd = dev_get_platdata(&adev->dev); | 2105 | pl08x->pd = dev_get_platdata(&adev->dev); |
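Filling in src_addr_widths, dst_addr_widths, directions and residue_granularity lets the generic dma_get_slave_caps() report what the controller supports, instead of clients needing driver-specific knowledge. A hedged sketch of advertising such capabilities on a struct dma_device; the setup function is a placeholder, the widths mirror the PL08x mask above:

	#include <linux/dmaengine.h>
	#include <linux/bitops.h>

	static void my_fill_caps(struct dma_device *dd)
	{
		dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
		dd->dst_addr_widths = dd->src_addr_widths;
		dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	}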
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 1e1a4c567542..0b4fc6fb48ce 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -238,93 +238,126 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) | |||
238 | } | 238 | } |
239 | 239 | ||
240 | /* | 240 | /* |
241 | * atc_get_current_descriptors - | 241 | * atc_get_desc_by_cookie - get the descriptor of a cookie |
242 | * locate the descriptor which equal to physical address in DSCR | 242 | * @atchan: the DMA channel |
243 | * @atchan: the channel we want to start | 243 | * @cookie: the cookie to get the descriptor for |
244 | * @dscr_addr: physical descriptor address in DSCR | ||
245 | */ | 244 | */ |
246 | static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, | 245 | static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, |
247 | u32 dscr_addr) | 246 | dma_cookie_t cookie) |
248 | { | 247 | { |
249 | struct at_desc *desc, *_desc, *child, *desc_cur = NULL; | 248 | struct at_desc *desc, *_desc; |
250 | 249 | ||
251 | list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { | 250 | list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) { |
252 | if (desc->lli.dscr == dscr_addr) { | 251 | if (desc->txd.cookie == cookie) |
253 | desc_cur = desc; | 252 | return desc; |
254 | break; | 253 | } |
255 | } | ||
256 | 254 | ||
257 | list_for_each_entry(child, &desc->tx_list, desc_node) { | 255 | list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { |
258 | if (child->lli.dscr == dscr_addr) { | 256 | if (desc->txd.cookie == cookie) |
259 | desc_cur = child; | 257 | return desc; |
260 | break; | ||
261 | } | ||
262 | } | ||
263 | } | 258 | } |
264 | 259 | ||
265 | return desc_cur; | 260 | return NULL; |
266 | } | 261 | } |
267 | 262 | ||
268 | /* | 263 | /** |
269 | * atc_get_bytes_left - | 264 | * atc_calc_bytes_left - calculates the number of bytes left according to the |
270 | * Get the number of bytes residue in dma buffer, | 265 | * value read from CTRLA. |
271 | * @chan: the channel we want to start | 266 | * |
267 | * @current_len: the number of bytes left before reading CTRLA | ||
268 | * @ctrla: the value of CTRLA | ||
269 | * @desc: the descriptor containing the transfer width | ||
270 | */ | ||
271 | static inline int atc_calc_bytes_left(int current_len, u32 ctrla, | ||
272 | struct at_desc *desc) | ||
273 | { | ||
274 | return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * atc_calc_bytes_left_from_reg - calculates the number of bytes left according | ||
279 | * to the current value of CTRLA. | ||
280 | * | ||
281 | * @current_len: the number of bytes left before reading CTRLA | ||
282 | * @atchan: the channel to read CTRLA for | ||
283 | * @desc: the descriptor containing the transfer width | ||
284 | */ | ||
285 | static inline int atc_calc_bytes_left_from_reg(int current_len, | ||
286 | struct at_dma_chan *atchan, struct at_desc *desc) | ||
287 | { | ||
288 | u32 ctrla = channel_readl(atchan, CTRLA); | ||
289 | |||
290 | return atc_calc_bytes_left(current_len, ctrla, desc); | ||
291 | } | ||
292 | |||
293 | /** | ||
294 | * atc_get_bytes_left - get the number of bytes residue for a cookie | ||
295 | * @chan: DMA channel | ||
296 | * @cookie: transaction identifier to check status of | ||
272 | */ | 297 | */ |
273 | static int atc_get_bytes_left(struct dma_chan *chan) | 298 | static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) |
274 | { | 299 | { |
275 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 300 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
276 | struct at_dma *atdma = to_at_dma(chan->device); | ||
277 | int chan_id = atchan->chan_common.chan_id; | ||
278 | struct at_desc *desc_first = atc_first_active(atchan); | 301 | struct at_desc *desc_first = atc_first_active(atchan); |
279 | struct at_desc *desc_cur; | 302 | struct at_desc *desc; |
280 | int ret = 0, count = 0; | 303 | int ret; |
304 | u32 ctrla, dscr; | ||
281 | 305 | ||
282 | /* | 306 | /* |
283 | * Initialize necessary values in the first time. | 307 | * If the cookie doesn't match to the currently running transfer then |
284 | * remain_desc record remain desc length. | 308 | * we can return the total length of the associated DMA transfer, |
309 | * because it is still queued. | ||
285 | */ | 310 | */ |
286 | if (atchan->remain_desc == 0) | 311 | desc = atc_get_desc_by_cookie(atchan, cookie); |
287 | /* First descriptor embedds the transaction length */ | 312 | if (desc == NULL) |
288 | atchan->remain_desc = desc_first->len; | 313 | return -EINVAL; |
314 | else if (desc != desc_first) | ||
315 | return desc->total_len; | ||
289 | 316 | ||
290 | /* | 317 | /* cookie matches to the currently running transfer */ |
291 | * This happens when current descriptor transfer complete. | 318 | ret = desc_first->total_len; |
292 | * The residual buffer size should reduce current descriptor length. | ||
293 | */ | ||
294 | if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) { | ||
295 | clear_bit(ATC_IS_BTC, &atchan->status); | ||
296 | desc_cur = atc_get_current_descriptors(atchan, | ||
297 | channel_readl(atchan, DSCR)); | ||
298 | if (!desc_cur) { | ||
299 | ret = -EINVAL; | ||
300 | goto out; | ||
301 | } | ||
302 | 319 | ||
303 | count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) | 320 | if (desc_first->lli.dscr) { |
304 | << desc_first->tx_width; | 321 | /* hardware linked list transfer */ |
305 | if (atchan->remain_desc < count) { | 322 | |
306 | ret = -EINVAL; | 323 | /* |
307 | goto out; | 324 | * Calculate the residue by removing the length of the child |
325 | * descriptors already transferred from the total length. | ||
326 | * To get the current child descriptor we can use the value of | ||
327 | * the channel's DSCR register and compare it against the value | ||
328 | * of the hardware linked list structure of each child | ||
329 | * descriptor. | ||
330 | */ | ||
331 | |||
332 | ctrla = channel_readl(atchan, CTRLA); | ||
333 | rmb(); /* ensure CTRLA is read before DSCR */ | ||
334 | dscr = channel_readl(atchan, DSCR); | ||
335 | |||
336 | /* for the first descriptor we can be more accurate */ | ||
337 | if (desc_first->lli.dscr == dscr) | ||
338 | return atc_calc_bytes_left(ret, ctrla, desc_first); | ||
339 | |||
340 | ret -= desc_first->len; | ||
341 | list_for_each_entry(desc, &desc_first->tx_list, desc_node) { | ||
342 | if (desc->lli.dscr == dscr) | ||
343 | break; | ||
344 | |||
345 | ret -= desc->len; | ||
308 | } | 346 | } |
309 | 347 | ||
310 | atchan->remain_desc -= count; | ||
311 | ret = atchan->remain_desc; | ||
312 | } else { | ||
313 | /* | 348 | /* |
314 | * Get residual bytes when current | 349 | * For the last descriptor in the chain we can calculate |
315 | * descriptor transfer in progress. | 350 | * the remaining bytes using the channel's register. |
351 | * Note that the transfer width of the first and last | ||
352 | * descriptor may differ. | ||
316 | */ | 353 | */ |
317 | count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) | 354 | if (!desc->lli.dscr) |
318 | << (desc_first->tx_width); | 355 | ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); |
319 | ret = atchan->remain_desc - count; | 356 | } else { |
357 | /* single transfer */ | ||
358 | ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); | ||
320 | } | 359 | } |
321 | /* | ||
322 | * Check fifo empty. | ||
323 | */ | ||
324 | if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) | ||
325 | atc_issue_pending(chan); | ||
326 | 360 | ||
327 | out: | ||
328 | return ret; | 361 | return ret; |
329 | } | 362 | } |
330 | 363 | ||
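The reworked residue path first checks whether the cookie belongs to the descriptor currently running (anything still queued has its full total_len left), then walks the hardware linked list: the channel's DSCR register identifies the child in flight, the lengths of the children already completed are subtracted, and CTRLA refines the count for the descriptor in progress. A simplified standalone sketch of the list walk; it ignores the CTRLA refinement, and the addresses and lengths are invented:

	#include <stdio.h>

	struct lli { unsigned int dscr; unsigned int len; };

	static unsigned int residue(const struct lli *chain, int n,
				    unsigned int total_len, unsigned int dscr)
	{
		unsigned int left = total_len;
		int i;

		for (i = 0; i < n; i++) {
			if (chain[i].dscr == dscr)
				break;		/* child currently in flight */
			left -= chain[i].len;	/* child already completed   */
		}
		return left;
	}

	int main(void)
	{
		const struct lli chain[] = {
			{ 0x1000, 4096 }, { 0x2000, 4096 }, { 0x3000, 2048 },
		};

		/* hardware reports the second descriptor as current */
		printf("bytes left: %u\n",
		       residue(chain, 3, 4096 + 4096 + 2048, 0x2000)); /* 6144 */
		return 0;
	}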
@@ -539,8 +572,6 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) | |||
539 | /* Give information to tasklet */ | 572 | /* Give information to tasklet */ |
540 | set_bit(ATC_IS_ERROR, &atchan->status); | 573 | set_bit(ATC_IS_ERROR, &atchan->status); |
541 | } | 574 | } |
542 | if (pending & AT_DMA_BTC(i)) | ||
543 | set_bit(ATC_IS_BTC, &atchan->status); | ||
544 | tasklet_schedule(&atchan->tasklet); | 575 | tasklet_schedule(&atchan->tasklet); |
545 | ret = IRQ_HANDLED; | 576 | ret = IRQ_HANDLED; |
546 | } | 577 | } |
@@ -653,14 +684,18 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
653 | desc->lli.ctrlb = ctrlb; | 684 | desc->lli.ctrlb = ctrlb; |
654 | 685 | ||
655 | desc->txd.cookie = 0; | 686 | desc->txd.cookie = 0; |
687 | desc->len = xfer_count << src_width; | ||
656 | 688 | ||
657 | atc_desc_chain(&first, &prev, desc); | 689 | atc_desc_chain(&first, &prev, desc); |
658 | } | 690 | } |
659 | 691 | ||
660 | /* First descriptor of the chain embedds additional information */ | 692 | /* First descriptor of the chain embedds additional information */ |
661 | first->txd.cookie = -EBUSY; | 693 | first->txd.cookie = -EBUSY; |
662 | first->len = len; | 694 | first->total_len = len; |
695 | |||
696 | /* set transfer width for the calculation of the residue */ | ||
663 | first->tx_width = src_width; | 697 | first->tx_width = src_width; |
698 | prev->tx_width = src_width; | ||
664 | 699 | ||
665 | /* set end-of-link to the last link descriptor of list*/ | 700 | /* set end-of-link to the last link descriptor of list*/ |
666 | set_desc_eol(desc); | 701 | set_desc_eol(desc); |
@@ -752,6 +787,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
752 | | ATC_SRC_WIDTH(mem_width) | 787 | | ATC_SRC_WIDTH(mem_width) |
753 | | len >> mem_width; | 788 | | len >> mem_width; |
754 | desc->lli.ctrlb = ctrlb; | 789 | desc->lli.ctrlb = ctrlb; |
790 | desc->len = len; | ||
755 | 791 | ||
756 | atc_desc_chain(&first, &prev, desc); | 792 | atc_desc_chain(&first, &prev, desc); |
757 | total_len += len; | 793 | total_len += len; |
@@ -792,6 +828,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
792 | | ATC_DST_WIDTH(mem_width) | 828 | | ATC_DST_WIDTH(mem_width) |
793 | | len >> reg_width; | 829 | | len >> reg_width; |
794 | desc->lli.ctrlb = ctrlb; | 830 | desc->lli.ctrlb = ctrlb; |
831 | desc->len = len; | ||
795 | 832 | ||
796 | atc_desc_chain(&first, &prev, desc); | 833 | atc_desc_chain(&first, &prev, desc); |
797 | total_len += len; | 834 | total_len += len; |
@@ -806,8 +843,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
806 | 843 | ||
807 | /* First descriptor of the chain embedds additional information */ | 844 | /* First descriptor of the chain embedds additional information */ |
808 | first->txd.cookie = -EBUSY; | 845 | first->txd.cookie = -EBUSY; |
809 | first->len = total_len; | 846 | first->total_len = total_len; |
847 | |||
848 | /* set transfer width for the calculation of the residue */ | ||
810 | first->tx_width = reg_width; | 849 | first->tx_width = reg_width; |
850 | prev->tx_width = reg_width; | ||
811 | 851 | ||
812 | /* first link descriptor of list is responsible for flags */ | 852 | /* first link descriptor of list is responsible for flags */ |
813 | first->txd.flags = flags; /* client is in control of this ack */ | 853 | first->txd.flags = flags; /* client is in control of this ack */ |
@@ -872,6 +912,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | |||
872 | | ATC_FC_MEM2PER | 912 | | ATC_FC_MEM2PER |
873 | | ATC_SIF(atchan->mem_if) | 913 | | ATC_SIF(atchan->mem_if) |
874 | | ATC_DIF(atchan->per_if); | 914 | | ATC_DIF(atchan->per_if); |
915 | desc->len = period_len; | ||
875 | break; | 916 | break; |
876 | 917 | ||
877 | case DMA_DEV_TO_MEM: | 918 | case DMA_DEV_TO_MEM: |
@@ -883,6 +924,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | |||
883 | | ATC_FC_PER2MEM | 924 | | ATC_FC_PER2MEM |
884 | | ATC_SIF(atchan->per_if) | 925 | | ATC_SIF(atchan->per_if) |
885 | | ATC_DIF(atchan->mem_if); | 926 | | ATC_DIF(atchan->mem_if); |
927 | desc->len = period_len; | ||
886 | break; | 928 | break; |
887 | 929 | ||
888 | default: | 930 | default: |
@@ -964,7 +1006,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
964 | 1006 | ||
965 | /* First descriptor of the chain embeds additional information */ | 1007 | /* First descriptor of the chain embeds additional information */ |
966 | first->txd.cookie = -EBUSY; | 1008 | first->txd.cookie = -EBUSY; |
967 | first->len = buf_len; | 1009 | first->total_len = buf_len; |
968 | first->tx_width = reg_width; | 1010 | first->tx_width = reg_width; |
969 | 1011 | ||
970 | return &first->txd; | 1012 | return &first->txd; |
@@ -1118,7 +1160,7 @@ atc_tx_status(struct dma_chan *chan, | |||
1118 | spin_lock_irqsave(&atchan->lock, flags); | 1160 | spin_lock_irqsave(&atchan->lock, flags); |
1119 | 1161 | ||
1120 | /* Get number of bytes left in the active transactions */ | 1162 | /* Get number of bytes left in the active transactions */ |
1121 | bytes = atc_get_bytes_left(chan); | 1163 | bytes = atc_get_bytes_left(chan, cookie); |
1122 | 1164 | ||
1123 | spin_unlock_irqrestore(&atchan->lock, flags); | 1165 | spin_unlock_irqrestore(&atchan->lock, flags); |
1124 | 1166 | ||
@@ -1214,7 +1256,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) | |||
1214 | 1256 | ||
1215 | spin_lock_irqsave(&atchan->lock, flags); | 1257 | spin_lock_irqsave(&atchan->lock, flags); |
1216 | atchan->descs_allocated = i; | 1258 | atchan->descs_allocated = i; |
1217 | atchan->remain_desc = 0; | ||
1218 | list_splice(&tmp_list, &atchan->free_list); | 1259 | list_splice(&tmp_list, &atchan->free_list); |
1219 | dma_cookie_init(chan); | 1260 | dma_cookie_init(chan); |
1220 | spin_unlock_irqrestore(&atchan->lock, flags); | 1261 | spin_unlock_irqrestore(&atchan->lock, flags); |
@@ -1257,7 +1298,6 @@ static void atc_free_chan_resources(struct dma_chan *chan) | |||
1257 | list_splice_init(&atchan->free_list, &list); | 1298 | list_splice_init(&atchan->free_list, &list); |
1258 | atchan->descs_allocated = 0; | 1299 | atchan->descs_allocated = 0; |
1259 | atchan->status = 0; | 1300 | atchan->status = 0; |
1260 | atchan->remain_desc = 0; | ||
1261 | 1301 | ||
1262 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); | 1302 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); |
1263 | } | 1303 | } |
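
For context on the at_hdmac hunks above: residue bookkeeping moves out of the ATC_IS_BTC interrupt flag and atchan->remain_desc and into the descriptors themselves. Each descriptor now records its own byte count in desc->len, while the first descriptor of a chain keeps the whole transaction in total_len and the transfer width in tx_width. A minimal sketch of how a residue can then be computed, with hypothetical names (residue_sketch, the "active" argument) standing in for the driver's reworked atc_get_bytes_left(), which is only partially visible in this hunk; it assumes CTRLA.BTSIZE reads back as the number of source-width transfers already performed:

    /*
     * Illustrative sketch only (not the driver's atc_get_bytes_left()),
     * building on the at_hdmac_regs.h definitions in the next hunk:
     * residue = total transaction length, minus the buffers already
     * completed, minus what the in-flight buffer has already moved.
     * Finding the in-flight descriptor ("active") is the hardware
     * specific part the driver handles via the channel's DSCR register.
     */
    static size_t residue_sketch(struct at_dma_chan *atchan,
                                 struct at_desc *first,
                                 struct at_desc *active)
    {
            struct at_desc *child;
            size_t left = first->total_len;

            if (active != first) {
                    left -= first->len;             /* first buffer is done */
                    list_for_each_entry(child, &first->tx_list, desc_node) {
                            if (child == active)
                                    break;
                            left -= child->len;     /* completed child buffer */
                    }
            }

            /* progress of the in-flight buffer, converted to bytes */
            left -= (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
                            << first->tx_width;

            return left;
    }
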
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index d6bba6c636c2..2727ca560572 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -181,8 +181,9 @@ struct at_lli { | |||
181 | * @at_lli: hardware lli structure | 181 | * @at_lli: hardware lli structure |
182 | * @txd: support for the async_tx api | 182 | * @txd: support for the async_tx api |
183 | * @desc_node: node on the channel descriptors list | 183 | * @desc_node: node on the channel descriptors list |
184 | * @len: total transaction bytecount | 184 | * @len: descriptor byte count |
185 | * @tx_width: transfer width | 185 | * @tx_width: transfer width |
186 | * @total_len: total transaction byte count | ||
186 | */ | 187 | */ |
187 | struct at_desc { | 188 | struct at_desc { |
188 | /* FIRST values the hardware uses */ | 189 | /* FIRST values the hardware uses */ |
@@ -194,6 +195,7 @@ struct at_desc { | |||
194 | struct list_head desc_node; | 195 | struct list_head desc_node; |
195 | size_t len; | 196 | size_t len; |
196 | u32 tx_width; | 197 | u32 tx_width; |
198 | size_t total_len; | ||
197 | }; | 199 | }; |
198 | 200 | ||
199 | static inline struct at_desc * | 201 | static inline struct at_desc * |
@@ -213,7 +215,6 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd) | |||
213 | enum atc_status { | 215 | enum atc_status { |
214 | ATC_IS_ERROR = 0, | 216 | ATC_IS_ERROR = 0, |
215 | ATC_IS_PAUSED = 1, | 217 | ATC_IS_PAUSED = 1, |
216 | ATC_IS_BTC = 2, | ||
217 | ATC_IS_CYCLIC = 24, | 218 | ATC_IS_CYCLIC = 24, |
218 | }; | 219 | }; |
219 | 220 | ||
@@ -231,7 +232,6 @@ enum atc_status { | |||
231 | * @save_cfg: configuration register that is saved on suspend/resume cycle | 232 | * @save_cfg: configuration register that is saved on suspend/resume cycle |
232 | * @save_dscr: for cyclic operations, preserve next descriptor address in | 233 | * @save_dscr: for cyclic operations, preserve next descriptor address in |
233 | * the cyclic list on suspend/resume cycle | 234 | * the cyclic list on suspend/resume cycle |
234 | * @remain_desc: to save remain desc length | ||
235 | * @dma_sconfig: configuration for slave transfers, passed via | 235 | * @dma_sconfig: configuration for slave transfers, passed via |
236 | * .device_config | 236 | * .device_config |
237 | * @lock: serializes enqueue/dequeue operations to descriptors lists | 237 | * @lock: serializes enqueue/dequeue operations to descriptors lists |
@@ -251,7 +251,6 @@ struct at_dma_chan { | |||
251 | struct tasklet_struct tasklet; | 251 | struct tasklet_struct tasklet; |
252 | u32 save_cfg; | 252 | u32 save_cfg; |
253 | u32 save_dscr; | 253 | u32 save_dscr; |
254 | u32 remain_desc; | ||
255 | struct dma_slave_config dma_sconfig; | 254 | struct dma_slave_config dma_sconfig; |
256 | 255 | ||
257 | spinlock_t lock; | 256 | spinlock_t lock; |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 09e2825a547a..d9891d3461f6 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
@@ -664,7 +664,6 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, | |||
664 | struct at_xdmac_desc *first = NULL, *prev = NULL; | 664 | struct at_xdmac_desc *first = NULL, *prev = NULL; |
665 | unsigned int periods = buf_len / period_len; | 665 | unsigned int periods = buf_len / period_len; |
666 | int i; | 666 | int i; |
667 | u32 cfg; | ||
668 | 667 | ||
669 | dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", | 668 | dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", |
670 | __func__, &buf_addr, buf_len, period_len, | 669 | __func__, &buf_addr, buf_len, period_len, |
@@ -700,17 +699,17 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, | |||
700 | if (direction == DMA_DEV_TO_MEM) { | 699 | if (direction == DMA_DEV_TO_MEM) { |
701 | desc->lld.mbr_sa = atchan->per_src_addr; | 700 | desc->lld.mbr_sa = atchan->per_src_addr; |
702 | desc->lld.mbr_da = buf_addr + i * period_len; | 701 | desc->lld.mbr_da = buf_addr + i * period_len; |
703 | cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; | 702 | desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; |
704 | } else { | 703 | } else { |
705 | desc->lld.mbr_sa = buf_addr + i * period_len; | 704 | desc->lld.mbr_sa = buf_addr + i * period_len; |
706 | desc->lld.mbr_da = atchan->per_dst_addr; | 705 | desc->lld.mbr_da = atchan->per_dst_addr; |
707 | cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; | 706 | desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; |
708 | } | 707 | } |
709 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 | 708 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 |
710 | | AT_XDMAC_MBR_UBC_NDEN | 709 | | AT_XDMAC_MBR_UBC_NDEN |
711 | | AT_XDMAC_MBR_UBC_NSEN | 710 | | AT_XDMAC_MBR_UBC_NSEN |
712 | | AT_XDMAC_MBR_UBC_NDE | 711 | | AT_XDMAC_MBR_UBC_NDE |
713 | | period_len >> at_xdmac_get_dwidth(cfg); | 712 | | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg); |
714 | 713 | ||
715 | dev_dbg(chan2dev(chan), | 714 | dev_dbg(chan2dev(chan), |
716 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", | 715 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", |
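
For context, the at_xdmac change above stores the channel configuration in each linked-list descriptor (lld.mbr_cfg) instead of a throwaway local, so the data width that sized the microblock can later be recovered from the descriptor alone. A minimal sketch of that recovery, reusing the driver's at_xdmac_get_dwidth() decoder shown in the hunk; the UBLEN_MASK name is a hypothetical stand-in for the driver's microblock-length field mask:

    /* Sketch: a descriptor's byte count derived from its own mbr_cfg. */
    static size_t desc_bytes_sketch(struct at_xdmac_desc *desc)
    {
            u32 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); /* log2(width) */
            u32 ublen  = desc->lld.mbr_ubc & UBLEN_MASK;         /* hypothetical mask */

            return (size_t)ublen << dwidth;     /* microblock length in bytes */
    }
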
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 0723096fb50a..c92d6a70ccf3 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c | |||
@@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) | |||
475 | * c->desc is NULL and exit.) | 475 | * c->desc is NULL and exit.) |
476 | */ | 476 | */ |
477 | if (c->desc) { | 477 | if (c->desc) { |
478 | bcm2835_dma_desc_free(&c->desc->vd); | ||
478 | c->desc = NULL; | 479 | c->desc = NULL; |
479 | bcm2835_dma_abort(c->chan_base); | 480 | bcm2835_dma_abort(c->chan_base); |
480 | 481 | ||
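
The bcm2835 fix above addresses a common virt-dma pitfall: once a descriptor has been issued to the hardware it is taken off the vchan lists, so vchan_get_all_descriptors() will never see it again and terminate_all must free it explicitly (the edma hunk further down applies the same fix). A generic sketch of the pattern, with hypothetical stop_hw()/get_running() helpers standing in for the driver-specific parts:

    /* Builds on drivers/dma/virt-dma.h. */
    static int terminate_all_sketch(struct dma_chan *chan)
    {
            struct virt_dma_chan *vc = to_virt_chan(chan);
            struct virt_dma_desc *vd;
            unsigned long flags;
            LIST_HEAD(head);

            spin_lock_irqsave(&vc->lock, flags);
            stop_hw(chan);                          /* driver specific */
            vd = get_running(chan);                 /* descriptor on the hardware */
            if (vd)
                    vc->desc_free(vd);              /* off the lists: free by hand */
            vchan_get_all_descriptors(vc, &head);   /* everything still queued */
            spin_unlock_irqrestore(&vc->lock, flags);
            vchan_dma_desc_free_list(vc, &head);
            return 0;
    }
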
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 512cb8e2805e..ceedafbd23e0 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c | |||
@@ -903,6 +903,11 @@ static const struct cppi_glue_infos *get_glue_info(struct device *dev) | |||
903 | return of_id->data; | 903 | return of_id->data; |
904 | } | 904 | } |
905 | 905 | ||
906 | #define CPPI41_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
907 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
908 | BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ | ||
909 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
910 | |||
906 | static int cppi41_dma_probe(struct platform_device *pdev) | 911 | static int cppi41_dma_probe(struct platform_device *pdev) |
907 | { | 912 | { |
908 | struct cppi41_dd *cdd; | 913 | struct cppi41_dd *cdd; |
@@ -926,6 +931,10 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
926 | cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; | 931 | cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; |
927 | cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; | 932 | cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; |
928 | cdd->ddev.device_terminate_all = cppi41_stop_chan; | 933 | cdd->ddev.device_terminate_all = cppi41_stop_chan; |
934 | cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
935 | cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS; | ||
936 | cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS; | ||
937 | cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
929 | cdd->ddev.dev = dev; | 938 | cdd->ddev.dev = dev; |
930 | INIT_LIST_HEAD(&cdd->ddev.channels); | 939 | INIT_LIST_HEAD(&cdd->ddev.channels); |
931 | cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; | 940 | cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; |
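
The cppi41 hunk above (and the jz4740 hunk that follows) fills in the generic slave-capability fields that the dmaengine core now expects DMA_SLAVE drivers to report. On the client side these fields are exactly what dma_get_slave_caps() hands back; a minimal sketch of a consumer-side check, assuming the channel was requested elsewhere:

    #include <linux/dmaengine.h>

    /* Sketch: how a client consumes the capability fields set above. */
    static bool chan_usable_sketch(struct dma_chan *chan)
    {
            struct dma_slave_caps caps;

            if (dma_get_slave_caps(chan, &caps) < 0)
                    return false;

            return (caps.directions & BIT(DMA_MEM_TO_DEV)) &&
                   (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) &&
                   caps.residue_granularity >= DMA_RESIDUE_GRANULARITY_BURST;
    }
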
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index 4527a3ebeac4..84884418fd30 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c | |||
@@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc) | |||
511 | kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc)); | 511 | kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc)); |
512 | } | 512 | } |
513 | 513 | ||
514 | #define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
515 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
516 | |||
514 | static int jz4740_dma_probe(struct platform_device *pdev) | 517 | static int jz4740_dma_probe(struct platform_device *pdev) |
515 | { | 518 | { |
516 | struct jz4740_dmaengine_chan *chan; | 519 | struct jz4740_dmaengine_chan *chan; |
@@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev) | |||
548 | dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; | 551 | dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; |
549 | dd->device_config = jz4740_dma_slave_config; | 552 | dd->device_config = jz4740_dma_slave_config; |
550 | dd->device_terminate_all = jz4740_dma_terminate_all; | 553 | dd->device_terminate_all = jz4740_dma_terminate_all; |
554 | dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS; | ||
555 | dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS; | ||
556 | dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
557 | dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
551 | dd->dev = &pdev->dev; | 558 | dd->dev = &pdev->dev; |
552 | INIT_LIST_HEAD(&dd->channels); | 559 | INIT_LIST_HEAD(&dd->channels); |
553 | 560 | ||
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index f15712f2fec6..ac336a961dea 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -859,9 +859,6 @@ int dma_async_device_register(struct dma_device *device) | |||
859 | BUG_ON(!device->device_issue_pending); | 859 | BUG_ON(!device->device_issue_pending); |
860 | BUG_ON(!device->dev); | 860 | BUG_ON(!device->dev); |
861 | 861 | ||
862 | WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions, | ||
863 | "this driver doesn't support generic slave capabilities reporting\n"); | ||
864 | |||
865 | /* note: this only matters in the | 862 | /* note: this only matters in the |
866 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case | 863 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case |
867 | */ | 864 | */ |
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 455b7a4f1e87..a8ad05291b27 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -626,7 +626,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | |||
626 | dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); | 626 | dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); |
627 | 627 | ||
628 | /* Check if we have any interrupt from the DMAC */ | 628 | /* Check if we have any interrupt from the DMAC */ |
629 | if (!status) | 629 | if (!status || !dw->in_use) |
630 | return IRQ_NONE; | 630 | return IRQ_NONE; |
631 | 631 | ||
632 | /* | 632 | /* |
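
The dw_dmac core change above guards the interrupt handler with dw->in_use. Because the IRQ may be shared, the handler can be invoked for another device's interrupt at any point between request_irq() and free_irq(), including while this controller is being torn down, so it has to bail out with IRQ_NONE instead of touching hardware state. A minimal sketch of the guard, assuming the driver's dma_readl() accessor:

    static irqreturn_t isr_sketch(int irq, void *dev_id)
    {
            struct dw_dma *dw = dev_id;
            u32 status = dma_readl(dw, STATUS_INT);

            /* not our interrupt, or the controller is not in use */
            if (!status || !dw->in_use)
                    return IRQ_NONE;

            /* ... service and clear the pending channel interrupts ... */
            return IRQ_HANDLED;
    }
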
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 6565a361e7e5..b2c3ae071429 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c | |||
@@ -26,6 +26,8 @@ | |||
26 | 26 | ||
27 | #include "internal.h" | 27 | #include "internal.h" |
28 | 28 | ||
29 | #define DRV_NAME "dw_dmac" | ||
30 | |||
29 | static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, | 31 | static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, |
30 | struct of_dma *ofdma) | 32 | struct of_dma *ofdma) |
31 | { | 33 | { |
@@ -284,7 +286,7 @@ static struct platform_driver dw_driver = { | |||
284 | .remove = dw_remove, | 286 | .remove = dw_remove, |
285 | .shutdown = dw_shutdown, | 287 | .shutdown = dw_shutdown, |
286 | .driver = { | 288 | .driver = { |
287 | .name = "dw_dmac", | 289 | .name = DRV_NAME, |
288 | .pm = &dw_dev_pm_ops, | 290 | .pm = &dw_dev_pm_ops, |
289 | .of_match_table = of_match_ptr(dw_dma_of_id_table), | 291 | .of_match_table = of_match_ptr(dw_dma_of_id_table), |
290 | .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), | 292 | .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), |
@@ -305,3 +307,4 @@ module_exit(dw_exit); | |||
305 | 307 | ||
306 | MODULE_LICENSE("GPL v2"); | 308 | MODULE_LICENSE("GPL v2"); |
307 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); | 309 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); |
310 | MODULE_ALIAS("platform:" DRV_NAME); | ||
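
The dw_dmac platform hunk above factors the driver name into DRV_NAME so the new MODULE_ALIAS() stays in sync with .driver.name. The platform bus emits MODALIAS=platform:<device name> for its devices, and the alias is what lets udev/modprobe auto-load the module when such a device appears. A generic sketch of the convention, using a hypothetical driver name:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    #define DRV_NAME "example_dmac"                 /* hypothetical name */

    static int example_probe(struct platform_device *pdev)
    {
            return 0;                               /* device setup goes here */
    }

    static struct platform_driver example_driver = {
            .probe  = example_probe,
            .driver = {
                    .name = DRV_NAME,               /* matched against the device name */
            },
    };
    module_platform_driver(example_driver);

    /* uevent MODALIAS=platform:example_dmac resolves to this module */
    MODULE_ALIAS("platform:" DRV_NAME);
    MODULE_LICENSE("GPL v2");
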
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 276157f22612..53dbd3b3384c 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan) | |||
260 | */ | 260 | */ |
261 | if (echan->edesc) { | 261 | if (echan->edesc) { |
262 | int cyclic = echan->edesc->cyclic; | 262 | int cyclic = echan->edesc->cyclic; |
263 | |||
264 | /* | ||
265 | * free the running request descriptor | ||
266 | * since it is not in any of the vdesc lists | ||
267 | */ | ||
268 | edma_desc_free(&echan->edesc->vdesc); | ||
269 | |||
263 | echan->edesc = NULL; | 270 | echan->edesc = NULL; |
264 | edma_stop(echan->ch_num); | 271 | edma_stop(echan->ch_num); |
265 | /* Move the cyclic channel back to default queue */ | 272 | /* Move the cyclic channel back to default queue */ |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 18c0a131e4e4..66a0efb9651d 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma) | |||
531 | dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); | 531 | dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); |
532 | } | 532 | } |
533 | 533 | ||
534 | /* Set bits of CONFIG register with dynamic context switching */ | ||
535 | if (readl(sdma->regs + SDMA_H_CONFIG) == 0) | ||
536 | writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | ||
537 | |||
534 | return ret ? 0 : -ETIMEDOUT; | 538 | return ret ? 0 : -ETIMEDOUT; |
535 | } | 539 | } |
536 | 540 | ||
@@ -1394,9 +1398,6 @@ static int sdma_init(struct sdma_engine *sdma) | |||
1394 | 1398 | ||
1395 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); | 1399 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); |
1396 | 1400 | ||
1397 | /* Set bits of CONFIG register with given context switching mode */ | ||
1398 | writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | ||
1399 | |||
1400 | /* Initializes channel's priorities */ | 1401 | /* Initializes channel's priorities */ |
1401 | sdma_set_channel_priority(&sdma->channel[0], 7); | 1402 | sdma_set_channel_priority(&sdma->channel[0], 7); |
1402 | 1403 | ||
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c deleted file mode 100644 index 5aaead9b56f7..000000000000 --- a/drivers/dma/intel_mid_dma.c +++ /dev/null | |||
@@ -1,1447 +0,0 @@ | |||
1 | /* | ||
2 | * intel_mid_dma.c - Intel Langwell DMA Drivers | ||
3 | * | ||
4 | * Copyright (C) 2008-10 Intel Corp | ||
5 | * Author: Vinod Koul <vinod.koul@intel.com> | ||
6 | * The driver design is based on dw_dmac driver | ||
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; version 2 of the License. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
21 | * | ||
22 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
23 | * | ||
24 | * | ||
25 | */ | ||
26 | #include <linux/pci.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/pm_runtime.h> | ||
29 | #include <linux/intel_mid_dma.h> | ||
30 | #include <linux/module.h> | ||
31 | |||
32 | #include "dmaengine.h" | ||
33 | |||
34 | #define MAX_CHAN 4 /*max ch across controllers*/ | ||
35 | #include "intel_mid_dma_regs.h" | ||
36 | |||
37 | #define INTEL_MID_DMAC1_ID 0x0814 | ||
38 | #define INTEL_MID_DMAC2_ID 0x0813 | ||
39 | #define INTEL_MID_GP_DMAC2_ID 0x0827 | ||
40 | #define INTEL_MFLD_DMAC1_ID 0x0830 | ||
41 | #define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008 | ||
42 | #define LNW_PERIPHRAL_MASK_SIZE 0x10 | ||
43 | #define LNW_PERIPHRAL_STATUS 0x0 | ||
44 | #define LNW_PERIPHRAL_MASK 0x8 | ||
45 | |||
46 | struct intel_mid_dma_probe_info { | ||
47 | u8 max_chan; | ||
48 | u8 ch_base; | ||
49 | u16 block_size; | ||
50 | u32 pimr_mask; | ||
51 | }; | ||
52 | |||
53 | #define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \ | ||
54 | ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \ | ||
55 | .max_chan = (_max_chan), \ | ||
56 | .ch_base = (_ch_base), \ | ||
57 | .block_size = (_block_size), \ | ||
58 | .pimr_mask = (_pimr_mask), \ | ||
59 | }) | ||
60 | |||
61 | /***************************************************************************** | ||
62 | Utility Functions*/ | ||
63 | /** | ||
64 | * get_ch_index - convert status to channel | ||
65 | * @status: status mask | ||
66 | * @base: dma ch base value | ||
67 | * | ||
68 | * Modify the status mask and return the channel index needing | ||
69 | * attention (or -1 if neither) | ||
70 | */ | ||
71 | static int get_ch_index(int *status, unsigned int base) | ||
72 | { | ||
73 | int i; | ||
74 | for (i = 0; i < MAX_CHAN; i++) { | ||
75 | if (*status & (1 << (i + base))) { | ||
76 | *status = *status & ~(1 << (i + base)); | ||
77 | pr_debug("MDMA: index %d New status %x\n", i, *status); | ||
78 | return i; | ||
79 | } | ||
80 | } | ||
81 | return -1; | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * get_block_ts - calculates dma transaction length | ||
86 | * @len: dma transfer length | ||
87 | * @tx_width: dma transfer src width | ||
88 | * @block_size: dma controller max block size | ||
89 | * | ||
90 | * Based on src width calculate the DMA trsaction length in data items | ||
91 | * return data items or FFFF if exceeds max length for block | ||
92 | */ | ||
93 | static int get_block_ts(int len, int tx_width, int block_size) | ||
94 | { | ||
95 | int byte_width = 0, block_ts = 0; | ||
96 | |||
97 | switch (tx_width) { | ||
98 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
99 | byte_width = 1; | ||
100 | break; | ||
101 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
102 | byte_width = 2; | ||
103 | break; | ||
104 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
105 | default: | ||
106 | byte_width = 4; | ||
107 | break; | ||
108 | } | ||
109 | |||
110 | block_ts = len/byte_width; | ||
111 | if (block_ts > block_size) | ||
112 | block_ts = 0xFFFF; | ||
113 | return block_ts; | ||
114 | } | ||
115 | |||
116 | /***************************************************************************** | ||
117 | DMAC1 interrupt Functions*/ | ||
118 | |||
119 | /** | ||
120 | * dmac1_mask_periphral_intr - mask the periphral interrupt | ||
121 | * @mid: dma device for which masking is required | ||
122 | * | ||
123 | * Masks the DMA periphral interrupt | ||
124 | * this is valid for DMAC1 family controllers only | ||
125 | * This controller should have periphral mask registers already mapped | ||
126 | */ | ||
127 | static void dmac1_mask_periphral_intr(struct middma_device *mid) | ||
128 | { | ||
129 | u32 pimr; | ||
130 | |||
131 | if (mid->pimr_mask) { | ||
132 | pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); | ||
133 | pimr |= mid->pimr_mask; | ||
134 | writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); | ||
135 | } | ||
136 | return; | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * dmac1_unmask_periphral_intr - unmask the periphral interrupt | ||
141 | * @midc: dma channel for which masking is required | ||
142 | * | ||
143 | * UnMasks the DMA periphral interrupt, | ||
144 | * this is valid for DMAC1 family controllers only | ||
145 | * This controller should have periphral mask registers already mapped | ||
146 | */ | ||
147 | static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc) | ||
148 | { | ||
149 | u32 pimr; | ||
150 | struct middma_device *mid = to_middma_device(midc->chan.device); | ||
151 | |||
152 | if (mid->pimr_mask) { | ||
153 | pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); | ||
154 | pimr &= ~mid->pimr_mask; | ||
155 | writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); | ||
156 | } | ||
157 | return; | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * enable_dma_interrupt - enable the periphral interrupt | ||
162 | * @midc: dma channel for which enable interrupt is required | ||
163 | * | ||
164 | * Enable the DMA periphral interrupt, | ||
165 | * this is valid for DMAC1 family controllers only | ||
166 | * This controller should have periphral mask registers already mapped | ||
167 | */ | ||
168 | static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) | ||
169 | { | ||
170 | dmac1_unmask_periphral_intr(midc); | ||
171 | |||
172 | /*en ch interrupts*/ | ||
173 | iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); | ||
174 | iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); | ||
175 | return; | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * disable_dma_interrupt - disable the periphral interrupt | ||
180 | * @midc: dma channel for which disable interrupt is required | ||
181 | * | ||
182 | * Disable the DMA periphral interrupt, | ||
183 | * this is valid for DMAC1 family controllers only | ||
184 | * This controller should have periphral mask registers already mapped | ||
185 | */ | ||
186 | static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) | ||
187 | { | ||
188 | /*Check LPE PISR, make sure fwd is disabled*/ | ||
189 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); | ||
190 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); | ||
191 | iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); | ||
192 | return; | ||
193 | } | ||
194 | |||
195 | /***************************************************************************** | ||
196 | DMA channel helper Functions*/ | ||
197 | /** | ||
198 | * mid_desc_get - get a descriptor | ||
199 | * @midc: dma channel for which descriptor is required | ||
200 | * | ||
201 | * Obtain a descriptor for the channel. Returns NULL if none are free. | ||
202 | * Once the descriptor is returned it is private until put on another | ||
203 | * list or freed | ||
204 | */ | ||
205 | static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc) | ||
206 | { | ||
207 | struct intel_mid_dma_desc *desc, *_desc; | ||
208 | struct intel_mid_dma_desc *ret = NULL; | ||
209 | |||
210 | spin_lock_bh(&midc->lock); | ||
211 | list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { | ||
212 | if (async_tx_test_ack(&desc->txd)) { | ||
213 | list_del(&desc->desc_node); | ||
214 | ret = desc; | ||
215 | break; | ||
216 | } | ||
217 | } | ||
218 | spin_unlock_bh(&midc->lock); | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | /** | ||
223 | * mid_desc_put - put a descriptor | ||
224 | * @midc: dma channel for which descriptor is required | ||
225 | * @desc: descriptor to put | ||
226 | * | ||
227 | * Return a descriptor from lwn_desc_get back to the free pool | ||
228 | */ | ||
229 | static void midc_desc_put(struct intel_mid_dma_chan *midc, | ||
230 | struct intel_mid_dma_desc *desc) | ||
231 | { | ||
232 | if (desc) { | ||
233 | spin_lock_bh(&midc->lock); | ||
234 | list_add_tail(&desc->desc_node, &midc->free_list); | ||
235 | spin_unlock_bh(&midc->lock); | ||
236 | } | ||
237 | } | ||
238 | /** | ||
239 | * midc_dostart - begin a DMA transaction | ||
240 | * @midc: channel for which txn is to be started | ||
241 | * @first: first descriptor of series | ||
242 | * | ||
243 | * Load a transaction into the engine. This must be called with midc->lock | ||
244 | * held and bh disabled. | ||
245 | */ | ||
246 | static void midc_dostart(struct intel_mid_dma_chan *midc, | ||
247 | struct intel_mid_dma_desc *first) | ||
248 | { | ||
249 | struct middma_device *mid = to_middma_device(midc->chan.device); | ||
250 | |||
251 | /* channel is idle */ | ||
252 | if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { | ||
253 | /*error*/ | ||
254 | pr_err("ERR_MDMA: channel is busy in start\n"); | ||
255 | /* The tasklet will hopefully advance the queue... */ | ||
256 | return; | ||
257 | } | ||
258 | midc->busy = true; | ||
259 | /*write registers and en*/ | ||
260 | iowrite32(first->sar, midc->ch_regs + SAR); | ||
261 | iowrite32(first->dar, midc->ch_regs + DAR); | ||
262 | iowrite32(first->lli_phys, midc->ch_regs + LLP); | ||
263 | iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); | ||
264 | iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); | ||
265 | iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); | ||
266 | iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH); | ||
267 | pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", | ||
268 | (int)first->sar, (int)first->dar, first->cfg_hi, | ||
269 | first->cfg_lo, first->ctl_hi, first->ctl_lo); | ||
270 | first->status = DMA_IN_PROGRESS; | ||
271 | |||
272 | iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); | ||
273 | } | ||
274 | |||
275 | /** | ||
276 | * midc_descriptor_complete - process completed descriptor | ||
277 | * @midc: channel owning the descriptor | ||
278 | * @desc: the descriptor itself | ||
279 | * | ||
280 | * Process a completed descriptor and perform any callbacks upon | ||
281 | * the completion. The completion handling drops the lock during the | ||
282 | * callbacks but must be called with the lock held. | ||
283 | */ | ||
284 | static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | ||
285 | struct intel_mid_dma_desc *desc) | ||
286 | __releases(&midc->lock) __acquires(&midc->lock) | ||
287 | { | ||
288 | struct dma_async_tx_descriptor *txd = &desc->txd; | ||
289 | dma_async_tx_callback callback_txd = NULL; | ||
290 | struct intel_mid_dma_lli *llitem; | ||
291 | void *param_txd = NULL; | ||
292 | |||
293 | dma_cookie_complete(txd); | ||
294 | callback_txd = txd->callback; | ||
295 | param_txd = txd->callback_param; | ||
296 | |||
297 | if (desc->lli != NULL) { | ||
298 | /*clear the DONE bit of completed LLI in memory*/ | ||
299 | llitem = desc->lli + desc->current_lli; | ||
300 | llitem->ctl_hi &= CLEAR_DONE; | ||
301 | if (desc->current_lli < desc->lli_length-1) | ||
302 | (desc->current_lli)++; | ||
303 | else | ||
304 | desc->current_lli = 0; | ||
305 | } | ||
306 | spin_unlock_bh(&midc->lock); | ||
307 | if (callback_txd) { | ||
308 | pr_debug("MDMA: TXD callback set ... calling\n"); | ||
309 | callback_txd(param_txd); | ||
310 | } | ||
311 | if (midc->raw_tfr) { | ||
312 | desc->status = DMA_COMPLETE; | ||
313 | if (desc->lli != NULL) { | ||
314 | pci_pool_free(desc->lli_pool, desc->lli, | ||
315 | desc->lli_phys); | ||
316 | pci_pool_destroy(desc->lli_pool); | ||
317 | desc->lli = NULL; | ||
318 | } | ||
319 | list_move(&desc->desc_node, &midc->free_list); | ||
320 | midc->busy = false; | ||
321 | } | ||
322 | spin_lock_bh(&midc->lock); | ||
323 | |||
324 | } | ||
325 | /** | ||
326 | * midc_scan_descriptors - check the descriptors in channel | ||
327 | * mark completed when tx is completete | ||
328 | * @mid: device | ||
329 | * @midc: channel to scan | ||
330 | * | ||
331 | * Walk the descriptor chain for the device and process any entries | ||
332 | * that are complete. | ||
333 | */ | ||
334 | static void midc_scan_descriptors(struct middma_device *mid, | ||
335 | struct intel_mid_dma_chan *midc) | ||
336 | { | ||
337 | struct intel_mid_dma_desc *desc = NULL, *_desc = NULL; | ||
338 | |||
339 | /*tx is complete*/ | ||
340 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | ||
341 | if (desc->status == DMA_IN_PROGRESS) | ||
342 | midc_descriptor_complete(midc, desc); | ||
343 | } | ||
344 | return; | ||
345 | } | ||
346 | /** | ||
347 | * midc_lli_fill_sg - Helper function to convert | ||
348 | * SG list to Linked List Items. | ||
349 | *@midc: Channel | ||
350 | *@desc: DMA descriptor | ||
351 | *@sglist: Pointer to SG list | ||
352 | *@sglen: SG list length | ||
353 | *@flags: DMA transaction flags | ||
354 | * | ||
355 | * Walk through the SG list and convert the SG list into Linked | ||
356 | * List Items (LLI). | ||
357 | */ | ||
358 | static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, | ||
359 | struct intel_mid_dma_desc *desc, | ||
360 | struct scatterlist *sglist, | ||
361 | unsigned int sglen, | ||
362 | unsigned int flags) | ||
363 | { | ||
364 | struct intel_mid_dma_slave *mids; | ||
365 | struct scatterlist *sg; | ||
366 | dma_addr_t lli_next, sg_phy_addr; | ||
367 | struct intel_mid_dma_lli *lli_bloc_desc; | ||
368 | union intel_mid_dma_ctl_lo ctl_lo; | ||
369 | union intel_mid_dma_ctl_hi ctl_hi; | ||
370 | int i; | ||
371 | |||
372 | pr_debug("MDMA: Entered midc_lli_fill_sg\n"); | ||
373 | mids = midc->mid_slave; | ||
374 | |||
375 | lli_bloc_desc = desc->lli; | ||
376 | lli_next = desc->lli_phys; | ||
377 | |||
378 | ctl_lo.ctl_lo = desc->ctl_lo; | ||
379 | ctl_hi.ctl_hi = desc->ctl_hi; | ||
380 | for_each_sg(sglist, sg, sglen, i) { | ||
381 | /*Populate CTL_LOW and LLI values*/ | ||
382 | if (i != sglen - 1) { | ||
383 | lli_next = lli_next + | ||
384 | sizeof(struct intel_mid_dma_lli); | ||
385 | } else { | ||
386 | /*Check for circular list, otherwise terminate LLI to ZERO*/ | ||
387 | if (flags & DMA_PREP_CIRCULAR_LIST) { | ||
388 | pr_debug("MDMA: LLI is configured in circular mode\n"); | ||
389 | lli_next = desc->lli_phys; | ||
390 | } else { | ||
391 | lli_next = 0; | ||
392 | ctl_lo.ctlx.llp_dst_en = 0; | ||
393 | ctl_lo.ctlx.llp_src_en = 0; | ||
394 | } | ||
395 | } | ||
396 | /*Populate CTL_HI values*/ | ||
397 | ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg), | ||
398 | desc->width, | ||
399 | midc->dma->block_size); | ||
400 | /*Populate SAR and DAR values*/ | ||
401 | sg_phy_addr = sg_dma_address(sg); | ||
402 | if (desc->dirn == DMA_MEM_TO_DEV) { | ||
403 | lli_bloc_desc->sar = sg_phy_addr; | ||
404 | lli_bloc_desc->dar = mids->dma_slave.dst_addr; | ||
405 | } else if (desc->dirn == DMA_DEV_TO_MEM) { | ||
406 | lli_bloc_desc->sar = mids->dma_slave.src_addr; | ||
407 | lli_bloc_desc->dar = sg_phy_addr; | ||
408 | } | ||
409 | /*Copy values into block descriptor in system memroy*/ | ||
410 | lli_bloc_desc->llp = lli_next; | ||
411 | lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; | ||
412 | lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; | ||
413 | |||
414 | lli_bloc_desc++; | ||
415 | } | ||
416 | /*Copy very first LLI values to descriptor*/ | ||
417 | desc->ctl_lo = desc->lli->ctl_lo; | ||
418 | desc->ctl_hi = desc->lli->ctl_hi; | ||
419 | desc->sar = desc->lli->sar; | ||
420 | desc->dar = desc->lli->dar; | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | /***************************************************************************** | ||
425 | DMA engine callback Functions*/ | ||
426 | /** | ||
427 | * intel_mid_dma_tx_submit - callback to submit DMA transaction | ||
428 | * @tx: dma engine descriptor | ||
429 | * | ||
430 | * Submit the DMA transaction for this descriptor, start if ch idle | ||
431 | */ | ||
432 | static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
433 | { | ||
434 | struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx); | ||
435 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); | ||
436 | dma_cookie_t cookie; | ||
437 | |||
438 | spin_lock_bh(&midc->lock); | ||
439 | cookie = dma_cookie_assign(tx); | ||
440 | |||
441 | if (list_empty(&midc->active_list)) | ||
442 | list_add_tail(&desc->desc_node, &midc->active_list); | ||
443 | else | ||
444 | list_add_tail(&desc->desc_node, &midc->queue); | ||
445 | |||
446 | midc_dostart(midc, desc); | ||
447 | spin_unlock_bh(&midc->lock); | ||
448 | |||
449 | return cookie; | ||
450 | } | ||
451 | |||
452 | /** | ||
453 | * intel_mid_dma_issue_pending - callback to issue pending txn | ||
454 | * @chan: chan where pending trascation needs to be checked and submitted | ||
455 | * | ||
456 | * Call for scan to issue pending descriptors | ||
457 | */ | ||
458 | static void intel_mid_dma_issue_pending(struct dma_chan *chan) | ||
459 | { | ||
460 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
461 | |||
462 | spin_lock_bh(&midc->lock); | ||
463 | if (!list_empty(&midc->queue)) | ||
464 | midc_scan_descriptors(to_middma_device(chan->device), midc); | ||
465 | spin_unlock_bh(&midc->lock); | ||
466 | } | ||
467 | |||
468 | /** | ||
469 | * intel_mid_dma_tx_status - Return status of txn | ||
470 | * @chan: chan for where status needs to be checked | ||
471 | * @cookie: cookie for txn | ||
472 | * @txstate: DMA txn state | ||
473 | * | ||
474 | * Return status of DMA txn | ||
475 | */ | ||
476 | static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | ||
477 | dma_cookie_t cookie, | ||
478 | struct dma_tx_state *txstate) | ||
479 | { | ||
480 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
481 | enum dma_status ret; | ||
482 | |||
483 | ret = dma_cookie_status(chan, cookie, txstate); | ||
484 | if (ret != DMA_COMPLETE) { | ||
485 | spin_lock_bh(&midc->lock); | ||
486 | midc_scan_descriptors(to_middma_device(chan->device), midc); | ||
487 | spin_unlock_bh(&midc->lock); | ||
488 | |||
489 | ret = dma_cookie_status(chan, cookie, txstate); | ||
490 | } | ||
491 | |||
492 | return ret; | ||
493 | } | ||
494 | |||
495 | static int intel_mid_dma_config(struct dma_chan *chan, | ||
496 | struct dma_slave_config *slave) | ||
497 | { | ||
498 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
499 | struct intel_mid_dma_slave *mid_slave; | ||
500 | |||
501 | BUG_ON(!midc); | ||
502 | BUG_ON(!slave); | ||
503 | pr_debug("MDMA: slave control called\n"); | ||
504 | |||
505 | mid_slave = to_intel_mid_dma_slave(slave); | ||
506 | |||
507 | BUG_ON(!mid_slave); | ||
508 | |||
509 | midc->mid_slave = mid_slave; | ||
510 | return 0; | ||
511 | } | ||
512 | |||
513 | static int intel_mid_dma_terminate_all(struct dma_chan *chan) | ||
514 | { | ||
515 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
516 | struct middma_device *mid = to_middma_device(chan->device); | ||
517 | struct intel_mid_dma_desc *desc, *_desc; | ||
518 | union intel_mid_dma_cfg_lo cfg_lo; | ||
519 | |||
520 | spin_lock_bh(&midc->lock); | ||
521 | if (midc->busy == false) { | ||
522 | spin_unlock_bh(&midc->lock); | ||
523 | return 0; | ||
524 | } | ||
525 | /*Suspend and disable the channel*/ | ||
526 | cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); | ||
527 | cfg_lo.cfgx.ch_susp = 1; | ||
528 | iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); | ||
529 | iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); | ||
530 | midc->busy = false; | ||
531 | /* Disable interrupts */ | ||
532 | disable_dma_interrupt(midc); | ||
533 | midc->descs_allocated = 0; | ||
534 | |||
535 | spin_unlock_bh(&midc->lock); | ||
536 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | ||
537 | if (desc->lli != NULL) { | ||
538 | pci_pool_free(desc->lli_pool, desc->lli, | ||
539 | desc->lli_phys); | ||
540 | pci_pool_destroy(desc->lli_pool); | ||
541 | desc->lli = NULL; | ||
542 | } | ||
543 | list_move(&desc->desc_node, &midc->free_list); | ||
544 | } | ||
545 | return 0; | ||
546 | } | ||
547 | |||
548 | |||
549 | /** | ||
550 | * intel_mid_dma_prep_memcpy - Prep memcpy txn | ||
551 | * @chan: chan for DMA transfer | ||
552 | * @dest: destn address | ||
553 | * @src: src address | ||
554 | * @len: DMA transfer len | ||
555 | * @flags: DMA flags | ||
556 | * | ||
557 | * Perform a DMA memcpy. Note we support slave periphral DMA transfers only | ||
558 | * The periphral txn details should be filled in slave structure properly | ||
559 | * Returns the descriptor for this txn | ||
560 | */ | ||
561 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | ||
562 | struct dma_chan *chan, dma_addr_t dest, | ||
563 | dma_addr_t src, size_t len, unsigned long flags) | ||
564 | { | ||
565 | struct intel_mid_dma_chan *midc; | ||
566 | struct intel_mid_dma_desc *desc = NULL; | ||
567 | struct intel_mid_dma_slave *mids; | ||
568 | union intel_mid_dma_ctl_lo ctl_lo; | ||
569 | union intel_mid_dma_ctl_hi ctl_hi; | ||
570 | union intel_mid_dma_cfg_lo cfg_lo; | ||
571 | union intel_mid_dma_cfg_hi cfg_hi; | ||
572 | enum dma_slave_buswidth width; | ||
573 | |||
574 | pr_debug("MDMA: Prep for memcpy\n"); | ||
575 | BUG_ON(!chan); | ||
576 | if (!len) | ||
577 | return NULL; | ||
578 | |||
579 | midc = to_intel_mid_dma_chan(chan); | ||
580 | BUG_ON(!midc); | ||
581 | |||
582 | mids = midc->mid_slave; | ||
583 | BUG_ON(!mids); | ||
584 | |||
585 | pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", | ||
586 | midc->dma->pci_id, midc->ch_id, len); | ||
587 | pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", | ||
588 | mids->cfg_mode, mids->dma_slave.direction, | ||
589 | mids->hs_mode, mids->dma_slave.src_addr_width); | ||
590 | |||
591 | /*calculate CFG_LO*/ | ||
592 | if (mids->hs_mode == LNW_DMA_SW_HS) { | ||
593 | cfg_lo.cfg_lo = 0; | ||
594 | cfg_lo.cfgx.hs_sel_dst = 1; | ||
595 | cfg_lo.cfgx.hs_sel_src = 1; | ||
596 | } else if (mids->hs_mode == LNW_DMA_HW_HS) | ||
597 | cfg_lo.cfg_lo = 0x00000; | ||
598 | |||
599 | /*calculate CFG_HI*/ | ||
600 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { | ||
601 | /*SW HS only*/ | ||
602 | cfg_hi.cfg_hi = 0; | ||
603 | } else { | ||
604 | cfg_hi.cfg_hi = 0; | ||
605 | if (midc->dma->pimr_mask) { | ||
606 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ | ||
607 | cfg_hi.cfgx.fifo_mode = 1; | ||
608 | if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { | ||
609 | cfg_hi.cfgx.src_per = 0; | ||
610 | if (mids->device_instance == 0) | ||
611 | cfg_hi.cfgx.dst_per = 3; | ||
612 | if (mids->device_instance == 1) | ||
613 | cfg_hi.cfgx.dst_per = 1; | ||
614 | } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { | ||
615 | if (mids->device_instance == 0) | ||
616 | cfg_hi.cfgx.src_per = 2; | ||
617 | if (mids->device_instance == 1) | ||
618 | cfg_hi.cfgx.src_per = 0; | ||
619 | cfg_hi.cfgx.dst_per = 0; | ||
620 | } | ||
621 | } else { | ||
622 | cfg_hi.cfgx.protctl = 0x1; /*default value*/ | ||
623 | cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per = | ||
624 | midc->ch_id - midc->dma->chan_base; | ||
625 | } | ||
626 | } | ||
627 | |||
628 | /*calculate CTL_HI*/ | ||
629 | ctl_hi.ctlx.reser = 0; | ||
630 | ctl_hi.ctlx.done = 0; | ||
631 | width = mids->dma_slave.src_addr_width; | ||
632 | |||
633 | ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); | ||
634 | pr_debug("MDMA:calc len %d for block size %d\n", | ||
635 | ctl_hi.ctlx.block_ts, midc->dma->block_size); | ||
636 | /*calculate CTL_LO*/ | ||
637 | ctl_lo.ctl_lo = 0; | ||
638 | ctl_lo.ctlx.int_en = 1; | ||
639 | ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; | ||
640 | ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; | ||
641 | |||
642 | /* | ||
643 | * Here we need some translation from "enum dma_slave_buswidth" | ||
644 | * to the format for our dma controller | ||
645 | * standard intel_mid_dmac's format | ||
646 | * 1 Byte 0b000 | ||
647 | * 2 Bytes 0b001 | ||
648 | * 4 Bytes 0b010 | ||
649 | */ | ||
650 | ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; | ||
651 | ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; | ||
652 | |||
653 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { | ||
654 | ctl_lo.ctlx.tt_fc = 0; | ||
655 | ctl_lo.ctlx.sinc = 0; | ||
656 | ctl_lo.ctlx.dinc = 0; | ||
657 | } else { | ||
658 | if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { | ||
659 | ctl_lo.ctlx.sinc = 0; | ||
660 | ctl_lo.ctlx.dinc = 2; | ||
661 | ctl_lo.ctlx.tt_fc = 1; | ||
662 | } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { | ||
663 | ctl_lo.ctlx.sinc = 2; | ||
664 | ctl_lo.ctlx.dinc = 0; | ||
665 | ctl_lo.ctlx.tt_fc = 2; | ||
666 | } | ||
667 | } | ||
668 | |||
669 | pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", | ||
670 | ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); | ||
671 | |||
672 | enable_dma_interrupt(midc); | ||
673 | |||
674 | desc = midc_desc_get(midc); | ||
675 | if (desc == NULL) | ||
676 | goto err_desc_get; | ||
677 | desc->sar = src; | ||
678 | desc->dar = dest ; | ||
679 | desc->len = len; | ||
680 | desc->cfg_hi = cfg_hi.cfg_hi; | ||
681 | desc->cfg_lo = cfg_lo.cfg_lo; | ||
682 | desc->ctl_lo = ctl_lo.ctl_lo; | ||
683 | desc->ctl_hi = ctl_hi.ctl_hi; | ||
684 | desc->width = width; | ||
685 | desc->dirn = mids->dma_slave.direction; | ||
686 | desc->lli_phys = 0; | ||
687 | desc->lli = NULL; | ||
688 | desc->lli_pool = NULL; | ||
689 | return &desc->txd; | ||
690 | |||
691 | err_desc_get: | ||
692 | pr_err("ERR_MDMA: Failed to get desc\n"); | ||
693 | midc_desc_put(midc, desc); | ||
694 | return NULL; | ||
695 | } | ||
696 | /** | ||
697 | * intel_mid_dma_prep_slave_sg - Prep slave sg txn | ||
698 | * @chan: chan for DMA transfer | ||
699 | * @sgl: scatter gather list | ||
700 | * @sg_len: length of sg txn | ||
701 | * @direction: DMA transfer dirtn | ||
702 | * @flags: DMA flags | ||
703 | * @context: transfer context (ignored) | ||
704 | * | ||
705 | * Prepares LLI based periphral transfer | ||
706 | */ | ||
707 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | ||
708 | struct dma_chan *chan, struct scatterlist *sgl, | ||
709 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
710 | unsigned long flags, void *context) | ||
711 | { | ||
712 | struct intel_mid_dma_chan *midc = NULL; | ||
713 | struct intel_mid_dma_slave *mids = NULL; | ||
714 | struct intel_mid_dma_desc *desc = NULL; | ||
715 | struct dma_async_tx_descriptor *txd = NULL; | ||
716 | union intel_mid_dma_ctl_lo ctl_lo; | ||
717 | |||
718 | pr_debug("MDMA: Prep for slave SG\n"); | ||
719 | |||
720 | if (!sg_len) { | ||
721 | pr_err("MDMA: Invalid SG length\n"); | ||
722 | return NULL; | ||
723 | } | ||
724 | midc = to_intel_mid_dma_chan(chan); | ||
725 | BUG_ON(!midc); | ||
726 | |||
727 | mids = midc->mid_slave; | ||
728 | BUG_ON(!mids); | ||
729 | |||
730 | if (!midc->dma->pimr_mask) { | ||
731 | /* We can still handle sg list with only one item */ | ||
732 | if (sg_len == 1) { | ||
733 | txd = intel_mid_dma_prep_memcpy(chan, | ||
734 | mids->dma_slave.dst_addr, | ||
735 | mids->dma_slave.src_addr, | ||
736 | sg_dma_len(sgl), | ||
737 | flags); | ||
738 | return txd; | ||
739 | } else { | ||
740 | pr_warn("MDMA: SG list is not supported by this controller\n"); | ||
741 | return NULL; | ||
742 | } | ||
743 | } | ||
744 | |||
745 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", | ||
746 | sg_len, direction, flags); | ||
747 | |||
748 | txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags); | ||
749 | if (NULL == txd) { | ||
750 | pr_err("MDMA: Prep memcpy failed\n"); | ||
751 | return NULL; | ||
752 | } | ||
753 | |||
754 | desc = to_intel_mid_dma_desc(txd); | ||
755 | desc->dirn = direction; | ||
756 | ctl_lo.ctl_lo = desc->ctl_lo; | ||
757 | ctl_lo.ctlx.llp_dst_en = 1; | ||
758 | ctl_lo.ctlx.llp_src_en = 1; | ||
759 | desc->ctl_lo = ctl_lo.ctl_lo; | ||
760 | desc->lli_length = sg_len; | ||
761 | desc->current_lli = 0; | ||
762 | /* DMA coherent memory pool for LLI descriptors*/ | ||
763 | desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", | ||
764 | midc->dma->pdev, | ||
765 | (sizeof(struct intel_mid_dma_lli)*sg_len), | ||
766 | 32, 0); | ||
767 | if (NULL == desc->lli_pool) { | ||
768 | pr_err("MID_DMA:LLI pool create failed\n"); | ||
769 | return NULL; | ||
770 | } | ||
771 | |||
772 | desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); | ||
773 | if (!desc->lli) { | ||
774 | pr_err("MID_DMA: LLI alloc failed\n"); | ||
775 | pci_pool_destroy(desc->lli_pool); | ||
776 | return NULL; | ||
777 | } | ||
778 | |||
779 | midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); | ||
780 | if (flags & DMA_PREP_INTERRUPT) { | ||
781 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
782 | midc->dma_base + MASK_BLOCK); | ||
783 | pr_debug("MDMA:Enabled Block interrupt\n"); | ||
784 | } | ||
785 | return &desc->txd; | ||
786 | } | ||
787 | |||
788 | /** | ||
789 | * intel_mid_dma_free_chan_resources - Frees dma resources | ||
790 | * @chan: chan requiring attention | ||
791 | * | ||
792 | * Frees the allocated resources on this DMA chan | ||
793 | */ | ||
794 | static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | ||
795 | { | ||
796 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
797 | struct middma_device *mid = to_middma_device(chan->device); | ||
798 | struct intel_mid_dma_desc *desc, *_desc; | ||
799 | |||
800 | if (true == midc->busy) { | ||
801 | /*trying to free ch in use!!!!!*/ | ||
802 | pr_err("ERR_MDMA: trying to free ch in use\n"); | ||
803 | } | ||
804 | spin_lock_bh(&midc->lock); | ||
805 | midc->descs_allocated = 0; | ||
806 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | ||
807 | list_del(&desc->desc_node); | ||
808 | pci_pool_free(mid->dma_pool, desc, desc->txd.phys); | ||
809 | } | ||
810 | list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { | ||
811 | list_del(&desc->desc_node); | ||
812 | pci_pool_free(mid->dma_pool, desc, desc->txd.phys); | ||
813 | } | ||
814 | list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) { | ||
815 | list_del(&desc->desc_node); | ||
816 | pci_pool_free(mid->dma_pool, desc, desc->txd.phys); | ||
817 | } | ||
818 | spin_unlock_bh(&midc->lock); | ||
819 | midc->in_use = false; | ||
820 | midc->busy = false; | ||
821 | /* Disable CH interrupts */ | ||
822 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); | ||
823 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); | ||
824 | pm_runtime_put(&mid->pdev->dev); | ||
825 | } | ||
826 | |||
827 | /** | ||
828 | * intel_mid_dma_alloc_chan_resources - Allocate dma resources | ||
829 | * @chan: chan requiring attention | ||
830 | * | ||
831 | * Allocates DMA resources on this chan | ||
832 | * Return the descriptors allocated | ||
833 | */ | ||
834 | static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | ||
835 | { | ||
836 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
837 | struct middma_device *mid = to_middma_device(chan->device); | ||
838 | struct intel_mid_dma_desc *desc; | ||
839 | dma_addr_t phys; | ||
840 | int i = 0; | ||
841 | |||
842 | pm_runtime_get_sync(&mid->pdev->dev); | ||
843 | |||
844 | if (mid->state == SUSPENDED) { | ||
845 | if (dma_resume(&mid->pdev->dev)) { | ||
846 | pr_err("ERR_MDMA: resume failed"); | ||
847 | return -EFAULT; | ||
848 | } | ||
849 | } | ||
850 | |||
851 | /* ASSERT: channel is idle */ | ||
852 | if (test_ch_en(mid->dma_base, midc->ch_id)) { | ||
853 | /*ch is not idle*/ | ||
854 | pr_err("ERR_MDMA: ch not idle\n"); | ||
855 | pm_runtime_put(&mid->pdev->dev); | ||
856 | return -EIO; | ||
857 | } | ||
858 | dma_cookie_init(chan); | ||
859 | |||
860 | spin_lock_bh(&midc->lock); | ||
861 | while (midc->descs_allocated < DESCS_PER_CHANNEL) { | ||
862 | spin_unlock_bh(&midc->lock); | ||
863 | desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); | ||
864 | if (!desc) { | ||
865 | pr_err("ERR_MDMA: desc failed\n"); | ||
866 | pm_runtime_put(&mid->pdev->dev); | ||
867 | return -ENOMEM; | ||
868 | /*check*/ | ||
869 | } | ||
870 | dma_async_tx_descriptor_init(&desc->txd, chan); | ||
871 | desc->txd.tx_submit = intel_mid_dma_tx_submit; | ||
872 | desc->txd.flags = DMA_CTRL_ACK; | ||
873 | desc->txd.phys = phys; | ||
874 | spin_lock_bh(&midc->lock); | ||
875 | i = ++midc->descs_allocated; | ||
876 | list_add_tail(&desc->desc_node, &midc->free_list); | ||
877 | } | ||
878 | spin_unlock_bh(&midc->lock); | ||
879 | midc->in_use = true; | ||
880 | midc->busy = false; | ||
881 | pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); | ||
882 | return i; | ||
883 | } | ||
884 | |||
885 | /** | ||
886 | * midc_handle_error - Handle DMA txn error | ||
887 | * @mid: controller where error occurred | ||
888 | * @midc: chan where error occurred | ||
889 | * | ||
890 | * Scan the descriptor for error | ||
891 | */ | ||
892 | static void midc_handle_error(struct middma_device *mid, | ||
893 | struct intel_mid_dma_chan *midc) | ||
894 | { | ||
895 | midc_scan_descriptors(mid, midc); | ||
896 | } | ||
897 | |||
898 | /** | ||
899 | * dma_tasklet - DMA interrupt tasklet | ||
900 | * @data: tasklet arg (the controller structure) | ||
901 | * | ||
902 | * Scan the controller for interrupts for completion/error | ||
903 | * Clear the interrupt and call for handling completion/error | ||
904 | */ | ||
905 | static void dma_tasklet(unsigned long data) | ||
906 | { | ||
907 | struct middma_device *mid = NULL; | ||
908 | struct intel_mid_dma_chan *midc = NULL; | ||
909 | u32 status, raw_tfr, raw_block; | ||
910 | int i; | ||
911 | |||
912 | mid = (struct middma_device *)data; | ||
913 | if (mid == NULL) { | ||
914 | pr_err("ERR_MDMA: tasklet Null param\n"); | ||
915 | return; | ||
916 | } | ||
917 | pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); | ||
918 | raw_tfr = ioread32(mid->dma_base + RAW_TFR); | ||
919 | raw_block = ioread32(mid->dma_base + RAW_BLOCK); | ||
920 | status = raw_tfr | raw_block; | ||
921 | status &= mid->intr_mask; | ||
922 | while (status) { | ||
923 | /*txn interrupt*/ | ||
924 | i = get_ch_index(&status, mid->chan_base); | ||
925 | if (i < 0) { | ||
926 | pr_err("ERR_MDMA:Invalid ch index %x\n", i); | ||
927 | return; | ||
928 | } | ||
929 | midc = &mid->ch[i]; | ||
930 | if (midc == NULL) { | ||
931 | pr_err("ERR_MDMA:Null param midc\n"); | ||
932 | return; | ||
933 | } | ||
934 | pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", | ||
935 | status, midc->ch_id, i); | ||
936 | midc->raw_tfr = raw_tfr; | ||
937 | midc->raw_block = raw_block; | ||
938 | spin_lock_bh(&midc->lock); | ||
939 | /*clearing this interrupts first*/ | ||
940 | iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); | ||
941 | if (raw_block) { | ||
942 | iowrite32((1 << midc->ch_id), | ||
943 | mid->dma_base + CLEAR_BLOCK); | ||
944 | } | ||
945 | midc_scan_descriptors(mid, midc); | ||
946 | pr_debug("MDMA:Scan of desc... complete, unmasking\n"); | ||
947 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
948 | mid->dma_base + MASK_TFR); | ||
949 | if (raw_block) { | ||
950 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
951 | mid->dma_base + MASK_BLOCK); | ||
952 | } | ||
953 | spin_unlock_bh(&midc->lock); | ||
954 | } | ||
955 | |||
956 | status = ioread32(mid->dma_base + RAW_ERR); | ||
957 | status &= mid->intr_mask; | ||
958 | while (status) { | ||
959 | /*err interrupt*/ | ||
960 | i = get_ch_index(&status, mid->chan_base); | ||
961 | if (i < 0) { | ||
962 | pr_err("ERR_MDMA:Invalid ch index %x\n", i); | ||
963 | return; | ||
964 | } | ||
965 | midc = &mid->ch[i]; | ||
966 | if (midc == NULL) { | ||
967 | pr_err("ERR_MDMA:Null param midc\n"); | ||
968 | return; | ||
969 | } | ||
970 | pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", | ||
971 | status, midc->ch_id, i); | ||
972 | |||
973 | iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR); | ||
974 | spin_lock_bh(&midc->lock); | ||
975 | midc_handle_error(mid, midc); | ||
976 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
977 | mid->dma_base + MASK_ERR); | ||
978 | spin_unlock_bh(&midc->lock); | ||
979 | } | ||
980 | pr_debug("MDMA:Exiting takslet...\n"); | ||
981 | return; | ||
982 | } | ||
983 | |||
984 | static void dma_tasklet1(unsigned long data) | ||
985 | { | ||
986 | pr_debug("MDMA:in takslet1...\n"); | ||
987 | return dma_tasklet(data); | ||
988 | } | ||
989 | |||
990 | static void dma_tasklet2(unsigned long data) | ||
991 | { | ||
992 | pr_debug("MDMA:in takslet2...\n"); | ||
993 | return dma_tasklet(data); | ||
994 | } | ||
995 | |||
996 | /** | ||
997 | * intel_mid_dma_interrupt - DMA ISR | ||
998 | * @irq: IRQ where interrupt occurred | ||
999 | * @data: ISR cllback data (the controller structure) | ||
1000 | * | ||
1001 | * See if this is our interrupt if so then schedule the tasklet | ||
1002 | * otherwise ignore | ||
1003 | */ | ||
1004 | static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | ||
1005 | { | ||
1006 | struct middma_device *mid = data; | ||
1007 | u32 tfr_status, err_status; | ||
1008 | int call_tasklet = 0; | ||
1009 | |||
1010 | tfr_status = ioread32(mid->dma_base + RAW_TFR); | ||
1011 | err_status = ioread32(mid->dma_base + RAW_ERR); | ||
1012 | if (!tfr_status && !err_status) | ||
1013 | return IRQ_NONE; | ||
1014 | |||
1015 | /*DMA Interrupt*/ | ||
1016 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); | ||
1017 | pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); | ||
1018 | tfr_status &= mid->intr_mask; | ||
1019 | if (tfr_status) { | ||
1020 | /*need to disable intr*/ | ||
1021 | iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); | ||
1022 | iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); | ||
1023 | pr_debug("MDMA: Calling tasklet %x\n", tfr_status); | ||
1024 | call_tasklet = 1; | ||
1025 | } | ||
1026 | err_status &= mid->intr_mask; | ||
1027 | if (err_status) { | ||
1028 | iowrite32((err_status << INT_MASK_WE), | ||
1029 | mid->dma_base + MASK_ERR); | ||
1030 | call_tasklet = 1; | ||
1031 | } | ||
1032 | if (call_tasklet) | ||
1033 | tasklet_schedule(&mid->tasklet); | ||
1034 | |||
1035 | return IRQ_HANDLED; | ||
1036 | } | ||
1037 | |||
1038 | static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) | ||
1039 | { | ||
1040 | return intel_mid_dma_interrupt(irq, data); | ||
1041 | } | ||
1042 | |||
1043 | static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) | ||
1044 | { | ||
1045 | return intel_mid_dma_interrupt(irq, data); | ||
1046 | } | ||
1047 | |||
1048 | /** | ||
1049 | * mid_setup_dma - Setup the DMA controller | ||
1050 | * @pdev: Controller PCI device structure | ||
1051 | * | ||
1052 | * Initialize the DMA controller and its channels, register with the | ||
1053 | * DMA engine and set up the ISR. | ||
1054 | */ | ||
1055 | static int mid_setup_dma(struct pci_dev *pdev) | ||
1056 | { | ||
1057 | struct middma_device *dma = pci_get_drvdata(pdev); | ||
1058 | int err, i; | ||
1059 | |||
1060 | /* DMA coherent memory pool for DMA descriptor allocations */ | ||
1061 | dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, | ||
1062 | sizeof(struct intel_mid_dma_desc), | ||
1063 | 32, 0); | ||
1064 | if (NULL == dma->dma_pool) { | ||
1065 | pr_err("ERR_MDMA:pci_pool_create failed\n"); | ||
1066 | err = -ENOMEM; | ||
1067 | goto err_dma_pool; | ||
1068 | } | ||
1069 | |||
1070 | INIT_LIST_HEAD(&dma->common.channels); | ||
1071 | dma->pci_id = pdev->device; | ||
1072 | if (dma->pimr_mask) { | ||
1073 | dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, | ||
1074 | LNW_PERIPHRAL_MASK_SIZE); | ||
1075 | if (dma->mask_reg == NULL) { | ||
1076 | pr_err("ERR_MDMA:Can't map peripheral intr space !!\n"); | ||
1077 | err = -ENOMEM; | ||
1078 | goto err_ioremap; | ||
1079 | } | ||
1080 | } else | ||
1081 | dma->mask_reg = NULL; | ||
1082 | |||
1083 | pr_debug("MDMA:Adding %d channels for this controller\n", dma->max_chan); | ||
1084 | /*init CH structures*/ | ||
1085 | dma->intr_mask = 0; | ||
1086 | dma->state = RUNNING; | ||
1087 | for (i = 0; i < dma->max_chan; i++) { | ||
1088 | struct intel_mid_dma_chan *midch = &dma->ch[i]; | ||
1089 | |||
1090 | midch->chan.device = &dma->common; | ||
1091 | dma_cookie_init(&midch->chan); | ||
1092 | midch->ch_id = dma->chan_base + i; | ||
1093 | pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); | ||
1094 | |||
1095 | midch->dma_base = dma->dma_base; | ||
1096 | midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; | ||
1097 | midch->dma = dma; | ||
1098 | dma->intr_mask |= 1 << (dma->chan_base + i); | ||
1099 | spin_lock_init(&midch->lock); | ||
1100 | |||
1101 | INIT_LIST_HEAD(&midch->active_list); | ||
1102 | INIT_LIST_HEAD(&midch->queue); | ||
1103 | INIT_LIST_HEAD(&midch->free_list); | ||
1104 | /*mask interrupts*/ | ||
1105 | iowrite32(MASK_INTR_REG(midch->ch_id), | ||
1106 | dma->dma_base + MASK_BLOCK); | ||
1107 | iowrite32(MASK_INTR_REG(midch->ch_id), | ||
1108 | dma->dma_base + MASK_SRC_TRAN); | ||
1109 | iowrite32(MASK_INTR_REG(midch->ch_id), | ||
1110 | dma->dma_base + MASK_DST_TRAN); | ||
1111 | iowrite32(MASK_INTR_REG(midch->ch_id), | ||
1112 | dma->dma_base + MASK_ERR); | ||
1113 | iowrite32(MASK_INTR_REG(midch->ch_id), | ||
1114 | dma->dma_base + MASK_TFR); | ||
1115 | |||
1116 | disable_dma_interrupt(midch); | ||
1117 | list_add_tail(&midch->chan.device_node, &dma->common.channels); | ||
1118 | } | ||
1119 | pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); | ||
1120 | |||
1121 | /*init dma structure*/ | ||
1122 | dma_cap_zero(dma->common.cap_mask); | ||
1123 | dma_cap_set(DMA_MEMCPY, dma->common.cap_mask); | ||
1124 | dma_cap_set(DMA_SLAVE, dma->common.cap_mask); | ||
1125 | dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); | ||
1126 | dma->common.dev = &pdev->dev; | ||
1127 | |||
1128 | dma->common.device_alloc_chan_resources = | ||
1129 | intel_mid_dma_alloc_chan_resources; | ||
1130 | dma->common.device_free_chan_resources = | ||
1131 | intel_mid_dma_free_chan_resources; | ||
1132 | |||
1133 | dma->common.device_tx_status = intel_mid_dma_tx_status; | ||
1134 | dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; | ||
1135 | dma->common.device_issue_pending = intel_mid_dma_issue_pending; | ||
1136 | dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; | ||
1137 | dma->common.device_config = intel_mid_dma_config; | ||
1138 | dma->common.device_terminate_all = intel_mid_dma_terminate_all; | ||
1139 | |||
1140 | /*enable dma controller*/ | ||
1141 | iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); | ||
1142 | |||
1143 | /*register irq */ | ||
1144 | if (dma->pimr_mask) { | ||
1145 | pr_debug("MDMA:Requesting irq shared for DMAC1\n"); | ||
1146 | err = request_irq(pdev->irq, intel_mid_dma_interrupt1, | ||
1147 | IRQF_SHARED, "INTEL_MID_DMAC1", dma); | ||
1148 | if (0 != err) | ||
1149 | goto err_irq; | ||
1150 | } else { | ||
1151 | dma->intr_mask = 0x03; | ||
1152 | pr_debug("MDMA:Requesting irq for DMAC2\n"); | ||
1153 | err = request_irq(pdev->irq, intel_mid_dma_interrupt2, | ||
1154 | IRQF_SHARED, "INTEL_MID_DMAC2", dma); | ||
1155 | if (0 != err) | ||
1156 | goto err_irq; | ||
1157 | } | ||
1158 | /*register device w/ engine*/ | ||
1159 | err = dma_async_device_register(&dma->common); | ||
1160 | if (0 != err) { | ||
1161 | pr_err("ERR_MDMA:device_register failed: %d\n", err); | ||
1162 | goto err_engine; | ||
1163 | } | ||
1164 | if (dma->pimr_mask) { | ||
1165 | pr_debug("setting up tasklet1 for DMAC1\n"); | ||
1166 | tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma); | ||
1167 | } else { | ||
1168 | pr_debug("setting up tasklet2 for DMAC2\n"); | ||
1169 | tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma); | ||
1170 | } | ||
1171 | return 0; | ||
1172 | |||
1173 | err_engine: | ||
1174 | free_irq(pdev->irq, dma); | ||
1175 | err_irq: | ||
1176 | if (dma->mask_reg) | ||
1177 | iounmap(dma->mask_reg); | ||
1178 | err_ioremap: | ||
1179 | pci_pool_destroy(dma->dma_pool); | ||
1180 | err_dma_pool: | ||
1181 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); | ||
1182 | return err; | ||
1183 | |||
1184 | } | ||
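With DMA_PRIVATE set above, these channels are only handed out on explicit request. A peripheral driver would typically obtain one through the generic dmaengine client API, roughly as in this sketch (example_filter and its matching logic are hypothetical, not part of this driver):

	#include <linux/dmaengine.h>

	static bool example_filter(struct dma_chan *chan, void *param)
	{
		/* driver-specific match, e.g. on chan->device or chan_id */
		return true;
	}

	static struct dma_chan *example_request_channel(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		/* returns NULL if no matching channel is free */
		return dma_request_channel(mask, example_filter, NULL);
	}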
1185 | |||
1186 | /** | ||
1187 | * middma_shutdown - Shutdown the DMA controller | ||
1188 | * @pdev: Controller PCI device structure | ||
1189 | * | ||
1190 | * Called by remove | ||
1191 | * Unregister the DMA controller, clear all structures and free the interrupt | ||
1192 | */ | ||
1193 | static void middma_shutdown(struct pci_dev *pdev) | ||
1194 | { | ||
1195 | struct middma_device *device = pci_get_drvdata(pdev); | ||
1196 | |||
1197 | dma_async_device_unregister(&device->common); | ||
1198 | pci_pool_destroy(device->dma_pool); | ||
1199 | if (device->mask_reg) | ||
1200 | iounmap(device->mask_reg); | ||
1201 | if (device->dma_base) | ||
1202 | iounmap(device->dma_base); | ||
1203 | free_irq(pdev->irq, device); | ||
1204 | return; | ||
1205 | } | ||
1206 | |||
1207 | /** | ||
1208 | * intel_mid_dma_probe - PCI Probe | ||
1209 | * @pdev: Controller PCI device structure | ||
1210 | * @id: pci device id structure | ||
1211 | * | ||
1212 | * Initialize the PCI device, map BARs, query driver data. | ||
1213 | * Call mid_setup_dma to complete controller and channel initialization | ||
1214 | */ | ||
1215 | static int intel_mid_dma_probe(struct pci_dev *pdev, | ||
1216 | const struct pci_device_id *id) | ||
1217 | { | ||
1218 | struct middma_device *device; | ||
1219 | u32 base_addr, bar_size; | ||
1220 | struct intel_mid_dma_probe_info *info; | ||
1221 | int err; | ||
1222 | |||
1223 | pr_debug("MDMA: probe for %x\n", pdev->device); | ||
1224 | info = (void *)id->driver_data; | ||
1225 | pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n", | ||
1226 | info->max_chan, info->ch_base, | ||
1227 | info->block_size, info->pimr_mask); | ||
1228 | |||
1229 | err = pci_enable_device(pdev); | ||
1230 | if (err) | ||
1231 | goto err_enable_device; | ||
1232 | |||
1233 | err = pci_request_regions(pdev, "intel_mid_dmac"); | ||
1234 | if (err) | ||
1235 | goto err_request_regions; | ||
1236 | |||
1237 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
1238 | if (err) | ||
1239 | goto err_set_dma_mask; | ||
1240 | |||
1241 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
1242 | if (err) | ||
1243 | goto err_set_dma_mask; | ||
1244 | |||
1245 | device = kzalloc(sizeof(*device), GFP_KERNEL); | ||
1246 | if (!device) { | ||
1247 | pr_err("ERR_MDMA:kzalloc failed probe\n"); | ||
1248 | err = -ENOMEM; | ||
1249 | goto err_kzalloc; | ||
1250 | } | ||
1251 | device->pdev = pci_dev_get(pdev); | ||
1252 | |||
1253 | base_addr = pci_resource_start(pdev, 0); | ||
1254 | bar_size = pci_resource_len(pdev, 0); | ||
1255 | device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE); | ||
1256 | if (!device->dma_base) { | ||
1257 | pr_err("ERR_MDMA:ioremap failed\n"); | ||
1258 | err = -ENOMEM; | ||
1259 | goto err_ioremap; | ||
1260 | } | ||
1261 | pci_set_drvdata(pdev, device); | ||
1262 | pci_set_master(pdev); | ||
1263 | device->max_chan = info->max_chan; | ||
1264 | device->chan_base = info->ch_base; | ||
1265 | device->block_size = info->block_size; | ||
1266 | device->pimr_mask = info->pimr_mask; | ||
1267 | |||
1268 | err = mid_setup_dma(pdev); | ||
1269 | if (err) | ||
1270 | goto err_dma; | ||
1271 | |||
1272 | pm_runtime_put_noidle(&pdev->dev); | ||
1273 | pm_runtime_allow(&pdev->dev); | ||
1274 | return 0; | ||
1275 | |||
1276 | err_dma: | ||
1277 | iounmap(device->dma_base); | ||
1278 | err_ioremap: | ||
1279 | pci_dev_put(pdev); | ||
1280 | kfree(device); | ||
1281 | err_kzalloc: | ||
1282 | err_set_dma_mask: | ||
1283 | pci_release_regions(pdev); | ||
1284 | pci_disable_device(pdev); | ||
1285 | err_request_regions: | ||
1286 | err_enable_device: | ||
1287 | pr_err("ERR_MDMA:Probe failed %d\n", err); | ||
1288 | return err; | ||
1289 | } | ||
1290 | |||
1291 | /** | ||
1292 | * intel_mid_dma_remove - PCI remove | ||
1293 | * @pdev: Controller PCI device structure | ||
1294 | * | ||
1295 | * Free up all resources and data | ||
1296 | * Call middma_shutdown to complete controller and channel cleanup | ||
1297 | */ | ||
1298 | static void intel_mid_dma_remove(struct pci_dev *pdev) | ||
1299 | { | ||
1300 | struct middma_device *device = pci_get_drvdata(pdev); | ||
1301 | |||
1302 | pm_runtime_get_noresume(&pdev->dev); | ||
1303 | pm_runtime_forbid(&pdev->dev); | ||
1304 | middma_shutdown(pdev); | ||
1305 | pci_dev_put(pdev); | ||
1306 | kfree(device); | ||
1307 | pci_release_regions(pdev); | ||
1308 | pci_disable_device(pdev); | ||
1309 | } | ||
1310 | |||
1311 | /* Power Management */ | ||
1312 | /* | ||
1313 | * dma_suspend - PCI suspend function | ||
1314 | * | ||
1315 | * @dev: device structure | ||
1316 | * | ||
1317 | * This function is called by the OS when a power event occurs; it | ||
1318 | * refuses to suspend while any channel is still in use. | ||
1319 | */ | ||
1320 | static int dma_suspend(struct device *dev) | ||
1321 | { | ||
1322 | struct pci_dev *pci = to_pci_dev(dev); | ||
1323 | int i; | ||
1324 | struct middma_device *device = pci_get_drvdata(pci); | ||
1325 | pr_debug("MDMA: dma_suspend called\n"); | ||
1326 | |||
1327 | for (i = 0; i < device->max_chan; i++) { | ||
1328 | if (device->ch[i].in_use) | ||
1329 | return -EAGAIN; | ||
1330 | } | ||
1331 | dmac1_mask_periphral_intr(device); | ||
1332 | device->state = SUSPENDED; | ||
1333 | pci_save_state(pci); | ||
1334 | pci_disable_device(pci); | ||
1335 | pci_set_power_state(pci, PCI_D3hot); | ||
1336 | return 0; | ||
1337 | } | ||
1338 | |||
1339 | /** | ||
1340 | * dma_resume - PCI resume function | ||
1341 | * | ||
1342 | * @dev: device structure | ||
1343 | * | ||
1344 | * This function is called by the OS when a power event occurs | ||
1345 | */ | ||
1346 | int dma_resume(struct device *dev) | ||
1347 | { | ||
1348 | struct pci_dev *pci = to_pci_dev(dev); | ||
1349 | int ret; | ||
1350 | struct middma_device *device = pci_get_drvdata(pci); | ||
1351 | |||
1352 | pr_debug("MDMA: dma_resume called\n"); | ||
1353 | pci_set_power_state(pci, PCI_D0); | ||
1354 | pci_restore_state(pci); | ||
1355 | ret = pci_enable_device(pci); | ||
1356 | if (ret) { | ||
1357 | pr_err("MDMA: device can't be enabled for %x\n", pci->device); | ||
1358 | return ret; | ||
1359 | } | ||
1360 | device->state = RUNNING; | ||
1361 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | ||
1362 | return 0; | ||
1363 | } | ||
1364 | |||
1365 | static int dma_runtime_suspend(struct device *dev) | ||
1366 | { | ||
1367 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
1368 | struct middma_device *device = pci_get_drvdata(pci_dev); | ||
1369 | |||
1370 | device->state = SUSPENDED; | ||
1371 | return 0; | ||
1372 | } | ||
1373 | |||
1374 | static int dma_runtime_resume(struct device *dev) | ||
1375 | { | ||
1376 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
1377 | struct middma_device *device = pci_get_drvdata(pci_dev); | ||
1378 | |||
1379 | device->state = RUNNING; | ||
1380 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | ||
1381 | return 0; | ||
1382 | } | ||
1383 | |||
1384 | static int dma_runtime_idle(struct device *dev) | ||
1385 | { | ||
1386 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1387 | struct middma_device *device = pci_get_drvdata(pdev); | ||
1388 | int i; | ||
1389 | |||
1390 | for (i = 0; i < device->max_chan; i++) { | ||
1391 | if (device->ch[i].in_use) | ||
1392 | return -EAGAIN; | ||
1393 | } | ||
1394 | |||
1395 | return 0; | ||
1396 | } | ||
1397 | |||
1398 | /****************************************************************************** | ||
1399 | * PCI stuff | ||
1400 | */ | ||
1401 | static struct pci_device_id intel_mid_dma_ids[] = { | ||
1402 | { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)}, | ||
1403 | { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)}, | ||
1404 | { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)}, | ||
1405 | { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)}, | ||
1406 | { 0, } | ||
1407 | }; | ||
1408 | MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); | ||
1409 | |||
1410 | static const struct dev_pm_ops intel_mid_dma_pm = { | ||
1411 | .runtime_suspend = dma_runtime_suspend, | ||
1412 | .runtime_resume = dma_runtime_resume, | ||
1413 | .runtime_idle = dma_runtime_idle, | ||
1414 | .suspend = dma_suspend, | ||
1415 | .resume = dma_resume, | ||
1416 | }; | ||
1417 | |||
1418 | static struct pci_driver intel_mid_dma_pci_driver = { | ||
1419 | .name = "Intel MID DMA", | ||
1420 | .id_table = intel_mid_dma_ids, | ||
1421 | .probe = intel_mid_dma_probe, | ||
1422 | .remove = intel_mid_dma_remove, | ||
1423 | #ifdef CONFIG_PM | ||
1424 | .driver = { | ||
1425 | .pm = &intel_mid_dma_pm, | ||
1426 | }, | ||
1427 | #endif | ||
1428 | }; | ||
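The #ifdef CONFIG_PM around the .driver.pm assignment above can be avoided with the helper macros from <linux/pm.h>, which compile to nothing when the corresponding PM options are disabled. An equivalent formulation for the callbacks used here (a sketch; the macros also wire freeze/thaw/poweroff/restore to the same handlers) would be:

	static const struct dev_pm_ops intel_mid_dma_pm = {
		SET_SYSTEM_SLEEP_PM_OPS(dma_suspend, dma_resume)
		SET_RUNTIME_PM_OPS(dma_runtime_suspend, dma_runtime_resume,
				   dma_runtime_idle)
	};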
1429 | |||
1430 | static int __init intel_mid_dma_init(void) | ||
1431 | { | ||
1432 | pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", | ||
1433 | INTEL_MID_DMA_DRIVER_VERSION); | ||
1434 | return pci_register_driver(&intel_mid_dma_pci_driver); | ||
1435 | } | ||
1436 | fs_initcall(intel_mid_dma_init); | ||
1437 | |||
1438 | static void __exit intel_mid_dma_exit(void) | ||
1439 | { | ||
1440 | pci_unregister_driver(&intel_mid_dma_pci_driver); | ||
1441 | } | ||
1442 | module_exit(intel_mid_dma_exit); | ||
1443 | |||
1444 | MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); | ||
1445 | MODULE_DESCRIPTION("Intel (R) MID DMAC Driver"); | ||
1446 | MODULE_LICENSE("GPL v2"); | ||
1447 | MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION); | ||
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h deleted file mode 100644 index 17b42192ea58..000000000000 --- a/drivers/dma/intel_mid_dma_regs.h +++ /dev/null | |||
@@ -1,299 +0,0 @@ | |||
1 | /* | ||
2 | * intel_mid_dma_regs.h - Intel MID DMA Drivers | ||
3 | * | ||
4 | * Copyright (C) 2008-10 Intel Corp | ||
5 | * Author: Vinod Koul <vinod.koul@intel.com> | ||
6 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; version 2 of the License. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
20 | * | ||
21 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
22 | * | ||
23 | * | ||
24 | */ | ||
25 | #ifndef __INTEL_MID_DMAC_REGS_H__ | ||
26 | #define __INTEL_MID_DMAC_REGS_H__ | ||
27 | |||
28 | #include <linux/dmaengine.h> | ||
29 | #include <linux/dmapool.h> | ||
30 | #include <linux/pci_ids.h> | ||
31 | |||
32 | #define INTEL_MID_DMA_DRIVER_VERSION "1.1.0" | ||
33 | |||
34 | #define REG_BIT0 0x00000001 | ||
35 | #define REG_BIT8 0x00000100 | ||
36 | #define INT_MASK_WE 0x8 | ||
37 | #define CLEAR_DONE 0xFFFFEFFF | ||
38 | #define UNMASK_INTR_REG(chan_num) \ | ||
39 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) | ||
40 | #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) | ||
41 | |||
42 | #define ENABLE_CHANNEL(chan_num) \ | ||
43 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) | ||
44 | |||
45 | #define DISABLE_CHANNEL(chan_num) \ | ||
46 | (REG_BIT8 << chan_num) | ||
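These macros follow the write-enable scheme of the mask registers: the upper byte (REG_BIT8 << chan_num) selects which channel's mask bit the write affects, while the lower byte (REG_BIT0 << chan_num) supplies the new value, which matches how the ISR and tasklet earlier in this diff use them. A small worked example for channel 2 (values follow directly from the definitions; dma_base is assumed to be the controller's MMIO base):

	/*
	 * UNMASK_INTR_REG(2) = (0x1 << 2) | (0x100 << 2) = 0x00000404
	 *	write-enable bit 10 set, value bit 2 set   -> interrupt unmasked
	 * MASK_INTR_REG(2)   =              0x100 << 2   = 0x00000400
	 *	write-enable bit 10 set, value bit 2 clear -> interrupt masked
	 */
	iowrite32(UNMASK_INTR_REG(2), dma_base + MASK_TFR);	/* unmask ch 2 TFR intr */
	iowrite32(MASK_INTR_REG(2), dma_base + MASK_TFR);	/* mask ch 2 TFR intr */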
47 | |||
48 | #define DESCS_PER_CHANNEL 16 | ||
49 | /*DMA Registers*/ | ||
50 | /*registers associated with channel programming*/ | ||
51 | #define DMA_REG_SIZE 0x400 | ||
52 | #define DMA_CH_SIZE 0x58 | ||
53 | |||
54 | /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ | ||
55 | #define SAR 0x00 /* Source Address Register*/ | ||
56 | #define DAR 0x08 /* Destination Address Register*/ | ||
57 | #define LLP 0x10 /* Linked List Pointer Register*/ | ||
58 | #define CTL_LOW 0x18 /* Control Register*/ | ||
59 | #define CTL_HIGH 0x1C /* Control Register*/ | ||
60 | #define CFG_LOW 0x40 /* Configuration Register Low*/ | ||
61 | #define CFG_HIGH 0x44 /* Configuration Register high*/ | ||
62 | |||
63 | #define STATUS_TFR 0x2E8 | ||
64 | #define STATUS_BLOCK 0x2F0 | ||
65 | #define STATUS_ERR 0x308 | ||
66 | |||
67 | #define RAW_TFR 0x2C0 | ||
68 | #define RAW_BLOCK 0x2C8 | ||
69 | #define RAW_ERR 0x2E0 | ||
70 | |||
71 | #define MASK_TFR 0x310 | ||
72 | #define MASK_BLOCK 0x318 | ||
73 | #define MASK_SRC_TRAN 0x320 | ||
74 | #define MASK_DST_TRAN 0x328 | ||
75 | #define MASK_ERR 0x330 | ||
76 | |||
77 | #define CLEAR_TFR 0x338 | ||
78 | #define CLEAR_BLOCK 0x340 | ||
79 | #define CLEAR_SRC_TRAN 0x348 | ||
80 | #define CLEAR_DST_TRAN 0x350 | ||
81 | #define CLEAR_ERR 0x358 | ||
82 | |||
83 | #define INTR_STATUS 0x360 | ||
84 | #define DMA_CFG 0x398 | ||
85 | #define DMA_CHAN_EN 0x3A0 | ||
86 | |||
87 | /*DMA channel control registers*/ | ||
88 | union intel_mid_dma_ctl_lo { | ||
89 | struct { | ||
90 | u32 int_en:1; /*enable or disable interrupts*/ | ||
91 | /*should be 0*/ | ||
92 | u32 dst_tr_width:3; /*destination transfer width*/ | ||
93 | /*usually 32 bits = 010*/ | ||
94 | u32 src_tr_width:3; /*source transfer width*/ | ||
95 | /*usually 32 bits = 010*/ | ||
96 | u32 dinc:2; /*destination address inc/dec*/ | ||
97 | /*For mem:INC=00, Peripheral NoINC=11*/ | ||
98 | u32 sinc:2; /*source address inc or dec, as above*/ | ||
99 | u32 dst_msize:3; /*destination burst transaction length*/ | ||
100 | /*always = 16 ie 011*/ | ||
101 | u32 src_msize:3; /*source burst transaction length*/ | ||
102 | /*always = 16 ie 011*/ | ||
103 | u32 reser1:3; | ||
104 | u32 tt_fc:3; /*transfer type and flow controller*/ | ||
105 | /*M-M = 000 | ||
106 | P-M = 010 | ||
107 | M-P = 001*/ | ||
108 | u32 dms:2; /*destination master select = 0*/ | ||
109 | u32 sms:2; /*source master select = 0*/ | ||
110 | u32 llp_dst_en:1; /*enable/disable destination LLP = 0*/ | ||
111 | u32 llp_src_en:1; /*enable/disable source LLP = 0*/ | ||
112 | u32 reser2:3; | ||
113 | } ctlx; | ||
114 | u32 ctl_lo; | ||
115 | }; | ||
116 | |||
117 | union intel_mid_dma_ctl_hi { | ||
118 | struct { | ||
119 | u32 block_ts:12; /*block transfer size*/ | ||
120 | u32 done:1; /*Done - updated by DMAC*/ | ||
121 | u32 reser:19; /*configured by DMAC*/ | ||
122 | } ctlx; | ||
123 | u32 ctl_hi; | ||
124 | |||
125 | }; | ||
126 | |||
127 | /*DMA channel configuration registers*/ | ||
128 | union intel_mid_dma_cfg_lo { | ||
129 | struct { | ||
130 | u32 reser1:5; | ||
131 | u32 ch_prior:3; /*channel priority = 0*/ | ||
132 | u32 ch_susp:1; /*channel suspend = 0*/ | ||
133 | u32 fifo_empty:1; /*FIFO empty or not R bit = 0*/ | ||
134 | u32 hs_sel_dst:1; /*select HW/SW destn handshaking*/ | ||
135 | /*HW = 0, SW = 1*/ | ||
136 | u32 hs_sel_src:1; /*select HW/SW src handshaking*/ | ||
137 | u32 reser2:6; | ||
138 | u32 dst_hs_pol:1; /*dest HS interface polarity*/ | ||
139 | u32 src_hs_pol:1; /*src HS interface polarity*/ | ||
140 | u32 max_abrst:10; /*max AMBA burst len = 0 (no sw limit)*/ | ||
141 | u32 reload_src:1; /*auto reload src addr =1 if src is P*/ | ||
142 | u32 reload_dst:1; /*auto reload destn addr =1 if dstn is P*/ | ||
143 | } cfgx; | ||
144 | u32 cfg_lo; | ||
145 | }; | ||
146 | |||
147 | union intel_mid_dma_cfg_hi { | ||
148 | struct { | ||
149 | u32 fcmode:1; /*flow control mode = 1*/ | ||
150 | u32 fifo_mode:1; /*FIFO mode select = 1*/ | ||
151 | u32 protctl:3; /*protection control = 0*/ | ||
152 | u32 rsvd:2; | ||
153 | u32 src_per:4; /*src hw HS interface*/ | ||
154 | u32 dst_per:4; /*dstn hw HS interface*/ | ||
155 | u32 reser2:17; | ||
156 | } cfgx; | ||
157 | u32 cfg_hi; | ||
158 | }; | ||
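These unions let the driver build a channel register value field by field and then write the aggregate u32 once. A brief illustrative sketch (the field values and midc are placeholders, not a specific configuration from this driver):

	union intel_mid_dma_ctl_lo ctl_lo = { .ctl_lo = 0 };

	ctl_lo.ctlx.dst_tr_width = 2;	/* 32-bit destination transfers */
	ctl_lo.ctlx.src_tr_width = 2;	/* 32-bit source transfers */
	ctl_lo.ctlx.tt_fc = 0;		/* memory-to-memory, DMAC as flow controller */
	iowrite32(ctl_lo.ctl_lo, midc->ch_regs + CTL_LOW);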
159 | |||
160 | |||
161 | /** | ||
162 | * struct intel_mid_dma_chan - internal mid representation of a DMA channel | ||
163 | * @chan: dma_chan structure representation for mid chan | ||
164 | * @ch_regs: MMIO register space pointer to channel register | ||
165 | * @dma_base: MMIO register space DMA engine base pointer | ||
166 | * @ch_id: DMA channel id | ||
167 | * @lock: channel spinlock | ||
168 | * @active_list: current active descriptors | ||
169 | * @queue: current queued up descriptors | ||
170 | * @free_list: current free descriptors | ||
171 | * @slave: dma slave structure | ||
172 | * @descs_allocated: total number of descriptors allocated | ||
173 | * @dma: dma device structure pointer | ||
174 | * @busy: bool representing if ch is busy (active txn) or not | ||
175 | * @in_use: bool representing if ch is in use or not | ||
176 | * @raw_tfr: raw tfr interrupt received | ||
177 | * @raw_block: raw block interrupt received | ||
178 | */ | ||
179 | struct intel_mid_dma_chan { | ||
180 | struct dma_chan chan; | ||
181 | void __iomem *ch_regs; | ||
182 | void __iomem *dma_base; | ||
183 | int ch_id; | ||
184 | spinlock_t lock; | ||
185 | struct list_head active_list; | ||
186 | struct list_head queue; | ||
187 | struct list_head free_list; | ||
188 | unsigned int descs_allocated; | ||
189 | struct middma_device *dma; | ||
190 | bool busy; | ||
191 | bool in_use; | ||
192 | u32 raw_tfr; | ||
193 | u32 raw_block; | ||
194 | struct intel_mid_dma_slave *mid_slave; | ||
195 | }; | ||
196 | |||
197 | static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( | ||
198 | struct dma_chan *chan) | ||
199 | { | ||
200 | return container_of(chan, struct intel_mid_dma_chan, chan); | ||
201 | } | ||
202 | |||
203 | enum intel_mid_dma_state { | ||
204 | RUNNING = 0, | ||
205 | SUSPENDED, | ||
206 | }; | ||
207 | /** | ||
208 | * struct middma_device - internal representation of a DMA device | ||
209 | * @pdev: PCI device | ||
210 | * @dma_base: MMIO register space pointer of DMA | ||
211 | * @dma_pool: for allocating DMA descriptors | ||
212 | * @common: embedded struct dma_device | ||
213 | * @tasklet: dma tasklet for processing interrupts | ||
214 | * @ch: per channel data | ||
215 | * @pci_id: DMA device PCI ID | ||
216 | * @intr_mask: Interrupt mask to be used | ||
217 | * @mask_reg: MMIO register for peripheral mask | ||
218 | * @chan_base: Base ch index (read from driver data) | ||
219 | * @max_chan: max number of chs supported (from drv_data) | ||
220 | * @block_size: Block size of DMA transfer supported (from drv_data) | ||
221 | * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data) | ||
222 | * @state: dma PM device state | ||
223 | */ | ||
224 | struct middma_device { | ||
225 | struct pci_dev *pdev; | ||
226 | void __iomem *dma_base; | ||
227 | struct pci_pool *dma_pool; | ||
228 | struct dma_device common; | ||
229 | struct tasklet_struct tasklet; | ||
230 | struct intel_mid_dma_chan ch[MAX_CHAN]; | ||
231 | unsigned int pci_id; | ||
232 | unsigned int intr_mask; | ||
233 | void __iomem *mask_reg; | ||
234 | int chan_base; | ||
235 | int max_chan; | ||
236 | int block_size; | ||
237 | unsigned int pimr_mask; | ||
238 | enum intel_mid_dma_state state; | ||
239 | }; | ||
240 | |||
241 | static inline struct middma_device *to_middma_device(struct dma_device *common) | ||
242 | { | ||
243 | return container_of(common, struct middma_device, common); | ||
244 | } | ||
245 | |||
246 | struct intel_mid_dma_desc { | ||
247 | void __iomem *block; /*ch ptr*/ | ||
248 | struct list_head desc_node; | ||
249 | struct dma_async_tx_descriptor txd; | ||
250 | size_t len; | ||
251 | dma_addr_t sar; | ||
252 | dma_addr_t dar; | ||
253 | u32 cfg_hi; | ||
254 | u32 cfg_lo; | ||
255 | u32 ctl_lo; | ||
256 | u32 ctl_hi; | ||
257 | struct pci_pool *lli_pool; | ||
258 | struct intel_mid_dma_lli *lli; | ||
259 | dma_addr_t lli_phys; | ||
260 | unsigned int lli_length; | ||
261 | unsigned int current_lli; | ||
262 | dma_addr_t next; | ||
263 | enum dma_transfer_direction dirn; | ||
264 | enum dma_status status; | ||
265 | enum dma_slave_buswidth width; /*width of DMA txn*/ | ||
266 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ | ||
267 | |||
268 | }; | ||
269 | |||
270 | struct intel_mid_dma_lli { | ||
271 | dma_addr_t sar; | ||
272 | dma_addr_t dar; | ||
273 | dma_addr_t llp; | ||
274 | u32 ctl_lo; | ||
275 | u32 ctl_hi; | ||
276 | } __attribute__ ((packed)); | ||
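Each hardware linked-list item chains to the next through llp, so an N-item list is normally laid out in one DMA-coherent block and linked by physical address. A sketch of that wiring, assuming lli, lli_phys and lli_length as in struct intel_mid_dma_desc above and a contiguous src/dst stepped by block_len (the real driver fills sar/dar from the scatterlist instead):

	int i;

	for (i = 0; i < lli_length; i++) {
		lli[i].sar = src + i * block_len;
		lli[i].dar = dst + i * block_len;
		/* last item ends the chain with a null link pointer */
		lli[i].llp = (i == lli_length - 1) ? 0 :
			lli_phys + (i + 1) * sizeof(struct intel_mid_dma_lli);
	}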
277 | |||
278 | static inline int test_ch_en(void __iomem *dma, u32 ch_no) | ||
279 | { | ||
280 | u32 en_reg = ioread32(dma + DMA_CHAN_EN); | ||
281 | return (en_reg >> ch_no) & 0x1; | ||
282 | } | ||
283 | |||
284 | static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc | ||
285 | (struct dma_async_tx_descriptor *txd) | ||
286 | { | ||
287 | return container_of(txd, struct intel_mid_dma_desc, txd); | ||
288 | } | ||
289 | |||
290 | static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave | ||
291 | (struct dma_slave_config *slave) | ||
292 | { | ||
293 | return container_of(slave, struct intel_mid_dma_slave, dma_slave); | ||
294 | } | ||
295 | |||
296 | |||
297 | int dma_resume(struct device *dev); | ||
298 | |||
299 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ | ||
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 77a6dcf25b98..194ec20c9408 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
@@ -230,6 +230,10 @@ static bool is_bwd_noraid(struct pci_dev *pdev) | |||
230 | switch (pdev->device) { | 230 | switch (pdev->device) { |
231 | case PCI_DEVICE_ID_INTEL_IOAT_BWD2: | 231 | case PCI_DEVICE_ID_INTEL_IOAT_BWD2: |
232 | case PCI_DEVICE_ID_INTEL_IOAT_BWD3: | 232 | case PCI_DEVICE_ID_INTEL_IOAT_BWD3: |
233 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: | ||
234 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: | ||
235 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: | ||
236 | case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: | ||
233 | return true; | 237 | return true; |
234 | default: | 238 | default: |
235 | return false; | 239 | return false; |
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 8926f271904e..eb410044e1af 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -219,6 +219,9 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) | |||
219 | 219 | ||
220 | while (dint) { | 220 | while (dint) { |
221 | i = __ffs(dint); | 221 | i = __ffs(dint); |
222 | /* only handle interrupts belonging to pdma driver*/ | ||
223 | if (i >= pdev->dma_channels) | ||
224 | break; | ||
222 | dint &= (dint - 1); | 225 | dint &= (dint - 1); |
223 | phy = &pdev->phy[i]; | 226 | phy = &pdev->phy[i]; |
224 | ret = mmp_pdma_chan_handler(irq, phy); | 227 | ret = mmp_pdma_chan_handler(irq, phy); |
@@ -999,6 +1002,9 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
999 | struct resource *iores; | 1002 | struct resource *iores; |
1000 | int i, ret, irq = 0; | 1003 | int i, ret, irq = 0; |
1001 | int dma_channels = 0, irq_num = 0; | 1004 | int dma_channels = 0, irq_num = 0; |
1005 | const enum dma_slave_buswidth widths = | ||
1006 | DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | | ||
1007 | DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1002 | 1008 | ||
1003 | pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); | 1009 | pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); |
1004 | if (!pdev) | 1010 | if (!pdev) |
@@ -1066,6 +1072,10 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
1066 | pdev->device.device_config = mmp_pdma_config; | 1072 | pdev->device.device_config = mmp_pdma_config; |
1067 | pdev->device.device_terminate_all = mmp_pdma_terminate_all; | 1073 | pdev->device.device_terminate_all = mmp_pdma_terminate_all; |
1068 | pdev->device.copy_align = PDMA_ALIGNMENT; | 1074 | pdev->device.copy_align = PDMA_ALIGNMENT; |
1075 | pdev->device.src_addr_widths = widths; | ||
1076 | pdev->device.dst_addr_widths = widths; | ||
1077 | pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); | ||
1078 | pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
1069 | 1079 | ||
1070 | if (pdev->dev->coherent_dma_mask) | 1080 | if (pdev->dev->coherent_dma_mask) |
1071 | dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); | 1081 | dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); |
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 70c2fa9963cd..b6f4e1fc9c78 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
@@ -110,7 +110,7 @@ struct mmp_tdma_chan { | |||
110 | struct tasklet_struct tasklet; | 110 | struct tasklet_struct tasklet; |
111 | 111 | ||
112 | struct mmp_tdma_desc *desc_arr; | 112 | struct mmp_tdma_desc *desc_arr; |
113 | phys_addr_t desc_arr_phys; | 113 | dma_addr_t desc_arr_phys; |
114 | int desc_num; | 114 | int desc_num; |
115 | enum dma_transfer_direction dir; | 115 | enum dma_transfer_direction dir; |
116 | dma_addr_t dev_addr; | 116 | dma_addr_t dev_addr; |
@@ -166,9 +166,12 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac) | |||
166 | static int mmp_tdma_disable_chan(struct dma_chan *chan) | 166 | static int mmp_tdma_disable_chan(struct dma_chan *chan) |
167 | { | 167 | { |
168 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | 168 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); |
169 | u32 tdcr; | ||
169 | 170 | ||
170 | writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, | 171 | tdcr = readl(tdmac->reg_base + TDCR); |
171 | tdmac->reg_base + TDCR); | 172 | tdcr |= TDCR_ABR; |
173 | tdcr &= ~TDCR_CHANEN; | ||
174 | writel(tdcr, tdmac->reg_base + TDCR); | ||
172 | 175 | ||
173 | tdmac->status = DMA_COMPLETE; | 176 | tdmac->status = DMA_COMPLETE; |
174 | 177 | ||
@@ -296,12 +299,27 @@ static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac) | |||
296 | return -EAGAIN; | 299 | return -EAGAIN; |
297 | } | 300 | } |
298 | 301 | ||
302 | static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac) | ||
303 | { | ||
304 | size_t reg; | ||
305 | |||
306 | if (tdmac->idx == 0) { | ||
307 | reg = __raw_readl(tdmac->reg_base + TDSAR); | ||
308 | reg -= tdmac->desc_arr[0].src_addr; | ||
309 | } else if (tdmac->idx == 1) { | ||
310 | reg = __raw_readl(tdmac->reg_base + TDDAR); | ||
311 | reg -= tdmac->desc_arr[0].dst_addr; | ||
312 | } else | ||
313 | return -EINVAL; | ||
314 | |||
315 | return reg; | ||
316 | } | ||
317 | |||
299 | static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id) | 318 | static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id) |
300 | { | 319 | { |
301 | struct mmp_tdma_chan *tdmac = dev_id; | 320 | struct mmp_tdma_chan *tdmac = dev_id; |
302 | 321 | ||
303 | if (mmp_tdma_clear_chan_irq(tdmac) == 0) { | 322 | if (mmp_tdma_clear_chan_irq(tdmac) == 0) { |
304 | tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len; | ||
305 | tasklet_schedule(&tdmac->tasklet); | 323 | tasklet_schedule(&tdmac->tasklet); |
306 | return IRQ_HANDLED; | 324 | return IRQ_HANDLED; |
307 | } else | 325 | } else |
@@ -343,7 +361,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) | |||
343 | int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); | 361 | int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); |
344 | 362 | ||
345 | gpool = tdmac->pool; | 363 | gpool = tdmac->pool; |
346 | if (tdmac->desc_arr) | 364 | if (gpool && tdmac->desc_arr) |
347 | gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, | 365 | gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, |
348 | size); | 366 | size); |
349 | tdmac->desc_arr = NULL; | 367 | tdmac->desc_arr = NULL; |
@@ -499,6 +517,7 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, | |||
499 | { | 517 | { |
500 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | 518 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); |
501 | 519 | ||
520 | tdmac->pos = mmp_tdma_get_pos(tdmac); | ||
502 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, | 521 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, |
503 | tdmac->buf_len - tdmac->pos); | 522 | tdmac->buf_len - tdmac->pos); |
504 | 523 | ||
@@ -610,7 +629,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) | |||
610 | int i, ret; | 629 | int i, ret; |
611 | int irq = 0, irq_num = 0; | 630 | int irq = 0, irq_num = 0; |
612 | int chan_num = TDMA_CHANNEL_NUM; | 631 | int chan_num = TDMA_CHANNEL_NUM; |
613 | struct gen_pool *pool; | 632 | struct gen_pool *pool = NULL; |
614 | 633 | ||
615 | of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); | 634 | of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); |
616 | if (of_id) | 635 | if (of_id) |
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index 15cab7d79525..b4634109e010 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c | |||
@@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan) | |||
193 | 193 | ||
194 | spin_lock_irqsave(&ch->vc.lock, flags); | 194 | spin_lock_irqsave(&ch->vc.lock, flags); |
195 | 195 | ||
196 | if (ch->desc) | 196 | if (ch->desc) { |
197 | moxart_dma_desc_free(&ch->desc->vd); | ||
197 | ch->desc = NULL; | 198 | ch->desc = NULL; |
199 | } | ||
198 | 200 | ||
199 | ctrl = readl(ch->base + REG_OFF_CTRL); | 201 | ctrl = readl(ch->base + REG_OFF_CTRL); |
200 | ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); | 202 | ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); |
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 7dd6dd121681..167dbaf65742 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan) | |||
981 | * c->desc is NULL and exit.) | 981 | * c->desc is NULL and exit.) |
982 | */ | 982 | */ |
983 | if (c->desc) { | 983 | if (c->desc) { |
984 | omap_dma_desc_free(&c->desc->vd); | ||
984 | c->desc = NULL; | 985 | c->desc = NULL; |
985 | /* Avoid stopping the dma twice */ | 986 | /* Avoid stopping the dma twice */ |
986 | if (!c->paused) | 987 | if (!c->paused) |
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c index d7a33b3ac466..9c914d625906 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom_bam_dma.c | |||
@@ -162,9 +162,9 @@ static const struct reg_offset_data bam_v1_4_reg_info[] = { | |||
162 | [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, | 162 | [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, |
163 | [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, | 163 | [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, |
164 | [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, | 164 | [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, |
165 | [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 }, | 165 | [BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 }, |
166 | [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 }, | 166 | [BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 }, |
167 | [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 }, | 167 | [BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 }, |
168 | [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, | 168 | [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, |
169 | [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, | 169 | [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, |
170 | [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, | 170 | [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, |
@@ -1143,6 +1143,10 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
1143 | dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); | 1143 | dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); |
1144 | 1144 | ||
1145 | /* initialize dmaengine apis */ | 1145 | /* initialize dmaengine apis */ |
1146 | bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1147 | bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
1148 | bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1149 | bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1146 | bdev->common.device_alloc_chan_resources = bam_alloc_chan; | 1150 | bdev->common.device_alloc_chan_resources = bam_alloc_chan; |
1147 | bdev->common.device_free_chan_resources = bam_free_chan; | 1151 | bdev->common.device_free_chan_resources = bam_free_chan; |
1148 | bdev->common.device_prep_slave_sg = bam_prep_slave_sg; | 1152 | bdev->common.device_prep_slave_sg = bam_prep_slave_sg; |
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index b2431aa30033..9f1d4c7dbab8 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c | |||
@@ -582,15 +582,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | |||
582 | } | 582 | } |
583 | } | 583 | } |
584 | 584 | ||
585 | static void sh_dmae_shutdown(struct platform_device *pdev) | ||
586 | { | ||
587 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
588 | sh_dmae_ctl_stop(shdev); | ||
589 | } | ||
590 | |||
591 | #ifdef CONFIG_PM | 585 | #ifdef CONFIG_PM |
592 | static int sh_dmae_runtime_suspend(struct device *dev) | 586 | static int sh_dmae_runtime_suspend(struct device *dev) |
593 | { | 587 | { |
588 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
589 | |||
590 | sh_dmae_ctl_stop(shdev); | ||
594 | return 0; | 591 | return 0; |
595 | } | 592 | } |
596 | 593 | ||
@@ -605,6 +602,9 @@ static int sh_dmae_runtime_resume(struct device *dev) | |||
605 | #ifdef CONFIG_PM_SLEEP | 602 | #ifdef CONFIG_PM_SLEEP |
606 | static int sh_dmae_suspend(struct device *dev) | 603 | static int sh_dmae_suspend(struct device *dev) |
607 | { | 604 | { |
605 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
606 | |||
607 | sh_dmae_ctl_stop(shdev); | ||
608 | return 0; | 608 | return 0; |
609 | } | 609 | } |
610 | 610 | ||
@@ -929,13 +929,12 @@ static int sh_dmae_remove(struct platform_device *pdev) | |||
929 | } | 929 | } |
930 | 930 | ||
931 | static struct platform_driver sh_dmae_driver = { | 931 | static struct platform_driver sh_dmae_driver = { |
932 | .driver = { | 932 | .driver = { |
933 | .pm = &sh_dmae_pm, | 933 | .pm = &sh_dmae_pm, |
934 | .name = SH_DMAE_DRV_NAME, | 934 | .name = SH_DMAE_DRV_NAME, |
935 | .of_match_table = sh_dmae_of_match, | 935 | .of_match_table = sh_dmae_of_match, |
936 | }, | 936 | }, |
937 | .remove = sh_dmae_remove, | 937 | .remove = sh_dmae_remove, |
938 | .shutdown = sh_dmae_shutdown, | ||
939 | }; | 938 | }; |
940 | 939 | ||
941 | static int __init sh_dmae_init(void) | 940 | static int __init sh_dmae_init(void) |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index c5f7b4e9eb6c..2eebd28b4c40 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -78,7 +78,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s) | |||
78 | * We have to be cautious here. We have seen BIOSes with DMI pointers | 78 | * We have to be cautious here. We have seen BIOSes with DMI pointers |
79 | * pointing to completely the wrong place for example | 79 | * pointing to completely the wrong place for example |
80 | */ | 80 | */ |
81 | static void dmi_table(u8 *buf, int len, int num, | 81 | static void dmi_table(u8 *buf, u32 len, int num, |
82 | void (*decode)(const struct dmi_header *, void *), | 82 | void (*decode)(const struct dmi_header *, void *), |
83 | void *private_data) | 83 | void *private_data) |
84 | { | 84 | { |
@@ -86,19 +86,16 @@ static void dmi_table(u8 *buf, int len, int num, | |||
86 | int i = 0; | 86 | int i = 0; |
87 | 87 | ||
88 | /* | 88 | /* |
89 | * Stop when we see all the items the table claimed to have | 89 | * Stop when we have seen all the items the table claimed to have |
90 | * OR we run off the end of the table (also happens) | 90 | * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run |
91 | * off the end of the table (should never happen but sometimes does | ||
92 | * on bogus implementations.) | ||
91 | */ | 93 | */ |
92 | while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) { | 94 | while ((!num || i < num) && |
95 | (data - buf + sizeof(struct dmi_header)) <= len) { | ||
93 | const struct dmi_header *dm = (const struct dmi_header *)data; | 96 | const struct dmi_header *dm = (const struct dmi_header *)data; |
94 | 97 | ||
95 | /* | 98 | /* |
96 | * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0] | ||
97 | */ | ||
98 | if (dm->type == DMI_ENTRY_END_OF_TABLE) | ||
99 | break; | ||
100 | |||
101 | /* | ||
102 | * We want to know the total length (formatted area and | 99 | * We want to know the total length (formatted area and |
103 | * strings) before decoding to make sure we won't run off the | 100 | * strings) before decoding to make sure we won't run off the |
104 | * table in dmi_decode or dmi_string | 101 | * table in dmi_decode or dmi_string |
@@ -108,13 +105,20 @@ static void dmi_table(u8 *buf, int len, int num, | |||
108 | data++; | 105 | data++; |
109 | if (data - buf < len - 1) | 106 | if (data - buf < len - 1) |
110 | decode(dm, private_data); | 107 | decode(dm, private_data); |
108 | |||
109 | /* | ||
110 | * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0] | ||
111 | */ | ||
112 | if (dm->type == DMI_ENTRY_END_OF_TABLE) | ||
113 | break; | ||
114 | |||
111 | data += 2; | 115 | data += 2; |
112 | i++; | 116 | i++; |
113 | } | 117 | } |
114 | } | 118 | } |
115 | 119 | ||
116 | static phys_addr_t dmi_base; | 120 | static phys_addr_t dmi_base; |
117 | static u16 dmi_len; | 121 | static u32 dmi_len; |
118 | static u16 dmi_num; | 122 | static u16 dmi_num; |
119 | 123 | ||
120 | static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, | 124 | static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, |
@@ -528,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf) | |||
528 | if (memcmp(buf, "_SM3_", 5) == 0 && | 532 | if (memcmp(buf, "_SM3_", 5) == 0 && |
529 | buf[6] < 32 && dmi_checksum(buf, buf[6])) { | 533 | buf[6] < 32 && dmi_checksum(buf, buf[6])) { |
530 | dmi_ver = get_unaligned_be16(buf + 7); | 534 | dmi_ver = get_unaligned_be16(buf + 7); |
535 | dmi_num = 0; /* No longer specified */ | ||
531 | dmi_len = get_unaligned_le32(buf + 12); | 536 | dmi_len = get_unaligned_le32(buf + 12); |
532 | dmi_base = get_unaligned_le64(buf + 16); | 537 | dmi_base = get_unaligned_le64(buf + 16); |
533 | 538 | ||
534 | /* | ||
535 | * The 64-bit SMBIOS 3.0 entry point no longer has a field | ||
536 | * containing the number of structures present in the table. | ||
537 | * Instead, it defines the table size as a maximum size, and | ||
538 | * relies on the end-of-table structure type (#127) to be used | ||
539 | * to signal the end of the table. | ||
540 | * So let's define dmi_num as an upper bound as well: each | ||
541 | * structure has a 4 byte header, so dmi_len / 4 is an upper | ||
542 | * bound for the number of structures in the table. | ||
543 | */ | ||
544 | dmi_num = dmi_len / 4; | ||
545 | |||
546 | if (dmi_walk_early(dmi_decode) == 0) { | 539 | if (dmi_walk_early(dmi_decode) == 0) { |
547 | pr_info("SMBIOS %d.%d present.\n", | 540 | pr_info("SMBIOS %d.%d present.\n", |
548 | dmi_ver >> 8, dmi_ver & 0xFF); | 541 | dmi_ver >> 8, dmi_ver & 0xFF); |
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 2fe195002021..f07d4a67fa76 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c | |||
@@ -179,12 +179,12 @@ again: | |||
179 | start = desc->phys_addr; | 179 | start = desc->phys_addr; |
180 | end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); | 180 | end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); |
181 | 181 | ||
182 | if ((start + size) > end || (start + size) > max) | 182 | if (end > max) |
183 | continue; | ||
184 | |||
185 | if (end - size > max) | ||
186 | end = max; | 183 | end = max; |
187 | 184 | ||
185 | if ((start + size) > end) | ||
186 | continue; | ||
187 | |||
188 | if (round_down(end - size, align) < start) | 188 | if (round_down(end - size, align) < start) |
189 | continue; | 189 | continue; |
190 | 190 | ||
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c index a6952ba343a8..a65b75161aa4 100644 --- a/drivers/gpio/gpio-mpc8xxx.c +++ b/drivers/gpio/gpio-mpc8xxx.c | |||
@@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = { | |||
334 | .xlate = irq_domain_xlate_twocell, | 334 | .xlate = irq_domain_xlate_twocell, |
335 | }; | 335 | }; |
336 | 336 | ||
337 | static struct of_device_id mpc8xxx_gpio_ids[] __initdata = { | 337 | static struct of_device_id mpc8xxx_gpio_ids[] = { |
338 | { .compatible = "fsl,mpc8349-gpio", }, | 338 | { .compatible = "fsl,mpc8349-gpio", }, |
339 | { .compatible = "fsl,mpc8572-gpio", }, | 339 | { .compatible = "fsl,mpc8572-gpio", }, |
340 | { .compatible = "fsl,mpc8610-gpio", }, | 340 | { .compatible = "fsl,mpc8610-gpio", }, |
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 257e2989215c..045a952576c7 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c | |||
@@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev) | |||
219 | ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2, | 219 | ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2, |
220 | &priv->dir_reg_offset); | 220 | &priv->dir_reg_offset); |
221 | if (ret) | 221 | if (ret) |
222 | dev_err(dev, "can't read the dir register offset!\n"); | 222 | dev_dbg(dev, "can't read the dir register offset!\n"); |
223 | 223 | ||
224 | priv->dir_reg_offset <<= 3; | 224 | priv->dir_reg_offset <<= 3; |
225 | } | 225 | } |
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c index 472fb5b8779f..9cdbc0c9cb2d 100644 --- a/drivers/gpio/gpio-tps65912.c +++ b/drivers/gpio/gpio-tps65912.c | |||
@@ -26,9 +26,12 @@ struct tps65912_gpio_data { | |||
26 | struct gpio_chip gpio_chip; | 26 | struct gpio_chip gpio_chip; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | #define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip) | ||
30 | |||
29 | static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) | 31 | static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) |
30 | { | 32 | { |
31 | struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); | 33 | struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); |
34 | struct tps65912 *tps65912 = tps65912_gpio->tps65912; | ||
32 | int val; | 35 | int val; |
33 | 36 | ||
34 | val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset); | 37 | val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset); |
@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) | |||
42 | static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, | 45 | static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, |
43 | int value) | 46 | int value) |
44 | { | 47 | { |
45 | struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); | 48 | struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); |
49 | struct tps65912 *tps65912 = tps65912_gpio->tps65912; | ||
46 | 50 | ||
47 | if (value) | 51 | if (value) |
48 | tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset, | 52 | tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset, |
@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, | |||
55 | static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, | 59 | static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, |
56 | int value) | 60 | int value) |
57 | { | 61 | { |
58 | struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); | 62 | struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); |
63 | struct tps65912 *tps65912 = tps65912_gpio->tps65912; | ||
59 | 64 | ||
60 | /* Set the initial value */ | 65 | /* Set the initial value */ |
61 | tps65912_gpio_set(gc, offset, value); | 66 | tps65912_gpio_set(gc, offset, value); |
@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, | |||
66 | 71 | ||
67 | static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset) | 72 | static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset) |
68 | { | 73 | { |
69 | struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); | 74 | struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); |
75 | struct tps65912 *tps65912 = tps65912_gpio->tps65912; | ||
70 | 76 | ||
71 | return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset, | 77 | return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset, |
72 | GPIO_CFG_MASK); | 78 | GPIO_CFG_MASK); |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index c0929d938ced..df990f29757a 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
@@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
201 | if (!handler) | 201 | if (!handler) |
202 | return AE_BAD_PARAMETER; | 202 | return AE_BAD_PARAMETER; |
203 | 203 | ||
204 | pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin); | ||
205 | if (pin < 0) | ||
206 | return AE_BAD_PARAMETER; | ||
207 | |||
204 | desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event"); | 208 | desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event"); |
205 | if (IS_ERR(desc)) { | 209 | if (IS_ERR(desc)) { |
206 | dev_err(chip->dev, "Failed to request GPIO\n"); | 210 | dev_err(chip->dev, "Failed to request GPIO\n"); |
@@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, | |||
551 | struct gpio_desc *desc; | 555 | struct gpio_desc *desc; |
552 | bool found; | 556 | bool found; |
553 | 557 | ||
558 | pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin); | ||
559 | if (pin < 0) { | ||
560 | status = AE_BAD_PARAMETER; | ||
561 | goto out; | ||
562 | } | ||
563 | |||
554 | mutex_lock(&achip->conn_lock); | 564 | mutex_lock(&achip->conn_lock); |
555 | 565 | ||
556 | found = false; | 566 | found = false; |
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 8cad8e400b44..4650bf830d6b 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
@@ -46,12 +46,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) | |||
46 | 46 | ||
47 | ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); | 47 | ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); |
48 | if (ret < 0) { | 48 | if (ret < 0) { |
49 | /* We've found the gpio chip, but the translation failed. | 49 | /* We've found a gpio chip, but the translation failed. |
50 | * Return true to stop looking and return the translation | 50 | * Store translation error in out_gpio. |
51 | * error via out_gpio | 51 | * Return false to keep looking, as more than one gpio chip |
52 | * could be registered per of-node. | ||
52 | */ | 53 | */ |
53 | gg_data->out_gpio = ERR_PTR(ret); | 54 | gg_data->out_gpio = ERR_PTR(ret); |
54 | return true; | 55 | return false; |
55 | } | 56 | } |
56 | 57 | ||
57 | gg_data->out_gpio = gpiochip_get_desc(gc, ret); | 58 | gg_data->out_gpio = gpiochip_get_desc(gc, ret); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index b3589d0e39b9..d8135adb2238 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | |||
@@ -62,12 +62,18 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type) | |||
62 | return KFD_MQD_TYPE_CP; | 62 | return KFD_MQD_TYPE_CP; |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline unsigned int get_first_pipe(struct device_queue_manager *dqm) | 65 | unsigned int get_first_pipe(struct device_queue_manager *dqm) |
66 | { | 66 | { |
67 | BUG_ON(!dqm); | 67 | BUG_ON(!dqm || !dqm->dev); |
68 | return dqm->dev->shared_resources.first_compute_pipe; | 68 | return dqm->dev->shared_resources.first_compute_pipe; |
69 | } | 69 | } |
70 | 70 | ||
71 | unsigned int get_pipes_num(struct device_queue_manager *dqm) | ||
72 | { | ||
73 | BUG_ON(!dqm || !dqm->dev); | ||
74 | return dqm->dev->shared_resources.compute_pipe_count; | ||
75 | } | ||
76 | |||
71 | static inline unsigned int get_pipes_num_cpsch(void) | 77 | static inline unsigned int get_pipes_num_cpsch(void) |
72 | { | 78 | { |
73 | return PIPE_PER_ME_CP_SCHEDULING; | 79 | return PIPE_PER_ME_CP_SCHEDULING; |
@@ -639,6 +645,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, | |||
639 | pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); | 645 | pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); |
640 | pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); | 646 | pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); |
641 | 647 | ||
648 | init_sdma_vm(dqm, q, qpd); | ||
642 | retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, | 649 | retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, |
643 | &q->gart_mqd_addr, &q->properties); | 650 | &q->gart_mqd_addr, &q->properties); |
644 | if (retval != 0) { | 651 | if (retval != 0) { |
@@ -646,7 +653,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, | |||
646 | return retval; | 653 | return retval; |
647 | } | 654 | } |
648 | 655 | ||
649 | init_sdma_vm(dqm, q, qpd); | 656 | retval = mqd->load_mqd(mqd, q->mqd, 0, |
657 | 0, NULL); | ||
658 | if (retval != 0) { | ||
659 | deallocate_sdma_queue(dqm, q->sdma_id); | ||
660 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); | ||
661 | return retval; | ||
662 | } | ||
663 | |||
650 | return 0; | 664 | return 0; |
651 | } | 665 | } |
652 | 666 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index d64f86cda34f..488f51d19427 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | |||
@@ -163,6 +163,8 @@ void program_sh_mem_settings(struct device_queue_manager *dqm, | |||
163 | struct qcm_process_device *qpd); | 163 | struct qcm_process_device *qpd); |
164 | int init_pipelines(struct device_queue_manager *dqm, | 164 | int init_pipelines(struct device_queue_manager *dqm, |
165 | unsigned int pipes_num, unsigned int first_pipe); | 165 | unsigned int pipes_num, unsigned int first_pipe); |
166 | unsigned int get_first_pipe(struct device_queue_manager *dqm); | ||
167 | unsigned int get_pipes_num(struct device_queue_manager *dqm); | ||
166 | 168 | ||
167 | extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) | 169 | extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) |
168 | { | 170 | { |
@@ -175,10 +177,4 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd) | |||
175 | return (pdd->lds_base >> 60) & 0x0E; | 177 | return (pdd->lds_base >> 60) & 0x0E; |
176 | } | 178 | } |
177 | 179 | ||
178 | extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm) | ||
179 | { | ||
180 | BUG_ON(!dqm || !dqm->dev); | ||
181 | return dqm->dev->shared_resources.compute_pipe_count; | ||
182 | } | ||
183 | |||
184 | #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */ | 180 | #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */ |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c index 6b072466e2a6..5469efe0523e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c | |||
@@ -131,5 +131,5 @@ static int register_process_cik(struct device_queue_manager *dqm, | |||
131 | 131 | ||
132 | static int initialize_cpsch_cik(struct device_queue_manager *dqm) | 132 | static int initialize_cpsch_cik(struct device_queue_manager *dqm) |
133 | { | 133 | { |
134 | return init_pipelines(dqm, get_pipes_num(dqm), 0); | 134 | return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); |
135 | } | 135 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index e415a2a9207e..c7d298e62c96 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | |||
@@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, | |||
44 | BUG_ON(!kq || !dev); | 44 | BUG_ON(!kq || !dev); |
45 | BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); | 45 | BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); |
46 | 46 | ||
47 | pr_debug("kfd: In func %s initializing queue type %d size %d\n", | 47 | pr_debug("amdkfd: In func %s initializing queue type %d size %d\n", |
48 | __func__, KFD_QUEUE_TYPE_HIQ, queue_size); | 48 | __func__, KFD_QUEUE_TYPE_HIQ, queue_size); |
49 | 49 | ||
50 | nop.opcode = IT_NOP; | 50 | nop.opcode = IT_NOP; |
@@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, | |||
69 | 69 | ||
70 | prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); | 70 | prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); |
71 | 71 | ||
72 | if (prop.doorbell_ptr == NULL) | 72 | if (prop.doorbell_ptr == NULL) { |
73 | pr_err("amdkfd: error init doorbell"); | ||
73 | goto err_get_kernel_doorbell; | 74 | goto err_get_kernel_doorbell; |
75 | } | ||
74 | 76 | ||
75 | retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); | 77 | retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); |
76 | if (retval != 0) | 78 | if (retval != 0) { |
79 | pr_err("amdkfd: error init pq queues size (%d)\n", queue_size); | ||
77 | goto err_pq_allocate_vidmem; | 80 | goto err_pq_allocate_vidmem; |
81 | } | ||
78 | 82 | ||
79 | kq->pq_kernel_addr = kq->pq->cpu_ptr; | 83 | kq->pq_kernel_addr = kq->pq->cpu_ptr; |
80 | kq->pq_gpu_addr = kq->pq->gpu_addr; | 84 | kq->pq_gpu_addr = kq->pq->gpu_addr; |
@@ -165,10 +169,8 @@ err_rptr_allocate_vidmem: | |||
165 | err_eop_allocate_vidmem: | 169 | err_eop_allocate_vidmem: |
166 | kfd_gtt_sa_free(dev, kq->pq); | 170 | kfd_gtt_sa_free(dev, kq->pq); |
167 | err_pq_allocate_vidmem: | 171 | err_pq_allocate_vidmem: |
168 | pr_err("kfd: error init pq\n"); | ||
169 | kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); | 172 | kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); |
170 | err_get_kernel_doorbell: | 173 | err_get_kernel_doorbell: |
171 | pr_err("kfd: error init doorbell"); | ||
172 | return false; | 174 | return false; |
173 | 175 | ||
174 | } | 176 | } |
@@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq) | |||
187 | else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) | 189 | else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) |
188 | kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); | 190 | kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); |
189 | 191 | ||
192 | kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj); | ||
193 | |||
190 | kfd_gtt_sa_free(kq->dev, kq->rptr_mem); | 194 | kfd_gtt_sa_free(kq->dev, kq->rptr_mem); |
191 | kfd_gtt_sa_free(kq->dev, kq->wptr_mem); | 195 | kfd_gtt_sa_free(kq->dev, kq->wptr_mem); |
192 | kq->ops_asic_specific.uninitialize(kq); | 196 | kq->ops_asic_specific.uninitialize(kq); |
@@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq, | |||
211 | queue_address = (unsigned int *)kq->pq_kernel_addr; | 215 | queue_address = (unsigned int *)kq->pq_kernel_addr; |
212 | queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); | 216 | queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); |
213 | 217 | ||
214 | pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", | 218 | pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", |
215 | __func__, rptr, wptr, queue_address); | 219 | __func__, rptr, wptr, queue_address); |
216 | 220 | ||
217 | available_size = (rptr - 1 - wptr + queue_size_dwords) % | 221 | available_size = (rptr - 1 - wptr + queue_size_dwords) % |
@@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, | |||
296 | } | 300 | } |
297 | 301 | ||
298 | if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { | 302 | if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { |
299 | pr_err("kfd: failed to init kernel queue\n"); | 303 | pr_err("amdkfd: failed to init kernel queue\n"); |
300 | kfree(kq); | 304 | kfree(kq); |
301 | return NULL; | 305 | return NULL; |
302 | } | 306 | } |
@@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) | |||
319 | 323 | ||
320 | BUG_ON(!dev); | 324 | BUG_ON(!dev); |
321 | 325 | ||
322 | pr_err("kfd: starting kernel queue test\n"); | 326 | pr_err("amdkfd: starting kernel queue test\n"); |
323 | 327 | ||
324 | kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); | 328 | kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); |
325 | BUG_ON(!kq); | 329 | BUG_ON(!kq); |
@@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) | |||
330 | buffer[i] = kq->nop_packet; | 334 | buffer[i] = kq->nop_packet; |
331 | kq->ops.submit_packet(kq); | 335 | kq->ops.submit_packet(kq); |
332 | 336 | ||
333 | pr_err("kfd: ending kernel queue test\n"); | 337 | pr_err("amdkfd: ending kernel queue test\n"); |
334 | } | 338 | } |
335 | 339 | ||
336 | 340 | ||
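A smaller cleanup in kfd_kernel_queue.c moves the pr_err() calls from the shared error labels to the failing call sites, so each message can name the step and parameters that actually failed while the labels keep doing only the unwinding. A hedged sketch of the same structure with made-up helper names:

```c
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the two allocations done during queue init. */
static void *get_doorbell(void)        { return (void *)1; }
static int   alloc_pq(int size)        { (void)size; return -1; } /* simulate failure */
static void  release_doorbell(void *db){ (void)db; }

static bool initialize(int queue_size)
{
	void *doorbell;

	doorbell = get_doorbell();
	if (!doorbell) {
		/* report at the call site, so the message names the failing step */
		fprintf(stderr, "error init doorbell\n");
		goto err_doorbell;
	}

	if (alloc_pq(queue_size) != 0) {
		fprintf(stderr, "error init pq, size %d\n", queue_size);
		goto err_pq;
	}

	return true;

err_pq:
	release_doorbell(doorbell);   /* labels now only undo state, no logging */
err_doorbell:
	return false;
}

int main(void)
{
	return initialize(4096) ? 0 : 1;
}
```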
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 0409b907de5d..b3e3068c6ec0 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | |||
@@ -153,7 +153,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c, | |||
153 | (adj->crtc_hdisplay - 1) | | 153 | (adj->crtc_hdisplay - 1) | |
154 | ((adj->crtc_vdisplay - 1) << 16)); | 154 | ((adj->crtc_vdisplay - 1) << 16)); |
155 | 155 | ||
156 | cfg = ATMEL_HLCDC_CLKPOL; | 156 | cfg = 0; |
157 | 157 | ||
158 | prate = clk_get_rate(crtc->dc->hlcdc->sys_clk); | 158 | prate = clk_get_rate(crtc->dc->hlcdc->sys_clk); |
159 | mode_rate = mode->crtc_clock * 1000; | 159 | mode_rate = mode->crtc_clock * 1000; |
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 7320a6c6613f..c1cb17493e0d 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | |||
@@ -311,8 +311,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) | |||
311 | 311 | ||
312 | pm_runtime_enable(dev->dev); | 312 | pm_runtime_enable(dev->dev); |
313 | 313 | ||
314 | pm_runtime_put_sync(dev->dev); | ||
315 | |||
316 | ret = atmel_hlcdc_dc_modeset_init(dev); | 314 | ret = atmel_hlcdc_dc_modeset_init(dev); |
317 | if (ret < 0) { | 315 | if (ret < 0) { |
318 | dev_err(dev->dev, "failed to initialize mode setting\n"); | 316 | dev_err(dev->dev, "failed to initialize mode setting\n"); |
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c index 063d2a7b941f..e79bd9ba474b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c | |||
@@ -311,7 +311,8 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer) | |||
311 | 311 | ||
312 | /* Disable the layer */ | 312 | /* Disable the layer */ |
313 | regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, | 313 | regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, |
314 | ATMEL_HLCDC_LAYER_RST); | 314 | ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q | |
315 | ATMEL_HLCDC_LAYER_UPDATE); | ||
315 | 316 | ||
316 | /* Clear all pending interrupts */ | 317 | /* Clear all pending interrupts */ |
317 | regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr); | 318 | regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr); |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 6b00173d1be4..b6f076b213bc 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -43,9 +43,10 @@ | |||
43 | #include "drm_crtc_internal.h" | 43 | #include "drm_crtc_internal.h" |
44 | #include "drm_internal.h" | 44 | #include "drm_internal.h" |
45 | 45 | ||
46 | static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | 46 | static struct drm_framebuffer * |
47 | struct drm_mode_fb_cmd2 *r, | 47 | internal_framebuffer_create(struct drm_device *dev, |
48 | struct drm_file *file_priv); | 48 | struct drm_mode_fb_cmd2 *r, |
49 | struct drm_file *file_priv); | ||
49 | 50 | ||
50 | /* Avoid boilerplate. I'm tired of typing. */ | 51 | /* Avoid boilerplate. I'm tired of typing. */ |
51 | #define DRM_ENUM_NAME_FN(fnname, list) \ | 52 | #define DRM_ENUM_NAME_FN(fnname, list) \ |
@@ -524,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb) | |||
524 | } | 525 | } |
525 | EXPORT_SYMBOL(drm_framebuffer_reference); | 526 | EXPORT_SYMBOL(drm_framebuffer_reference); |
526 | 527 | ||
527 | static void drm_framebuffer_free_bug(struct kref *kref) | ||
528 | { | ||
529 | BUG(); | ||
530 | } | ||
531 | |||
532 | static void __drm_framebuffer_unreference(struct drm_framebuffer *fb) | ||
533 | { | ||
534 | DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount)); | ||
535 | kref_put(&fb->refcount, drm_framebuffer_free_bug); | ||
536 | } | ||
537 | |||
538 | /** | 528 | /** |
539 | * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr | 529 | * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr |
540 | * @fb: fb to unregister | 530 | * @fb: fb to unregister |
@@ -1319,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane) | |||
1319 | return; | 1309 | return; |
1320 | } | 1310 | } |
1321 | /* disconnect the plane from the fb and crtc: */ | 1311 | /* disconnect the plane from the fb and crtc: */ |
1322 | __drm_framebuffer_unreference(plane->old_fb); | 1312 | drm_framebuffer_unreference(plane->old_fb); |
1323 | plane->old_fb = NULL; | 1313 | plane->old_fb = NULL; |
1324 | plane->fb = NULL; | 1314 | plane->fb = NULL; |
1325 | plane->crtc = NULL; | 1315 | plane->crtc = NULL; |
@@ -2127,12 +2117,11 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, | |||
2127 | DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); | 2117 | DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); |
2128 | 2118 | ||
2129 | mutex_lock(&dev->mode_config.mutex); | 2119 | mutex_lock(&dev->mode_config.mutex); |
2130 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); | ||
2131 | 2120 | ||
2132 | connector = drm_connector_find(dev, out_resp->connector_id); | 2121 | connector = drm_connector_find(dev, out_resp->connector_id); |
2133 | if (!connector) { | 2122 | if (!connector) { |
2134 | ret = -ENOENT; | 2123 | ret = -ENOENT; |
2135 | goto out; | 2124 | goto out_unlock; |
2136 | } | 2125 | } |
2137 | 2126 | ||
2138 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) | 2127 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) |
@@ -2157,6 +2146,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, | |||
2157 | out_resp->mm_height = connector->display_info.height_mm; | 2146 | out_resp->mm_height = connector->display_info.height_mm; |
2158 | out_resp->subpixel = connector->display_info.subpixel_order; | 2147 | out_resp->subpixel = connector->display_info.subpixel_order; |
2159 | out_resp->connection = connector->status; | 2148 | out_resp->connection = connector->status; |
2149 | |||
2150 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); | ||
2160 | encoder = drm_connector_get_encoder(connector); | 2151 | encoder = drm_connector_get_encoder(connector); |
2161 | if (encoder) | 2152 | if (encoder) |
2162 | out_resp->encoder_id = encoder->base.id; | 2153 | out_resp->encoder_id = encoder->base.id; |
@@ -2210,6 +2201,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, | |||
2210 | 2201 | ||
2211 | out: | 2202 | out: |
2212 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | 2203 | drm_modeset_unlock(&dev->mode_config.connection_mutex); |
2204 | |||
2205 | out_unlock: | ||
2213 | mutex_unlock(&dev->mode_config.mutex); | 2206 | mutex_unlock(&dev->mode_config.mutex); |
2214 | 2207 | ||
2215 | return ret; | 2208 | return ret; |
@@ -2907,13 +2900,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, | |||
2907 | */ | 2900 | */ |
2908 | if (req->flags & DRM_MODE_CURSOR_BO) { | 2901 | if (req->flags & DRM_MODE_CURSOR_BO) { |
2909 | if (req->handle) { | 2902 | if (req->handle) { |
2910 | fb = add_framebuffer_internal(dev, &fbreq, file_priv); | 2903 | fb = internal_framebuffer_create(dev, &fbreq, file_priv); |
2911 | if (IS_ERR(fb)) { | 2904 | if (IS_ERR(fb)) { |
2912 | DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); | 2905 | DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); |
2913 | return PTR_ERR(fb); | 2906 | return PTR_ERR(fb); |
2914 | } | 2907 | } |
2915 | |||
2916 | drm_framebuffer_reference(fb); | ||
2917 | } else { | 2908 | } else { |
2918 | fb = NULL; | 2909 | fb = NULL; |
2919 | } | 2910 | } |
@@ -3266,9 +3257,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) | |||
3266 | return 0; | 3257 | return 0; |
3267 | } | 3258 | } |
3268 | 3259 | ||
3269 | static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | 3260 | static struct drm_framebuffer * |
3270 | struct drm_mode_fb_cmd2 *r, | 3261 | internal_framebuffer_create(struct drm_device *dev, |
3271 | struct drm_file *file_priv) | 3262 | struct drm_mode_fb_cmd2 *r, |
3263 | struct drm_file *file_priv) | ||
3272 | { | 3264 | { |
3273 | struct drm_mode_config *config = &dev->mode_config; | 3265 | struct drm_mode_config *config = &dev->mode_config; |
3274 | struct drm_framebuffer *fb; | 3266 | struct drm_framebuffer *fb; |
@@ -3300,12 +3292,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | |||
3300 | return fb; | 3292 | return fb; |
3301 | } | 3293 | } |
3302 | 3294 | ||
3303 | mutex_lock(&file_priv->fbs_lock); | ||
3304 | r->fb_id = fb->base.id; | ||
3305 | list_add(&fb->filp_head, &file_priv->fbs); | ||
3306 | DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); | ||
3307 | mutex_unlock(&file_priv->fbs_lock); | ||
3308 | |||
3309 | return fb; | 3295 | return fb; |
3310 | } | 3296 | } |
3311 | 3297 | ||
@@ -3327,15 +3313,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | |||
3327 | int drm_mode_addfb2(struct drm_device *dev, | 3313 | int drm_mode_addfb2(struct drm_device *dev, |
3328 | void *data, struct drm_file *file_priv) | 3314 | void *data, struct drm_file *file_priv) |
3329 | { | 3315 | { |
3316 | struct drm_mode_fb_cmd2 *r = data; | ||
3330 | struct drm_framebuffer *fb; | 3317 | struct drm_framebuffer *fb; |
3331 | 3318 | ||
3332 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 3319 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
3333 | return -EINVAL; | 3320 | return -EINVAL; |
3334 | 3321 | ||
3335 | fb = add_framebuffer_internal(dev, data, file_priv); | 3322 | fb = internal_framebuffer_create(dev, r, file_priv); |
3336 | if (IS_ERR(fb)) | 3323 | if (IS_ERR(fb)) |
3337 | return PTR_ERR(fb); | 3324 | return PTR_ERR(fb); |
3338 | 3325 | ||
3326 | /* Transfer ownership to the filp for reaping on close */ | ||
3327 | |||
3328 | DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); | ||
3329 | mutex_lock(&file_priv->fbs_lock); | ||
3330 | r->fb_id = fb->base.id; | ||
3331 | list_add(&fb->filp_head, &file_priv->fbs); | ||
3332 | mutex_unlock(&file_priv->fbs_lock); | ||
3333 | |||
3339 | return 0; | 3334 | return 0; |
3340 | } | 3335 | } |
3341 | 3336 | ||
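The drm_crtc.c changes split framebuffer creation from ownership: internal_framebuffer_create() now only returns the object with the caller's reference, drm_mode_addfb2() transfers that reference to the file's fb list under fbs_lock, and the cursor path keeps the creation reference instead of taking an extra one. A simplified sketch of that ownership split, using toy types rather than the DRM structures:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical miniature of a refcounted framebuffer and a per-file list. */
struct fb {
	int refcount;
	struct fb *next;     /* link on the owning file's list */
};

struct file_priv {
	struct fb *fbs;      /* framebuffers owned by this file */
};

/* Create returns the object holding exactly one reference for the caller. */
static struct fb *internal_framebuffer_create(void)
{
	struct fb *fb = calloc(1, sizeof(*fb));
	if (fb)
		fb->refcount = 1;
	return fb;
}

/* The addfb ioctl path hands its creation reference over to the file. */
static int addfb(struct file_priv *file)
{
	struct fb *fb = internal_framebuffer_create();
	if (!fb)
		return -1;

	/* Transfer ownership: the list now holds the single reference. */
	fb->next = file->fbs;
	file->fbs = fb;
	return 0;
}

/* The cursor path keeps the creation reference for its own use instead of
 * taking an additional one, mirroring the dropped
 * drm_framebuffer_reference() call in the hunk above. */
static struct fb *cursor_wrap(void)
{
	return internal_framebuffer_create();
}

int main(void)
{
	struct file_priv file = { 0 };
	struct fb *cursor_fb = cursor_wrap();

	printf("addfb: %d, cursor refcount: %d\n",
	       addfb(&file), cursor_fb ? cursor_fb->refcount : -1);
	free(cursor_fb);
	free(file.fbs);
	return 0;
}
```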
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 9a5b68717ec8..379ab4555756 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, | |||
733 | struct drm_dp_sideband_msg_tx *txmsg) | 733 | struct drm_dp_sideband_msg_tx *txmsg) |
734 | { | 734 | { |
735 | bool ret; | 735 | bool ret; |
736 | mutex_lock(&mgr->qlock); | 736 | |
737 | /* | ||
738 | * All updates to txmsg->state are protected by mgr->qlock, and the two | ||
739 | * cases we check here are terminal states. For those the barriers | ||
740 | * provided by the wake_up/wait_event pair are enough. | ||
741 | */ | ||
737 | ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || | 742 | ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || |
738 | txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); | 743 | txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); |
739 | mutex_unlock(&mgr->qlock); | ||
740 | return ret; | 744 | return ret; |
741 | } | 745 | } |
742 | 746 | ||
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, | |||
1363 | return 0; | 1367 | return 0; |
1364 | } | 1368 | } |
1365 | 1369 | ||
1366 | /* must be called holding qlock */ | ||
1367 | static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) | 1370 | static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) |
1368 | { | 1371 | { |
1369 | struct drm_dp_sideband_msg_tx *txmsg; | 1372 | struct drm_dp_sideband_msg_tx *txmsg; |
1370 | int ret; | 1373 | int ret; |
1371 | 1374 | ||
1375 | WARN_ON(!mutex_is_locked(&mgr->qlock)); | ||
1376 | |||
1372 | /* construct a chunk from the first msg in the tx_msg queue */ | 1377 | /* construct a chunk from the first msg in the tx_msg queue */ |
1373 | if (list_empty(&mgr->tx_msg_downq)) { | 1378 | if (list_empty(&mgr->tx_msg_downq)) { |
1374 | mgr->tx_down_in_progress = false; | 1379 | mgr->tx_down_in_progress = false; |
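The MST hunks make two related points: the terminal-state test in check_txmsg_state() no longer takes qlock, because the writer updates the state under the lock and the wake_up/wait_event pair already orders the read, and the "must be called holding qlock" comment on process_single_down_tx_qlock() becomes a WARN_ON(!mutex_is_locked()) check. A small stand-in sketch, where a plain flag plays the role of the mutex (an analogy only, not the kernel locking primitives):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in state; "qlock_held" plays the role of
 * mutex_is_locked(&mgr->qlock) in the hunk above. */
struct mgr {
	bool qlock_held;
	int  tx_state;          /* only written while qlock_held */
};

enum { TX_QUEUED, TX_RX, TX_TIMEOUT };

/* Terminal states may be read without taking the lock: the writer sets the
 * state under the lock and then wakes the waiter, and that wake/wait pairing
 * supplies the ordering the removed locking used to provide. */
static bool check_done(const struct mgr *m)
{
	return m->tx_state == TX_RX || m->tx_state == TX_TIMEOUT;
}

/* Turning a "must be called holding qlock" comment into an assertion makes
 * the rule enforceable at run time rather than advisory. */
static void process_tx_qlocked(struct mgr *m)
{
	assert(m->qlock_held);
	m->tx_state = TX_RX;
}

int main(void)
{
	struct mgr m = { .qlock_held = false, .tx_state = TX_QUEUED };

	m.qlock_held = true;        /* "mutex_lock(&mgr->qlock)" */
	process_tx_qlocked(&m);
	m.qlock_held = false;       /* "mutex_unlock(&mgr->qlock)" */

	printf("done: %d\n", check_done(&m));
	return 0;
}
```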
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 732cb6f8e653..4c0aa97aaf03 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c | |||
@@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) | |||
287 | 287 | ||
288 | drm_mode_connector_update_edid_property(connector, edid); | 288 | drm_mode_connector_update_edid_property(connector, edid); |
289 | ret = drm_add_edid_modes(connector, edid); | 289 | ret = drm_add_edid_modes(connector, edid); |
290 | drm_edid_to_eld(connector, edid); | ||
290 | kfree(edid); | 291 | kfree(edid); |
291 | 292 | ||
292 | return ret; | 293 | return ret; |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 04a209e2b66d..1134526286c8 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -91,29 +91,29 @@ | |||
91 | */ | 91 | */ |
92 | 92 | ||
93 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | 93 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
94 | unsigned long size, | 94 | u64 size, |
95 | unsigned alignment, | 95 | unsigned alignment, |
96 | unsigned long color, | 96 | unsigned long color, |
97 | enum drm_mm_search_flags flags); | 97 | enum drm_mm_search_flags flags); |
98 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, | 98 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, |
99 | unsigned long size, | 99 | u64 size, |
100 | unsigned alignment, | 100 | unsigned alignment, |
101 | unsigned long color, | 101 | unsigned long color, |
102 | unsigned long start, | 102 | u64 start, |
103 | unsigned long end, | 103 | u64 end, |
104 | enum drm_mm_search_flags flags); | 104 | enum drm_mm_search_flags flags); |
105 | 105 | ||
106 | static void drm_mm_insert_helper(struct drm_mm_node *hole_node, | 106 | static void drm_mm_insert_helper(struct drm_mm_node *hole_node, |
107 | struct drm_mm_node *node, | 107 | struct drm_mm_node *node, |
108 | unsigned long size, unsigned alignment, | 108 | u64 size, unsigned alignment, |
109 | unsigned long color, | 109 | unsigned long color, |
110 | enum drm_mm_allocator_flags flags) | 110 | enum drm_mm_allocator_flags flags) |
111 | { | 111 | { |
112 | struct drm_mm *mm = hole_node->mm; | 112 | struct drm_mm *mm = hole_node->mm; |
113 | unsigned long hole_start = drm_mm_hole_node_start(hole_node); | 113 | u64 hole_start = drm_mm_hole_node_start(hole_node); |
114 | unsigned long hole_end = drm_mm_hole_node_end(hole_node); | 114 | u64 hole_end = drm_mm_hole_node_end(hole_node); |
115 | unsigned long adj_start = hole_start; | 115 | u64 adj_start = hole_start; |
116 | unsigned long adj_end = hole_end; | 116 | u64 adj_end = hole_end; |
117 | 117 | ||
118 | BUG_ON(node->allocated); | 118 | BUG_ON(node->allocated); |
119 | 119 | ||
@@ -124,12 +124,15 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, | |||
124 | adj_start = adj_end - size; | 124 | adj_start = adj_end - size; |
125 | 125 | ||
126 | if (alignment) { | 126 | if (alignment) { |
127 | unsigned tmp = adj_start % alignment; | 127 | u64 tmp = adj_start; |
128 | if (tmp) { | 128 | unsigned rem; |
129 | |||
130 | rem = do_div(tmp, alignment); | ||
131 | if (rem) { | ||
129 | if (flags & DRM_MM_CREATE_TOP) | 132 | if (flags & DRM_MM_CREATE_TOP) |
130 | adj_start -= tmp; | 133 | adj_start -= rem; |
131 | else | 134 | else |
132 | adj_start += alignment - tmp; | 135 | adj_start += alignment - rem; |
133 | } | 136 | } |
134 | } | 137 | } |
135 | 138 | ||
@@ -176,9 +179,9 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, | |||
176 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) | 179 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) |
177 | { | 180 | { |
178 | struct drm_mm_node *hole; | 181 | struct drm_mm_node *hole; |
179 | unsigned long end = node->start + node->size; | 182 | u64 end = node->start + node->size; |
180 | unsigned long hole_start; | 183 | u64 hole_start; |
181 | unsigned long hole_end; | 184 | u64 hole_end; |
182 | 185 | ||
183 | BUG_ON(node == NULL); | 186 | BUG_ON(node == NULL); |
184 | 187 | ||
@@ -227,7 +230,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node); | |||
227 | * 0 on success, -ENOSPC if there's no suitable hole. | 230 | * 0 on success, -ENOSPC if there's no suitable hole. |
228 | */ | 231 | */ |
229 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, | 232 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, |
230 | unsigned long size, unsigned alignment, | 233 | u64 size, unsigned alignment, |
231 | unsigned long color, | 234 | unsigned long color, |
232 | enum drm_mm_search_flags sflags, | 235 | enum drm_mm_search_flags sflags, |
233 | enum drm_mm_allocator_flags aflags) | 236 | enum drm_mm_allocator_flags aflags) |
@@ -246,16 +249,16 @@ EXPORT_SYMBOL(drm_mm_insert_node_generic); | |||
246 | 249 | ||
247 | static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | 250 | static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, |
248 | struct drm_mm_node *node, | 251 | struct drm_mm_node *node, |
249 | unsigned long size, unsigned alignment, | 252 | u64 size, unsigned alignment, |
250 | unsigned long color, | 253 | unsigned long color, |
251 | unsigned long start, unsigned long end, | 254 | u64 start, u64 end, |
252 | enum drm_mm_allocator_flags flags) | 255 | enum drm_mm_allocator_flags flags) |
253 | { | 256 | { |
254 | struct drm_mm *mm = hole_node->mm; | 257 | struct drm_mm *mm = hole_node->mm; |
255 | unsigned long hole_start = drm_mm_hole_node_start(hole_node); | 258 | u64 hole_start = drm_mm_hole_node_start(hole_node); |
256 | unsigned long hole_end = drm_mm_hole_node_end(hole_node); | 259 | u64 hole_end = drm_mm_hole_node_end(hole_node); |
257 | unsigned long adj_start = hole_start; | 260 | u64 adj_start = hole_start; |
258 | unsigned long adj_end = hole_end; | 261 | u64 adj_end = hole_end; |
259 | 262 | ||
260 | BUG_ON(!hole_node->hole_follows || node->allocated); | 263 | BUG_ON(!hole_node->hole_follows || node->allocated); |
261 | 264 | ||
@@ -271,12 +274,15 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | |||
271 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); | 274 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); |
272 | 275 | ||
273 | if (alignment) { | 276 | if (alignment) { |
274 | unsigned tmp = adj_start % alignment; | 277 | u64 tmp = adj_start; |
275 | if (tmp) { | 278 | unsigned rem; |
279 | |||
280 | rem = do_div(tmp, alignment); | ||
281 | if (rem) { | ||
276 | if (flags & DRM_MM_CREATE_TOP) | 282 | if (flags & DRM_MM_CREATE_TOP) |
277 | adj_start -= tmp; | 283 | adj_start -= rem; |
278 | else | 284 | else |
279 | adj_start += alignment - tmp; | 285 | adj_start += alignment - rem; |
280 | } | 286 | } |
281 | } | 287 | } |
282 | 288 | ||
@@ -324,9 +330,9 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | |||
324 | * 0 on success, -ENOSPC if there's no suitable hole. | 330 | * 0 on success, -ENOSPC if there's no suitable hole. |
325 | */ | 331 | */ |
326 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, | 332 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, |
327 | unsigned long size, unsigned alignment, | 333 | u64 size, unsigned alignment, |
328 | unsigned long color, | 334 | unsigned long color, |
329 | unsigned long start, unsigned long end, | 335 | u64 start, u64 end, |
330 | enum drm_mm_search_flags sflags, | 336 | enum drm_mm_search_flags sflags, |
331 | enum drm_mm_allocator_flags aflags) | 337 | enum drm_mm_allocator_flags aflags) |
332 | { | 338 | { |
@@ -387,32 +393,34 @@ void drm_mm_remove_node(struct drm_mm_node *node) | |||
387 | } | 393 | } |
388 | EXPORT_SYMBOL(drm_mm_remove_node); | 394 | EXPORT_SYMBOL(drm_mm_remove_node); |
389 | 395 | ||
390 | static int check_free_hole(unsigned long start, unsigned long end, | 396 | static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) |
391 | unsigned long size, unsigned alignment) | ||
392 | { | 397 | { |
393 | if (end - start < size) | 398 | if (end - start < size) |
394 | return 0; | 399 | return 0; |
395 | 400 | ||
396 | if (alignment) { | 401 | if (alignment) { |
397 | unsigned tmp = start % alignment; | 402 | u64 tmp = start; |
398 | if (tmp) | 403 | unsigned rem; |
399 | start += alignment - tmp; | 404 | |
405 | rem = do_div(tmp, alignment); | ||
406 | if (rem) | ||
407 | start += alignment - rem; | ||
400 | } | 408 | } |
401 | 409 | ||
402 | return end >= start + size; | 410 | return end >= start + size; |
403 | } | 411 | } |
404 | 412 | ||
405 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | 413 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
406 | unsigned long size, | 414 | u64 size, |
407 | unsigned alignment, | 415 | unsigned alignment, |
408 | unsigned long color, | 416 | unsigned long color, |
409 | enum drm_mm_search_flags flags) | 417 | enum drm_mm_search_flags flags) |
410 | { | 418 | { |
411 | struct drm_mm_node *entry; | 419 | struct drm_mm_node *entry; |
412 | struct drm_mm_node *best; | 420 | struct drm_mm_node *best; |
413 | unsigned long adj_start; | 421 | u64 adj_start; |
414 | unsigned long adj_end; | 422 | u64 adj_end; |
415 | unsigned long best_size; | 423 | u64 best_size; |
416 | 424 | ||
417 | BUG_ON(mm->scanned_blocks); | 425 | BUG_ON(mm->scanned_blocks); |
418 | 426 | ||
@@ -421,7 +429,7 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | |||
421 | 429 | ||
422 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, | 430 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, |
423 | flags & DRM_MM_SEARCH_BELOW) { | 431 | flags & DRM_MM_SEARCH_BELOW) { |
424 | unsigned long hole_size = adj_end - adj_start; | 432 | u64 hole_size = adj_end - adj_start; |
425 | 433 | ||
426 | if (mm->color_adjust) { | 434 | if (mm->color_adjust) { |
427 | mm->color_adjust(entry, color, &adj_start, &adj_end); | 435 | mm->color_adjust(entry, color, &adj_start, &adj_end); |
@@ -445,18 +453,18 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | |||
445 | } | 453 | } |
446 | 454 | ||
447 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, | 455 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, |
448 | unsigned long size, | 456 | u64 size, |
449 | unsigned alignment, | 457 | unsigned alignment, |
450 | unsigned long color, | 458 | unsigned long color, |
451 | unsigned long start, | 459 | u64 start, |
452 | unsigned long end, | 460 | u64 end, |
453 | enum drm_mm_search_flags flags) | 461 | enum drm_mm_search_flags flags) |
454 | { | 462 | { |
455 | struct drm_mm_node *entry; | 463 | struct drm_mm_node *entry; |
456 | struct drm_mm_node *best; | 464 | struct drm_mm_node *best; |
457 | unsigned long adj_start; | 465 | u64 adj_start; |
458 | unsigned long adj_end; | 466 | u64 adj_end; |
459 | unsigned long best_size; | 467 | u64 best_size; |
460 | 468 | ||
461 | BUG_ON(mm->scanned_blocks); | 469 | BUG_ON(mm->scanned_blocks); |
462 | 470 | ||
@@ -465,7 +473,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_ | |||
465 | 473 | ||
466 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, | 474 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, |
467 | flags & DRM_MM_SEARCH_BELOW) { | 475 | flags & DRM_MM_SEARCH_BELOW) { |
468 | unsigned long hole_size = adj_end - adj_start; | 476 | u64 hole_size = adj_end - adj_start; |
469 | 477 | ||
470 | if (adj_start < start) | 478 | if (adj_start < start) |
471 | adj_start = start; | 479 | adj_start = start; |
@@ -561,7 +569,7 @@ EXPORT_SYMBOL(drm_mm_replace_node); | |||
561 | * adding/removing nodes to/from the scan list are allowed. | 569 | * adding/removing nodes to/from the scan list are allowed. |
562 | */ | 570 | */ |
563 | void drm_mm_init_scan(struct drm_mm *mm, | 571 | void drm_mm_init_scan(struct drm_mm *mm, |
564 | unsigned long size, | 572 | u64 size, |
565 | unsigned alignment, | 573 | unsigned alignment, |
566 | unsigned long color) | 574 | unsigned long color) |
567 | { | 575 | { |
@@ -594,11 +602,11 @@ EXPORT_SYMBOL(drm_mm_init_scan); | |||
594 | * adding/removing nodes to/from the scan list are allowed. | 602 | * adding/removing nodes to/from the scan list are allowed. |
595 | */ | 603 | */ |
596 | void drm_mm_init_scan_with_range(struct drm_mm *mm, | 604 | void drm_mm_init_scan_with_range(struct drm_mm *mm, |
597 | unsigned long size, | 605 | u64 size, |
598 | unsigned alignment, | 606 | unsigned alignment, |
599 | unsigned long color, | 607 | unsigned long color, |
600 | unsigned long start, | 608 | u64 start, |
601 | unsigned long end) | 609 | u64 end) |
602 | { | 610 | { |
603 | mm->scan_color = color; | 611 | mm->scan_color = color; |
604 | mm->scan_alignment = alignment; | 612 | mm->scan_alignment = alignment; |
@@ -627,8 +635,8 @@ bool drm_mm_scan_add_block(struct drm_mm_node *node) | |||
627 | { | 635 | { |
628 | struct drm_mm *mm = node->mm; | 636 | struct drm_mm *mm = node->mm; |
629 | struct drm_mm_node *prev_node; | 637 | struct drm_mm_node *prev_node; |
630 | unsigned long hole_start, hole_end; | 638 | u64 hole_start, hole_end; |
631 | unsigned long adj_start, adj_end; | 639 | u64 adj_start, adj_end; |
632 | 640 | ||
633 | mm->scanned_blocks++; | 641 | mm->scanned_blocks++; |
634 | 642 | ||
@@ -731,7 +739,7 @@ EXPORT_SYMBOL(drm_mm_clean); | |||
731 | * | 739 | * |
732 | * Note that @mm must be cleared to 0 before calling this function. | 740 | * Note that @mm must be cleared to 0 before calling this function. |
733 | */ | 741 | */ |
734 | void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) | 742 | void drm_mm_init(struct drm_mm * mm, u64 start, u64 size) |
735 | { | 743 | { |
736 | INIT_LIST_HEAD(&mm->hole_stack); | 744 | INIT_LIST_HEAD(&mm->hole_stack); |
737 | mm->scanned_blocks = 0; | 745 | mm->scanned_blocks = 0; |
@@ -766,18 +774,17 @@ void drm_mm_takedown(struct drm_mm * mm) | |||
766 | } | 774 | } |
767 | EXPORT_SYMBOL(drm_mm_takedown); | 775 | EXPORT_SYMBOL(drm_mm_takedown); |
768 | 776 | ||
769 | static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, | 777 | static u64 drm_mm_debug_hole(struct drm_mm_node *entry, |
770 | const char *prefix) | 778 | const char *prefix) |
771 | { | 779 | { |
772 | unsigned long hole_start, hole_end, hole_size; | 780 | u64 hole_start, hole_end, hole_size; |
773 | 781 | ||
774 | if (entry->hole_follows) { | 782 | if (entry->hole_follows) { |
775 | hole_start = drm_mm_hole_node_start(entry); | 783 | hole_start = drm_mm_hole_node_start(entry); |
776 | hole_end = drm_mm_hole_node_end(entry); | 784 | hole_end = drm_mm_hole_node_end(entry); |
777 | hole_size = hole_end - hole_start; | 785 | hole_size = hole_end - hole_start; |
778 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", | 786 | pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start, |
779 | prefix, hole_start, hole_end, | 787 | hole_end, hole_size); |
780 | hole_size); | ||
781 | return hole_size; | 788 | return hole_size; |
782 | } | 789 | } |
783 | 790 | ||
@@ -792,35 +799,34 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, | |||
792 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) | 799 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) |
793 | { | 800 | { |
794 | struct drm_mm_node *entry; | 801 | struct drm_mm_node *entry; |
795 | unsigned long total_used = 0, total_free = 0, total = 0; | 802 | u64 total_used = 0, total_free = 0, total = 0; |
796 | 803 | ||
797 | total_free += drm_mm_debug_hole(&mm->head_node, prefix); | 804 | total_free += drm_mm_debug_hole(&mm->head_node, prefix); |
798 | 805 | ||
799 | drm_mm_for_each_node(entry, mm) { | 806 | drm_mm_for_each_node(entry, mm) { |
800 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n", | 807 | pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start, |
801 | prefix, entry->start, entry->start + entry->size, | 808 | entry->start + entry->size, entry->size); |
802 | entry->size); | ||
803 | total_used += entry->size; | 809 | total_used += entry->size; |
804 | total_free += drm_mm_debug_hole(entry, prefix); | 810 | total_free += drm_mm_debug_hole(entry, prefix); |
805 | } | 811 | } |
806 | total = total_free + total_used; | 812 | total = total_free + total_used; |
807 | 813 | ||
808 | printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total, | 814 | pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total, |
809 | total_used, total_free); | 815 | total_used, total_free); |
810 | } | 816 | } |
811 | EXPORT_SYMBOL(drm_mm_debug_table); | 817 | EXPORT_SYMBOL(drm_mm_debug_table); |
812 | 818 | ||
813 | #if defined(CONFIG_DEBUG_FS) | 819 | #if defined(CONFIG_DEBUG_FS) |
814 | static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) | 820 | static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) |
815 | { | 821 | { |
816 | unsigned long hole_start, hole_end, hole_size; | 822 | u64 hole_start, hole_end, hole_size; |
817 | 823 | ||
818 | if (entry->hole_follows) { | 824 | if (entry->hole_follows) { |
819 | hole_start = drm_mm_hole_node_start(entry); | 825 | hole_start = drm_mm_hole_node_start(entry); |
820 | hole_end = drm_mm_hole_node_end(entry); | 826 | hole_end = drm_mm_hole_node_end(entry); |
821 | hole_size = hole_end - hole_start; | 827 | hole_size = hole_end - hole_start; |
822 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", | 828 | seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start, |
823 | hole_start, hole_end, hole_size); | 829 | hole_end, hole_size); |
824 | return hole_size; | 830 | return hole_size; |
825 | } | 831 | } |
826 | 832 | ||
@@ -835,20 +841,20 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en | |||
835 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) | 841 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) |
836 | { | 842 | { |
837 | struct drm_mm_node *entry; | 843 | struct drm_mm_node *entry; |
838 | unsigned long total_used = 0, total_free = 0, total = 0; | 844 | u64 total_used = 0, total_free = 0, total = 0; |
839 | 845 | ||
840 | total_free += drm_mm_dump_hole(m, &mm->head_node); | 846 | total_free += drm_mm_dump_hole(m, &mm->head_node); |
841 | 847 | ||
842 | drm_mm_for_each_node(entry, mm) { | 848 | drm_mm_for_each_node(entry, mm) { |
843 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", | 849 | seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start, |
844 | entry->start, entry->start + entry->size, | 850 | entry->start + entry->size, entry->size); |
845 | entry->size); | ||
846 | total_used += entry->size; | 851 | total_used += entry->size; |
847 | total_free += drm_mm_dump_hole(m, entry); | 852 | total_free += drm_mm_dump_hole(m, entry); |
848 | } | 853 | } |
849 | total = total_free + total_used; | 854 | total = total_free + total_used; |
850 | 855 | ||
851 | seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free); | 856 | seq_printf(m, "total: %llu, used %llu free %llu\n", total, |
857 | total_used, total_free); | ||
852 | return 0; | 858 | return 0; |
853 | } | 859 | } |
854 | EXPORT_SYMBOL(drm_mm_dump_table); | 860 | EXPORT_SYMBOL(drm_mm_dump_table); |
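The drm_mm.c conversion widens sizes and addresses to u64 and replaces every "% alignment" with do_div(); on 32-bit kernels a plain 64-bit modulo would pull in a libgcc division helper, while do_div() divides the value in place and hands back the remainder. A userspace sketch of the adjusted alignment computation, with do_div() emulated here since the real macro lives in the kernel headers:

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's do_div(): divides *n in place and
 * returns the remainder (the kernel macro avoids 64-by-32 libgcc calls on
 * 32-bit builds, which is why the hunks above stop using '%'). */
static uint32_t do_div_emul(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);
	*n /= base;
	return rem;
}

/* Round a hole start up (or down) to the next aligned value, mirroring the
 * adj_start adjustment in drm_mm_insert_helper(). */
static uint64_t align_start(uint64_t adj_start, uint32_t alignment, int create_top)
{
	uint64_t tmp = adj_start;
	uint32_t rem = do_div_emul(&tmp, alignment);

	if (rem) {
		if (create_top)
			adj_start -= rem;
		else
			adj_start += alignment - rem;
	}
	return adj_start;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)align_start(0x10000000ffULL, 4096, 0));
	printf("%#llx\n", (unsigned long long)align_start(0x10000000ffULL, 4096, 1));
	return 0;
}
```

The same pattern is applied in drm_mm_insert_helper(), drm_mm_insert_helper_range() and check_free_hole() above.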
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 6591d48c1b9d..3fee587bc284 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c | |||
@@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect | |||
174 | struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; | 174 | struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; |
175 | 175 | ||
176 | count = drm_add_edid_modes(connector, edid); | 176 | count = drm_add_edid_modes(connector, edid); |
177 | drm_edid_to_eld(connector, edid); | ||
177 | } else | 178 | } else |
178 | count = (*connector_funcs->get_modes)(connector); | 179 | count = (*connector_funcs->get_modes)(connector); |
179 | } | 180 | } |
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index a5e74612100e..0a6780367d28 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig | |||
@@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI | |||
50 | 50 | ||
51 | config DRM_EXYNOS_DP | 51 | config DRM_EXYNOS_DP |
52 | bool "EXYNOS DRM DP driver support" | 52 | bool "EXYNOS DRM DP driver support" |
53 | depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) | 53 | depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) |
54 | default DRM_EXYNOS | 54 | default DRM_EXYNOS |
55 | select DRM_PANEL | 55 | select DRM_PANEL |
56 | help | 56 | help |
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 63f02e2380ae..970046199608 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c | |||
@@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev) | |||
888 | of_node_put(i80_if_timings); | 888 | of_node_put(i80_if_timings); |
889 | 889 | ||
890 | ctx->regs = of_iomap(dev->of_node, 0); | 890 | ctx->regs = of_iomap(dev->of_node, 0); |
891 | if (IS_ERR(ctx->regs)) { | 891 | if (!ctx->regs) { |
892 | ret = PTR_ERR(ctx->regs); | 892 | ret = -ENOMEM; |
893 | goto err_del_component; | 893 | goto err_del_component; |
894 | } | 894 | } |
895 | 895 | ||
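The decon probe fix works because of_iomap() reports failure with NULL rather than with an ERR_PTR-encoded errno, so IS_ERR()/PTR_ERR() on its result would treat every failure as success. A small self-contained illustration of the two error-return conventions (the macros and helpers here are local re-implementations for the demo, not the kernel's):

```c
#include <errno.h>
#include <stdio.h>

/* Minimal stand-ins: one API reports failure with NULL, the other with an
 * ERR_PTR-style encoded errno. Mixing the checks up is the bug the decon
 * hunk above fixes. */
#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define IS_ERR(p)     ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

static void *returns_null_on_error(void)   { return NULL; }
static void *returns_errptr_on_error(void) { return ERR_PTR(-ENOMEM); }

int main(void)
{
	void *a = returns_null_on_error();
	void *b = returns_errptr_on_error();

	/* NULL-returning APIs (like of_iomap()) need a plain NULL test ... */
	if (!a)
		puts("a failed (NULL)");
	/* ... IS_ERR() on them would claim success, because NULL is not in
	 * the top-4095 error range. */
	printf("IS_ERR(a) = %d (misses the failure)\n", IS_ERR(a));

	/* ERR_PTR-returning APIs are the ones checked with IS_ERR()/PTR_ERR(). */
	if (IS_ERR(b))
		printf("b failed (%ld)\n", (long)b);
	return 0;
}
```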
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c deleted file mode 100644 index ba9b3d5ed672..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ /dev/null | |||
@@ -1,245 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
3 | * Authors: | ||
4 | * Inki Dae <inki.dae@samsung.com> | ||
5 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
6 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <drm/drmP.h> | ||
15 | #include <drm/drm_crtc_helper.h> | ||
16 | |||
17 | #include <drm/exynos_drm.h> | ||
18 | #include "exynos_drm_drv.h" | ||
19 | #include "exynos_drm_encoder.h" | ||
20 | #include "exynos_drm_connector.h" | ||
21 | |||
22 | #define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ | ||
23 | drm_connector) | ||
24 | |||
25 | struct exynos_drm_connector { | ||
26 | struct drm_connector drm_connector; | ||
27 | uint32_t encoder_id; | ||
28 | struct exynos_drm_display *display; | ||
29 | }; | ||
30 | |||
31 | static int exynos_drm_connector_get_modes(struct drm_connector *connector) | ||
32 | { | ||
33 | struct exynos_drm_connector *exynos_connector = | ||
34 | to_exynos_connector(connector); | ||
35 | struct exynos_drm_display *display = exynos_connector->display; | ||
36 | struct edid *edid = NULL; | ||
37 | unsigned int count = 0; | ||
38 | int ret; | ||
39 | |||
40 | /* | ||
41 | * if get_edid() exists then get_edid() callback of hdmi side | ||
42 | * is called to get edid data through i2c interface else | ||
43 | * get timing from the FIMD driver(display controller). | ||
44 | * | ||
45 | * P.S. in case of lcd panel, count is always 1 if success | ||
46 | * because lcd panel has only one mode. | ||
47 | */ | ||
48 | if (display->ops->get_edid) { | ||
49 | edid = display->ops->get_edid(display, connector); | ||
50 | if (IS_ERR_OR_NULL(edid)) { | ||
51 | ret = PTR_ERR(edid); | ||
52 | edid = NULL; | ||
53 | DRM_ERROR("Panel operation get_edid failed %d\n", ret); | ||
54 | goto out; | ||
55 | } | ||
56 | |||
57 | count = drm_add_edid_modes(connector, edid); | ||
58 | if (!count) { | ||
59 | DRM_ERROR("Add edid modes failed %d\n", count); | ||
60 | goto out; | ||
61 | } | ||
62 | |||
63 | drm_mode_connector_update_edid_property(connector, edid); | ||
64 | } else { | ||
65 | struct exynos_drm_panel_info *panel; | ||
66 | struct drm_display_mode *mode = drm_mode_create(connector->dev); | ||
67 | if (!mode) { | ||
68 | DRM_ERROR("failed to create a new display mode.\n"); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | if (display->ops->get_panel) | ||
73 | panel = display->ops->get_panel(display); | ||
74 | else { | ||
75 | drm_mode_destroy(connector->dev, mode); | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | drm_display_mode_from_videomode(&panel->vm, mode); | ||
80 | mode->width_mm = panel->width_mm; | ||
81 | mode->height_mm = panel->height_mm; | ||
82 | connector->display_info.width_mm = mode->width_mm; | ||
83 | connector->display_info.height_mm = mode->height_mm; | ||
84 | |||
85 | mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; | ||
86 | drm_mode_set_name(mode); | ||
87 | drm_mode_probed_add(connector, mode); | ||
88 | |||
89 | count = 1; | ||
90 | } | ||
91 | |||
92 | out: | ||
93 | kfree(edid); | ||
94 | return count; | ||
95 | } | ||
96 | |||
97 | static int exynos_drm_connector_mode_valid(struct drm_connector *connector, | ||
98 | struct drm_display_mode *mode) | ||
99 | { | ||
100 | struct exynos_drm_connector *exynos_connector = | ||
101 | to_exynos_connector(connector); | ||
102 | struct exynos_drm_display *display = exynos_connector->display; | ||
103 | int ret = MODE_BAD; | ||
104 | |||
105 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
106 | |||
107 | if (display->ops->check_mode) | ||
108 | if (!display->ops->check_mode(display, mode)) | ||
109 | ret = MODE_OK; | ||
110 | |||
111 | return ret; | ||
112 | } | ||
113 | |||
114 | static struct drm_encoder *exynos_drm_best_encoder( | ||
115 | struct drm_connector *connector) | ||
116 | { | ||
117 | struct drm_device *dev = connector->dev; | ||
118 | struct exynos_drm_connector *exynos_connector = | ||
119 | to_exynos_connector(connector); | ||
120 | return drm_encoder_find(dev, exynos_connector->encoder_id); | ||
121 | } | ||
122 | |||
123 | static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { | ||
124 | .get_modes = exynos_drm_connector_get_modes, | ||
125 | .mode_valid = exynos_drm_connector_mode_valid, | ||
126 | .best_encoder = exynos_drm_best_encoder, | ||
127 | }; | ||
128 | |||
129 | static int exynos_drm_connector_fill_modes(struct drm_connector *connector, | ||
130 | unsigned int max_width, unsigned int max_height) | ||
131 | { | ||
132 | struct exynos_drm_connector *exynos_connector = | ||
133 | to_exynos_connector(connector); | ||
134 | struct exynos_drm_display *display = exynos_connector->display; | ||
135 | unsigned int width, height; | ||
136 | |||
137 | width = max_width; | ||
138 | height = max_height; | ||
139 | |||
140 | /* | ||
141 | * if specific driver want to find desired_mode using maxmum | ||
142 | * resolution then get max width and height from that driver. | ||
143 | */ | ||
144 | if (display->ops->get_max_resol) | ||
145 | display->ops->get_max_resol(display, &width, &height); | ||
146 | |||
147 | return drm_helper_probe_single_connector_modes(connector, width, | ||
148 | height); | ||
149 | } | ||
150 | |||
151 | /* get detection status of display device. */ | ||
152 | static enum drm_connector_status | ||
153 | exynos_drm_connector_detect(struct drm_connector *connector, bool force) | ||
154 | { | ||
155 | struct exynos_drm_connector *exynos_connector = | ||
156 | to_exynos_connector(connector); | ||
157 | struct exynos_drm_display *display = exynos_connector->display; | ||
158 | enum drm_connector_status status = connector_status_disconnected; | ||
159 | |||
160 | if (display->ops->is_connected) { | ||
161 | if (display->ops->is_connected(display)) | ||
162 | status = connector_status_connected; | ||
163 | else | ||
164 | status = connector_status_disconnected; | ||
165 | } | ||
166 | |||
167 | return status; | ||
168 | } | ||
169 | |||
170 | static void exynos_drm_connector_destroy(struct drm_connector *connector) | ||
171 | { | ||
172 | struct exynos_drm_connector *exynos_connector = | ||
173 | to_exynos_connector(connector); | ||
174 | |||
175 | drm_connector_unregister(connector); | ||
176 | drm_connector_cleanup(connector); | ||
177 | kfree(exynos_connector); | ||
178 | } | ||
179 | |||
180 | static struct drm_connector_funcs exynos_connector_funcs = { | ||
181 | .dpms = drm_helper_connector_dpms, | ||
182 | .fill_modes = exynos_drm_connector_fill_modes, | ||
183 | .detect = exynos_drm_connector_detect, | ||
184 | .destroy = exynos_drm_connector_destroy, | ||
185 | }; | ||
186 | |||
187 | struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, | ||
188 | struct drm_encoder *encoder) | ||
189 | { | ||
190 | struct exynos_drm_connector *exynos_connector; | ||
191 | struct exynos_drm_display *display = exynos_drm_get_display(encoder); | ||
192 | struct drm_connector *connector; | ||
193 | int type; | ||
194 | int err; | ||
195 | |||
196 | exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); | ||
197 | if (!exynos_connector) | ||
198 | return NULL; | ||
199 | |||
200 | connector = &exynos_connector->drm_connector; | ||
201 | |||
202 | switch (display->type) { | ||
203 | case EXYNOS_DISPLAY_TYPE_HDMI: | ||
204 | type = DRM_MODE_CONNECTOR_HDMIA; | ||
205 | connector->interlace_allowed = true; | ||
206 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
207 | break; | ||
208 | case EXYNOS_DISPLAY_TYPE_VIDI: | ||
209 | type = DRM_MODE_CONNECTOR_VIRTUAL; | ||
210 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
211 | break; | ||
212 | default: | ||
213 | type = DRM_MODE_CONNECTOR_Unknown; | ||
214 | break; | ||
215 | } | ||
216 | |||
217 | drm_connector_init(dev, connector, &exynos_connector_funcs, type); | ||
218 | drm_connector_helper_add(connector, &exynos_connector_helper_funcs); | ||
219 | |||
220 | err = drm_connector_register(connector); | ||
221 | if (err) | ||
222 | goto err_connector; | ||
223 | |||
224 | exynos_connector->encoder_id = encoder->base.id; | ||
225 | exynos_connector->display = display; | ||
226 | connector->dpms = DRM_MODE_DPMS_OFF; | ||
227 | connector->encoder = encoder; | ||
228 | |||
229 | err = drm_mode_connector_attach_encoder(connector, encoder); | ||
230 | if (err) { | ||
231 | DRM_ERROR("failed to attach a connector to a encoder\n"); | ||
232 | goto err_sysfs; | ||
233 | } | ||
234 | |||
235 | DRM_DEBUG_KMS("connector has been created\n"); | ||
236 | |||
237 | return connector; | ||
238 | |||
239 | err_sysfs: | ||
240 | drm_connector_unregister(connector); | ||
241 | err_connector: | ||
242 | drm_connector_cleanup(connector); | ||
243 | kfree(exynos_connector); | ||
244 | return NULL; | ||
245 | } | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h deleted file mode 100644 index 4eb20d78379a..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.h +++ /dev/null | |||
@@ -1,20 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
3 | * Authors: | ||
4 | * Inki Dae <inki.dae@samsung.com> | ||
5 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
6 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #ifndef _EXYNOS_DRM_CONNECTOR_H_ | ||
15 | #define _EXYNOS_DRM_CONNECTOR_H_ | ||
16 | |||
17 | struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, | ||
18 | struct drm_encoder *encoder); | ||
19 | |||
20 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 925fc69af1a0..33a10ce967ea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -147,6 +147,7 @@ struct fimd_win_data { | |||
147 | unsigned int ovl_height; | 147 | unsigned int ovl_height; |
148 | unsigned int fb_width; | 148 | unsigned int fb_width; |
149 | unsigned int fb_height; | 149 | unsigned int fb_height; |
150 | unsigned int fb_pitch; | ||
150 | unsigned int bpp; | 151 | unsigned int bpp; |
151 | unsigned int pixel_format; | 152 | unsigned int pixel_format; |
152 | dma_addr_t dma_addr; | 153 | dma_addr_t dma_addr; |
@@ -284,14 +285,9 @@ static void fimd_clear_channel(struct fimd_context *ctx) | |||
284 | } | 285 | } |
285 | } | 286 | } |
286 | 287 | ||
287 | static int fimd_ctx_initialize(struct fimd_context *ctx, | 288 | static int fimd_iommu_attach_devices(struct fimd_context *ctx, |
288 | struct drm_device *drm_dev) | 289 | struct drm_device *drm_dev) |
289 | { | 290 | { |
290 | struct exynos_drm_private *priv; | ||
291 | priv = drm_dev->dev_private; | ||
292 | |||
293 | ctx->drm_dev = drm_dev; | ||
294 | ctx->pipe = priv->pipe++; | ||
295 | 291 | ||
296 | /* attach this sub driver to iommu mapping if supported. */ | 292 | /* attach this sub driver to iommu mapping if supported. */ |
297 | if (is_drm_iommu_supported(ctx->drm_dev)) { | 293 | if (is_drm_iommu_supported(ctx->drm_dev)) { |
@@ -313,7 +309,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx, | |||
313 | return 0; | 309 | return 0; |
314 | } | 310 | } |
315 | 311 | ||
316 | static void fimd_ctx_remove(struct fimd_context *ctx) | 312 | static void fimd_iommu_detach_devices(struct fimd_context *ctx) |
317 | { | 313 | { |
318 | /* detach this sub driver from iommu mapping if supported. */ | 314 | /* detach this sub driver from iommu mapping if supported. */ |
319 | if (is_drm_iommu_supported(ctx->drm_dev)) | 315 | if (is_drm_iommu_supported(ctx->drm_dev)) |
@@ -537,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc, | |||
537 | win_data->offset_y = plane->crtc_y; | 533 | win_data->offset_y = plane->crtc_y; |
538 | win_data->ovl_width = plane->crtc_width; | 534 | win_data->ovl_width = plane->crtc_width; |
539 | win_data->ovl_height = plane->crtc_height; | 535 | win_data->ovl_height = plane->crtc_height; |
536 | win_data->fb_pitch = plane->pitch; | ||
540 | win_data->fb_width = plane->fb_width; | 537 | win_data->fb_width = plane->fb_width; |
541 | win_data->fb_height = plane->fb_height; | 538 | win_data->fb_height = plane->fb_height; |
542 | win_data->dma_addr = plane->dma_addr[0] + offset; | 539 | win_data->dma_addr = plane->dma_addr[0] + offset; |
543 | win_data->bpp = plane->bpp; | 540 | win_data->bpp = plane->bpp; |
544 | win_data->pixel_format = plane->pixel_format; | 541 | win_data->pixel_format = plane->pixel_format; |
545 | win_data->buf_offsize = (plane->fb_width - plane->crtc_width) * | 542 | win_data->buf_offsize = |
546 | (plane->bpp >> 3); | 543 | plane->pitch - (plane->crtc_width * (plane->bpp >> 3)); |
547 | win_data->line_size = plane->crtc_width * (plane->bpp >> 3); | 544 | win_data->line_size = plane->crtc_width * (plane->bpp >> 3); |
548 | 545 | ||
549 | DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", | 546 | DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", |
@@ -709,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos) | |||
709 | writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); | 706 | writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); |
710 | 707 | ||
711 | /* buffer end address */ | 708 | /* buffer end address */ |
712 | size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); | 709 | size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3); |
713 | val = (unsigned long)(win_data->dma_addr + size); | 710 | val = (unsigned long)(win_data->dma_addr + size); |
714 | writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); | 711 | writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); |
715 | 712 | ||
@@ -1056,25 +1053,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) | |||
1056 | { | 1053 | { |
1057 | struct fimd_context *ctx = dev_get_drvdata(dev); | 1054 | struct fimd_context *ctx = dev_get_drvdata(dev); |
1058 | struct drm_device *drm_dev = data; | 1055 | struct drm_device *drm_dev = data; |
1056 | struct exynos_drm_private *priv = drm_dev->dev_private; | ||
1059 | int ret; | 1057 | int ret; |
1060 | 1058 | ||
1061 | ret = fimd_ctx_initialize(ctx, drm_dev); | 1059 | ctx->drm_dev = drm_dev; |
1062 | if (ret) { | 1060 | ctx->pipe = priv->pipe++; |
1063 | DRM_ERROR("fimd_ctx_initialize failed.\n"); | ||
1064 | return ret; | ||
1065 | } | ||
1066 | 1061 | ||
1067 | ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, | 1062 | ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, |
1068 | EXYNOS_DISPLAY_TYPE_LCD, | 1063 | EXYNOS_DISPLAY_TYPE_LCD, |
1069 | &fimd_crtc_ops, ctx); | 1064 | &fimd_crtc_ops, ctx); |
1070 | if (IS_ERR(ctx->crtc)) { | ||
1071 | fimd_ctx_remove(ctx); | ||
1072 | return PTR_ERR(ctx->crtc); | ||
1073 | } | ||
1074 | 1065 | ||
1075 | if (ctx->display) | 1066 | if (ctx->display) |
1076 | exynos_drm_create_enc_conn(drm_dev, ctx->display); | 1067 | exynos_drm_create_enc_conn(drm_dev, ctx->display); |
1077 | 1068 | ||
1069 | ret = fimd_iommu_attach_devices(ctx, drm_dev); | ||
1070 | if (ret) | ||
1071 | return ret; | ||
1072 | |||
1078 | return 0; | 1073 | return 0; |
1079 | 1074 | ||
1080 | } | 1075 | } |
@@ -1086,10 +1081,10 @@ static void fimd_unbind(struct device *dev, struct device *master, | |||
1086 | 1081 | ||
1087 | fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); | 1082 | fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); |
1088 | 1083 | ||
1084 | fimd_iommu_detach_devices(ctx); | ||
1085 | |||
1089 | if (ctx->display) | 1086 | if (ctx->display) |
1090 | exynos_dpi_remove(ctx->display); | 1087 | exynos_dpi_remove(ctx->display); |
1091 | |||
1092 | fimd_ctx_remove(ctx); | ||
1093 | } | 1088 | } |
1094 | 1089 | ||
1095 | static const struct component_ops fimd_component_ops = { | 1090 | static const struct component_ops fimd_component_ops = { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index a5616872eee7..8ad5b7294eb4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c | |||
@@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane) | |||
175 | struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); | 175 | struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); |
176 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); | 176 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); |
177 | 177 | ||
178 | if (exynos_crtc->ops->win_disable) | 178 | if (exynos_crtc && exynos_crtc->ops->win_disable) |
179 | exynos_crtc->ops->win_disable(exynos_crtc, | 179 | exynos_crtc->ops->win_disable(exynos_crtc, |
180 | exynos_plane->zpos); | 180 | exynos_plane->zpos); |
181 | 181 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 3518bc4654c5..2e3bc57ea50e 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
@@ -55,6 +55,7 @@ struct hdmi_win_data { | |||
55 | unsigned int fb_x; | 55 | unsigned int fb_x; |
56 | unsigned int fb_y; | 56 | unsigned int fb_y; |
57 | unsigned int fb_width; | 57 | unsigned int fb_width; |
58 | unsigned int fb_pitch; | ||
58 | unsigned int fb_height; | 59 | unsigned int fb_height; |
59 | unsigned int src_width; | 60 | unsigned int src_width; |
60 | unsigned int src_height; | 61 | unsigned int src_height; |
@@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) | |||
438 | } else { | 439 | } else { |
439 | luma_addr[0] = win_data->dma_addr; | 440 | luma_addr[0] = win_data->dma_addr; |
440 | chroma_addr[0] = win_data->dma_addr | 441 | chroma_addr[0] = win_data->dma_addr |
441 | + (win_data->fb_width * win_data->fb_height); | 442 | + (win_data->fb_pitch * win_data->fb_height); |
442 | } | 443 | } |
443 | 444 | ||
444 | if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) { | 445 | if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) { |
@@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) | |||
447 | luma_addr[1] = luma_addr[0] + 0x40; | 448 | luma_addr[1] = luma_addr[0] + 0x40; |
448 | chroma_addr[1] = chroma_addr[0] + 0x40; | 449 | chroma_addr[1] = chroma_addr[0] + 0x40; |
449 | } else { | 450 | } else { |
450 | luma_addr[1] = luma_addr[0] + win_data->fb_width; | 451 | luma_addr[1] = luma_addr[0] + win_data->fb_pitch; |
451 | chroma_addr[1] = chroma_addr[0] + win_data->fb_width; | 452 | chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch; |
452 | } | 453 | } |
453 | } else { | 454 | } else { |
454 | ctx->interlace = false; | 455 | ctx->interlace = false; |
@@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) | |||
469 | vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); | 470 | vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); |
470 | 471 | ||
471 | /* setting size of input image */ | 472 | /* setting size of input image */ |
472 | vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) | | 473 | vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) | |
473 | VP_IMG_VSIZE(win_data->fb_height)); | 474 | VP_IMG_VSIZE(win_data->fb_height)); |
474 | /* chroma height has to be reduced by 2 to avoid chroma distortions */ | 475 | /* chroma height has to be reduced by 2 to avoid chroma distortions */ |
475 | vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) | | 476 | vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) | |
476 | VP_IMG_VSIZE(win_data->fb_height / 2)); | 477 | VP_IMG_VSIZE(win_data->fb_height / 2)); |
477 | 478 | ||
478 | vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width); | 479 | vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width); |
@@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win) | |||
559 | /* converting dma address base and source offset */ | 560 | /* converting dma address base and source offset */ |
560 | dma_addr = win_data->dma_addr | 561 | dma_addr = win_data->dma_addr |
561 | + (win_data->fb_x * win_data->bpp >> 3) | 562 | + (win_data->fb_x * win_data->bpp >> 3) |
562 | + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3); | 563 | + (win_data->fb_y * win_data->fb_pitch); |
563 | src_x_offset = 0; | 564 | src_x_offset = 0; |
564 | src_y_offset = 0; | 565 | src_y_offset = 0; |
565 | 566 | ||
@@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win) | |||
576 | MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); | 577 | MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); |
577 | 578 | ||
578 | /* setup geometry */ | 579 | /* setup geometry */ |
579 | mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width); | 580 | mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), |
581 | win_data->fb_pitch / (win_data->bpp >> 3)); | ||
580 | 582 | ||
581 | /* setup display size */ | 583 | /* setup display size */ |
582 | if (ctx->mxr_ver == MXR_VER_128_0_0_184 && | 584 | if (ctx->mxr_ver == MXR_VER_128_0_0_184 && |
@@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc, | |||
961 | win_data->fb_y = plane->fb_y; | 963 | win_data->fb_y = plane->fb_y; |
962 | win_data->fb_width = plane->fb_width; | 964 | win_data->fb_width = plane->fb_width; |
963 | win_data->fb_height = plane->fb_height; | 965 | win_data->fb_height = plane->fb_height; |
966 | win_data->fb_pitch = plane->pitch; | ||
964 | win_data->src_width = plane->src_width; | 967 | win_data->src_width = plane->src_width; |
965 | win_data->src_height = plane->src_height; | 968 | win_data->src_height = plane->src_height; |
966 | 969 | ||
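
The exynos fimd and mixer hunks above switch the scanout address math from fb_width to fb_pitch: the pitch is the byte stride of one framebuffer row, including any alignment padding, so byte offsets and the inter-line gap have to be derived from it rather than from the pixel width. A minimal user-space sketch of that arithmetic (values and names are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int bpp    = 32;     /* bits per pixel */
        unsigned int pitch  = 5504;   /* bytes per framebuffer row, incl. padding */
        unsigned int crtc_w = 1366;   /* pixels actually scanned out per line */
        unsigned int x = 10, y = 20;  /* plane offset inside the framebuffer */

        /* byte address of pixel (x, y): the pitch accounts for row padding */
        uint32_t dma_off = y * pitch + x * (bpp >> 3);

        /* gap between the end of one scanned-out line and the start of the
         * next, i.e. what the fimd hunk now computes as buf_offsize */
        uint32_t buf_offsize = pitch - crtc_w * (bpp >> 3);

        printf("offset=%u bytes, line gap=%u bytes\n", dma_off, buf_offsize);
        return 0;
}
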
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 96e811fe24ca..e8b18e542da4 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -152,12 +152,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | |||
152 | seq_puts(m, " (pp"); | 152 | seq_puts(m, " (pp"); |
153 | else | 153 | else |
154 | seq_puts(m, " (g"); | 154 | seq_puts(m, " (g"); |
155 | seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)", | 155 | seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)", |
156 | vma->node.start, vma->node.size, | 156 | vma->node.start, vma->node.size, |
157 | vma->ggtt_view.type); | 157 | vma->ggtt_view.type); |
158 | } | 158 | } |
159 | if (obj->stolen) | 159 | if (obj->stolen) |
160 | seq_printf(m, " (stolen: %08lx)", obj->stolen->start); | 160 | seq_printf(m, " (stolen: %08llx)", obj->stolen->start); |
161 | if (obj->pin_mappable || obj->fault_mappable) { | 161 | if (obj->pin_mappable || obj->fault_mappable) { |
162 | char s[3], *t = s; | 162 | char s[3], *t = s; |
163 | if (obj->pin_mappable) | 163 | if (obj->pin_mappable) |
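
The debugfs hunk only changes printf conversions, presumably because the drm_mm_node start/size fields are 64-bit: "%08lx" is wrong for them on 32-bit builds, while "%08llx" matches the argument width everywhere. A standalone illustration of the same point (hypothetical value):

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
        uint64_t start = 0x12345678abcdULL;

        /* "%08lx" would consume only 32 bits of the argument on 32-bit
         * targets; "%llx" (or PRIx64) matches a 64-bit value everywhere. */
        printf("gtt offset: %08llx\n", (unsigned long long)start);
        printf("gtt offset: %08" PRIx64 "\n", start);
        return 0;
}
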
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8039cec71fc2..5c66b568bb81 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
622 | return 0; | 622 | return 0; |
623 | } | 623 | } |
624 | 624 | ||
625 | static int i915_drm_suspend_late(struct drm_device *drm_dev) | 625 | static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) |
626 | { | 626 | { |
627 | struct drm_i915_private *dev_priv = drm_dev->dev_private; | 627 | struct drm_i915_private *dev_priv = drm_dev->dev_private; |
628 | int ret; | 628 | int ret; |
@@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev) | |||
636 | } | 636 | } |
637 | 637 | ||
638 | pci_disable_device(drm_dev->pdev); | 638 | pci_disable_device(drm_dev->pdev); |
639 | pci_set_power_state(drm_dev->pdev, PCI_D3hot); | 639 | /* |
640 | * During hibernation on some GEN4 platforms the BIOS may try to access | ||
641 | * the device even though it's already in D3 and hang the machine. So | ||
642 | * leave the device in D0 on those platforms and hope the BIOS will | ||
643 | * power down the device properly. Platforms where this was seen: | ||
644 | * Lenovo Thinkpad X301, X61s | ||
645 | */ | ||
646 | if (!(hibernation && | ||
647 | drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO && | ||
648 | INTEL_INFO(dev_priv)->gen == 4)) | ||
649 | pci_set_power_state(drm_dev->pdev, PCI_D3hot); | ||
640 | 650 | ||
641 | return 0; | 651 | return 0; |
642 | } | 652 | } |
@@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state) | |||
662 | if (error) | 672 | if (error) |
663 | return error; | 673 | return error; |
664 | 674 | ||
665 | return i915_drm_suspend_late(dev); | 675 | return i915_drm_suspend_late(dev, false); |
666 | } | 676 | } |
667 | 677 | ||
668 | static int i915_drm_resume(struct drm_device *dev) | 678 | static int i915_drm_resume(struct drm_device *dev) |
@@ -950,7 +960,17 @@ static int i915_pm_suspend_late(struct device *dev) | |||
950 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | 960 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
951 | return 0; | 961 | return 0; |
952 | 962 | ||
953 | return i915_drm_suspend_late(drm_dev); | 963 | return i915_drm_suspend_late(drm_dev, false); |
964 | } | ||
965 | |||
966 | static int i915_pm_poweroff_late(struct device *dev) | ||
967 | { | ||
968 | struct drm_device *drm_dev = dev_to_i915(dev)->dev; | ||
969 | |||
970 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
971 | return 0; | ||
972 | |||
973 | return i915_drm_suspend_late(drm_dev, true); | ||
954 | } | 974 | } |
955 | 975 | ||
956 | static int i915_pm_resume_early(struct device *dev) | 976 | static int i915_pm_resume_early(struct device *dev) |
@@ -1075,6 +1095,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |||
1075 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | 1095 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ |
1076 | s->gu_ctl0 = I915_READ(VLV_GU_CTL0); | 1096 | s->gu_ctl0 = I915_READ(VLV_GU_CTL0); |
1077 | s->gu_ctl1 = I915_READ(VLV_GU_CTL1); | 1097 | s->gu_ctl1 = I915_READ(VLV_GU_CTL1); |
1098 | s->pcbr = I915_READ(VLV_PCBR); | ||
1078 | s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); | 1099 | s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); |
1079 | 1100 | ||
1080 | /* | 1101 | /* |
@@ -1169,6 +1190,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) | |||
1169 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ | 1190 | /* Gunit-Display CZ domain, 0x182028-0x1821CF */ |
1170 | I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); | 1191 | I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); |
1171 | I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); | 1192 | I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); |
1193 | I915_WRITE(VLV_PCBR, s->pcbr); | ||
1172 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); | 1194 | I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); |
1173 | } | 1195 | } |
1174 | 1196 | ||
@@ -1177,19 +1199,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) | |||
1177 | u32 val; | 1199 | u32 val; |
1178 | int err; | 1200 | int err; |
1179 | 1201 | ||
1180 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | ||
1181 | WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on); | ||
1182 | |||
1183 | #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) | 1202 | #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) |
1184 | /* Wait for a previous force-off to settle */ | ||
1185 | if (force_on) { | ||
1186 | err = wait_for(!COND, 20); | ||
1187 | if (err) { | ||
1188 | DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n", | ||
1189 | I915_READ(VLV_GTLC_SURVIVABILITY_REG)); | ||
1190 | return err; | ||
1191 | } | ||
1192 | } | ||
1193 | 1203 | ||
1194 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); | 1204 | val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); |
1195 | val &= ~VLV_GFX_CLK_FORCE_ON_BIT; | 1205 | val &= ~VLV_GFX_CLK_FORCE_ON_BIT; |
@@ -1520,7 +1530,7 @@ static const struct dev_pm_ops i915_pm_ops = { | |||
1520 | .thaw_early = i915_pm_resume_early, | 1530 | .thaw_early = i915_pm_resume_early, |
1521 | .thaw = i915_pm_resume, | 1531 | .thaw = i915_pm_resume, |
1522 | .poweroff = i915_pm_suspend, | 1532 | .poweroff = i915_pm_suspend, |
1523 | .poweroff_late = i915_pm_suspend_late, | 1533 | .poweroff_late = i915_pm_poweroff_late, |
1524 | .restore_early = i915_pm_resume_early, | 1534 | .restore_early = i915_pm_resume_early, |
1525 | .restore = i915_pm_resume, | 1535 | .restore = i915_pm_resume, |
1526 | 1536 | ||
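
The i915_drv.c changes thread a hibernation flag into i915_drm_suspend_late() and add a .poweroff_late callback so that GEN4 Lenovo machines skip the transition to D3hot during hibernation. A small sketch of the resulting predicate (illustrative only; PCI_VENDOR_ID_LENOVO is the real PCI vendor ID, everything else here is made up):

#include <stdbool.h>
#include <stdio.h>

#define PCI_VENDOR_ID_LENOVO 0x17aa

/* Mirrors the condition added around pci_set_power_state(): stay in D0 when
 * hibernating on a GEN4 Lenovo system, where the BIOS has been seen touching
 * the device after it was put into D3 and hanging the machine. */
static bool should_enter_d3hot(bool hibernation, unsigned short subsys_vendor, int gen)
{
        if (hibernation && subsys_vendor == PCI_VENDOR_ID_LENOVO && gen == 4)
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", should_enter_d3hot(true,  PCI_VENDOR_ID_LENOVO, 4)); /* 0: skip D3hot */
        printf("%d\n", should_enter_d3hot(false, PCI_VENDOR_ID_LENOVO, 4)); /* 1: normal suspend */
        return 0;
}
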
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f2a825e39646..b4faa2df9d3d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1094,6 +1094,7 @@ struct vlv_s0ix_state { | |||
1094 | /* Display 2 CZ domain */ | 1094 | /* Display 2 CZ domain */ |
1095 | u32 gu_ctl0; | 1095 | u32 gu_ctl0; |
1096 | u32 gu_ctl1; | 1096 | u32 gu_ctl1; |
1097 | u32 pcbr; | ||
1097 | u32 clock_gate_dis2; | 1098 | u32 clock_gate_dis2; |
1098 | }; | 1099 | }; |
1099 | 1100 | ||
@@ -2114,6 +2115,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, | |||
2114 | * number comparisons on buffer last_read|write_seqno. It also allows an | 2115 | * number comparisons on buffer last_read|write_seqno. It also allows an |
2115 | * emission time to be associated with the request for tracking how far ahead | 2116 | * emission time to be associated with the request for tracking how far ahead |
2116 | * of the GPU the submission is. | 2117 | * of the GPU the submission is. |
2118 | * | ||
2119 | * The requests are reference counted, so upon creation they should have an | ||
2120 | * initial reference taken using kref_init | ||
2117 | */ | 2121 | */ |
2118 | struct drm_i915_gem_request { | 2122 | struct drm_i915_gem_request { |
2119 | struct kref ref; | 2123 | struct kref ref; |
@@ -2137,7 +2141,16 @@ struct drm_i915_gem_request { | |||
2137 | /** Position in the ringbuffer of the end of the whole request */ | 2141 | /** Position in the ringbuffer of the end of the whole request */ |
2138 | u32 tail; | 2142 | u32 tail; |
2139 | 2143 | ||
2140 | /** Context related to this request */ | 2144 | /** |
2145 | * Context related to this request | ||
2146 | * Contexts are refcounted, so when this request is associated with a | ||
2147 | * context, we must increment the context's refcount, to guarantee that | ||
2148 | * it persists while any request is linked to it. Requests themselves | ||
2149 | * are also refcounted, so the request will only be freed when the last | ||
2150 | * reference to it is dismissed, and the code in | ||
2151 | * i915_gem_request_free() will then decrement the refcount on the | ||
2152 | * context. | ||
2153 | */ | ||
2141 | struct intel_context *ctx; | 2154 | struct intel_context *ctx; |
2142 | 2155 | ||
2143 | /** Batch buffer related to this request if any */ | 2156 | /** Batch buffer related to this request if any */ |
@@ -2374,6 +2387,7 @@ struct drm_i915_cmd_table { | |||
2374 | (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) | 2387 | (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) |
2375 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ | 2388 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ |
2376 | ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ | 2389 | ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ |
2390 | (INTEL_DEVID(dev) & 0xf) == 0xb || \ | ||
2377 | (INTEL_DEVID(dev) & 0xf) == 0xe)) | 2391 | (INTEL_DEVID(dev) & 0xf) == 0xe)) |
2378 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ | 2392 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ |
2379 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) | 2393 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
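
The new i915_drv.h comments spell out the ownership rule: a request takes a reference on its context at creation (and is itself initialised with kref_init), and the context reference is dropped only when the last request reference goes away. A user-space analogue of that pattern, with plain counters standing in for the kernel's kref (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct ctx     { int refcount; };
struct request { int refcount; struct ctx *ctx; };

static struct ctx *ctx_get(struct ctx *c) { c->refcount++; return c; }

static void ctx_put(struct ctx *c)
{
        if (--c->refcount == 0) {
                printf("context freed\n");
                free(c);
        }
}

static struct request *request_create(struct ctx *c)
{
        struct request *req = calloc(1, sizeof(*req));

        req->refcount = 1;        /* kref_init() equivalent */
        req->ctx = ctx_get(c);    /* the request pins its context */
        return req;
}

static void request_put(struct request *req)
{
        if (--req->refcount == 0) {
                ctx_put(req->ctx);   /* free path drops the context reference */
                free(req);
        }
}

int main(void)
{
        struct ctx *c = calloc(1, sizeof(*c));
        struct request *req;

        c->refcount = 1;
        req = request_create(c);
        ctx_put(c);          /* creator's reference */
        request_put(req);    /* last request reference also frees the context */
        return 0;
}
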
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c26d36cc4b31..27ea6bdebce7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2659,8 +2659,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | |||
2659 | if (submit_req->ctx != ring->default_context) | 2659 | if (submit_req->ctx != ring->default_context) |
2660 | intel_lr_context_unpin(ring, submit_req->ctx); | 2660 | intel_lr_context_unpin(ring, submit_req->ctx); |
2661 | 2661 | ||
2662 | i915_gem_context_unreference(submit_req->ctx); | 2662 | i915_gem_request_unreference(submit_req); |
2663 | kfree(submit_req); | ||
2664 | } | 2663 | } |
2665 | 2664 | ||
2666 | /* | 2665 | /* |
@@ -2738,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) | |||
2738 | 2737 | ||
2739 | WARN_ON(i915_verify_lists(ring->dev)); | 2738 | WARN_ON(i915_verify_lists(ring->dev)); |
2740 | 2739 | ||
2741 | /* Move any buffers on the active list that are no longer referenced | 2740 | /* Retire requests first as we use it above for the early return. |
2742 | * by the ringbuffer to the flushing/inactive lists as appropriate, | 2741 | * If we retire requests last, we may use a later seqno and so clear |
2743 | * before we free the context associated with the requests. | 2742 | * the requests lists without clearing the active list, leading to |
2743 | * confusion. | ||
2744 | */ | 2744 | */ |
2745 | while (!list_empty(&ring->active_list)) { | ||
2746 | struct drm_i915_gem_object *obj; | ||
2747 | |||
2748 | obj = list_first_entry(&ring->active_list, | ||
2749 | struct drm_i915_gem_object, | ||
2750 | ring_list); | ||
2751 | |||
2752 | if (!i915_gem_request_completed(obj->last_read_req, true)) | ||
2753 | break; | ||
2754 | |||
2755 | i915_gem_object_move_to_inactive(obj); | ||
2756 | } | ||
2757 | |||
2758 | |||
2759 | while (!list_empty(&ring->request_list)) { | 2745 | while (!list_empty(&ring->request_list)) { |
2760 | struct drm_i915_gem_request *request; | 2746 | struct drm_i915_gem_request *request; |
2761 | struct intel_ringbuffer *ringbuf; | 2747 | struct intel_ringbuffer *ringbuf; |
@@ -2790,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) | |||
2790 | i915_gem_free_request(request); | 2776 | i915_gem_free_request(request); |
2791 | } | 2777 | } |
2792 | 2778 | ||
2779 | /* Move any buffers on the active list that are no longer referenced | ||
2780 | * by the ringbuffer to the flushing/inactive lists as appropriate, | ||
2781 | * before we free the context associated with the requests. | ||
2782 | */ | ||
2783 | while (!list_empty(&ring->active_list)) { | ||
2784 | struct drm_i915_gem_object *obj; | ||
2785 | |||
2786 | obj = list_first_entry(&ring->active_list, | ||
2787 | struct drm_i915_gem_object, | ||
2788 | ring_list); | ||
2789 | |||
2790 | if (!i915_gem_request_completed(obj->last_read_req, true)) | ||
2791 | break; | ||
2792 | |||
2793 | i915_gem_object_move_to_inactive(obj); | ||
2794 | } | ||
2795 | |||
2793 | if (unlikely(ring->trace_irq_req && | 2796 | if (unlikely(ring->trace_irq_req && |
2794 | i915_gem_request_completed(ring->trace_irq_req, true))) { | 2797 | i915_gem_request_completed(ring->trace_irq_req, true))) { |
2795 | ring->irq_put(ring); | 2798 | ring->irq_put(ring); |
@@ -2937,9 +2940,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2937 | req = obj->last_read_req; | 2940 | req = obj->last_read_req; |
2938 | 2941 | ||
2939 | /* Do this after OLR check to make sure we make forward progress polling | 2942 | /* Do this after OLR check to make sure we make forward progress polling |
2940 | * on this IOCTL with a timeout <=0 (like busy ioctl) | 2943 | * on this IOCTL with a timeout == 0 (like busy ioctl) |
2941 | */ | 2944 | */ |
2942 | if (args->timeout_ns <= 0) { | 2945 | if (args->timeout_ns == 0) { |
2943 | ret = -ETIME; | 2946 | ret = -ETIME; |
2944 | goto out; | 2947 | goto out; |
2945 | } | 2948 | } |
@@ -2949,7 +2952,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2949 | i915_gem_request_reference(req); | 2952 | i915_gem_request_reference(req); |
2950 | mutex_unlock(&dev->struct_mutex); | 2953 | mutex_unlock(&dev->struct_mutex); |
2951 | 2954 | ||
2952 | ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, | 2955 | ret = __i915_wait_request(req, reset_counter, true, |
2956 | args->timeout_ns > 0 ? &args->timeout_ns : NULL, | ||
2953 | file->driver_priv); | 2957 | file->driver_priv); |
2954 | mutex_lock(&dev->struct_mutex); | 2958 | mutex_lock(&dev->struct_mutex); |
2955 | i915_gem_request_unreference(req); | 2959 | i915_gem_request_unreference(req); |
@@ -4793,6 +4797,9 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4793 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | 4797 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
4794 | return -EIO; | 4798 | return -EIO; |
4795 | 4799 | ||
4800 | /* Double layer security blanket, see i915_gem_init() */ | ||
4801 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
4802 | |||
4796 | if (dev_priv->ellc_size) | 4803 | if (dev_priv->ellc_size) |
4797 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); | 4804 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); |
4798 | 4805 | ||
@@ -4825,7 +4832,7 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4825 | for_each_ring(ring, dev_priv, i) { | 4832 | for_each_ring(ring, dev_priv, i) { |
4826 | ret = ring->init_hw(ring); | 4833 | ret = ring->init_hw(ring); |
4827 | if (ret) | 4834 | if (ret) |
4828 | return ret; | 4835 | goto out; |
4829 | } | 4836 | } |
4830 | 4837 | ||
4831 | for (i = 0; i < NUM_L3_SLICES(dev); i++) | 4838 | for (i = 0; i < NUM_L3_SLICES(dev); i++) |
@@ -4842,9 +4849,11 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4842 | DRM_ERROR("Context enable failed %d\n", ret); | 4849 | DRM_ERROR("Context enable failed %d\n", ret); |
4843 | i915_gem_cleanup_ringbuffer(dev); | 4850 | i915_gem_cleanup_ringbuffer(dev); |
4844 | 4851 | ||
4845 | return ret; | 4852 | goto out; |
4846 | } | 4853 | } |
4847 | 4854 | ||
4855 | out: | ||
4856 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
4848 | return ret; | 4857 | return ret; |
4849 | } | 4858 | } |
4850 | 4859 | ||
@@ -4878,6 +4887,14 @@ int i915_gem_init(struct drm_device *dev) | |||
4878 | dev_priv->gt.stop_ring = intel_logical_ring_stop; | 4887 | dev_priv->gt.stop_ring = intel_logical_ring_stop; |
4879 | } | 4888 | } |
4880 | 4889 | ||
4890 | /* This is just a security blanket to placate dragons. | ||
4891 | * On some systems, we very sporadically observe that the first TLBs | ||
4892 | * used by the CS may be stale, despite us poking the TLB reset. If | ||
4893 | * we hold the forcewake during initialisation these problems | ||
4894 | * just magically go away. | ||
4895 | */ | ||
4896 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
4897 | |||
4881 | ret = i915_gem_init_userptr(dev); | 4898 | ret = i915_gem_init_userptr(dev); |
4882 | if (ret) | 4899 | if (ret) |
4883 | goto out_unlock; | 4900 | goto out_unlock; |
@@ -4904,6 +4921,7 @@ int i915_gem_init(struct drm_device *dev) | |||
4904 | } | 4921 | } |
4905 | 4922 | ||
4906 | out_unlock: | 4923 | out_unlock: |
4924 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
4907 | mutex_unlock(&dev->struct_mutex); | 4925 | mutex_unlock(&dev->struct_mutex); |
4908 | 4926 | ||
4909 | return ret; | 4927 | return ret; |
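
i915_gem_init()/i915_gem_init_hw() now hold forcewake across the whole init sequence, which means every early return inside that region has to become a goto to a single release point. A schematic of that acquire/goto-out/release shape (illustrative names, not the driver's functions):

#include <stdio.h>

static int do_step(int fail) { return fail ? -5 : 0; }

static int init_hw(int fail_mid)
{
        int ret;

        printf("forcewake get\n");          /* stands in for intel_uncore_forcewake_get() */

        ret = do_step(0);
        if (ret)
                goto out;

        ret = do_step(fail_mid);
        if (ret)
                goto out;                   /* a plain "return ret" here would leak the wakeref */

out:
        printf("forcewake put\n");          /* single release point on all paths */
        return ret;
}

int main(void)
{
        printf("ret=%d\n", init_hw(1));
        return 0;
}
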
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b773368fc62c..38a742532c4f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -1487,7 +1487,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1487 | goto err; | 1487 | goto err; |
1488 | } | 1488 | } |
1489 | 1489 | ||
1490 | if (i915_needs_cmd_parser(ring)) { | 1490 | if (i915_needs_cmd_parser(ring) && args->batch_len) { |
1491 | batch_obj = i915_gem_execbuffer_parse(ring, | 1491 | batch_obj = i915_gem_execbuffer_parse(ring, |
1492 | &shadow_exec_entry, | 1492 | &shadow_exec_entry, |
1493 | eb, | 1493 | eb, |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 746f77fb57a3..dccdc8aad2e2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -1145,7 +1145,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
1145 | 1145 | ||
1146 | ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); | 1146 | ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); |
1147 | 1147 | ||
1148 | DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", | 1148 | DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n", |
1149 | ppgtt->node.size >> 20, | 1149 | ppgtt->node.size >> 20, |
1150 | ppgtt->node.start / PAGE_SIZE); | 1150 | ppgtt->node.start / PAGE_SIZE); |
1151 | 1151 | ||
@@ -1713,8 +1713,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) | |||
1713 | 1713 | ||
1714 | static void i915_gtt_color_adjust(struct drm_mm_node *node, | 1714 | static void i915_gtt_color_adjust(struct drm_mm_node *node, |
1715 | unsigned long color, | 1715 | unsigned long color, |
1716 | unsigned long *start, | 1716 | u64 *start, |
1717 | unsigned long *end) | 1717 | u64 *end) |
1718 | { | 1718 | { |
1719 | if (node->color != color) | 1719 | if (node->color != color) |
1720 | *start += 4096; | 1720 | *start += 4096; |
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index a2045848bd1a..9c6f93ec886b 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -485,10 +485,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
485 | stolen_offset, gtt_offset, size); | 485 | stolen_offset, gtt_offset, size); |
486 | 486 | ||
487 | /* KISS and expect everything to be page-aligned */ | 487 | /* KISS and expect everything to be page-aligned */ |
488 | BUG_ON(stolen_offset & 4095); | 488 | if (WARN_ON(size == 0) || WARN_ON(size & 4095) || |
489 | BUG_ON(size & 4095); | 489 | WARN_ON(stolen_offset & 4095)) |
490 | |||
491 | if (WARN_ON(size == 0)) | ||
492 | return NULL; | 490 | return NULL; |
493 | 491 | ||
494 | stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); | 492 | stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 7a24bd1a51f6..6377b22269ad 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
335 | return -EINVAL; | 335 | return -EINVAL; |
336 | } | 336 | } |
337 | 337 | ||
338 | mutex_lock(&dev->struct_mutex); | ||
338 | if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) { | 339 | if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) { |
339 | drm_gem_object_unreference_unlocked(&obj->base); | 340 | ret = -EBUSY; |
340 | return -EBUSY; | 341 | goto err; |
341 | } | 342 | } |
342 | 343 | ||
343 | if (args->tiling_mode == I915_TILING_NONE) { | 344 | if (args->tiling_mode == I915_TILING_NONE) { |
@@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
369 | } | 370 | } |
370 | } | 371 | } |
371 | 372 | ||
372 | mutex_lock(&dev->struct_mutex); | ||
373 | if (args->tiling_mode != obj->tiling_mode || | 373 | if (args->tiling_mode != obj->tiling_mode || |
374 | args->stride != obj->stride) { | 374 | args->stride != obj->stride) { |
375 | /* We need to rebind the object if its current allocation | 375 | /* We need to rebind the object if its current allocation |
@@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
424 | obj->bit_17 = NULL; | 424 | obj->bit_17 = NULL; |
425 | } | 425 | } |
426 | 426 | ||
427 | err: | ||
427 | drm_gem_object_unreference(&obj->base); | 428 | drm_gem_object_unreference(&obj->base); |
428 | mutex_unlock(&dev->struct_mutex); | 429 | mutex_unlock(&dev->struct_mutex); |
429 | 430 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4145d95902f5..ede5bbbd8a08 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -1892,6 +1892,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |||
1892 | u32 iir, gt_iir, pm_iir; | 1892 | u32 iir, gt_iir, pm_iir; |
1893 | irqreturn_t ret = IRQ_NONE; | 1893 | irqreturn_t ret = IRQ_NONE; |
1894 | 1894 | ||
1895 | if (!intel_irqs_enabled(dev_priv)) | ||
1896 | return IRQ_NONE; | ||
1897 | |||
1895 | while (true) { | 1898 | while (true) { |
1896 | /* Find, clear, then process each source of interrupt */ | 1899 | /* Find, clear, then process each source of interrupt */ |
1897 | 1900 | ||
@@ -1936,6 +1939,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) | |||
1936 | u32 master_ctl, iir; | 1939 | u32 master_ctl, iir; |
1937 | irqreturn_t ret = IRQ_NONE; | 1940 | irqreturn_t ret = IRQ_NONE; |
1938 | 1941 | ||
1942 | if (!intel_irqs_enabled(dev_priv)) | ||
1943 | return IRQ_NONE; | ||
1944 | |||
1939 | for (;;) { | 1945 | for (;;) { |
1940 | master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; | 1946 | master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; |
1941 | iir = I915_READ(VLV_IIR); | 1947 | iir = I915_READ(VLV_IIR); |
@@ -2208,6 +2214,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |||
2208 | u32 de_iir, gt_iir, de_ier, sde_ier = 0; | 2214 | u32 de_iir, gt_iir, de_ier, sde_ier = 0; |
2209 | irqreturn_t ret = IRQ_NONE; | 2215 | irqreturn_t ret = IRQ_NONE; |
2210 | 2216 | ||
2217 | if (!intel_irqs_enabled(dev_priv)) | ||
2218 | return IRQ_NONE; | ||
2219 | |||
2211 | /* We get interrupts on unclaimed registers, so check for this before we | 2220 | /* We get interrupts on unclaimed registers, so check for this before we |
2212 | * do any I915_{READ,WRITE}. */ | 2221 | * do any I915_{READ,WRITE}. */ |
2213 | intel_uncore_check_errors(dev); | 2222 | intel_uncore_check_errors(dev); |
@@ -2279,6 +2288,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2279 | enum pipe pipe; | 2288 | enum pipe pipe; |
2280 | u32 aux_mask = GEN8_AUX_CHANNEL_A; | 2289 | u32 aux_mask = GEN8_AUX_CHANNEL_A; |
2281 | 2290 | ||
2291 | if (!intel_irqs_enabled(dev_priv)) | ||
2292 | return IRQ_NONE; | ||
2293 | |||
2282 | if (IS_GEN9(dev)) | 2294 | if (IS_GEN9(dev)) |
2283 | aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | | 2295 | aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | |
2284 | GEN9_AUX_CHANNEL_D; | 2296 | GEN9_AUX_CHANNEL_D; |
@@ -3771,6 +3783,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) | |||
3771 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | 3783 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
3772 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | 3784 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
3773 | 3785 | ||
3786 | if (!intel_irqs_enabled(dev_priv)) | ||
3787 | return IRQ_NONE; | ||
3788 | |||
3774 | iir = I915_READ16(IIR); | 3789 | iir = I915_READ16(IIR); |
3775 | if (iir == 0) | 3790 | if (iir == 0) |
3776 | return IRQ_NONE; | 3791 | return IRQ_NONE; |
@@ -3951,6 +3966,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) | |||
3951 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | 3966 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
3952 | int pipe, ret = IRQ_NONE; | 3967 | int pipe, ret = IRQ_NONE; |
3953 | 3968 | ||
3969 | if (!intel_irqs_enabled(dev_priv)) | ||
3970 | return IRQ_NONE; | ||
3971 | |||
3954 | iir = I915_READ(IIR); | 3972 | iir = I915_READ(IIR); |
3955 | do { | 3973 | do { |
3956 | bool irq_received = (iir & ~flip_mask) != 0; | 3974 | bool irq_received = (iir & ~flip_mask) != 0; |
@@ -4171,6 +4189,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) | |||
4171 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | 4189 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
4172 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | 4190 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
4173 | 4191 | ||
4192 | if (!intel_irqs_enabled(dev_priv)) | ||
4193 | return IRQ_NONE; | ||
4194 | |||
4174 | iir = I915_READ(IIR); | 4195 | iir = I915_READ(IIR); |
4175 | 4196 | ||
4176 | for (;;) { | 4197 | for (;;) { |
@@ -4520,6 +4541,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) | |||
4520 | { | 4541 | { |
4521 | dev_priv->dev->driver->irq_uninstall(dev_priv->dev); | 4542 | dev_priv->dev->driver->irq_uninstall(dev_priv->dev); |
4522 | dev_priv->pm.irqs_enabled = false; | 4543 | dev_priv->pm.irqs_enabled = false; |
4544 | synchronize_irq(dev_priv->dev->irq); | ||
4523 | } | 4545 | } |
4524 | 4546 | ||
4525 | /** | 4547 | /** |
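
Every interrupt handler in i915_irq.c gains the same guard: return IRQ_NONE before touching any register if driver interrupts have already been disabled, so a shared or late IRQ cannot race suspend/teardown (the matching synchronize_irq() is added where interrupts are disabled). A toy model of that guard (standalone sketch, not kernel code):

#include <stdio.h>
#include <stdbool.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static bool irqs_enabled;   /* stands in for dev_priv->pm.irqs_enabled */

static enum irqreturn irq_handler(void)
{
        if (!irqs_enabled)
                return IRQ_NONE;   /* the early return added in each handler */

        /* ... read IIR, process interrupt sources ... */
        return IRQ_HANDLED;
}

int main(void)
{
        printf("%d\n", irq_handler());   /* 0: ignored while disabled */
        irqs_enabled = true;
        printf("%d\n", irq_handler());   /* 1: handled */
        return 0;
}
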
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3d220a67f865..f75173c20f47 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <drm/i915_drm.h> | 37 | #include <drm/i915_drm.h> |
38 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
39 | #include "i915_trace.h" | 39 | #include "i915_trace.h" |
40 | #include <drm/drm_atomic.h> | ||
40 | #include <drm/drm_atomic_helper.h> | 41 | #include <drm/drm_atomic_helper.h> |
41 | #include <drm/drm_dp_helper.h> | 42 | #include <drm/drm_dp_helper.h> |
42 | #include <drm/drm_crtc_helper.h> | 43 | #include <drm/drm_crtc_helper.h> |
@@ -2371,13 +2372,19 @@ intel_alloc_plane_obj(struct intel_crtc *crtc, | |||
2371 | struct drm_device *dev = crtc->base.dev; | 2372 | struct drm_device *dev = crtc->base.dev; |
2372 | struct drm_i915_gem_object *obj = NULL; | 2373 | struct drm_i915_gem_object *obj = NULL; |
2373 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; | 2374 | struct drm_mode_fb_cmd2 mode_cmd = { 0 }; |
2374 | u32 base = plane_config->base; | 2375 | u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); |
2376 | u32 size_aligned = round_up(plane_config->base + plane_config->size, | ||
2377 | PAGE_SIZE); | ||
2378 | |||
2379 | size_aligned -= base_aligned; | ||
2375 | 2380 | ||
2376 | if (plane_config->size == 0) | 2381 | if (plane_config->size == 0) |
2377 | return false; | 2382 | return false; |
2378 | 2383 | ||
2379 | obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base, | 2384 | obj = i915_gem_object_create_stolen_for_preallocated(dev, |
2380 | plane_config->size); | 2385 | base_aligned, |
2386 | base_aligned, | ||
2387 | size_aligned); | ||
2381 | if (!obj) | 2388 | if (!obj) |
2382 | return false; | 2389 | return false; |
2383 | 2390 | ||
@@ -2410,6 +2417,14 @@ out_unref_obj: | |||
2410 | return false; | 2417 | return false; |
2411 | } | 2418 | } |
2412 | 2419 | ||
2420 | /* Update plane->state->fb to match plane->fb after driver-internal updates */ | ||
2421 | static void | ||
2422 | update_state_fb(struct drm_plane *plane) | ||
2423 | { | ||
2424 | if (plane->fb != plane->state->fb) | ||
2425 | drm_atomic_set_fb_for_plane(plane->state, plane->fb); | ||
2426 | } | ||
2427 | |||
2413 | static void | 2428 | static void |
2414 | intel_find_plane_obj(struct intel_crtc *intel_crtc, | 2429 | intel_find_plane_obj(struct intel_crtc *intel_crtc, |
2415 | struct intel_initial_plane_config *plane_config) | 2430 | struct intel_initial_plane_config *plane_config) |
@@ -2423,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, | |||
2423 | if (!intel_crtc->base.primary->fb) | 2438 | if (!intel_crtc->base.primary->fb) |
2424 | return; | 2439 | return; |
2425 | 2440 | ||
2426 | if (intel_alloc_plane_obj(intel_crtc, plane_config)) | 2441 | if (intel_alloc_plane_obj(intel_crtc, plane_config)) { |
2442 | struct drm_plane *primary = intel_crtc->base.primary; | ||
2443 | |||
2444 | primary->state->crtc = &intel_crtc->base; | ||
2445 | primary->crtc = &intel_crtc->base; | ||
2446 | update_state_fb(primary); | ||
2447 | |||
2427 | return; | 2448 | return; |
2449 | } | ||
2428 | 2450 | ||
2429 | kfree(intel_crtc->base.primary->fb); | 2451 | kfree(intel_crtc->base.primary->fb); |
2430 | intel_crtc->base.primary->fb = NULL; | 2452 | intel_crtc->base.primary->fb = NULL; |
@@ -2447,15 +2469,21 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, | |||
2447 | continue; | 2469 | continue; |
2448 | 2470 | ||
2449 | if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { | 2471 | if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { |
2472 | struct drm_plane *primary = intel_crtc->base.primary; | ||
2473 | |||
2450 | if (obj->tiling_mode != I915_TILING_NONE) | 2474 | if (obj->tiling_mode != I915_TILING_NONE) |
2451 | dev_priv->preserve_bios_swizzle = true; | 2475 | dev_priv->preserve_bios_swizzle = true; |
2452 | 2476 | ||
2453 | drm_framebuffer_reference(c->primary->fb); | 2477 | drm_framebuffer_reference(c->primary->fb); |
2454 | intel_crtc->base.primary->fb = c->primary->fb; | 2478 | primary->fb = c->primary->fb; |
2479 | primary->state->crtc = &intel_crtc->base; | ||
2480 | primary->crtc = &intel_crtc->base; | ||
2455 | obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); | 2481 | obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); |
2456 | break; | 2482 | break; |
2457 | } | 2483 | } |
2458 | } | 2484 | } |
2485 | |||
2486 | update_state_fb(intel_crtc->base.primary); | ||
2459 | } | 2487 | } |
2460 | 2488 | ||
2461 | static void i9xx_update_primary_plane(struct drm_crtc *crtc, | 2489 | static void i9xx_update_primary_plane(struct drm_crtc *crtc, |
@@ -2725,10 +2753,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, | |||
2725 | case DRM_FORMAT_XRGB8888: | 2753 | case DRM_FORMAT_XRGB8888: |
2726 | plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; | 2754 | plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; |
2727 | break; | 2755 | break; |
2756 | case DRM_FORMAT_ARGB8888: | ||
2757 | plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; | ||
2758 | plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY; | ||
2759 | break; | ||
2728 | case DRM_FORMAT_XBGR8888: | 2760 | case DRM_FORMAT_XBGR8888: |
2729 | plane_ctl |= PLANE_CTL_ORDER_RGBX; | 2761 | plane_ctl |= PLANE_CTL_ORDER_RGBX; |
2730 | plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; | 2762 | plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; |
2731 | break; | 2763 | break; |
2764 | case DRM_FORMAT_ABGR8888: | ||
2765 | plane_ctl |= PLANE_CTL_ORDER_RGBX; | ||
2766 | plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; | ||
2767 | plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY; | ||
2768 | break; | ||
2732 | case DRM_FORMAT_XRGB2101010: | 2769 | case DRM_FORMAT_XRGB2101010: |
2733 | plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010; | 2770 | plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010; |
2734 | break; | 2771 | break; |
@@ -6587,6 +6624,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
6587 | struct drm_framebuffer *fb; | 6624 | struct drm_framebuffer *fb; |
6588 | struct intel_framebuffer *intel_fb; | 6625 | struct intel_framebuffer *intel_fb; |
6589 | 6626 | ||
6627 | val = I915_READ(DSPCNTR(plane)); | ||
6628 | if (!(val & DISPLAY_PLANE_ENABLE)) | ||
6629 | return; | ||
6630 | |||
6590 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 6631 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
6591 | if (!intel_fb) { | 6632 | if (!intel_fb) { |
6592 | DRM_DEBUG_KMS("failed to alloc fb\n"); | 6633 | DRM_DEBUG_KMS("failed to alloc fb\n"); |
@@ -6595,8 +6636,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
6595 | 6636 | ||
6596 | fb = &intel_fb->base; | 6637 | fb = &intel_fb->base; |
6597 | 6638 | ||
6598 | val = I915_READ(DSPCNTR(plane)); | ||
6599 | |||
6600 | if (INTEL_INFO(dev)->gen >= 4) | 6639 | if (INTEL_INFO(dev)->gen >= 4) |
6601 | if (val & DISPPLANE_TILED) | 6640 | if (val & DISPPLANE_TILED) |
6602 | plane_config->tiling = I915_TILING_X; | 6641 | plane_config->tiling = I915_TILING_X; |
@@ -6627,7 +6666,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
6627 | aligned_height = intel_fb_align_height(dev, fb->height, | 6666 | aligned_height = intel_fb_align_height(dev, fb->height, |
6628 | plane_config->tiling); | 6667 | plane_config->tiling); |
6629 | 6668 | ||
6630 | plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height); | 6669 | plane_config->size = fb->pitches[0] * aligned_height; |
6631 | 6670 | ||
6632 | DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", | 6671 | DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", |
6633 | pipe_name(pipe), plane, fb->width, fb->height, | 6672 | pipe_name(pipe), plane, fb->width, fb->height, |
@@ -7628,6 +7667,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7628 | fb = &intel_fb->base; | 7667 | fb = &intel_fb->base; |
7629 | 7668 | ||
7630 | val = I915_READ(PLANE_CTL(pipe, 0)); | 7669 | val = I915_READ(PLANE_CTL(pipe, 0)); |
7670 | if (!(val & PLANE_CTL_ENABLE)) | ||
7671 | goto error; | ||
7672 | |||
7631 | if (val & PLANE_CTL_TILED_MASK) | 7673 | if (val & PLANE_CTL_TILED_MASK) |
7632 | plane_config->tiling = I915_TILING_X; | 7674 | plane_config->tiling = I915_TILING_X; |
7633 | 7675 | ||
@@ -7664,7 +7706,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7664 | aligned_height = intel_fb_align_height(dev, fb->height, | 7706 | aligned_height = intel_fb_align_height(dev, fb->height, |
7665 | plane_config->tiling); | 7707 | plane_config->tiling); |
7666 | 7708 | ||
7667 | plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE); | 7709 | plane_config->size = fb->pitches[0] * aligned_height; |
7668 | 7710 | ||
7669 | DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", | 7711 | DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", |
7670 | pipe_name(pipe), fb->width, fb->height, | 7712 | pipe_name(pipe), fb->width, fb->height, |
@@ -7715,6 +7757,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7715 | struct drm_framebuffer *fb; | 7757 | struct drm_framebuffer *fb; |
7716 | struct intel_framebuffer *intel_fb; | 7758 | struct intel_framebuffer *intel_fb; |
7717 | 7759 | ||
7760 | val = I915_READ(DSPCNTR(pipe)); | ||
7761 | if (!(val & DISPLAY_PLANE_ENABLE)) | ||
7762 | return; | ||
7763 | |||
7718 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 7764 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
7719 | if (!intel_fb) { | 7765 | if (!intel_fb) { |
7720 | DRM_DEBUG_KMS("failed to alloc fb\n"); | 7766 | DRM_DEBUG_KMS("failed to alloc fb\n"); |
@@ -7723,8 +7769,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7723 | 7769 | ||
7724 | fb = &intel_fb->base; | 7770 | fb = &intel_fb->base; |
7725 | 7771 | ||
7726 | val = I915_READ(DSPCNTR(pipe)); | ||
7727 | |||
7728 | if (INTEL_INFO(dev)->gen >= 4) | 7772 | if (INTEL_INFO(dev)->gen >= 4) |
7729 | if (val & DISPPLANE_TILED) | 7773 | if (val & DISPPLANE_TILED) |
7730 | plane_config->tiling = I915_TILING_X; | 7774 | plane_config->tiling = I915_TILING_X; |
@@ -7755,7 +7799,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7755 | aligned_height = intel_fb_align_height(dev, fb->height, | 7799 | aligned_height = intel_fb_align_height(dev, fb->height, |
7756 | plane_config->tiling); | 7800 | plane_config->tiling); |
7757 | 7801 | ||
7758 | plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height); | 7802 | plane_config->size = fb->pitches[0] * aligned_height; |
7759 | 7803 | ||
7760 | DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", | 7804 | DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", |
7761 | pipe_name(pipe), fb->width, fb->height, | 7805 | pipe_name(pipe), fb->width, fb->height, |
@@ -8698,6 +8742,7 @@ retry: | |||
8698 | old->release_fb->funcs->destroy(old->release_fb); | 8742 | old->release_fb->funcs->destroy(old->release_fb); |
8699 | goto fail; | 8743 | goto fail; |
8700 | } | 8744 | } |
8745 | crtc->primary->crtc = crtc; | ||
8701 | 8746 | ||
8702 | /* let the connector get through one full cycle before testing */ | 8747 | /* let the connector get through one full cycle before testing */ |
8703 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 8748 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
@@ -9700,7 +9745,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) | |||
9700 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 9745 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
9701 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9746 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
9702 | 9747 | ||
9703 | WARN_ON(!in_irq()); | 9748 | WARN_ON(!in_interrupt()); |
9704 | 9749 | ||
9705 | if (crtc == NULL) | 9750 | if (crtc == NULL) |
9706 | return; | 9751 | return; |
@@ -9800,6 +9845,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9800 | drm_gem_object_reference(&obj->base); | 9845 | drm_gem_object_reference(&obj->base); |
9801 | 9846 | ||
9802 | crtc->primary->fb = fb; | 9847 | crtc->primary->fb = fb; |
9848 | update_state_fb(crtc->primary); | ||
9803 | 9849 | ||
9804 | work->pending_flip_obj = obj; | 9850 | work->pending_flip_obj = obj; |
9805 | 9851 | ||
@@ -9868,6 +9914,7 @@ cleanup_unpin: | |||
9868 | cleanup_pending: | 9914 | cleanup_pending: |
9869 | atomic_dec(&intel_crtc->unpin_work_count); | 9915 | atomic_dec(&intel_crtc->unpin_work_count); |
9870 | crtc->primary->fb = old_fb; | 9916 | crtc->primary->fb = old_fb; |
9917 | update_state_fb(crtc->primary); | ||
9871 | drm_gem_object_unreference(&work->old_fb_obj->base); | 9918 | drm_gem_object_unreference(&work->old_fb_obj->base); |
9872 | drm_gem_object_unreference(&obj->base); | 9919 | drm_gem_object_unreference(&obj->base); |
9873 | mutex_unlock(&dev->struct_mutex); | 9920 | mutex_unlock(&dev->struct_mutex); |
@@ -12182,9 +12229,6 @@ intel_check_cursor_plane(struct drm_plane *plane, | |||
12182 | return -ENOMEM; | 12229 | return -ENOMEM; |
12183 | } | 12230 | } |
12184 | 12231 | ||
12185 | if (fb == crtc->cursor->fb) | ||
12186 | return 0; | ||
12187 | |||
12188 | /* we only need to pin inside GTT if cursor is non-phy */ | 12232 | /* we only need to pin inside GTT if cursor is non-phy */ |
12189 | mutex_lock(&dev->struct_mutex); | 12233 | mutex_lock(&dev->struct_mutex); |
12190 | if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) { | 12234 | if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) { |
@@ -13096,6 +13140,9 @@ static struct intel_quirk intel_quirks[] = { | |||
13096 | 13140 | ||
13097 | /* HP Chromebook 14 (Celeron 2955U) */ | 13141 | /* HP Chromebook 14 (Celeron 2955U) */ |
13098 | { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, | 13142 | { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, |
13143 | |||
13144 | /* Dell Chromebook 11 */ | ||
13145 | { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, | ||
13099 | }; | 13146 | }; |
13100 | 13147 | ||
13101 | static void intel_init_quirks(struct drm_device *dev) | 13148 | static void intel_init_quirks(struct drm_device *dev) |
@@ -13702,6 +13749,7 @@ void intel_modeset_gem_init(struct drm_device *dev) | |||
13702 | to_intel_crtc(c)->pipe); | 13749 | to_intel_crtc(c)->pipe); |
13703 | drm_framebuffer_unreference(c->primary->fb); | 13750 | drm_framebuffer_unreference(c->primary->fb); |
13704 | c->primary->fb = NULL; | 13751 | c->primary->fb = NULL; |
13752 | update_state_fb(c->primary); | ||
13705 | } | 13753 | } |
13706 | } | 13754 | } |
13707 | mutex_unlock(&dev->struct_mutex); | 13755 | mutex_unlock(&dev->struct_mutex); |
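
intel_alloc_plane_obj() now rounds the BIOS framebuffer base down and its end up to page boundaries before carving out the stolen-memory object, since plane_config->size is no longer page-aligned by the *_get_initial_plane_config() helpers. The arithmetic in isolation (power-of-two rounding macros as in the kernel; the input values are made up):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE        4096u
#define round_down(x, a) ((x) & ~((a) - 1))
#define round_up(x, a)   round_down((x) + (a) - 1, (a))

int main(void)
{
        uint32_t base = 0x12345;                 /* unaligned fb base reported by the BIOS */
        uint32_t size = 3 * 1024 * 1024 + 7;     /* pitch * aligned_height, not page aligned */

        uint32_t base_aligned = round_down(base, PAGE_SIZE);
        uint32_t size_aligned = round_up(base + size, PAGE_SIZE) - base_aligned;

        printf("base=0x%x size=0x%x\n", base_aligned, size_aligned);
        return 0;
}
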
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 04e248dd2259..54daa66c6970 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c | |||
@@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, | |||
282 | return ret; | 282 | return ret; |
283 | } | 283 | } |
284 | 284 | ||
285 | static bool | ||
286 | __cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv, | ||
287 | enum pipe pipe) | ||
288 | { | ||
289 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
290 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
291 | |||
292 | return !intel_crtc->cpu_fifo_underrun_disabled; | ||
293 | } | ||
294 | |||
295 | /** | 285 | /** |
296 | * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state | 286 | * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state |
297 | * @dev_priv: i915 device instance | 287 | * @dev_priv: i915 device instance |
@@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, | |||
352 | void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, | 342 | void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, |
353 | enum pipe pipe) | 343 | enum pipe pipe) |
354 | { | 344 | { |
345 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
346 | |||
347 | /* We may be called too early in init, thanks BIOS! */ | ||
348 | if (crtc == NULL) | ||
349 | return; | ||
350 | |||
355 | /* GMCH can't disable fifo underruns, filter them. */ | 351 | /* GMCH can't disable fifo underruns, filter them. */ |
356 | if (HAS_GMCH_DISPLAY(dev_priv->dev) && | 352 | if (HAS_GMCH_DISPLAY(dev_priv->dev) && |
357 | !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe)) | 353 | to_intel_crtc(crtc)->cpu_fifo_underrun_disabled) |
358 | return; | 354 | return; |
359 | 355 | ||
360 | if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) | 356 | if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 0f358c5999ec..e8d3da9f3373 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -503,18 +503,19 @@ static int execlists_context_queue(struct intel_engine_cs *ring, | |||
503 | * If there isn't a request associated with this submission, | 503 | * If there isn't a request associated with this submission, |
504 | * create one as a temporary holder. | 504 | * create one as a temporary holder. |
505 | */ | 505 | */ |
506 | WARN(1, "execlist context submission without request"); | ||
507 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 506 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
508 | if (request == NULL) | 507 | if (request == NULL) |
509 | return -ENOMEM; | 508 | return -ENOMEM; |
510 | request->ring = ring; | 509 | request->ring = ring; |
511 | request->ctx = to; | 510 | request->ctx = to; |
511 | kref_init(&request->ref); | ||
512 | request->uniq = dev_priv->request_uniq++; | ||
513 | i915_gem_context_reference(request->ctx); | ||
512 | } else { | 514 | } else { |
515 | i915_gem_request_reference(request); | ||
513 | WARN_ON(to != request->ctx); | 516 | WARN_ON(to != request->ctx); |
514 | } | 517 | } |
515 | request->tail = tail; | 518 | request->tail = tail; |
516 | i915_gem_request_reference(request); | ||
517 | i915_gem_context_reference(request->ctx); | ||
518 | 519 | ||
519 | intel_runtime_pm_get(dev_priv); | 520 | intel_runtime_pm_get(dev_priv); |
520 | 521 | ||
@@ -731,7 +732,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring) | |||
731 | if (ctx_obj && (ctx != ring->default_context)) | 732 | if (ctx_obj && (ctx != ring->default_context)) |
732 | intel_lr_context_unpin(ring, ctx); | 733 | intel_lr_context_unpin(ring, ctx); |
733 | intel_runtime_pm_put(dev_priv); | 734 | intel_runtime_pm_put(dev_priv); |
734 | i915_gem_context_unreference(ctx); | ||
735 | list_del(&req->execlist_link); | 735 | list_del(&req->execlist_link); |
736 | i915_gem_request_unreference(req); | 736 | i915_gem_request_unreference(req); |
737 | } | 737 | } |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0a52c44ad03d..9c5451c97942 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -1322,7 +1322,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | |||
1322 | drm_modeset_lock_all(dev); | 1322 | drm_modeset_lock_all(dev); |
1323 | 1323 | ||
1324 | plane = drm_plane_find(dev, set->plane_id); | 1324 | plane = drm_plane_find(dev, set->plane_id); |
1325 | if (!plane) { | 1325 | if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { |
1326 | ret = -ENOENT; | 1326 | ret = -ENOENT; |
1327 | goto out_unlock; | 1327 | goto out_unlock; |
1328 | } | 1328 | } |
@@ -1349,7 +1349,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, | |||
1349 | drm_modeset_lock_all(dev); | 1349 | drm_modeset_lock_all(dev); |
1350 | 1350 | ||
1351 | plane = drm_plane_find(dev, get->plane_id); | 1351 | plane = drm_plane_find(dev, get->plane_id); |
1352 | if (!plane) { | 1352 | if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { |
1353 | ret = -ENOENT; | 1353 | ret = -ENOENT; |
1354 | goto out_unlock; | 1354 | goto out_unlock; |
1355 | } | 1355 | } |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c47a3baa53d5..4e8fb891d4ea 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) | |||
1048 | 1048 | ||
1049 | /* We need to init first for ECOBUS access and then | 1049 | /* We need to init first for ECOBUS access and then |
1050 | * determine later if we want to reinit, in case MT access is | 1050 | * determine later if we want to reinit, in case MT access is |
1051 | * not working | 1051 | * not working. In this stage we don't know which flavour this |
1052 | * ivb is, so it is better to reset also the gen6 fw registers | ||
1053 | * before the ecobus check. | ||
1052 | */ | 1054 | */ |
1055 | |||
1056 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); | ||
1057 | __raw_posting_read(dev_priv, ECOBUS); | ||
1058 | |||
1053 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, | 1059 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
1054 | FORCEWAKE_MT, FORCEWAKE_MT_ACK); | 1060 | FORCEWAKE_MT, FORCEWAKE_MT_ACK); |
1055 | 1061 | ||
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 121d30ca2d44..87fe8ed92ebe 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c | |||
@@ -70,7 +70,9 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = { | |||
70 | 118800000, { 0x091c, 0x091c, 0x06dc }, | 70 | 118800000, { 0x091c, 0x091c, 0x06dc }, |
71 | }, { | 71 | }, { |
72 | 216000000, { 0x06dc, 0x0b5c, 0x091c }, | 72 | 216000000, { 0x06dc, 0x0b5c, 0x091c }, |
73 | } | 73 | }, { |
74 | ~0UL, { 0x0000, 0x0000, 0x0000 }, | ||
75 | }, | ||
74 | }; | 76 | }; |
75 | 77 | ||
76 | static const struct dw_hdmi_sym_term imx_sym_term[] = { | 78 | static const struct dw_hdmi_sym_term imx_sym_term[] = { |
@@ -136,11 +138,34 @@ static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { | |||
136 | .destroy = drm_encoder_cleanup, | 138 | .destroy = drm_encoder_cleanup, |
137 | }; | 139 | }; |
138 | 140 | ||
141 | static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con, | ||
142 | struct drm_display_mode *mode) | ||
143 | { | ||
144 | if (mode->clock < 13500) | ||
145 | return MODE_CLOCK_LOW; | ||
146 | if (mode->clock > 266000) | ||
147 | return MODE_CLOCK_HIGH; | ||
148 | |||
149 | return MODE_OK; | ||
150 | } | ||
151 | |||
152 | static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con, | ||
153 | struct drm_display_mode *mode) | ||
154 | { | ||
155 | if (mode->clock < 13500) | ||
156 | return MODE_CLOCK_LOW; | ||
157 | if (mode->clock > 270000) | ||
158 | return MODE_CLOCK_HIGH; | ||
159 | |||
160 | return MODE_OK; | ||
161 | } | ||
162 | |||
139 | static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { | 163 | static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { |
140 | .mpll_cfg = imx_mpll_cfg, | 164 | .mpll_cfg = imx_mpll_cfg, |
141 | .cur_ctr = imx_cur_ctr, | 165 | .cur_ctr = imx_cur_ctr, |
142 | .sym_term = imx_sym_term, | 166 | .sym_term = imx_sym_term, |
143 | .dev_type = IMX6Q_HDMI, | 167 | .dev_type = IMX6Q_HDMI, |
168 | .mode_valid = imx6q_hdmi_mode_valid, | ||
144 | }; | 169 | }; |
145 | 170 | ||
146 | static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { | 171 | static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { |
@@ -148,6 +173,7 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { | |||
148 | .cur_ctr = imx_cur_ctr, | 173 | .cur_ctr = imx_cur_ctr, |
149 | .sym_term = imx_sym_term, | 174 | .sym_term = imx_sym_term, |
150 | .dev_type = IMX6DL_HDMI, | 175 | .dev_type = IMX6DL_HDMI, |
176 | .mode_valid = imx6dl_hdmi_mode_valid, | ||
151 | }; | 177 | }; |
152 | 178 | ||
153 | static const struct of_device_id dw_hdmi_imx_dt_ids[] = { | 179 | static const struct of_device_id dw_hdmi_imx_dt_ids[] = { |
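The two mode_valid hooks added above bound the acceptable pixel clock; drm_display_mode.clock is in kHz, so the limits are 13.5 MHz at the low end and 266 MHz (i.MX6Q) or 270 MHz (i.MX6DL) at the top. A standalone illustration of the same check with worked values:

    #include <stdio.h>

    enum mode_status { MODE_OK, MODE_CLOCK_LOW, MODE_CLOCK_HIGH };

    /* Same range check as imx6q_hdmi_mode_valid(), on a bare kHz value. */
    static enum mode_status imx6q_clock_valid(int clock_khz)
    {
        if (clock_khz < 13500)      /* below 13.5 MHz */
            return MODE_CLOCK_LOW;
        if (clock_khz > 266000)     /* above 266 MHz */
            return MODE_CLOCK_HIGH;
        return MODE_OK;
    }

    int main(void)
    {
        printf("%d\n", imx6q_clock_valid(148500)); /* 1080p60 -> MODE_OK */
        printf("%d\n", imx6q_clock_valid(297000)); /* 297 MHz -> MODE_CLOCK_HIGH */
        return 0;
    }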
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 1b86aac0b341..2d6dc94e1e64 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c | |||
@@ -163,22 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder) | |||
163 | { | 163 | { |
164 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); | 164 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
165 | struct imx_ldb *ldb = imx_ldb_ch->ldb; | 165 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
166 | struct drm_display_mode *mode = &encoder->crtc->hwmode; | ||
167 | u32 pixel_fmt; | 166 | u32 pixel_fmt; |
168 | unsigned long serial_clk; | ||
169 | unsigned long di_clk = mode->clock * 1000; | ||
170 | int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); | ||
171 | |||
172 | if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { | ||
173 | /* dual channel LVDS mode */ | ||
174 | serial_clk = 3500UL * mode->clock; | ||
175 | imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); | ||
176 | imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); | ||
177 | } else { | ||
178 | serial_clk = 7000UL * mode->clock; | ||
179 | imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, | ||
180 | di_clk); | ||
181 | } | ||
182 | 167 | ||
183 | switch (imx_ldb_ch->chno) { | 168 | switch (imx_ldb_ch->chno) { |
184 | case 0: | 169 | case 0: |
@@ -247,6 +232,9 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, | |||
247 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); | 232 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
248 | struct imx_ldb *ldb = imx_ldb_ch->ldb; | 233 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
249 | int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; | 234 | int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; |
235 | unsigned long serial_clk; | ||
236 | unsigned long di_clk = mode->clock * 1000; | ||
237 | int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); | ||
250 | 238 | ||
251 | if (mode->clock > 170000) { | 239 | if (mode->clock > 170000) { |
252 | dev_warn(ldb->dev, | 240 | dev_warn(ldb->dev, |
@@ -257,6 +245,16 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, | |||
257 | "%s: mode exceeds 85 MHz pixel clock\n", __func__); | 245 | "%s: mode exceeds 85 MHz pixel clock\n", __func__); |
258 | } | 246 | } |
259 | 247 | ||
248 | if (dual) { | ||
249 | serial_clk = 3500UL * mode->clock; | ||
250 | imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); | ||
251 | imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); | ||
252 | } else { | ||
253 | serial_clk = 7000UL * mode->clock; | ||
254 | imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, | ||
255 | di_clk); | ||
256 | } | ||
257 | |||
260 | /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ | 258 | /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ |
261 | if (imx_ldb_ch == &ldb->channel[0]) { | 259 | if (imx_ldb_ch == &ldb->channel[0]) { |
262 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | 260 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) |
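The clock setup moved above encodes the usual LVDS relationship: the serial clock is 7x the pixel clock on a single channel and 3.5x per channel when the split (dual-channel) mode shares the pixel stream. With mode->clock in kHz, the 7000UL/3500UL multipliers produce a rate in Hz, as the short standalone calculation below shows for an assumed 65 MHz panel:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pixclk_khz = 65000;               /* example 65 MHz panel */
        unsigned long di_clk = pixclk_khz * 1000;       /* 65,000,000 Hz */
        unsigned long single = 7000UL * pixclk_khz;     /* 455,000,000 Hz */
        unsigned long dual   = 3500UL * pixclk_khz;     /* 227,500,000 Hz per channel */

        printf("di_clk=%lu single=%lu dual=%lu\n", di_clk, single, dual);
        return 0;
    }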
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 5e83e007080f..900dda6a8e71 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c | |||
@@ -236,8 +236,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) | |||
236 | } | 236 | } |
237 | 237 | ||
238 | panel_node = of_parse_phandle(np, "fsl,panel", 0); | 238 | panel_node = of_parse_phandle(np, "fsl,panel", 0); |
239 | if (panel_node) | 239 | if (panel_node) { |
240 | imxpd->panel = of_drm_find_panel(panel_node); | 240 | imxpd->panel = of_drm_find_panel(panel_node); |
241 | if (!imxpd->panel) | ||
242 | return -EPROBE_DEFER; | ||
243 | } | ||
241 | 244 | ||
242 | imxpd->dev = dev; | 245 | imxpd->dev = dev; |
243 | 246 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c index 8edd531cb621..7369ee7f0c55 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | |||
@@ -32,7 +32,10 @@ static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) | |||
32 | void mdp4_irq_preinstall(struct msm_kms *kms) | 32 | void mdp4_irq_preinstall(struct msm_kms *kms) |
33 | { | 33 | { |
34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | 34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
35 | mdp4_enable(mdp4_kms); | ||
35 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); | 36 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); |
37 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | ||
38 | mdp4_disable(mdp4_kms); | ||
36 | } | 39 | } |
37 | 40 | ||
38 | int mdp4_irq_postinstall(struct msm_kms *kms) | 41 | int mdp4_irq_postinstall(struct msm_kms *kms) |
@@ -53,7 +56,9 @@ int mdp4_irq_postinstall(struct msm_kms *kms) | |||
53 | void mdp4_irq_uninstall(struct msm_kms *kms) | 56 | void mdp4_irq_uninstall(struct msm_kms *kms) |
54 | { | 57 | { |
55 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | 58 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
59 | mdp4_enable(mdp4_kms); | ||
56 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | 60 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); |
61 | mdp4_disable(mdp4_kms); | ||
57 | } | 62 | } |
58 | 63 | ||
59 | irqreturn_t mdp4_irq(struct msm_kms *kms) | 64 | irqreturn_t mdp4_irq(struct msm_kms *kms) |
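Both MDP4 hunks above (and the matching MDP5 change further down) wrap the interrupt register writes in mdp4_enable()/mdp4_disable(), keeping the block's clocks on while INTR_CLEAR and INTR_ENABLE are programmed. A schematic sketch of that bracket pattern, using stand-in types and helpers rather than the msm driver's own:

    /* Stand-in types and helpers, for illustration only. */
    struct kms_dev { int dummy; };

    enum { REG_INTR_CLEAR, REG_INTR_ENABLE };   /* placeholder register ids */

    static void hw_clk_enable(struct kms_dev *d)  { (void)d; /* clocks on */ }
    static void hw_clk_disable(struct kms_dev *d) { (void)d; /* clocks off */ }
    static void hw_write(struct kms_dev *d, int reg, unsigned val)
    {
        (void)d; (void)reg; (void)val;          /* real code would hit MMIO */
    }

    static void irq_preinstall(struct kms_dev *d)
    {
        hw_clk_enable(d);                       /* registers need clocks running */
        hw_write(d, REG_INTR_CLEAR, 0xffffffff);
        hw_write(d, REG_INTR_ENABLE, 0x00000000);
        hw_clk_disable(d);
    }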
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index 09b4a25eb553..c276624290af 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | |||
@@ -8,17 +8,9 @@ http://github.com/freedreno/envytools/ | |||
8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
9 | 9 | ||
10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) | 11 | - /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41) |
12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15) |
13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) | 13 | - /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19) |
14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) | ||
15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) | ||
16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | ||
17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | ||
18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) | ||
19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | ||
20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) | ||
21 | - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) | ||
22 | 14 | ||
23 | Copyright (C) 2013-2015 by the following authors: | 15 | Copyright (C) 2013-2015 by the following authors: |
24 | - Rob Clark <robdclark@gmail.com> (robclark) | 16 | - Rob Clark <robdclark@gmail.com> (robclark) |
@@ -910,6 +902,7 @@ static inline uint32_t __offset_LM(uint32_t idx) | |||
910 | case 2: return (mdp5_cfg->lm.base[2]); | 902 | case 2: return (mdp5_cfg->lm.base[2]); |
911 | case 3: return (mdp5_cfg->lm.base[3]); | 903 | case 3: return (mdp5_cfg->lm.base[3]); |
912 | case 4: return (mdp5_cfg->lm.base[4]); | 904 | case 4: return (mdp5_cfg->lm.base[4]); |
905 | case 5: return (mdp5_cfg->lm.base[5]); | ||
913 | default: return INVALID_IDX(idx); | 906 | default: return INVALID_IDX(idx); |
914 | } | 907 | } |
915 | } | 908 | } |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 46fac545dc2b..2f2863cf8b45 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
@@ -62,8 +62,8 @@ struct mdp5_crtc { | |||
62 | 62 | ||
63 | /* current cursor being scanned out: */ | 63 | /* current cursor being scanned out: */ |
64 | struct drm_gem_object *scanout_bo; | 64 | struct drm_gem_object *scanout_bo; |
65 | uint32_t width; | 65 | uint32_t width, height; |
66 | uint32_t height; | 66 | uint32_t x, y; |
67 | } cursor; | 67 | } cursor; |
68 | }; | 68 | }; |
69 | #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) | 69 | #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) |
@@ -103,8 +103,8 @@ static void crtc_flush_all(struct drm_crtc *crtc) | |||
103 | struct drm_plane *plane; | 103 | struct drm_plane *plane; |
104 | uint32_t flush_mask = 0; | 104 | uint32_t flush_mask = 0; |
105 | 105 | ||
106 | /* we could have already released CTL in the disable path: */ | 106 | /* this should not happen: */ |
107 | if (!mdp5_crtc->ctl) | 107 | if (WARN_ON(!mdp5_crtc->ctl)) |
108 | return; | 108 | return; |
109 | 109 | ||
110 | drm_atomic_crtc_for_each_plane(plane, crtc) { | 110 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
@@ -143,6 +143,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) | |||
143 | drm_atomic_crtc_for_each_plane(plane, crtc) { | 143 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
144 | mdp5_plane_complete_flip(plane); | 144 | mdp5_plane_complete_flip(plane); |
145 | } | 145 | } |
146 | |||
147 | if (mdp5_crtc->ctl && !crtc->state->enable) { | ||
148 | mdp5_ctl_release(mdp5_crtc->ctl); | ||
149 | mdp5_crtc->ctl = NULL; | ||
150 | } | ||
146 | } | 151 | } |
147 | 152 | ||
148 | static void unref_cursor_worker(struct drm_flip_work *work, void *val) | 153 | static void unref_cursor_worker(struct drm_flip_work *work, void *val) |
@@ -386,14 +391,17 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc) | |||
386 | mdp5_crtc->event = crtc->state->event; | 391 | mdp5_crtc->event = crtc->state->event; |
387 | spin_unlock_irqrestore(&dev->event_lock, flags); | 392 | spin_unlock_irqrestore(&dev->event_lock, flags); |
388 | 393 | ||
394 | /* | ||
395 | * If no CTL has been allocated in mdp5_crtc_atomic_check(), | ||
396 | * it means we are trying to flush a CRTC whose state is disabled: | ||
397 | * nothing else needs to be done. | ||
398 | */ | ||
399 | if (unlikely(!mdp5_crtc->ctl)) | ||
400 | return; | ||
401 | |||
389 | blend_setup(crtc); | 402 | blend_setup(crtc); |
390 | crtc_flush_all(crtc); | 403 | crtc_flush_all(crtc); |
391 | request_pending(crtc, PENDING_FLIP); | 404 | request_pending(crtc, PENDING_FLIP); |
392 | |||
393 | if (mdp5_crtc->ctl && !crtc->state->enable) { | ||
394 | mdp5_ctl_release(mdp5_crtc->ctl); | ||
395 | mdp5_crtc->ctl = NULL; | ||
396 | } | ||
397 | } | 405 | } |
398 | 406 | ||
399 | static int mdp5_crtc_set_property(struct drm_crtc *crtc, | 407 | static int mdp5_crtc_set_property(struct drm_crtc *crtc, |
@@ -403,6 +411,32 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc, | |||
403 | return -EINVAL; | 411 | return -EINVAL; |
404 | } | 412 | } |
405 | 413 | ||
414 | static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h) | ||
415 | { | ||
416 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
417 | uint32_t xres = crtc->mode.hdisplay; | ||
418 | uint32_t yres = crtc->mode.vdisplay; | ||
419 | |||
420 | /* | ||
421 | * Cursor Region Of Interest (ROI) is a plane read from cursor | ||
422 | * buffer to render. The ROI region is determined by the visibility of | ||
423 | * the cursor point. In the default Cursor image the cursor point will | ||
424 | * be at the top left of the cursor image, unless it is specified | ||
425 | * otherwise using hotspot feature. | ||
426 | * | ||
427 | * If the cursor point reaches the right (xres - x < cursor.width) or | ||
428 | * bottom (yres - y < cursor.height) boundary of the screen, then ROI | ||
429 | * width and ROI height need to be evaluated to crop the cursor image | ||
430 | * accordingly. | ||
431 | * (xres-x) will be new cursor width when x > (xres - cursor.width) | ||
432 | * (yres-y) will be new cursor height when y > (yres - cursor.height) | ||
433 | */ | ||
434 | *roi_w = min(mdp5_crtc->cursor.width, xres - | ||
435 | mdp5_crtc->cursor.x); | ||
436 | *roi_h = min(mdp5_crtc->cursor.height, yres - | ||
437 | mdp5_crtc->cursor.y); | ||
438 | } | ||
439 | |||
406 | static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | 440 | static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, |
407 | struct drm_file *file, uint32_t handle, | 441 | struct drm_file *file, uint32_t handle, |
408 | uint32_t width, uint32_t height) | 442 | uint32_t width, uint32_t height) |
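get_roi(), factored out above, clamps the cursor fetch region to the part of the cursor image that is actually on screen. A worked standalone example: a 64x64 cursor placed at (1890, 1020) on a 1920x1080 mode only has 30x60 visible pixels.

    #include <stdio.h>

    static unsigned int umin(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int xres = 1920, yres = 1080;      /* mode size */
        unsigned int cur_w = 64, cur_h = 64;        /* cursor image size */
        unsigned int x = 1890, y = 1020;            /* cursor position */

        unsigned int roi_w = umin(cur_w, xres - x); /* min(64, 30) = 30 */
        unsigned int roi_h = umin(cur_h, yres - y); /* min(64, 60) = 60 */

        printf("roi = %ux%u\n", roi_w, roi_h);
        return 0;
    }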
@@ -416,6 +450,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
416 | unsigned int depth; | 450 | unsigned int depth; |
417 | enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; | 451 | enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; |
418 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); | 452 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); |
453 | uint32_t roi_w, roi_h; | ||
419 | unsigned long flags; | 454 | unsigned long flags; |
420 | 455 | ||
421 | if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { | 456 | if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { |
@@ -446,6 +481,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
446 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); | 481 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
447 | old_bo = mdp5_crtc->cursor.scanout_bo; | 482 | old_bo = mdp5_crtc->cursor.scanout_bo; |
448 | 483 | ||
484 | mdp5_crtc->cursor.scanout_bo = cursor_bo; | ||
485 | mdp5_crtc->cursor.width = width; | ||
486 | mdp5_crtc->cursor.height = height; | ||
487 | |||
488 | get_roi(crtc, &roi_w, &roi_h); | ||
489 | |||
449 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); | 490 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); |
450 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), | 491 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), |
451 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); | 492 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); |
@@ -453,19 +494,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
453 | MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | | 494 | MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | |
454 | MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); | 495 | MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); |
455 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), | 496 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), |
456 | MDP5_LM_CURSOR_SIZE_ROI_H(height) | | 497 | MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | |
457 | MDP5_LM_CURSOR_SIZE_ROI_W(width)); | 498 | MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); |
458 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); | 499 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); |
459 | 500 | ||
460 | |||
461 | blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; | 501 | blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; |
462 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN; | ||
463 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); | 502 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); |
464 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); | 503 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); |
465 | 504 | ||
466 | mdp5_crtc->cursor.scanout_bo = cursor_bo; | ||
467 | mdp5_crtc->cursor.width = width; | ||
468 | mdp5_crtc->cursor.height = height; | ||
469 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); | 505 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); |
470 | 506 | ||
471 | ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); | 507 | ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); |
@@ -489,31 +525,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
489 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | 525 | struct mdp5_kms *mdp5_kms = get_kms(crtc); |
490 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | 526 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); |
491 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); | 527 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); |
492 | uint32_t xres = crtc->mode.hdisplay; | ||
493 | uint32_t yres = crtc->mode.vdisplay; | ||
494 | uint32_t roi_w; | 528 | uint32_t roi_w; |
495 | uint32_t roi_h; | 529 | uint32_t roi_h; |
496 | unsigned long flags; | 530 | unsigned long flags; |
497 | 531 | ||
498 | x = (x > 0) ? x : 0; | 532 | /* In case the CRTC is disabled, just drop the cursor update */ |
499 | y = (y > 0) ? y : 0; | 533 | if (unlikely(!crtc->state->enable)) |
534 | return 0; | ||
500 | 535 | ||
501 | /* | 536 | mdp5_crtc->cursor.x = x = max(x, 0); |
502 | * Cursor Region Of Interest (ROI) is a plane read from cursor | 537 | mdp5_crtc->cursor.y = y = max(y, 0); |
503 | * buffer to render. The ROI region is determined by the visiblity of | 538 | |
504 | * the cursor point. In the default Cursor image the cursor point will | 539 | get_roi(crtc, &roi_w, &roi_h); |
505 | * be at the top left of the cursor image, unless it is specified | ||
506 | * otherwise using hotspot feature. | ||
507 | * | ||
508 | * If the cursor point reaches the right (xres - x < cursor.width) or | ||
509 | * bottom (yres - y < cursor.height) boundary of the screen, then ROI | ||
510 | * width and ROI height need to be evaluated to crop the cursor image | ||
511 | * accordingly. | ||
512 | * (xres-x) will be new cursor width when x > (xres - cursor.width) | ||
513 | * (yres-y) will be new cursor height when y > (yres - cursor.height) | ||
514 | */ | ||
515 | roi_w = min(mdp5_crtc->cursor.width, xres - x); | ||
516 | roi_h = min(mdp5_crtc->cursor.height, yres - y); | ||
517 | 540 | ||
518 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); | 541 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
519 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), | 542 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), |
@@ -544,8 +567,8 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = { | |||
544 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { | 567 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { |
545 | .mode_fixup = mdp5_crtc_mode_fixup, | 568 | .mode_fixup = mdp5_crtc_mode_fixup, |
546 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, | 569 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, |
547 | .prepare = mdp5_crtc_disable, | 570 | .disable = mdp5_crtc_disable, |
548 | .commit = mdp5_crtc_enable, | 571 | .enable = mdp5_crtc_enable, |
549 | .atomic_check = mdp5_crtc_atomic_check, | 572 | .atomic_check = mdp5_crtc_atomic_check, |
550 | .atomic_begin = mdp5_crtc_atomic_begin, | 573 | .atomic_begin = mdp5_crtc_atomic_begin, |
551 | .atomic_flush = mdp5_crtc_atomic_flush, | 574 | .atomic_flush = mdp5_crtc_atomic_flush, |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index d6a14bb99988..af0e02fa4f48 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | |||
@@ -267,14 +267,14 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) | |||
267 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); | 267 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); |
268 | spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); | 268 | spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); |
269 | 269 | ||
270 | mdp5_encoder->enabled = false; | 270 | mdp5_encoder->enabled = true; |
271 | } | 271 | } |
272 | 272 | ||
273 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { | 273 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { |
274 | .mode_fixup = mdp5_encoder_mode_fixup, | 274 | .mode_fixup = mdp5_encoder_mode_fixup, |
275 | .mode_set = mdp5_encoder_mode_set, | 275 | .mode_set = mdp5_encoder_mode_set, |
276 | .prepare = mdp5_encoder_disable, | 276 | .disable = mdp5_encoder_disable, |
277 | .commit = mdp5_encoder_enable, | 277 | .enable = mdp5_encoder_enable, |
278 | }; | 278 | }; |
279 | 279 | ||
280 | /* initialize encoder */ | 280 | /* initialize encoder */ |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 70ac81edd40f..a9407105b9b7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | |||
@@ -34,7 +34,10 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) | |||
34 | void mdp5_irq_preinstall(struct msm_kms *kms) | 34 | void mdp5_irq_preinstall(struct msm_kms *kms) |
35 | { | 35 | { |
36 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | 36 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
37 | mdp5_enable(mdp5_kms); | ||
37 | mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); | 38 | mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); |
39 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); | ||
40 | mdp5_disable(mdp5_kms); | ||
38 | } | 41 | } |
39 | 42 | ||
40 | int mdp5_irq_postinstall(struct msm_kms *kms) | 43 | int mdp5_irq_postinstall(struct msm_kms *kms) |
@@ -57,7 +60,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms) | |||
57 | void mdp5_irq_uninstall(struct msm_kms *kms) | 60 | void mdp5_irq_uninstall(struct msm_kms *kms) |
58 | { | 61 | { |
59 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | 62 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
63 | mdp5_enable(mdp5_kms); | ||
60 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); | 64 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); |
65 | mdp5_disable(mdp5_kms); | ||
61 | } | 66 | } |
62 | 67 | ||
63 | static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) | 68 | static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) |
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 871aa2108dc6..18fd643b6e69 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c | |||
@@ -219,8 +219,10 @@ int msm_atomic_commit(struct drm_device *dev, | |||
219 | * mark our set of crtc's as busy: | 219 | * mark our set of crtc's as busy: |
220 | */ | 220 | */ |
221 | ret = start_atomic(dev->dev_private, c->crtc_mask); | 221 | ret = start_atomic(dev->dev_private, c->crtc_mask); |
222 | if (ret) | 222 | if (ret) { |
223 | kfree(c); | ||
223 | return ret; | 224 | return ret; |
225 | } | ||
224 | 226 | ||
225 | /* | 227 | /* |
226 | * This is the point of no return - everything below never fails except | 228 | * This is the point of no return - everything below never fails except |
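The msm_atomic_commit() fix above is a plain error-path leak repair: the commit context allocated earlier in the function was not freed when start_atomic() failed. A minimal userspace sketch of the pattern, with invented names:

    #include <stdlib.h>

    struct commit_ctx { unsigned crtc_mask; };

    static int start_step(unsigned mask) { return mask ? 0 : -1; }   /* stand-in */

    static int do_commit(unsigned crtc_mask)
    {
        struct commit_ctx *c = calloc(1, sizeof(*c));
        int ret;

        if (!c)
            return -1;
        c->crtc_mask = crtc_mask;

        ret = start_step(c->crtc_mask);
        if (ret) {
            free(c);            /* release the context on the failure path */
            return ret;
        }

        /* ... the rest of the commit would run here ... */
        free(c);
        return 0;
    }

    int main(void)
    {
        do_commit(0);           /* exercises the failure path */
        return do_commit(1);
    }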
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 79924e4b1b49..6751553abe4a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, | |||
418 | nouveau_fbcon_zfill(dev, fbcon); | 418 | nouveau_fbcon_zfill(dev, fbcon); |
419 | 419 | ||
420 | /* To allow resizeing without swapping buffers */ | 420 | /* To allow resizeing without swapping buffers */ |
421 | NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n", | 421 | NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", |
422 | nouveau_fb->base.width, nouveau_fb->base.height, | 422 | nouveau_fb->base.width, nouveau_fb->base.height, |
423 | nvbo->bo.offset, nvbo); | 423 | nvbo->bo.offset, nvbo); |
424 | 424 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 29bd539af183..6efa8f38ff54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | |||
@@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
340 | 340 | ||
341 | /* switch mmio to cpu's native endianness */ | 341 | /* switch mmio to cpu's native endianness */ |
342 | #ifndef __BIG_ENDIAN | 342 | #ifndef __BIG_ENDIAN |
343 | if (ioread32_native(map + 0x000004) != 0x00000000) | 343 | if (ioread32_native(map + 0x000004) != 0x00000000) { |
344 | #else | 344 | #else |
345 | if (ioread32_native(map + 0x000004) == 0x00000000) | 345 | if (ioread32_native(map + 0x000004) == 0x00000000) { |
346 | #endif | 346 | #endif |
347 | iowrite32_native(0x01000001, map + 0x000004); | 347 | iowrite32_native(0x01000001, map + 0x000004); |
348 | ioread32_native(map); | ||
349 | } | ||
348 | 350 | ||
349 | /* read boot0 and strapping information */ | 351 | /* read boot0 and strapping information */ |
350 | boot0 = ioread32_native(map + 0x000000); | 352 | boot0 = ioread32_native(map + 0x000000); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c index 539561ed3281..108d048da764 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c | |||
@@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device) | |||
142 | device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; | 142 | device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; |
143 | #endif | 143 | #endif |
144 | break; | 144 | break; |
145 | case 0x126: | ||
146 | device->cname = "GM206"; | ||
147 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass; | ||
148 | device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass; | ||
149 | device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass; | ||
150 | device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass; | ||
151 | #if 0 | ||
152 | /* looks to be some non-trivial changes */ | ||
153 | device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass; | ||
154 | /* priv ring says no to 0x10eb14 writes */ | ||
155 | device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass; | ||
156 | #endif | ||
157 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | ||
158 | device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass; | ||
159 | device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; | ||
160 | device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass; | ||
161 | device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; | ||
162 | device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass; | ||
163 | device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass; | ||
164 | device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass; | ||
165 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | ||
166 | device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass; | ||
167 | device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass; | ||
168 | device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass; | ||
169 | #if 0 | ||
170 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | ||
171 | #endif | ||
172 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass; | ||
173 | #if 0 | ||
174 | device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass; | ||
175 | device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass; | ||
176 | device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass; | ||
177 | #endif | ||
178 | device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass; | ||
179 | #if 0 | ||
180 | device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass; | ||
181 | device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass; | ||
182 | device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass; | ||
183 | device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass; | ||
184 | device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass; | ||
185 | device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; | ||
186 | #endif | ||
187 | break; | ||
145 | default: | 188 | default: |
146 | nv_fatal(device, "unknown Maxwell chipset\n"); | 189 | nv_fatal(device, "unknown Maxwell chipset\n"); |
147 | return -EINVAL; | 190 | return -EINVAL; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c index b038b6eb51db..043e4296084c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | |||
@@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev) | |||
502 | { | 502 | { |
503 | struct nvkm_device *device = nv_device(subdev); | 503 | struct nvkm_device *device = nv_device(subdev); |
504 | struct nv04_fifo_priv *priv = (void *)subdev; | 504 | struct nv04_fifo_priv *priv = (void *)subdev; |
505 | uint32_t status, reassign; | 505 | u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0); |
506 | int cnt = 0; | 506 | u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask; |
507 | u32 reassign, chid, get, sem; | ||
507 | 508 | ||
508 | reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; | 509 | reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; |
509 | while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { | 510 | nv_wr32(priv, NV03_PFIFO_CACHES, 0); |
510 | uint32_t chid, get; | ||
511 | |||
512 | nv_wr32(priv, NV03_PFIFO_CACHES, 0); | ||
513 | |||
514 | chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; | ||
515 | get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); | ||
516 | 511 | ||
517 | if (status & NV_PFIFO_INTR_CACHE_ERROR) { | 512 | chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; |
518 | nv04_fifo_cache_error(device, priv, chid, get); | 513 | get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); |
519 | status &= ~NV_PFIFO_INTR_CACHE_ERROR; | ||
520 | } | ||
521 | 514 | ||
522 | if (status & NV_PFIFO_INTR_DMA_PUSHER) { | 515 | if (stat & NV_PFIFO_INTR_CACHE_ERROR) { |
523 | nv04_fifo_dma_pusher(device, priv, chid); | 516 | nv04_fifo_cache_error(device, priv, chid, get); |
524 | status &= ~NV_PFIFO_INTR_DMA_PUSHER; | 517 | stat &= ~NV_PFIFO_INTR_CACHE_ERROR; |
525 | } | 518 | } |
526 | 519 | ||
527 | if (status & NV_PFIFO_INTR_SEMAPHORE) { | 520 | if (stat & NV_PFIFO_INTR_DMA_PUSHER) { |
528 | uint32_t sem; | 521 | nv04_fifo_dma_pusher(device, priv, chid); |
522 | stat &= ~NV_PFIFO_INTR_DMA_PUSHER; | ||
523 | } | ||
529 | 524 | ||
530 | status &= ~NV_PFIFO_INTR_SEMAPHORE; | 525 | if (stat & NV_PFIFO_INTR_SEMAPHORE) { |
531 | nv_wr32(priv, NV03_PFIFO_INTR_0, | 526 | stat &= ~NV_PFIFO_INTR_SEMAPHORE; |
532 | NV_PFIFO_INTR_SEMAPHORE); | 527 | nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); |
533 | 528 | ||
534 | sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); | 529 | sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); |
535 | nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); | 530 | nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); |
536 | 531 | ||
537 | nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); | 532 | nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); |
538 | nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); | 533 | nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); |
539 | } | 534 | } |
540 | 535 | ||
541 | if (device->card_type == NV_50) { | 536 | if (device->card_type == NV_50) { |
542 | if (status & 0x00000010) { | 537 | if (stat & 0x00000010) { |
543 | status &= ~0x00000010; | 538 | stat &= ~0x00000010; |
544 | nv_wr32(priv, 0x002100, 0x00000010); | 539 | nv_wr32(priv, 0x002100, 0x00000010); |
545 | } | ||
546 | |||
547 | if (status & 0x40000000) { | ||
548 | nv_wr32(priv, 0x002100, 0x40000000); | ||
549 | nvkm_fifo_uevent(&priv->base); | ||
550 | status &= ~0x40000000; | ||
551 | } | ||
552 | } | 540 | } |
553 | 541 | ||
554 | if (status) { | 542 | if (stat & 0x40000000) { |
555 | nv_warn(priv, "unknown intr 0x%08x, ch %d\n", | 543 | nv_wr32(priv, 0x002100, 0x40000000); |
556 | status, chid); | 544 | nvkm_fifo_uevent(&priv->base); |
557 | nv_wr32(priv, NV03_PFIFO_INTR_0, status); | 545 | stat &= ~0x40000000; |
558 | status = 0; | ||
559 | } | 546 | } |
560 | |||
561 | nv_wr32(priv, NV03_PFIFO_CACHES, reassign); | ||
562 | } | 547 | } |
563 | 548 | ||
564 | if (status) { | 549 | if (stat) { |
565 | nv_error(priv, "still angry after %d spins, halt\n", cnt); | 550 | nv_warn(priv, "unknown intr 0x%08x\n", stat); |
566 | nv_wr32(priv, 0x002140, 0); | 551 | nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); |
567 | nv_wr32(priv, 0x000140, 0); | 552 | nv_wr32(priv, NV03_PFIFO_INTR_0, stat); |
568 | } | 553 | } |
569 | 554 | ||
570 | nv_wr32(priv, 0x000100, 0x00000100); | 555 | nv_wr32(priv, NV03_PFIFO_CACHES, reassign); |
571 | } | 556 | } |
572 | 557 | ||
573 | static int | 558 | static int |
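The rewritten nv04_fifo_intr() above replaces the bounded retry loop with a single pass: read PFIFO_INTR_0 masked by PFIFO_INTR_EN_0, service the known bits, and mask off (rather than spin on) anything left over. A self-contained sketch of that handle-then-mask structure, using made-up status bits and simulated register state:

    #include <stdio.h>
    #include <stdint.h>

    #define INTR_CACHE_ERROR 0x00000001u
    #define INTR_DMA_PUSHER  0x00000010u

    static uint32_t intr_en = INTR_CACHE_ERROR | INTR_DMA_PUSHER | 0x00000100u;

    static void handle_irq(uint32_t raw_status)
    {
        uint32_t stat = raw_status & intr_en;   /* only consider enabled sources */

        if (stat & INTR_CACHE_ERROR) {
            /* ... service the cache error ... */
            stat &= ~INTR_CACHE_ERROR;
        }
        if (stat & INTR_DMA_PUSHER) {
            /* ... service the DMA pusher fault ... */
            stat &= ~INTR_DMA_PUSHER;
        }
        if (stat) {
            printf("unknown intr 0x%08x, masking it off\n", stat);
            intr_en &= ~stat;                   /* stop the unexpected source from storming */
            /* ... and ack it in the status register ... */
        }
    }

    int main(void)
    {
        handle_irq(INTR_DMA_PUSHER | 0x00000100u);  /* one known + one unexpected bit */
        return 0;
    }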
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c index 2e7ec389eea7..57e2c5b13123 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | |||
@@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info) | |||
1032 | const int s = 8; | 1032 | const int s = 8; |
1033 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); | 1033 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
1034 | mmio_refn(info, 0x408004, 0x00000000, s, b); | 1034 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
1035 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); | 1035 | mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); |
1036 | mmio_refn(info, 0x418808, 0x00000000, s, b); | 1036 | mmio_refn(info, 0x418808, 0x00000000, s, b); |
1037 | mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); | 1037 | mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); |
1038 | } | 1038 | } |
1039 | 1039 | ||
1040 | void | 1040 | void |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index b52300d8861a..5e9454ba158f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | |||
@@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info) | |||
851 | const int s = 8; | 851 | const int s = 8; |
852 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); | 852 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
853 | mmio_refn(info, 0x408004, 0x00000000, s, b); | 853 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
854 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); | 854 | mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); |
855 | mmio_refn(info, 0x418808, 0x00000000, s, b); | 855 | mmio_refn(info, 0x418808, 0x00000000, s, b); |
856 | mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); | 856 | mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); |
857 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); | 857 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); |
858 | } | 858 | } |
859 | 859 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c index 956f4dce960c..b2fae6e389e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | |||
@@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info) | |||
871 | const int s = 8; | 871 | const int s = 8; |
872 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); | 872 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
873 | mmio_refn(info, 0x408004, 0x00000000, s, b); | 873 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
874 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); | 874 | mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); |
875 | mmio_refn(info, 0x418e24, 0x00000000, s, b); | 875 | mmio_refn(info, 0x418e24, 0x00000000, s, b); |
876 | mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); | 876 | mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s)); |
877 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); | 877 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); |
878 | } | 878 | } |
879 | 879 | ||
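In all three context generators the second register of each pair holds a size/valid word rather than a buffer reference, which is why mmio_refn() (tied to the bundle buffer allocation) is replaced by a plain mmio_wr32(). The value itself is just the bundle size in 256-byte units (s = 8) with bit 31 set; a tiny standalone calculation with an assumed size:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const int s = 8;                        /* 1 << 8 = 256-byte units */
        uint32_t bundle_size = 0x1800;          /* example size, 6 KiB */
        uint32_t val = 0x80000000u | (bundle_size >> s);

        printf("0x%08x\n", val);                /* prints 0x80000018 */
        return 0;
    }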
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c index d1a89b2bd5c1..c4e1f085ee10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c | |||
@@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info) | |||
74 | u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); | 74 | u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); |
75 | if (ent) { | 75 | if (ent) { |
76 | if (ver >= 0x41) { | 76 | if (ver >= 0x41) { |
77 | if (!(nv_ro32(bios, ent) & 0x80000000)) | 77 | u32 ent_value = nv_ro32(bios, ent); |
78 | u8 i2c_port = (ent_value >> 27) & 0x1f; | ||
79 | u8 dpaux_port = (ent_value >> 22) & 0x1f; | ||
80 | /* value 0x1f means unused according to DCB 4.x spec */ | ||
81 | if (i2c_port == 0x1f && dpaux_port == 0x1f) | ||
78 | info->type = DCB_I2C_UNUSED; | 82 | info->type = DCB_I2C_UNUSED; |
79 | else | 83 | else |
80 | info->type = DCB_I2C_PMGR; | 84 | info->type = DCB_I2C_PMGR; |
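For DCB 4.1 entries the parser above now pulls two 5-bit port indices out of the entry word, bits 31:27 for the I2C port and bits 26:22 for the DPAUX port, and only treats the entry as unused when both read 0x1f. A standalone illustration of the extraction on a made-up entry value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t ent = 0xffc00000u;                 /* made-up entry: both fields 0x1f */
        uint8_t i2c_port   = (ent >> 27) & 0x1f;    /* bits 31:27 */
        uint8_t dpaux_port = (ent >> 22) & 0x1f;    /* bits 26:22 */

        if (i2c_port == 0x1f && dpaux_port == 0x1f)
            printf("entry unused\n");
        else
            printf("i2c=%u dpaux=%u\n", i2c_port, dpaux_port);
        return 0;
    }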
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index ed644a4f6f57..86807ee91bd1 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1405 | (x << 16) | y); | 1405 | (x << 16) | y); |
1406 | viewport_w = crtc->mode.hdisplay; | 1406 | viewport_w = crtc->mode.hdisplay; |
1407 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; | 1407 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; |
1408 | if ((rdev->family >= CHIP_BONAIRE) && | ||
1409 | (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)) | ||
1410 | viewport_h *= 2; | ||
1408 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, | 1411 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
1409 | (viewport_w << 16) | viewport_h); | 1412 | (viewport_w << 16) | viewport_h); |
1410 | 1413 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 5bf825dfaa09..8d74de82456e 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -178,6 +178,13 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) | |||
178 | switch (msg->request & ~DP_AUX_I2C_MOT) { | 178 | switch (msg->request & ~DP_AUX_I2C_MOT) { |
179 | case DP_AUX_NATIVE_WRITE: | 179 | case DP_AUX_NATIVE_WRITE: |
180 | case DP_AUX_I2C_WRITE: | 180 | case DP_AUX_I2C_WRITE: |
181 | /* The atom implementation only supports writes with a max payload of | ||
182 | * 12 bytes since it uses 4 bits for the total count (header + payload) | ||
183 | * in the parameter space. The atom interface supports 16 byte | ||
184 | * payloads for reads. The hw itself supports up to 16 bytes of payload. | ||
185 | */ | ||
186 | if (WARN_ON_ONCE(msg->size > 12)) | ||
187 | return -E2BIG; | ||
181 | /* tx_size needs to be 4 even for bare address packets since the atom | 188 | /* tx_size needs to be 4 even for bare address packets since the atom |
182 | * table needs the info in tx_buf[3]. | 189 | * table needs the info in tx_buf[3]. |
183 | */ | 190 | */ |
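The guard added above enforces the limit spelled out in its comment: the ATOM table carries the total transfer length (4-byte header plus payload) in a 4-bit field, so AUX writes can carry at most 12 payload bytes and anything larger is rejected up front with -E2BIG. A trivial standalone version of the same check:

    #include <stdio.h>
    #include <stddef.h>
    #include <errno.h>

    static int check_aux_write_size(size_t payload_bytes)
    {
        return payload_bytes > 12 ? -E2BIG : 0;
    }

    int main(void)
    {
        printf("12 bytes -> %d\n", check_aux_write_size(12));   /* accepted: 0 */
        printf("16 bytes -> %d\n", check_aux_write_size(16));   /* rejected: -E2BIG */
        return 0;
    }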
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 7c9df1eac065..c39c1d0d9d4e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -731,7 +731,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
731 | dig_connector = radeon_connector->con_priv; | 731 | dig_connector = radeon_connector->con_priv; |
732 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 732 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
733 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { | 733 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { |
734 | if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) | 734 | if (radeon_audio != 0 && |
735 | drm_detect_monitor_audio(radeon_connector_edid(connector)) && | ||
736 | ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) | ||
735 | return ATOM_ENCODER_MODE_DP_AUDIO; | 737 | return ATOM_ENCODER_MODE_DP_AUDIO; |
736 | return ATOM_ENCODER_MODE_DP; | 738 | return ATOM_ENCODER_MODE_DP; |
737 | } else if (radeon_audio != 0) { | 739 | } else if (radeon_audio != 0) { |
@@ -747,7 +749,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
747 | } | 749 | } |
748 | break; | 750 | break; |
749 | case DRM_MODE_CONNECTOR_eDP: | 751 | case DRM_MODE_CONNECTOR_eDP: |
750 | if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) | 752 | if (radeon_audio != 0 && |
753 | drm_detect_monitor_audio(radeon_connector_edid(connector)) && | ||
754 | ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) | ||
751 | return ATOM_ENCODER_MODE_DP_AUDIO; | 755 | return ATOM_ENCODER_MODE_DP_AUDIO; |
752 | return ATOM_ENCODER_MODE_DP; | 756 | return ATOM_ENCODER_MODE_DP; |
753 | case DRM_MODE_CONNECTOR_DVIA: | 757 | case DRM_MODE_CONNECTOR_DVIA: |
@@ -1622,7 +1626,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1622 | struct radeon_connector *radeon_connector = NULL; | 1626 | struct radeon_connector *radeon_connector = NULL; |
1623 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; | 1627 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; |
1624 | bool travis_quirk = false; | 1628 | bool travis_quirk = false; |
1625 | int encoder_mode; | ||
1626 | 1629 | ||
1627 | if (connector) { | 1630 | if (connector) { |
1628 | radeon_connector = to_radeon_connector(connector); | 1631 | radeon_connector = to_radeon_connector(connector); |
@@ -1718,11 +1721,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1718 | } | 1721 | } |
1719 | break; | 1722 | break; |
1720 | } | 1723 | } |
1721 | |||
1722 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
1723 | if (radeon_audio != 0 && | ||
1724 | (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode))) | ||
1725 | radeon_audio_dpms(encoder, mode); | ||
1726 | } | 1724 | } |
1727 | 1725 | ||
1728 | static void | 1726 | static void |
@@ -1731,10 +1729,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1731 | struct drm_device *dev = encoder->dev; | 1729 | struct drm_device *dev = encoder->dev; |
1732 | struct radeon_device *rdev = dev->dev_private; | 1730 | struct radeon_device *rdev = dev->dev_private; |
1733 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1731 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1732 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1733 | int encoder_mode = atombios_get_encoder_mode(encoder); | ||
1734 | 1734 | ||
1735 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", | 1735 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", |
1736 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, | 1736 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, |
1737 | radeon_encoder->active_device); | 1737 | radeon_encoder->active_device); |
1738 | |||
1739 | if (connector && (radeon_audio != 0) && | ||
1740 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
1741 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
1742 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
1743 | radeon_audio_dpms(encoder, mode); | ||
1744 | |||
1738 | switch (radeon_encoder->encoder_id) { | 1745 | switch (radeon_encoder->encoder_id) { |
1739 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | 1746 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
1740 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | 1747 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
@@ -2136,6 +2143,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
2136 | struct drm_device *dev = encoder->dev; | 2143 | struct drm_device *dev = encoder->dev; |
2137 | struct radeon_device *rdev = dev->dev_private; | 2144 | struct radeon_device *rdev = dev->dev_private; |
2138 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 2145 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
2146 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
2139 | int encoder_mode; | 2147 | int encoder_mode; |
2140 | 2148 | ||
2141 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 2149 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
@@ -2163,10 +2171,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
2163 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | 2171 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
2164 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 2172 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
2165 | /* handled in dpms */ | 2173 | /* handled in dpms */ |
2166 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
2167 | if (radeon_audio != 0 && | ||
2168 | (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode))) | ||
2169 | radeon_audio_mode_set(encoder, adjusted_mode); | ||
2170 | break; | 2174 | break; |
2171 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 2175 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
2172 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 2176 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
@@ -2188,6 +2192,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
2188 | } | 2192 | } |
2189 | 2193 | ||
2190 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 2194 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
2195 | |||
2196 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
2197 | if (connector && (radeon_audio != 0) && | ||
2198 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
2199 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
2200 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
2201 | radeon_audio_mode_set(encoder, adjusted_mode); | ||
2191 | } | 2202 | } |
2192 | 2203 | ||
2193 | static bool | 2204 | static bool |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e6a4ba236c70..3e670d344a20 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -3613,6 +3613,8 @@ static void cik_gpu_init(struct radeon_device *rdev) | |||
3613 | } | 3613 | } |
3614 | 3614 | ||
3615 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 3615 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
3616 | WREG32(SRBM_INT_CNTL, 0x1); | ||
3617 | WREG32(SRBM_INT_ACK, 0x1); | ||
3616 | 3618 | ||
3617 | WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); | 3619 | WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); |
3618 | 3620 | ||
@@ -7230,6 +7232,8 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev) | |||
7230 | WREG32(CP_ME2_PIPE3_INT_CNTL, 0); | 7232 | WREG32(CP_ME2_PIPE3_INT_CNTL, 0); |
7231 | /* grbm */ | 7233 | /* grbm */ |
7232 | WREG32(GRBM_INT_CNTL, 0); | 7234 | WREG32(GRBM_INT_CNTL, 0); |
7235 | /* SRBM */ | ||
7236 | WREG32(SRBM_INT_CNTL, 0); | ||
7233 | /* vline/vblank, etc. */ | 7237 | /* vline/vblank, etc. */ |
7234 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 7238 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
7235 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 7239 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
@@ -7551,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev) | |||
7551 | WREG32(DC_HPD5_INT_CONTROL, hpd5); | 7555 | WREG32(DC_HPD5_INT_CONTROL, hpd5); |
7552 | WREG32(DC_HPD6_INT_CONTROL, hpd6); | 7556 | WREG32(DC_HPD6_INT_CONTROL, hpd6); |
7553 | 7557 | ||
7558 | /* posting read */ | ||
7559 | RREG32(SRBM_STATUS); | ||
7560 | |||
7554 | return 0; | 7561 | return 0; |
7555 | } | 7562 | } |
7556 | 7563 | ||
@@ -8046,6 +8053,10 @@ restart_ih: | |||
8046 | break; | 8053 | break; |
8047 | } | 8054 | } |
8048 | break; | 8055 | break; |
8056 | case 96: | ||
8057 | DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); | ||
8058 | WREG32(SRBM_INT_ACK, 0x1); | ||
8059 | break; | ||
8049 | case 124: /* UVD */ | 8060 | case 124: /* UVD */ |
8050 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); | 8061 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
8051 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); | 8062 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 03003f8a6de6..243a36c93b8f 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h | |||
@@ -482,6 +482,10 @@ | |||
482 | #define SOFT_RESET_ORB (1 << 23) | 482 | #define SOFT_RESET_ORB (1 << 23) |
483 | #define SOFT_RESET_VCE (1 << 24) | 483 | #define SOFT_RESET_VCE (1 << 24) |
484 | 484 | ||
485 | #define SRBM_READ_ERROR 0xE98 | ||
486 | #define SRBM_INT_CNTL 0xEA0 | ||
487 | #define SRBM_INT_ACK 0xEA8 | ||
488 | |||
485 | #define VM_L2_CNTL 0x1400 | 489 | #define VM_L2_CNTL 0x1400 |
486 | #define ENABLE_L2_CACHE (1 << 0) | 490 | #define ENABLE_L2_CACHE (1 << 0) |
487 | #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) | 491 | #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) |
@@ -2125,6 +2129,7 @@ | |||
2125 | #define VCE_UENC_REG_CLOCK_GATING 0x207c0 | 2129 | #define VCE_UENC_REG_CLOCK_GATING 0x207c0 |
2126 | #define VCE_SYS_INT_EN 0x21300 | 2130 | #define VCE_SYS_INT_EN 0x21300 |
2127 | # define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) | 2131 | # define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) |
2132 | #define VCE_LMI_VCPU_CACHE_40BIT_BAR 0x2145c | ||
2128 | #define VCE_LMI_CTRL2 0x21474 | 2133 | #define VCE_LMI_CTRL2 0x21474 |
2129 | #define VCE_LMI_CTRL 0x21498 | 2134 | #define VCE_LMI_CTRL 0x21498 |
2130 | #define VCE_LMI_VM_CTRL 0x214a0 | 2135 | #define VCE_LMI_VM_CTRL 0x214a0 |
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 192c80389151..3adc2afe32aa 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c | |||
@@ -26,6 +26,9 @@ | |||
26 | #include "radeon_audio.h" | 26 | #include "radeon_audio.h" |
27 | #include "sid.h" | 27 | #include "sid.h" |
28 | 28 | ||
29 | #define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8 | ||
30 | #define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc | ||
31 | |||
29 | u32 dce6_endpoint_rreg(struct radeon_device *rdev, | 32 | u32 dce6_endpoint_rreg(struct radeon_device *rdev, |
30 | u32 block_offset, u32 reg) | 33 | u32 block_offset, u32 reg) |
31 | { | 34 | { |
@@ -252,72 +255,67 @@ void dce6_audio_enable(struct radeon_device *rdev, | |||
252 | void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, | 255 | void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, |
253 | struct radeon_crtc *crtc, unsigned int clock) | 256 | struct radeon_crtc *crtc, unsigned int clock) |
254 | { | 257 | { |
255 | /* Two dtos; generally use dto0 for HDMI */ | 258 | /* Two dtos; generally use dto0 for HDMI */ |
256 | u32 value = 0; | 259 | u32 value = 0; |
257 | 260 | ||
258 | if (crtc) | 261 | if (crtc) |
259 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); | 262 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
260 | 263 | ||
261 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); | 264 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
262 | 265 | ||
263 | /* Express [24MHz / target pixel clock] as an exact rational | 266 | /* Express [24MHz / target pixel clock] as an exact rational |
264 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | 267 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
265 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 268 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
266 | */ | 269 | */ |
267 | WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); | 270 | WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); |
268 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock); | 271 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock); |
269 | } | 272 | } |
270 | 273 | ||
271 | void dce6_dp_audio_set_dto(struct radeon_device *rdev, | 274 | void dce6_dp_audio_set_dto(struct radeon_device *rdev, |
272 | struct radeon_crtc *crtc, unsigned int clock) | 275 | struct radeon_crtc *crtc, unsigned int clock) |
273 | { | 276 | { |
274 | /* Two dtos; generally use dto1 for DP */ | 277 | /* Two dtos; generally use dto1 for DP */ |
275 | u32 value = 0; | 278 | u32 value = 0; |
276 | value |= DCCG_AUDIO_DTO_SEL; | 279 | value |= DCCG_AUDIO_DTO_SEL; |
277 | 280 | ||
278 | if (crtc) | 281 | if (crtc) |
279 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); | 282 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
280 | 283 | ||
281 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); | 284 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
282 | 285 | ||
283 | /* Express [24MHz / target pixel clock] as an exact rational | 286 | /* Express [24MHz / target pixel clock] as an exact rational |
284 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | 287 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
285 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 288 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
286 | */ | 289 | */ |
287 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | 290 | if (ASIC_IS_DCE8(rdev)) { |
288 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | 291 | WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); |
292 | WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); | ||
293 | } else { | ||
294 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | ||
295 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | ||
296 | } | ||
289 | } | 297 | } |
290 | 298 | ||
291 | void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | 299 | void dce6_dp_enable(struct drm_encoder *encoder, bool enable) |
292 | { | 300 | { |
293 | struct drm_device *dev = encoder->dev; | 301 | struct drm_device *dev = encoder->dev; |
294 | struct radeon_device *rdev = dev->dev_private; | 302 | struct radeon_device *rdev = dev->dev_private; |
295 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 303 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
296 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 304 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
297 | uint32_t offset; | ||
298 | 305 | ||
299 | if (!dig || !dig->afmt) | 306 | if (!dig || !dig->afmt) |
300 | return; | 307 | return; |
301 | 308 | ||
302 | offset = dig->afmt->offset; | ||
303 | |||
304 | if (enable) { | 309 | if (enable) { |
305 | if (dig->afmt->enabled) | 310 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, |
306 | return; | 311 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); |
307 | 312 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, | |
308 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | 313 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ |
309 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, | 314 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ |
310 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | 315 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ |
311 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | 316 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
312 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | ||
313 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | ||
314 | radeon_audio_enable(rdev, dig->afmt->pin, true); | ||
315 | } else { | 317 | } else { |
316 | if (!dig->afmt->enabled) | 318 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); |
317 | return; | ||
318 | |||
319 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0); | ||
320 | radeon_audio_enable(rdev, dig->afmt->pin, false); | ||
321 | } | 319 | } |
322 | 320 | ||
323 | dig->afmt->enabled = enable; | 321 | dig->afmt->enabled = enable; |
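The DTO programming above encodes the ratio 24 MHz / target pixel clock as a PHASE/MODULE pair: the phase is pinned at 24000 (the 24 MHz reference in kHz) and the module carries the pixel clock in kHz. A minimal standalone sketch of that arithmetic, with illustrative names rather than driver code:

    #include <stdio.h>

    /* Model the DTO phase/module pair used above: phase = 24 MHz reference
     * in kHz, module = pixel clock in kHz, so phase / module == 24 MHz / pclk. */
    struct audio_dto {
        unsigned int phase;   /* numerator, kHz */
        unsigned int module;  /* denominator, kHz */
    };

    static struct audio_dto audio_dto_for_pixel_clock(unsigned int pixel_khz)
    {
        struct audio_dto dto = { .phase = 24000, .module = pixel_khz };
        return dto;
    }

    int main(void)
    {
        struct audio_dto dto = audio_dto_for_pixel_clock(148500); /* 1080p60 */

        printf("phase=%u module=%u ratio=%.6f\n",
               dto.phase, dto.module, (double)dto.phase / dto.module);
        return 0;
    }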
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 78600f534c80..973df064c14f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -3253,6 +3253,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
3253 | } | 3253 | } |
3254 | 3254 | ||
3255 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 3255 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
3256 | WREG32(SRBM_INT_CNTL, 0x1); | ||
3257 | WREG32(SRBM_INT_ACK, 0x1); | ||
3256 | 3258 | ||
3257 | evergreen_fix_pci_max_read_req_size(rdev); | 3259 | evergreen_fix_pci_max_read_req_size(rdev); |
3258 | 3260 | ||
@@ -4324,6 +4326,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev) | |||
4324 | tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; | 4326 | tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
4325 | WREG32(DMA_CNTL, tmp); | 4327 | WREG32(DMA_CNTL, tmp); |
4326 | WREG32(GRBM_INT_CNTL, 0); | 4328 | WREG32(GRBM_INT_CNTL, 0); |
4329 | WREG32(SRBM_INT_CNTL, 0); | ||
4327 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 4330 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
4328 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 4331 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
4329 | if (rdev->num_crtc >= 4) { | 4332 | if (rdev->num_crtc >= 4) { |
@@ -4590,6 +4593,9 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
4590 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); | 4593 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); |
4591 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); | 4594 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); |
4592 | 4595 | ||
4596 | /* posting read */ | ||
4597 | RREG32(SRBM_STATUS); | ||
4598 | |||
4593 | return 0; | 4599 | return 0; |
4594 | } | 4600 | } |
4595 | 4601 | ||
@@ -5066,6 +5072,10 @@ restart_ih: | |||
5066 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); | 5072 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); |
5067 | break; | 5073 | break; |
5068 | } | 5074 | } |
5075 | case 96: | ||
5076 | DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); | ||
5077 | WREG32(SRBM_INT_ACK, 0x1); | ||
5078 | break; | ||
5069 | case 124: /* UVD */ | 5079 | case 124: /* UVD */ |
5070 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); | 5080 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
5071 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); | 5081 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
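The evergreen hunks above unmask the SRBM interrupt at init, clear it again in disable_interrupt_state, and handle IH source 96 by logging SRBM_READ_ERROR and writing the ack bit. A condensed sketch of that handler path follows; rreg32/wreg32 are stand-ins backed by a plain array so the sketch runs standalone, not the driver's RREG32/WREG32 macros:

    #include <stdio.h>
    #include <stdint.h>

    #define SRBM_READ_ERROR 0xE98
    #define SRBM_INT_ACK    0xEA8

    /* Hypothetical register file so the example is self-contained. */
    static uint32_t regs[0x1000];
    static uint32_t rreg32(uint32_t off)            { return regs[off]; }
    static void     wreg32(uint32_t off, uint32_t v){ regs[off] = v; }

    /* IH source 96: report the faulting SRBM read and acknowledge it,
     * mirroring the "case 96" added to the interrupt handlers above. */
    static void handle_srbm_read_error(void)
    {
        fprintf(stderr, "SRBM_READ_ERROR: 0x%x\n", rreg32(SRBM_READ_ERROR));
        wreg32(SRBM_INT_ACK, 0x1);
    }

    int main(void)
    {
        regs[SRBM_READ_ERROR] = 0xdeadbeef; /* pretend hardware latched an error */
        handle_srbm_read_error();
        return 0;
    }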
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 1d9aebc79595..c18d4ecbd95d 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
@@ -272,7 +272,7 @@ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, | |||
272 | } | 272 | } |
273 | 273 | ||
274 | void dce4_dp_audio_set_dto(struct radeon_device *rdev, | 274 | void dce4_dp_audio_set_dto(struct radeon_device *rdev, |
275 | struct radeon_crtc *crtc, unsigned int clock) | 275 | struct radeon_crtc *crtc, unsigned int clock) |
276 | { | 276 | { |
277 | u32 value; | 277 | u32 value; |
278 | 278 | ||
@@ -294,7 +294,7 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev, | |||
294 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 294 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
295 | */ | 295 | */ |
296 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | 296 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); |
297 | WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10); | 297 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); |
298 | } | 298 | } |
299 | 299 | ||
300 | void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) | 300 | void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) |
@@ -350,20 +350,9 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset) | |||
350 | struct drm_device *dev = encoder->dev; | 350 | struct drm_device *dev = encoder->dev; |
351 | struct radeon_device *rdev = dev->dev_private; | 351 | struct radeon_device *rdev = dev->dev_private; |
352 | 352 | ||
353 | WREG32(HDMI_INFOFRAME_CONTROL0 + offset, | ||
354 | HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ | ||
355 | HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ | ||
356 | |||
357 | WREG32(AFMT_INFOFRAME_CONTROL0 + offset, | 353 | WREG32(AFMT_INFOFRAME_CONTROL0 + offset, |
358 | AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ | 354 | AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ |
359 | 355 | ||
360 | WREG32(HDMI_INFOFRAME_CONTROL1 + offset, | ||
361 | HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ | ||
362 | |||
363 | WREG32(HDMI_AUDIO_PACKET_CONTROL + offset, | ||
364 | HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | ||
365 | HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */ | ||
366 | |||
367 | WREG32(AFMT_60958_0 + offset, | 356 | WREG32(AFMT_60958_0 + offset, |
368 | AFMT_60958_CS_CHANNEL_NUMBER_L(1)); | 357 | AFMT_60958_CS_CHANNEL_NUMBER_L(1)); |
369 | 358 | ||
@@ -408,15 +397,19 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
408 | if (!dig || !dig->afmt) | 397 | if (!dig || !dig->afmt) |
409 | return; | 398 | return; |
410 | 399 | ||
411 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 400 | if (enable) { |
412 | if (enable && dig->afmt->enabled) | 401 | WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, |
413 | return; | 402 | HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ |
414 | if (!enable && !dig->afmt->enabled) | 403 | |
415 | return; | 404 | WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, |
405 | HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | ||
406 | HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */ | ||
416 | 407 | ||
417 | if (!enable && dig->afmt->pin) { | 408 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, |
418 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 409 | HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ |
419 | dig->afmt->pin = NULL; | 410 | HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ |
411 | } else { | ||
412 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); | ||
420 | } | 413 | } |
421 | 414 | ||
422 | dig->afmt->enabled = enable; | 415 | dig->afmt->enabled = enable; |
@@ -425,33 +418,28 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
425 | enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); | 418 | enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); |
426 | } | 419 | } |
427 | 420 | ||
428 | void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | 421 | void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) |
429 | { | 422 | { |
430 | struct drm_device *dev = encoder->dev; | 423 | struct drm_device *dev = encoder->dev; |
431 | struct radeon_device *rdev = dev->dev_private; | 424 | struct radeon_device *rdev = dev->dev_private; |
432 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 425 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
433 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 426 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
434 | uint32_t offset; | ||
435 | 427 | ||
436 | if (!dig || !dig->afmt) | 428 | if (!dig || !dig->afmt) |
437 | return; | 429 | return; |
438 | 430 | ||
439 | offset = dig->afmt->offset; | ||
440 | |||
441 | if (enable) { | 431 | if (enable) { |
442 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 432 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
443 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 433 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
444 | struct radeon_connector_atom_dig *dig_connector; | 434 | struct radeon_connector_atom_dig *dig_connector; |
445 | uint32_t val; | 435 | uint32_t val; |
446 | 436 | ||
447 | if (dig->afmt->enabled) | 437 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, |
448 | return; | 438 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); |
449 | |||
450 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | ||
451 | 439 | ||
452 | if (radeon_connector->con_priv) { | 440 | if (radeon_connector->con_priv) { |
453 | dig_connector = radeon_connector->con_priv; | 441 | dig_connector = radeon_connector->con_priv; |
454 | val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset); | 442 | val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); |
455 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); | 443 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); |
456 | 444 | ||
457 | if (dig_connector->dp_clock == 162000) | 445 | if (dig_connector->dp_clock == 162000) |
@@ -459,21 +447,16 @@ void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | |||
459 | else | 447 | else |
460 | val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); | 448 | val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); |
461 | 449 | ||
462 | WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val); | 450 | WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val); |
463 | } | 451 | } |
464 | 452 | ||
465 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, | 453 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, |
466 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | 454 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ |
467 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | 455 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ |
468 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | 456 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ |
469 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | 457 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
470 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
471 | } else { | 458 | } else { |
472 | if (!dig->afmt->enabled) | 459 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); |
473 | return; | ||
474 | |||
475 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0); | ||
476 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
477 | } | 460 | } |
478 | 461 | ||
479 | dig->afmt->enabled = enable; | 462 | dig->afmt->enabled = enable; |
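The read-modify-write above picks the DP audio N base multiple from the link rate: 3 for a 1.62 GHz (RBR) link, 5 otherwise, after clearing the 4-bit field. A small standalone sketch of that field update; the shift is an assumption for illustration, the real field definition lives in evergreend.h:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative field helpers; only the 4-bit width matches the mask
     * cleared above, the shift is assumed for the sketch. */
    #define SEC_N_BASE_MULTIPLE_SHIFT 0
    #define SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << SEC_N_BASE_MULTIPLE_SHIFT)

    static uint32_t set_n_base_multiple(uint32_t reg, unsigned int dp_clock_khz)
    {
        reg &= ~SEC_N_BASE_MULTIPLE(0xf);      /* clear the old field */
        if (dp_clock_khz == 162000)            /* RBR, 1.62 GHz link */
            reg |= SEC_N_BASE_MULTIPLE(3);
        else                                   /* HBR and above */
            reg |= SEC_N_BASE_MULTIPLE(5);
        return reg;
    }

    int main(void)
    {
        printf("0x%08x\n", set_n_base_multiple(0xffffffff, 162000));
        printf("0x%08x\n", set_n_base_multiple(0xffffffff, 270000));
        return 0;
    }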
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index ee83d2a88750..a8d1d5240fcb 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -1191,6 +1191,10 @@ | |||
1191 | #define SOFT_RESET_REGBB (1 << 22) | 1191 | #define SOFT_RESET_REGBB (1 << 22) |
1192 | #define SOFT_RESET_ORB (1 << 23) | 1192 | #define SOFT_RESET_ORB (1 << 23) |
1193 | 1193 | ||
1194 | #define SRBM_READ_ERROR 0xE98 | ||
1195 | #define SRBM_INT_CNTL 0xEA0 | ||
1196 | #define SRBM_INT_ACK 0xEA8 | ||
1197 | |||
1194 | /* display watermarks */ | 1198 | /* display watermarks */ |
1195 | #define DC_LB_MEMORY_SPLIT 0x6b0c | 1199 | #define DC_LB_MEMORY_SPLIT 0x6b0c |
1196 | #define PRIORITY_A_CNT 0x6b18 | 1200 | #define PRIORITY_A_CNT 0x6b18 |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 24242a7f0ac3..dab00812abaa 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -962,6 +962,8 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
962 | } | 962 | } |
963 | 963 | ||
964 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 964 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
965 | WREG32(SRBM_INT_CNTL, 0x1); | ||
966 | WREG32(SRBM_INT_ACK, 0x1); | ||
965 | 967 | ||
966 | evergreen_fix_pci_max_read_req_size(rdev); | 968 | evergreen_fix_pci_max_read_req_size(rdev); |
967 | 969 | ||
@@ -1086,12 +1088,12 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
1086 | 1088 | ||
1087 | if ((rdev->config.cayman.max_backends_per_se == 1) && | 1089 | if ((rdev->config.cayman.max_backends_per_se == 1) && |
1088 | (rdev->flags & RADEON_IS_IGP)) { | 1090 | (rdev->flags & RADEON_IS_IGP)) { |
1089 | if ((disabled_rb_mask & 3) == 1) { | 1091 | if ((disabled_rb_mask & 3) == 2) { |
1090 | /* RB0 disabled, RB1 enabled */ | ||
1091 | tmp = 0x11111111; | ||
1092 | } else { | ||
1093 | /* RB1 disabled, RB0 enabled */ | 1092 | /* RB1 disabled, RB0 enabled */ |
1094 | tmp = 0x00000000; | 1093 | tmp = 0x00000000; |
1094 | } else { | ||
1095 | /* RB0 disabled, RB1 enabled */ | ||
1096 | tmp = 0x11111111; | ||
1095 | } | 1097 | } |
1096 | } else { | 1098 | } else { |
1097 | tmp = gb_addr_config & NUM_PIPES_MASK; | 1099 | tmp = gb_addr_config & NUM_PIPES_MASK; |
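The swapped branches above correct which backend map is used when one render backend is fused off on an IGP: bit 1 set in disabled_rb_mask means RB1 is disabled, so everything is routed to RB0 (0x00000000); otherwise RB0 is the disabled one and the map routes to RB1 (0x11111111). A tiny decision sketch of the fixed logic:

    #include <stdio.h>
    #include <stdint.h>

    /* Pick the backend map for a single-RB IGP from the low two bits of
     * disabled_rb_mask, matching the corrected branch order above. */
    static uint32_t backend_map_for(uint32_t disabled_rb_mask)
    {
        if ((disabled_rb_mask & 3) == 2)
            return 0x00000000;   /* RB1 disabled -> route all pipes to RB0 */
        else
            return 0x11111111;   /* RB0 disabled -> route all pipes to RB1 */
    }

    int main(void)
    {
        printf("mask=1 -> 0x%08x\n", backend_map_for(1));
        printf("mask=2 -> 0x%08x\n", backend_map_for(2));
        return 0;
    }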
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index ad7125486894..6b44580440d0 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h | |||
@@ -82,6 +82,10 @@ | |||
82 | #define SOFT_RESET_REGBB (1 << 22) | 82 | #define SOFT_RESET_REGBB (1 << 22) |
83 | #define SOFT_RESET_ORB (1 << 23) | 83 | #define SOFT_RESET_ORB (1 << 23) |
84 | 84 | ||
85 | #define SRBM_READ_ERROR 0xE98 | ||
86 | #define SRBM_INT_CNTL 0xEA0 | ||
87 | #define SRBM_INT_ACK 0xEA8 | ||
88 | |||
85 | #define SRBM_STATUS2 0x0EC4 | 89 | #define SRBM_STATUS2 0x0EC4 |
86 | #define DMA_BUSY (1 << 5) | 90 | #define DMA_BUSY (1 << 5) |
87 | #define DMA1_BUSY (1 << 6) | 91 | #define DMA1_BUSY (1 << 6) |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 279801ca5110..04f2514f7564 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev) | |||
728 | tmp |= RADEON_FP2_DETECT_MASK; | 728 | tmp |= RADEON_FP2_DETECT_MASK; |
729 | } | 729 | } |
730 | WREG32(RADEON_GEN_INT_CNTL, tmp); | 730 | WREG32(RADEON_GEN_INT_CNTL, tmp); |
731 | |||
732 | /* read back to post the write */ | ||
733 | RREG32(RADEON_GEN_INT_CNTL); | ||
734 | |||
731 | return 0; | 735 | return 0; |
732 | } | 736 | } |
733 | 737 | ||
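Several hunks in this series (r100, r600, rs600, si, evergreen) add a register read immediately after the interrupt-mask writes. On PCI/PCIe a write can sit in a posting buffer, and reading any register from the same device forces the posted write to reach the hardware before the function returns. A minimal sketch of the idiom, again with array-backed stand-ins for the driver's accessors:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-ins for WREG32/RREG32 so the sketch compiles on its own. */
    static uint32_t mmio[0x10000];
    static void     wreg32(uint32_t off, uint32_t v) { mmio[off] = v; }
    static uint32_t rreg32(uint32_t off)             { return mmio[off]; }

    #define GEN_INT_CNTL 0x0040

    static void irq_set(uint32_t mask)
    {
        wreg32(GEN_INT_CNTL, mask);

        /* Read back from the device so the posted write is flushed before
         * we return; the value itself is not needed. */
        (void)rreg32(GEN_INT_CNTL);
    }

    int main(void)
    {
        irq_set(0x1);
        printf("GEN_INT_CNTL=0x%x\n", rreg32(GEN_INT_CNTL));
        return 0;
    }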
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 07a71a2488c9..2fcad344492f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -3784,6 +3784,9 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3784 | WREG32(RV770_CG_THERMAL_INT, thermal_int); | 3784 | WREG32(RV770_CG_THERMAL_INT, thermal_int); |
3785 | } | 3785 | } |
3786 | 3786 | ||
3787 | /* posting read */ | ||
3788 | RREG32(R_000E50_SRBM_STATUS); | ||
3789 | |||
3787 | return 0; | 3790 | return 0; |
3788 | } | 3791 | } |
3789 | 3792 | ||
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 843b65f46ece..fa2154493cf1 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) | |||
188 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 188 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
189 | radeon_crtc = to_radeon_crtc(crtc); | 189 | radeon_crtc = to_radeon_crtc(crtc); |
190 | if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { | 190 | if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { |
191 | vrefresh = radeon_crtc->hw_mode.vrefresh; | 191 | vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode); |
192 | break; | 192 | break; |
193 | } | 193 | } |
194 | } | 194 | } |
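The r600_dpm change above derives the refresh rate from the mode timings with drm_mode_vrefresh() instead of trusting a cached vrefresh field. The core of that computation, in a simplified standalone form that ignores interlace, doublescan and vscan adjustments, is the pixel clock scaled against the total pixel count per frame:

    #include <stdio.h>

    /* Simplified refresh calculation in the spirit of drm_mode_vrefresh(). */
    struct mode {
        unsigned int clock;   /* pixel clock, kHz */
        unsigned int htotal;
        unsigned int vtotal;
    };

    static unsigned int mode_vrefresh(const struct mode *m)
    {
        unsigned int pixels = m->htotal * m->vtotal;

        if (!pixels)
            return 0;
        /* clock is in kHz, so scale to Hz and round to the nearest frame rate */
        return (m->clock * 1000u + pixels / 2) / pixels;
    }

    int main(void)
    {
        struct mode m = { .clock = 148500, .htotal = 2200, .vtotal = 1125 };
        printf("vrefresh = %u Hz\n", mode_vrefresh(&m));  /* ~60 */
        return 0;
    }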
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 62c91ed669ce..dd6606b8e23c 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -476,17 +476,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
476 | if (!dig || !dig->afmt) | 476 | if (!dig || !dig->afmt) |
477 | return; | 477 | return; |
478 | 478 | ||
479 | /* Silent, r600_hdmi_enable will raise WARN for us */ | ||
480 | if (enable && dig->afmt->enabled) | ||
481 | return; | ||
482 | if (!enable && !dig->afmt->enabled) | ||
483 | return; | ||
484 | |||
485 | if (!enable && dig->afmt->pin) { | ||
486 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
487 | dig->afmt->pin = NULL; | ||
488 | } | ||
489 | |||
490 | /* Older chipsets require setting HDMI and routing manually */ | 479 | /* Older chipsets require setting HDMI and routing manually */ |
491 | if (!ASIC_IS_DCE3(rdev)) { | 480 | if (!ASIC_IS_DCE3(rdev)) { |
492 | if (enable) | 481 | if (enable) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5587603b4a89..33d5a4f4eebd 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1565,6 +1565,7 @@ struct radeon_dpm { | |||
1565 | int new_active_crtc_count; | 1565 | int new_active_crtc_count; |
1566 | u32 current_active_crtcs; | 1566 | u32 current_active_crtcs; |
1567 | int current_active_crtc_count; | 1567 | int current_active_crtc_count; |
1568 | bool single_display; | ||
1568 | struct radeon_dpm_dynamic_state dyn_state; | 1569 | struct radeon_dpm_dynamic_state dyn_state; |
1569 | struct radeon_dpm_fan fan; | 1570 | struct radeon_dpm_fan fan; |
1570 | u32 tdp_limit; | 1571 | u32 tdp_limit; |
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index a3ceef6d9632..b21ef69a34ac 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c | |||
@@ -101,8 +101,8 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
101 | struct drm_display_mode *mode); | 101 | struct drm_display_mode *mode); |
102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); | 102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); |
103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); | 103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); |
104 | void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); | 104 | void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); |
105 | void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); | 105 | void dce6_dp_enable(struct drm_encoder *encoder, bool enable); |
106 | 106 | ||
107 | static const u32 pin_offsets[7] = | 107 | static const u32 pin_offsets[7] = |
108 | { | 108 | { |
@@ -210,7 +210,7 @@ static struct radeon_audio_funcs dce4_dp_funcs = { | |||
210 | .set_avi_packet = evergreen_set_avi_packet, | 210 | .set_avi_packet = evergreen_set_avi_packet, |
211 | .set_audio_packet = dce4_set_audio_packet, | 211 | .set_audio_packet = dce4_set_audio_packet, |
212 | .mode_set = radeon_audio_dp_mode_set, | 212 | .mode_set = radeon_audio_dp_mode_set, |
213 | .dpms = evergreen_enable_dp_audio_packets, | 213 | .dpms = evergreen_dp_enable, |
214 | }; | 214 | }; |
215 | 215 | ||
216 | static struct radeon_audio_funcs dce6_hdmi_funcs = { | 216 | static struct radeon_audio_funcs dce6_hdmi_funcs = { |
@@ -240,7 +240,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = { | |||
240 | .set_avi_packet = evergreen_set_avi_packet, | 240 | .set_avi_packet = evergreen_set_avi_packet, |
241 | .set_audio_packet = dce4_set_audio_packet, | 241 | .set_audio_packet = dce4_set_audio_packet, |
242 | .mode_set = radeon_audio_dp_mode_set, | 242 | .mode_set = radeon_audio_dp_mode_set, |
243 | .dpms = dce6_enable_dp_audio_packets, | 243 | .dpms = dce6_dp_enable, |
244 | }; | 244 | }; |
245 | 245 | ||
246 | static void radeon_audio_interface_init(struct radeon_device *rdev) | 246 | static void radeon_audio_interface_init(struct radeon_device *rdev) |
@@ -452,7 +452,7 @@ void radeon_audio_enable(struct radeon_device *rdev, | |||
452 | } | 452 | } |
453 | 453 | ||
454 | void radeon_audio_detect(struct drm_connector *connector, | 454 | void radeon_audio_detect(struct drm_connector *connector, |
455 | enum drm_connector_status status) | 455 | enum drm_connector_status status) |
456 | { | 456 | { |
457 | struct radeon_device *rdev; | 457 | struct radeon_device *rdev; |
458 | struct radeon_encoder *radeon_encoder; | 458 | struct radeon_encoder *radeon_encoder; |
@@ -483,14 +483,11 @@ void radeon_audio_detect(struct drm_connector *connector, | |||
483 | else | 483 | else |
484 | radeon_encoder->audio = rdev->audio.hdmi_funcs; | 484 | radeon_encoder->audio = rdev->audio.hdmi_funcs; |
485 | 485 | ||
486 | radeon_audio_write_speaker_allocation(connector->encoder); | 486 | dig->afmt->pin = radeon_audio_get_pin(connector->encoder); |
487 | radeon_audio_write_sad_regs(connector->encoder); | ||
488 | if (connector->encoder->crtc) | ||
489 | radeon_audio_write_latency_fields(connector->encoder, | ||
490 | &connector->encoder->crtc->mode); | ||
491 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | 487 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); |
492 | } else { | 488 | } else { |
493 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 489 | radeon_audio_enable(rdev, dig->afmt->pin, 0); |
490 | dig->afmt->pin = NULL; | ||
494 | } | 491 | } |
495 | } | 492 | } |
496 | 493 | ||
@@ -694,23 +691,22 @@ static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute) | |||
694 | * update the info frames with the data from the current display mode | 691 | * update the info frames with the data from the current display mode |
695 | */ | 692 | */ |
696 | static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, | 693 | static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, |
697 | struct drm_display_mode *mode) | 694 | struct drm_display_mode *mode) |
698 | { | 695 | { |
699 | struct radeon_device *rdev = encoder->dev->dev_private; | ||
700 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 696 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
701 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 697 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
702 | 698 | ||
703 | if (!dig || !dig->afmt) | 699 | if (!dig || !dig->afmt) |
704 | return; | 700 | return; |
705 | 701 | ||
706 | /* disable audio prior to setting up hw */ | 702 | radeon_audio_set_mute(encoder, true); |
707 | dig->afmt->pin = radeon_audio_get_pin(encoder); | ||
708 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
709 | 703 | ||
704 | radeon_audio_write_speaker_allocation(encoder); | ||
705 | radeon_audio_write_sad_regs(encoder); | ||
706 | radeon_audio_write_latency_fields(encoder, mode); | ||
710 | radeon_audio_set_dto(encoder, mode->clock); | 707 | radeon_audio_set_dto(encoder, mode->clock); |
711 | radeon_audio_set_vbi_packet(encoder); | 708 | radeon_audio_set_vbi_packet(encoder); |
712 | radeon_hdmi_set_color_depth(encoder); | 709 | radeon_hdmi_set_color_depth(encoder); |
713 | radeon_audio_set_mute(encoder, false); | ||
714 | radeon_audio_update_acr(encoder, mode->clock); | 710 | radeon_audio_update_acr(encoder, mode->clock); |
715 | radeon_audio_set_audio_packet(encoder); | 711 | radeon_audio_set_audio_packet(encoder); |
716 | radeon_audio_select_pin(encoder); | 712 | radeon_audio_select_pin(encoder); |
@@ -718,8 +714,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, | |||
718 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) | 714 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) |
719 | return; | 715 | return; |
720 | 716 | ||
721 | /* enable audio after setting up hw */ | 717 | radeon_audio_set_mute(encoder, false); |
722 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
723 | } | 718 | } |
724 | 719 | ||
725 | static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | 720 | static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, |
@@ -729,23 +724,26 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
729 | struct radeon_device *rdev = dev->dev_private; | 724 | struct radeon_device *rdev = dev->dev_private; |
730 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 725 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
731 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 726 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
727 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
728 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
729 | struct radeon_connector_atom_dig *dig_connector = | ||
730 | radeon_connector->con_priv; | ||
732 | 731 | ||
733 | if (!dig || !dig->afmt) | 732 | if (!dig || !dig->afmt) |
734 | return; | 733 | return; |
735 | 734 | ||
736 | /* disable audio prior to setting up hw */ | 735 | radeon_audio_write_speaker_allocation(encoder); |
737 | dig->afmt->pin = radeon_audio_get_pin(encoder); | 736 | radeon_audio_write_sad_regs(encoder); |
738 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 737 | radeon_audio_write_latency_fields(encoder, mode); |
739 | 738 | if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) | |
740 | radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); | 739 | radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); |
740 | else | ||
741 | radeon_audio_set_dto(encoder, dig_connector->dp_clock); | ||
741 | radeon_audio_set_audio_packet(encoder); | 742 | radeon_audio_set_audio_packet(encoder); |
742 | radeon_audio_select_pin(encoder); | 743 | radeon_audio_select_pin(encoder); |
743 | 744 | ||
744 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) | 745 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) |
745 | return; | 746 | return; |
746 | |||
747 | /* enable audio after setting up hw */ | ||
748 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
749 | } | 747 | } |
750 | 748 | ||
751 | void radeon_audio_mode_set(struct drm_encoder *encoder, | 749 | void radeon_audio_mode_set(struct drm_encoder *encoder, |
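In the reworked DP mode_set above, the audio DTO is fed from the display clock when an external DP reference clock is present or on DCE5 and newer, and from the DP link clock otherwise. A compact sketch of that selection; the struct and its fields are illustrative, and it assumes default_dispclk is stored in 10 kHz units, as the *10 in the hunk suggests:

    #include <stdio.h>
    #include <stdbool.h>

    /* Pick the clock the DP audio DTO is programmed against, mirroring the
     * branch added in radeon_audio_dp_mode_set(). */
    struct chip {
        bool has_dp_extclk;            /* rdev->clock.dp_extclk != 0 */
        bool is_dce5_or_newer;         /* ASIC_IS_DCE5(rdev) */
        unsigned int default_dispclk;  /* assumed to be in 10 kHz units */
    };

    static unsigned int dp_audio_dto_clock(const struct chip *c,
                                           unsigned int dp_clock_khz)
    {
        if (c->has_dp_extclk || c->is_dce5_or_newer)
            return c->default_dispclk * 10;   /* display clock, in kHz */
        return dp_clock_khz;                  /* use the link rate directly */
    }

    int main(void)
    {
        struct chip c = { .has_dp_extclk = false, .is_dce5_or_newer = true,
                          .default_dispclk = 60000 };
        printf("dto clock = %u kHz\n", dp_audio_dto_clock(&c, 270000));
        return 0;
    }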
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 63ccb8fa799c..d27e4ccb848c 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) | |||
76 | 76 | ||
77 | static bool radeon_read_bios(struct radeon_device *rdev) | 77 | static bool radeon_read_bios(struct radeon_device *rdev) |
78 | { | 78 | { |
79 | uint8_t __iomem *bios; | 79 | uint8_t __iomem *bios, val1, val2; |
80 | size_t size; | 80 | size_t size; |
81 | 81 | ||
82 | rdev->bios = NULL; | 82 | rdev->bios = NULL; |
@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev) | |||
86 | return false; | 86 | return false; |
87 | } | 87 | } |
88 | 88 | ||
89 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | 89 | val1 = readb(&bios[0]); |
90 | val2 = readb(&bios[1]); | ||
91 | |||
92 | if (size == 0 || val1 != 0x55 || val2 != 0xaa) { | ||
90 | pci_unmap_rom(rdev->pdev, bios); | 93 | pci_unmap_rom(rdev->pdev, bios); |
91 | return false; | 94 | return false; |
92 | } | 95 | } |
93 | rdev->bios = kmemdup(bios, size, GFP_KERNEL); | 96 | rdev->bios = kzalloc(size, GFP_KERNEL); |
94 | if (rdev->bios == NULL) { | 97 | if (rdev->bios == NULL) { |
95 | pci_unmap_rom(rdev->pdev, bios); | 98 | pci_unmap_rom(rdev->pdev, bios); |
96 | return false; | 99 | return false; |
97 | } | 100 | } |
101 | memcpy_fromio(rdev->bios, bios, size); | ||
98 | pci_unmap_rom(rdev->pdev, bios); | 102 | pci_unmap_rom(rdev->pdev, bios); |
99 | return true; | 103 | return true; |
100 | } | 104 | } |
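The radeon_bios fix above stops dereferencing the __iomem ROM mapping directly: the signature bytes are fetched with readb() and the image is copied with memcpy_fromio() into a normal kzalloc() buffer instead of kmemdup(). A condensed kernel-style sketch of that pattern; copy_rom is an illustrative helper name, not a driver function:

    #include <linux/pci.h>
    #include <linux/slab.h>
    #include <linux/io.h>

    /* Read an expansion ROM through its __iomem mapping: individual bytes
     * via readb(), the bulk copy via memcpy_fromio(). */
    static void *copy_rom(struct pci_dev *pdev, size_t *out_size)
    {
        void __iomem *rom;
        u8 __iomem *p;
        size_t size;
        void *copy = NULL;

        rom = pci_map_rom(pdev, &size);
        if (!rom)
            return NULL;
        p = rom;

        /* 0x55 0xAA option-ROM signature, fetched through the io accessors */
        if (size >= 2 && readb(&p[0]) == 0x55 && readb(&p[1]) == 0xaa) {
            copy = kzalloc(size, GFP_KERNEL);
            if (copy) {
                memcpy_fromio(copy, rom, size);
                *out_size = size;
            }
        }

        pci_unmap_rom(pdev, rom);
        return copy;
    }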
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index c830863bc98a..4d0f96cc3da4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
256 | u32 ring = RADEON_CS_RING_GFX; | 256 | u32 ring = RADEON_CS_RING_GFX; |
257 | s32 priority = 0; | 257 | s32 priority = 0; |
258 | 258 | ||
259 | INIT_LIST_HEAD(&p->validated); | ||
260 | |||
259 | if (!cs->num_chunks) { | 261 | if (!cs->num_chunks) { |
260 | return 0; | 262 | return 0; |
261 | } | 263 | } |
264 | |||
262 | /* get chunks */ | 265 | /* get chunks */ |
263 | INIT_LIST_HEAD(&p->validated); | ||
264 | p->idx = 0; | 266 | p->idx = 0; |
265 | p->ib.sa_bo = NULL; | 267 | p->ib.sa_bo = NULL; |
266 | p->const_ib.sa_bo = NULL; | 268 | p->const_ib.sa_bo = NULL; |
@@ -715,6 +717,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p, | |||
715 | struct radeon_cs_chunk *ib_chunk = p->chunk_ib; | 717 | struct radeon_cs_chunk *ib_chunk = p->chunk_ib; |
716 | struct radeon_device *rdev = p->rdev; | 718 | struct radeon_device *rdev = p->rdev; |
717 | uint32_t header; | 719 | uint32_t header; |
720 | int ret = 0, i; | ||
718 | 721 | ||
719 | if (idx >= ib_chunk->length_dw) { | 722 | if (idx >= ib_chunk->length_dw) { |
720 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n", | 723 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n", |
@@ -743,14 +746,25 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p, | |||
743 | break; | 746 | break; |
744 | default: | 747 | default: |
745 | DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); | 748 | DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); |
746 | return -EINVAL; | 749 | ret = -EINVAL; |
750 | goto dump_ib; | ||
747 | } | 751 | } |
748 | if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { | 752 | if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { |
749 | DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", | 753 | DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", |
750 | pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); | 754 | pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); |
751 | return -EINVAL; | 755 | ret = -EINVAL; |
756 | goto dump_ib; | ||
752 | } | 757 | } |
753 | return 0; | 758 | return 0; |
759 | |||
760 | dump_ib: | ||
761 | for (i = 0; i < ib_chunk->length_dw; i++) { | ||
762 | if (i == idx) | ||
763 | printk("\t0x%08x <---\n", radeon_get_ib_value(p, i)); | ||
764 | else | ||
765 | printk("\t0x%08x\n", radeon_get_ib_value(p, i)); | ||
766 | } | ||
767 | return ret; | ||
754 | } | 768 | } |
755 | 769 | ||
756 | /** | 770 | /** |
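On a malformed packet the CS parser now dumps the whole indirect buffer with a marker at the offending dword instead of just returning the error. The printing loop is easy to reuse; here is a standalone version of the same idea:

    #include <stdio.h>
    #include <stdint.h>

    /* Print every dword of a command buffer, flagging the one that failed
     * to parse, in the same spirit as the dump_ib loop above. */
    static void dump_buffer(const uint32_t *buf, unsigned int len,
                            unsigned int bad_idx)
    {
        unsigned int i;

        for (i = 0; i < len; i++) {
            if (i == bad_idx)
                printf("\t0x%08x <---\n", buf[i]);
            else
                printf("\t0x%08x\n", buf[i]);
        }
    }

    int main(void)
    {
        uint32_t ib[] = { 0xc0012800, 0x00000042, 0xdeadbeef, 0xc0001000 };
        dump_buffer(ib, 4, 2);
        return 0;
    }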
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 6b670b0bc47b..3a297037cc17 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, | |||
179 | (rdev->pdev->subsystem_vendor == 0x1734) && | 179 | (rdev->pdev->subsystem_vendor == 0x1734) && |
180 | (rdev->pdev->subsystem_device == 0x1107)) | 180 | (rdev->pdev->subsystem_device == 0x1107)) |
181 | use_bl = false; | 181 | use_bl = false; |
182 | /* Older PPC macs use on-GPU backlight controller */ | ||
183 | #ifndef CONFIG_PPC_PMAC | ||
182 | /* disable native backlight control on older asics */ | 184 | /* disable native backlight control on older asics */ |
183 | else if (rdev->family < CHIP_R600) | 185 | else if (rdev->family < CHIP_R600) |
184 | use_bl = false; | 186 | use_bl = false; |
187 | #endif | ||
185 | else | 188 | else |
186 | use_bl = true; | 189 | use_bl = true; |
187 | } | 190 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index d13d1b5a859f..df09ca7c4889 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence) | |||
1030 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); | 1030 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | struct radeon_wait_cb { | ||
1034 | struct fence_cb base; | ||
1035 | struct task_struct *task; | ||
1036 | }; | ||
1037 | |||
1038 | static void | ||
1039 | radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) | ||
1040 | { | ||
1041 | struct radeon_wait_cb *wait = | ||
1042 | container_of(cb, struct radeon_wait_cb, base); | ||
1043 | |||
1044 | wake_up_process(wait->task); | ||
1045 | } | ||
1046 | |||
1033 | static signed long radeon_fence_default_wait(struct fence *f, bool intr, | 1047 | static signed long radeon_fence_default_wait(struct fence *f, bool intr, |
1034 | signed long t) | 1048 | signed long t) |
1035 | { | 1049 | { |
1036 | struct radeon_fence *fence = to_radeon_fence(f); | 1050 | struct radeon_fence *fence = to_radeon_fence(f); |
1037 | struct radeon_device *rdev = fence->rdev; | 1051 | struct radeon_device *rdev = fence->rdev; |
1038 | bool signaled; | 1052 | struct radeon_wait_cb cb; |
1039 | 1053 | ||
1040 | fence_enable_sw_signaling(&fence->base); | 1054 | cb.task = current; |
1041 | 1055 | ||
1042 | /* | 1056 | if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) |
1043 | * This function has to return -EDEADLK, but cannot hold | 1057 | return t; |
1044 | * exclusive_lock during the wait because some callers | 1058 | |
1045 | * may already hold it. This means checking needs_reset without | 1059 | while (t > 0) { |
1046 | * lock, and not fiddling with any gpu internals. | 1060 | if (intr) |
1047 | * | 1061 | set_current_state(TASK_INTERRUPTIBLE); |
1048 | * The callback installed with fence_enable_sw_signaling will | 1062 | else |
1049 | * run before our wait_event_*timeout call, so we will see | 1063 | set_current_state(TASK_UNINTERRUPTIBLE); |
1050 | * both the signaled fence and the changes to needs_reset. | 1064 | |
1051 | */ | 1065 | /* |
1066 | * radeon_test_signaled must be called after | ||
1067 | * set_current_state to prevent a race with wake_up_process | ||
1068 | */ | ||
1069 | if (radeon_test_signaled(fence)) | ||
1070 | break; | ||
1071 | |||
1072 | if (rdev->needs_reset) { | ||
1073 | t = -EDEADLK; | ||
1074 | break; | ||
1075 | } | ||
1076 | |||
1077 | t = schedule_timeout(t); | ||
1078 | |||
1079 | if (t > 0 && intr && signal_pending(current)) | ||
1080 | t = -ERESTARTSYS; | ||
1081 | } | ||
1082 | |||
1083 | __set_current_state(TASK_RUNNING); | ||
1084 | fence_remove_callback(f, &cb.base); | ||
1052 | 1085 | ||
1053 | if (intr) | ||
1054 | t = wait_event_interruptible_timeout(rdev->fence_queue, | ||
1055 | ((signaled = radeon_test_signaled(fence)) || | ||
1056 | rdev->needs_reset), t); | ||
1057 | else | ||
1058 | t = wait_event_timeout(rdev->fence_queue, | ||
1059 | ((signaled = radeon_test_signaled(fence)) || | ||
1060 | rdev->needs_reset), t); | ||
1061 | |||
1062 | if (t > 0 && !signaled) | ||
1063 | return -EDEADLK; | ||
1064 | return t; | 1086 | return t; |
1065 | } | 1087 | } |
1066 | 1088 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 061eaa9c19c7..122eb5693ba1 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c | |||
@@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev) | |||
153 | .compute_vmid_bitmap = 0xFF00, | 153 | .compute_vmid_bitmap = 0xFF00, |
154 | 154 | ||
155 | .first_compute_pipe = 1, | 155 | .first_compute_pipe = 1, |
156 | .compute_pipe_count = 8 - 1, | 156 | .compute_pipe_count = 4 - 1, |
157 | }; | 157 | }; |
158 | 158 | ||
159 | radeon_doorbell_get_kfd_info(rdev, | 159 | radeon_doorbell_get_kfd_info(rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index a69bd441dd2d..572b4dbec186 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c | |||
@@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, | |||
122 | it = interval_tree_iter_first(&rmn->objects, start, end); | 122 | it = interval_tree_iter_first(&rmn->objects, start, end); |
123 | while (it) { | 123 | while (it) { |
124 | struct radeon_bo *bo; | 124 | struct radeon_bo *bo; |
125 | struct fence *fence; | ||
126 | int r; | 125 | int r; |
127 | 126 | ||
128 | bo = container_of(it, struct radeon_bo, mn_it); | 127 | bo = container_of(it, struct radeon_bo, mn_it); |
@@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, | |||
134 | continue; | 133 | continue; |
135 | } | 134 | } |
136 | 135 | ||
137 | fence = reservation_object_get_excl(bo->tbo.resv); | 136 | r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, |
138 | if (fence) { | 137 | false, MAX_SCHEDULE_TIMEOUT); |
139 | r = radeon_fence_wait((struct radeon_fence *)fence, false); | 138 | if (r) |
140 | if (r) | 139 | DRM_ERROR("(%d) failed to wait for user bo\n", r); |
141 | DRM_ERROR("(%d) failed to wait for user bo\n", r); | ||
142 | } | ||
143 | 140 | ||
144 | radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); | 141 | radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); |
145 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 142 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 43e09942823e..318165d4855c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
173 | else | 173 | else |
174 | rbo->placements[i].lpfn = 0; | 174 | rbo->placements[i].lpfn = 0; |
175 | } | 175 | } |
176 | |||
177 | /* | ||
178 | * Use two-ended allocation depending on the buffer size to | ||
179 | * improve fragmentation quality. | ||
180 | * 512kb was measured as the most optimal number. | ||
181 | */ | ||
182 | if (rbo->tbo.mem.size > 512 * 1024) { | ||
183 | for (i = 0; i < c; i++) { | ||
184 | rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; | ||
185 | } | ||
186 | } | ||
187 | } | 176 | } |
188 | 177 | ||
189 | int radeon_bo_create(struct radeon_device *rdev, | 178 | int radeon_bo_create(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 9f758d39420d..c1ba83a8dd8c 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work) | |||
837 | radeon_pm_compute_clocks(rdev); | 837 | radeon_pm_compute_clocks(rdev); |
838 | } | 838 | } |
839 | 839 | ||
840 | static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | 840 | static bool radeon_dpm_single_display(struct radeon_device *rdev) |
841 | enum radeon_pm_state_type dpm_state) | ||
842 | { | 841 | { |
843 | int i; | ||
844 | struct radeon_ps *ps; | ||
845 | u32 ui_class; | ||
846 | bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? | 842 | bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? |
847 | true : false; | 843 | true : false; |
848 | 844 | ||
@@ -852,6 +848,23 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | |||
852 | single_display = false; | 848 | single_display = false; |
853 | } | 849 | } |
854 | 850 | ||
851 | /* 120hz tends to be problematic even if they are under the | ||
852 | * vblank limit. | ||
853 | */ | ||
854 | if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120)) | ||
855 | single_display = false; | ||
856 | |||
857 | return single_display; | ||
858 | } | ||
859 | |||
860 | static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | ||
861 | enum radeon_pm_state_type dpm_state) | ||
862 | { | ||
863 | int i; | ||
864 | struct radeon_ps *ps; | ||
865 | u32 ui_class; | ||
866 | bool single_display = radeon_dpm_single_display(rdev); | ||
867 | |||
855 | /* certain older asics have a separate 3D performance state, | 868 | /* certain older asics have a separate 3D performance state, |
856 | * so try that first if the user selected performance | 869 | * so try that first if the user selected performance |
857 | */ | 870 | */ |
@@ -977,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) | |||
977 | struct radeon_ps *ps; | 990 | struct radeon_ps *ps; |
978 | enum radeon_pm_state_type dpm_state; | 991 | enum radeon_pm_state_type dpm_state; |
979 | int ret; | 992 | int ret; |
993 | bool single_display = radeon_dpm_single_display(rdev); | ||
980 | 994 | ||
981 | /* if dpm init failed */ | 995 | /* if dpm init failed */ |
982 | if (!rdev->pm.dpm_enabled) | 996 | if (!rdev->pm.dpm_enabled) |
@@ -1001,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) | |||
1001 | /* vce just modifies an existing state so force a change */ | 1015 | /* vce just modifies an existing state so force a change */ |
1002 | if (ps->vce_active != rdev->pm.dpm.vce_active) | 1016 | if (ps->vce_active != rdev->pm.dpm.vce_active) |
1003 | goto force; | 1017 | goto force; |
1018 | /* user has made a display change (such as timing) */ | ||
1019 | if (rdev->pm.dpm.single_display != single_display) | ||
1020 | goto force; | ||
1004 | if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { | 1021 | if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { |
1005 | /* for pre-BTC and APUs if the num crtcs changed but state is the same, | 1022 | /* for pre-BTC and APUs if the num crtcs changed but state is the same, |
1006 | * all we need to do is update the display configuration. | 1023 | * all we need to do is update the display configuration. |
@@ -1063,6 +1080,7 @@ force: | |||
1063 | 1080 | ||
1064 | rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; | 1081 | rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; |
1065 | rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; | 1082 | rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; |
1083 | rdev->pm.dpm.single_display = single_display; | ||
1066 | 1084 | ||
1067 | /* wait for the rings to drain */ | 1085 | /* wait for the rings to drain */ |
1068 | for (i = 0; i < RADEON_NUM_RINGS; i++) { | 1086 | for (i = 0; i < RADEON_NUM_RINGS; i++) { |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 2456f69efd23..8c7872339c2a 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) | |||
495 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); | 495 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); |
496 | seq_printf(m, "%u dwords in ring\n", count); | 496 | seq_printf(m, "%u dwords in ring\n", count); |
497 | 497 | ||
498 | if (!ring->ready) | 498 | if (!ring->ring) |
499 | return 0; | 499 | return 0; |
500 | 500 | ||
501 | /* print 8 dw before current rptr as often it's the last executed | 501 | /* print 8 dw before current rptr as often it's the last executed |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index d02aa1d0f588..b292aca0f342 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) | |||
598 | enum dma_data_direction direction = write ? | 598 | enum dma_data_direction direction = write ? |
599 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | 599 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; |
600 | 600 | ||
601 | /* double check that we don't free the table twice */ | ||
602 | if (!ttm->sg->sgl) | ||
603 | return; | ||
604 | |||
601 | /* free the sg table and pages again */ | 605 | /* free the sg table and pages again */ |
602 | dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); | 606 | dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); |
603 | 607 | ||
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d81182ad53ec..97a904835759 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -694,6 +694,10 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
694 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); | 694 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); |
695 | if (ASIC_IS_DCE2(rdev)) | 695 | if (ASIC_IS_DCE2(rdev)) |
696 | WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); | 696 | WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); |
697 | |||
698 | /* posting read */ | ||
699 | RREG32(R_000040_GEN_INT_CNTL); | ||
700 | |||
697 | return 0; | 701 | return 0; |
698 | } | 702 | } |
699 | 703 | ||
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 73107fe9e46f..a7fb2735d4a9 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -3162,6 +3162,8 @@ static void si_gpu_init(struct radeon_device *rdev) | |||
3162 | } | 3162 | } |
3163 | 3163 | ||
3164 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | 3164 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
3165 | WREG32(SRBM_INT_CNTL, 1); | ||
3166 | WREG32(SRBM_INT_ACK, 1); | ||
3165 | 3167 | ||
3166 | evergreen_fix_pci_max_read_req_size(rdev); | 3168 | evergreen_fix_pci_max_read_req_size(rdev); |
3167 | 3169 | ||
@@ -4699,12 +4701,6 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) | |||
4699 | switch (pkt.type) { | 4701 | switch (pkt.type) { |
4700 | case RADEON_PACKET_TYPE0: | 4702 | case RADEON_PACKET_TYPE0: |
4701 | dev_err(rdev->dev, "Packet0 not allowed!\n"); | 4703 | dev_err(rdev->dev, "Packet0 not allowed!\n"); |
4702 | for (i = 0; i < ib->length_dw; i++) { | ||
4703 | if (i == idx) | ||
4704 | printk("\t0x%08x <---\n", ib->ptr[i]); | ||
4705 | else | ||
4706 | printk("\t0x%08x\n", ib->ptr[i]); | ||
4707 | } | ||
4708 | ret = -EINVAL; | 4704 | ret = -EINVAL; |
4709 | break; | 4705 | break; |
4710 | case RADEON_PACKET_TYPE2: | 4706 | case RADEON_PACKET_TYPE2: |
@@ -4736,8 +4732,15 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) | |||
4736 | ret = -EINVAL; | 4732 | ret = -EINVAL; |
4737 | break; | 4733 | break; |
4738 | } | 4734 | } |
4739 | if (ret) | 4735 | if (ret) { |
4736 | for (i = 0; i < ib->length_dw; i++) { | ||
4737 | if (i == idx) | ||
4738 | printk("\t0x%08x <---\n", ib->ptr[i]); | ||
4739 | else | ||
4740 | printk("\t0x%08x\n", ib->ptr[i]); | ||
4741 | } | ||
4740 | break; | 4742 | break; |
4743 | } | ||
4741 | } while (idx < ib->length_dw); | 4744 | } while (idx < ib->length_dw); |
4742 | 4745 | ||
4743 | return ret; | 4746 | return ret; |
@@ -5910,6 +5913,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) | |||
5910 | tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; | 5913 | tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; |
5911 | WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); | 5914 | WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); |
5912 | WREG32(GRBM_INT_CNTL, 0); | 5915 | WREG32(GRBM_INT_CNTL, 0); |
5916 | WREG32(SRBM_INT_CNTL, 0); | ||
5913 | if (rdev->num_crtc >= 2) { | 5917 | if (rdev->num_crtc >= 2) { |
5914 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | 5918 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
5915 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | 5919 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
@@ -6199,6 +6203,9 @@ int si_irq_set(struct radeon_device *rdev) | |||
6199 | 6203 | ||
6200 | WREG32(CG_THERMAL_INT, thermal_int); | 6204 | WREG32(CG_THERMAL_INT, thermal_int); |
6201 | 6205 | ||
6206 | /* posting read */ | ||
6207 | RREG32(SRBM_STATUS); | ||
6208 | |||
6202 | return 0; | 6209 | return 0; |
6203 | } | 6210 | } |
6204 | 6211 | ||
@@ -6609,6 +6616,10 @@ restart_ih: | |||
6609 | break; | 6616 | break; |
6610 | } | 6617 | } |
6611 | break; | 6618 | break; |
6619 | case 96: | ||
6620 | DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); | ||
6621 | WREG32(SRBM_INT_ACK, 0x1); | ||
6622 | break; | ||
6612 | case 124: /* UVD */ | 6623 | case 124: /* UVD */ |
6613 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); | 6624 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
6614 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); | 6625 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
@@ -7119,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | |||
7119 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); | 7130 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); |
7120 | 7131 | ||
7121 | if (!vclk || !dclk) { | 7132 | if (!vclk || !dclk) { |
7122 | /* keep the Bypass mode, put PLL to sleep */ | 7133 | /* keep the Bypass mode */ |
7123 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); | ||
7124 | return 0; | 7134 | return 0; |
7125 | } | 7135 | } |
7126 | 7136 | ||
@@ -7136,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | |||
7136 | /* set VCO_MODE to 1 */ | 7146 | /* set VCO_MODE to 1 */ |
7137 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); | 7147 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); |
7138 | 7148 | ||
7139 | /* toggle UPLL_SLEEP to 1 then back to 0 */ | 7149 | /* disable sleep mode */ |
7140 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); | ||
7141 | WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); | 7150 | WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); |
7142 | 7151 | ||
7143 | /* deassert UPLL_RESET */ | 7152 | /* deassert UPLL_RESET */ |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index cbd91d226f3c..99a9835c9f61 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
@@ -358,6 +358,10 @@ | |||
358 | #define CC_SYS_RB_BACKEND_DISABLE 0xe80 | 358 | #define CC_SYS_RB_BACKEND_DISABLE 0xe80 |
359 | #define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 | 359 | #define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 |
360 | 360 | ||
361 | #define SRBM_READ_ERROR 0xE98 | ||
362 | #define SRBM_INT_CNTL 0xEA0 | ||
363 | #define SRBM_INT_ACK 0xEA8 | ||
364 | |||
361 | #define SRBM_STATUS2 0x0EC4 | 365 | #define SRBM_STATUS2 0x0EC4 |
362 | #define DMA_BUSY (1 << 5) | 366 | #define DMA_BUSY (1 << 5) |
363 | #define DMA1_BUSY (1 << 6) | 367 | #define DMA1_BUSY (1 << 6) |
@@ -908,8 +912,8 @@ | |||
908 | 912 | ||
909 | #define DCCG_AUDIO_DTO0_PHASE 0x05b0 | 913 | #define DCCG_AUDIO_DTO0_PHASE 0x05b0 |
910 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 | 914 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 |
911 | #define DCCG_AUDIO_DTO1_PHASE 0x05b8 | 915 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 |
912 | #define DCCG_AUDIO_DTO1_MODULE 0x05bc | 916 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 |
913 | 917 | ||
914 | #define AFMT_AUDIO_SRC_CONTROL 0x713c | 918 | #define AFMT_AUDIO_SRC_CONTROL 0x713c |
915 | #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) | 919 | #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) |
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c index 1ac7bb825a1b..fbbe78fbd087 100644 --- a/drivers/gpu/drm/radeon/vce_v2_0.c +++ b/drivers/gpu/drm/radeon/vce_v2_0.c | |||
@@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev) | |||
156 | WREG32(VCE_LMI_SWAP_CNTL1, 0); | 156 | WREG32(VCE_LMI_SWAP_CNTL1, 0); |
157 | WREG32(VCE_LMI_VM_CTRL, 0); | 157 | WREG32(VCE_LMI_VM_CTRL, 0); |
158 | 158 | ||
159 | WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8); | ||
160 | |||
161 | addr &= 0xff; | ||
159 | size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size); | 162 | size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size); |
160 | WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); | 163 | WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); |
161 | WREG32(VCE_VCPU_CACHE_SIZE0, size); | 164 | WREG32(VCE_VCPU_CACHE_SIZE0, size); |
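The vce_v2_0 fix above programs the upper bits of the firmware address into the 40-bit BAR register (addr >> 8) and keeps only the low byte as the starting offset for the cache windows. A small standalone sketch of that split:

    #include <stdio.h>
    #include <stdint.h>

    /* Split a 40-bit GPU address the way the VCE resume fix does:
     * bits 39:8 go into the 40-bit BAR register, the low 8 bits remain
     * as the base offset for the per-cache window registers. */
    int main(void)
    {
        uint64_t addr   = 0x12345678ABULL;       /* example 40-bit address */
        uint32_t bar    = (uint32_t)(addr >> 8); /* 0x12345678 */
        uint32_t offset = (uint32_t)(addr & 0xff); /* 0xAB */

        printf("BAR=0x%08x offset0=0x%02x\n", bar, offset);
        return 0;
    }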
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 3aaa84ae2681..1a52522f5da7 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c | |||
@@ -997,8 +997,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc) | |||
997 | crtc->state = NULL; | 997 | crtc->state = NULL; |
998 | 998 | ||
999 | state = kzalloc(sizeof(*state), GFP_KERNEL); | 999 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
1000 | if (state) | 1000 | if (state) { |
1001 | crtc->state = &state->base; | 1001 | crtc->state = &state->base; |
1002 | crtc->state->crtc = crtc; | ||
1003 | } | ||
1002 | } | 1004 | } |
1003 | 1005 | ||
1004 | static struct drm_crtc_state * | 1006 | static struct drm_crtc_state * |
@@ -1012,6 +1014,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc) | |||
1012 | return NULL; | 1014 | return NULL; |
1013 | 1015 | ||
1014 | copy->base.mode_changed = false; | 1016 | copy->base.mode_changed = false; |
1017 | copy->base.active_changed = false; | ||
1015 | copy->base.planes_changed = false; | 1018 | copy->base.planes_changed = false; |
1016 | copy->base.event = NULL; | 1019 | copy->base.event = NULL; |
1017 | 1020 | ||
@@ -1227,9 +1230,6 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
1227 | /* program display mode */ | 1230 | /* program display mode */ |
1228 | tegra_dc_set_timings(dc, mode); | 1231 | tegra_dc_set_timings(dc, mode); |
1229 | 1232 | ||
1230 | if (dc->soc->supports_border_color) | ||
1231 | tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); | ||
1232 | |||
1233 | /* interlacing isn't supported yet, so disable it */ | 1233 | /* interlacing isn't supported yet, so disable it */ |
1234 | if (dc->soc->supports_interlacing) { | 1234 | if (dc->soc->supports_interlacing) { |
1235 | value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); | 1235 | value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); |
@@ -1252,42 +1252,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
1252 | 1252 | ||
1253 | static void tegra_crtc_prepare(struct drm_crtc *crtc) | 1253 | static void tegra_crtc_prepare(struct drm_crtc *crtc) |
1254 | { | 1254 | { |
1255 | struct tegra_dc *dc = to_tegra_dc(crtc); | ||
1256 | unsigned int syncpt; | ||
1257 | unsigned long value; | ||
1258 | |||
1259 | drm_crtc_vblank_off(crtc); | 1255 | drm_crtc_vblank_off(crtc); |
1260 | |||
1261 | if (dc->pipe) | ||
1262 | syncpt = SYNCPT_VBLANK1; | ||
1263 | else | ||
1264 | syncpt = SYNCPT_VBLANK0; | ||
1265 | |||
1266 | /* initialize display controller */ | ||
1267 | tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); | ||
1268 | tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC); | ||
1269 | |||
1270 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; | ||
1271 | tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); | ||
1272 | |||
1273 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | | ||
1274 | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; | ||
1275 | tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); | ||
1276 | |||
1277 | /* initialize timer */ | ||
1278 | value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | | ||
1279 | WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); | ||
1280 | tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); | ||
1281 | |||
1282 | value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | | ||
1283 | WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); | ||
1284 | tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); | ||
1285 | |||
1286 | value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; | ||
1287 | tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); | ||
1288 | |||
1289 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; | ||
1290 | tegra_dc_writel(dc, value, DC_CMD_INT_MASK); | ||
1291 | } | 1256 | } |
1292 | 1257 | ||
1293 | static void tegra_crtc_commit(struct drm_crtc *crtc) | 1258 | static void tegra_crtc_commit(struct drm_crtc *crtc) |
@@ -1664,6 +1629,8 @@ static int tegra_dc_init(struct host1x_client *client) | |||
1664 | struct tegra_drm *tegra = drm->dev_private; | 1629 | struct tegra_drm *tegra = drm->dev_private; |
1665 | struct drm_plane *primary = NULL; | 1630 | struct drm_plane *primary = NULL; |
1666 | struct drm_plane *cursor = NULL; | 1631 | struct drm_plane *cursor = NULL; |
1632 | unsigned int syncpt; | ||
1633 | u32 value; | ||
1667 | int err; | 1634 | int err; |
1668 | 1635 | ||
1669 | if (tegra->domain) { | 1636 | if (tegra->domain) { |
@@ -1730,6 +1697,40 @@ static int tegra_dc_init(struct host1x_client *client) | |||
1730 | goto cleanup; | 1697 | goto cleanup; |
1731 | } | 1698 | } |
1732 | 1699 | ||
1700 | /* initialize display controller */ | ||
1701 | if (dc->pipe) | ||
1702 | syncpt = SYNCPT_VBLANK1; | ||
1703 | else | ||
1704 | syncpt = SYNCPT_VBLANK0; | ||
1705 | |||
1706 | tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); | ||
1707 | tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC); | ||
1708 | |||
1709 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; | ||
1710 | tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); | ||
1711 | |||
1712 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | | ||
1713 | WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; | ||
1714 | tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); | ||
1715 | |||
1716 | /* initialize timer */ | ||
1717 | value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | | ||
1718 | WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); | ||
1719 | tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); | ||
1720 | |||
1721 | value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | | ||
1722 | WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); | ||
1723 | tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); | ||
1724 | |||
1725 | value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; | ||
1726 | tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); | ||
1727 | |||
1728 | value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; | ||
1729 | tegra_dc_writel(dc, value, DC_CMD_INT_MASK); | ||
1730 | |||
1731 | if (dc->soc->supports_border_color) | ||
1732 | tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); | ||
1733 | |||
1733 | return 0; | 1734 | return 0; |
1734 | 1735 | ||
1735 | cleanup: | 1736 | cleanup: |
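The tegra/dc.c hunks above move the one-time display-controller programming (syncpoint selection, interrupt type/polarity/mask, memory-priority thresholds, border color) out of tegra_crtc_prepare(), which can run on every modeset, and into tegra_dc_init(), which runs once when the host1x client registers. A minimal sketch of that split, using hypothetical example_* names rather than the real Tegra registers:

/* one-time programming at client registration */
static int example_dc_init(struct example_dc *dc)
{
	unsigned int syncpt = dc->pipe ? EXAMPLE_SYNCPT_VBLANK1
				       : EXAMPLE_SYNCPT_VBLANK0;

	example_writel(dc, 0x100 | syncpt, EXAMPLE_CONT_SYNCPT_VSYNC);
	example_writel(dc, EXAMPLE_VBLANK_INT, EXAMPLE_INT_ENABLE);
	return 0;
}

/* per-modeset hook only touches state that changes with the mode */
static void example_crtc_prepare(struct example_dc *dc)
{
	example_vblank_off(dc);
}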
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 7e06657ae58b..7eaaee74a039 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c | |||
@@ -851,6 +851,14 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder, | |||
851 | h_back_porch = mode->htotal - mode->hsync_end; | 851 | h_back_porch = mode->htotal - mode->hsync_end; |
852 | h_front_porch = mode->hsync_start - mode->hdisplay; | 852 | h_front_porch = mode->hsync_start - mode->hdisplay; |
853 | 853 | ||
854 | err = clk_set_rate(hdmi->clk, pclk); | ||
855 | if (err < 0) { | ||
856 | dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n", | ||
857 | err); | ||
858 | } | ||
859 | |||
860 | DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk)); | ||
861 | |||
854 | /* power up sequence */ | 862 | /* power up sequence */ |
855 | value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); | 863 | value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); |
856 | value &= ~SOR_PLL_PDBG; | 864 | value &= ~SOR_PLL_PDBG; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d395b0bef73b..8d9b7de25613 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) | |||
74 | pr_err(" has_type: %d\n", man->has_type); | 74 | pr_err(" has_type: %d\n", man->has_type); |
75 | pr_err(" use_type: %d\n", man->use_type); | 75 | pr_err(" use_type: %d\n", man->use_type); |
76 | pr_err(" flags: 0x%08X\n", man->flags); | 76 | pr_err(" flags: 0x%08X\n", man->flags); |
77 | pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset); | 77 | pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset); |
78 | pr_err(" size: %llu\n", man->size); | 78 | pr_err(" size: %llu\n", man->size); |
79 | pr_err(" available_caching: 0x%08X\n", man->available_caching); | 79 | pr_err(" available_caching: 0x%08X\n", man->available_caching); |
80 | pr_err(" default_caching: 0x%08X\n", man->default_caching); | 80 | pr_err(" default_caching: 0x%08X\n", man->default_caching); |
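The ttm_bo.c change widens the format from %08lX to %08llX, presumably because gpu_offset is now a 64-bit field; a printk specifier that does not match the argument width is undefined on 32-bit builds and trips -Wformat. A small sketch of the convention, with a hypothetical structure standing in for the TTM one:

#include <linux/printk.h>
#include <linux/types.h>

struct example_mem_type {
	u64 gpu_offset;		/* fixed 64-bit width on every architecture */
	unsigned long size;	/* width differs between 32- and 64-bit builds */
};

static void example_mem_type_debug(const struct example_mem_type *man)
{
	pr_err("  gpu_offset: 0x%08llX\n", man->gpu_offset);	/* u64 -> %ll... */
	pr_err("  size: %lu\n", man->size);			/* unsigned long -> %l... */
}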
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6c6b655defcf..e13b9cbc304e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
725 | goto out_err1; | 725 | goto out_err1; |
726 | } | 726 | } |
727 | 727 | ||
728 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, | ||
729 | (dev_priv->vram_size >> PAGE_SHIFT)); | ||
730 | if (unlikely(ret != 0)) { | ||
731 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); | ||
732 | goto out_err2; | ||
733 | } | ||
734 | |||
735 | dev_priv->has_gmr = true; | ||
736 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | ||
737 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | ||
738 | VMW_PL_GMR) != 0) { | ||
739 | DRM_INFO("No GMR memory available. " | ||
740 | "Graphics memory resources are very limited.\n"); | ||
741 | dev_priv->has_gmr = false; | ||
742 | } | ||
743 | |||
744 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
745 | dev_priv->has_mob = true; | ||
746 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
747 | VMW_PL_MOB) != 0) { | ||
748 | DRM_INFO("No MOB memory available. " | ||
749 | "3D will be disabled.\n"); | ||
750 | dev_priv->has_mob = false; | ||
751 | } | ||
752 | } | ||
753 | |||
754 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, | 728 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, |
755 | dev_priv->mmio_size); | 729 | dev_priv->mmio_size); |
756 | 730 | ||
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
813 | goto out_no_fman; | 787 | goto out_no_fman; |
814 | } | 788 | } |
815 | 789 | ||
790 | |||
791 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, | ||
792 | (dev_priv->vram_size >> PAGE_SHIFT)); | ||
793 | if (unlikely(ret != 0)) { | ||
794 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); | ||
795 | goto out_no_vram; | ||
796 | } | ||
797 | |||
798 | dev_priv->has_gmr = true; | ||
799 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | ||
800 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | ||
801 | VMW_PL_GMR) != 0) { | ||
802 | DRM_INFO("No GMR memory available. " | ||
803 | "Graphics memory resources are very limited.\n"); | ||
804 | dev_priv->has_gmr = false; | ||
805 | } | ||
806 | |||
807 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
808 | dev_priv->has_mob = true; | ||
809 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
810 | VMW_PL_MOB) != 0) { | ||
811 | DRM_INFO("No MOB memory available. " | ||
812 | "3D will be disabled.\n"); | ||
813 | dev_priv->has_mob = false; | ||
814 | } | ||
815 | } | ||
816 | |||
816 | vmw_kms_save_vga(dev_priv); | 817 | vmw_kms_save_vga(dev_priv); |
817 | 818 | ||
818 | /* Start kms and overlay systems, needs fifo. */ | 819 | /* Start kms and overlay systems, needs fifo. */ |
@@ -838,6 +839,12 @@ out_no_fifo: | |||
838 | vmw_kms_close(dev_priv); | 839 | vmw_kms_close(dev_priv); |
839 | out_no_kms: | 840 | out_no_kms: |
840 | vmw_kms_restore_vga(dev_priv); | 841 | vmw_kms_restore_vga(dev_priv); |
842 | if (dev_priv->has_mob) | ||
843 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
844 | if (dev_priv->has_gmr) | ||
845 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
846 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
847 | out_no_vram: | ||
841 | vmw_fence_manager_takedown(dev_priv->fman); | 848 | vmw_fence_manager_takedown(dev_priv->fman); |
842 | out_no_fman: | 849 | out_no_fman: |
843 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 850 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
@@ -853,12 +860,6 @@ out_err4: | |||
853 | iounmap(dev_priv->mmio_virt); | 860 | iounmap(dev_priv->mmio_virt); |
854 | out_err3: | 861 | out_err3: |
855 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 862 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
856 | if (dev_priv->has_mob) | ||
857 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
858 | if (dev_priv->has_gmr) | ||
859 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
860 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
861 | out_err2: | ||
862 | (void)ttm_bo_device_release(&dev_priv->bdev); | 863 | (void)ttm_bo_device_release(&dev_priv->bdev); |
863 | out_err1: | 864 | out_err1: |
864 | vmw_ttm_global_release(dev_priv); | 865 | vmw_ttm_global_release(dev_priv); |
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
887 | } | 888 | } |
888 | vmw_kms_close(dev_priv); | 889 | vmw_kms_close(dev_priv); |
889 | vmw_overlay_close(dev_priv); | 890 | vmw_overlay_close(dev_priv); |
891 | |||
892 | if (dev_priv->has_mob) | ||
893 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
894 | if (dev_priv->has_gmr) | ||
895 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
896 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
897 | |||
890 | vmw_fence_manager_takedown(dev_priv->fman); | 898 | vmw_fence_manager_takedown(dev_priv->fman); |
891 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 899 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
892 | drm_irq_uninstall(dev_priv->dev); | 900 | drm_irq_uninstall(dev_priv->dev); |
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
898 | ttm_object_device_release(&dev_priv->tdev); | 906 | ttm_object_device_release(&dev_priv->tdev); |
899 | iounmap(dev_priv->mmio_virt); | 907 | iounmap(dev_priv->mmio_virt); |
900 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 908 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
901 | if (dev_priv->has_mob) | ||
902 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
903 | if (dev_priv->has_gmr) | ||
904 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
905 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
906 | (void)ttm_bo_device_release(&dev_priv->bdev); | 909 | (void)ttm_bo_device_release(&dev_priv->bdev); |
907 | vmw_ttm_global_release(dev_priv); | 910 | vmw_ttm_global_release(dev_priv); |
908 | 911 | ||
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev) | |||
1235 | { | 1238 | { |
1236 | struct drm_device *dev = pci_get_drvdata(pdev); | 1239 | struct drm_device *dev = pci_get_drvdata(pdev); |
1237 | 1240 | ||
1241 | pci_disable_device(pdev); | ||
1238 | drm_put_dev(dev); | 1242 | drm_put_dev(dev); |
1239 | } | 1243 | } |
1240 | 1244 | ||
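The vmwgfx_drv.c hunks relocate the VRAM/GMR/MOB memory-manager setup to a later point in vmw_driver_load() and add an out_no_vram label, so the error unwind releases resources in exactly the reverse order they were acquired (vmw_driver_unload() now mirrors the same order). A generic sketch of that goto-unwind idiom, with assumed example_* helpers:

static int example_load(struct example_priv *priv)
{
	int ret;

	ret = example_init_a(priv);
	if (ret)
		return ret;

	ret = example_init_b(priv);
	if (ret)
		goto err_undo_a;

	ret = example_init_c(priv);
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	example_fini_b(priv);	/* tear down in reverse order of setup */
err_undo_a:
	example_fini_a(priv);
	return ret;
}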
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 33176d05db35..654c8daeb5ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
890 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 890 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
891 | if (unlikely(ret != 0)) { | 891 | if (unlikely(ret != 0)) { |
892 | DRM_ERROR("Could not find or use MOB buffer.\n"); | 892 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
893 | return -EINVAL; | 893 | ret = -EINVAL; |
894 | goto out_no_reloc; | ||
894 | } | 895 | } |
895 | bo = &vmw_bo->base; | 896 | bo = &vmw_bo->base; |
896 | 897 | ||
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
914 | 915 | ||
915 | out_no_reloc: | 916 | out_no_reloc: |
916 | vmw_dmabuf_unreference(&vmw_bo); | 917 | vmw_dmabuf_unreference(&vmw_bo); |
917 | vmw_bo_p = NULL; | 918 | *vmw_bo_p = NULL; |
918 | return ret; | 919 | return ret; |
919 | } | 920 | } |
920 | 921 | ||
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
951 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 952 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
952 | if (unlikely(ret != 0)) { | 953 | if (unlikely(ret != 0)) { |
953 | DRM_ERROR("Could not find or use GMR region.\n"); | 954 | DRM_ERROR("Could not find or use GMR region.\n"); |
954 | return -EINVAL; | 955 | ret = -EINVAL; |
956 | goto out_no_reloc; | ||
955 | } | 957 | } |
956 | bo = &vmw_bo->base; | 958 | bo = &vmw_bo->base; |
957 | 959 | ||
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
974 | 976 | ||
975 | out_no_reloc: | 977 | out_no_reloc: |
976 | vmw_dmabuf_unreference(&vmw_bo); | 978 | vmw_dmabuf_unreference(&vmw_bo); |
977 | vmw_bo_p = NULL; | 979 | *vmw_bo_p = NULL; |
978 | return ret; | 980 | return ret; |
979 | } | 981 | } |
980 | 982 | ||
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
2780 | NULL, arg->command_size, arg->throttle_us, | 2782 | NULL, arg->command_size, arg->throttle_us, |
2781 | (void __user *)(unsigned long)arg->fence_rep, | 2783 | (void __user *)(unsigned long)arg->fence_rep, |
2782 | NULL); | 2784 | NULL); |
2783 | 2785 | ttm_read_unlock(&dev_priv->reservation_sem); | |
2784 | if (unlikely(ret != 0)) | 2786 | if (unlikely(ret != 0)) |
2785 | goto out_unlock; | 2787 | return ret; |
2786 | 2788 | ||
2787 | vmw_kms_cursor_post_execbuf(dev_priv); | 2789 | vmw_kms_cursor_post_execbuf(dev_priv); |
2788 | 2790 | ||
2789 | out_unlock: | 2791 | return 0; |
2790 | ttm_read_unlock(&dev_priv->reservation_sem); | ||
2791 | return ret; | ||
2792 | } | 2792 | } |
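Two related fixes in vmw_translate_mob_ptr()/vmw_translate_guest_ptr(): a lookup failure now routes through the common out_no_reloc path instead of returning early, and the error path clears *vmw_bo_p rather than assigning to the local parameter vmw_bo_p, so the caller never sees a stale pointer. A hedged sketch of the pattern with hypothetical names (example_put() is assumed to tolerate NULL):

static int example_lookup(struct example_ctx *ctx, u32 handle,
			  struct example_buf **buf_out)
{
	struct example_buf *buf = NULL;
	int ret;

	ret = example_find(ctx, handle, &buf);
	if (ret) {
		ret = -EINVAL;
		goto out_err;
	}

	ret = example_validate(ctx, buf);
	if (ret)
		goto out_err;

	*buf_out = buf;
	return 0;

out_err:
	example_put(buf);	/* drop the reference taken by example_find() */
	*buf_out = NULL;	/* clear the caller's slot, not the local copy */
	return ret;
}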
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8725b79e7847..07cda8cbbddb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2033 | int i; | 2033 | int i; |
2034 | struct drm_mode_config *mode_config = &dev->mode_config; | 2034 | struct drm_mode_config *mode_config = &dev->mode_config; |
2035 | 2035 | ||
2036 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); | ||
2037 | if (unlikely(ret != 0)) | ||
2038 | return ret; | ||
2039 | |||
2040 | if (!arg->num_outputs) { | 2036 | if (!arg->num_outputs) { |
2041 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; | 2037 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; |
2042 | vmw_du_update_layout(dev_priv, 1, &def_rect); | 2038 | vmw_du_update_layout(dev_priv, 1, &def_rect); |
2043 | goto out_unlock; | 2039 | return 0; |
2044 | } | 2040 | } |
2045 | 2041 | ||
2046 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); | 2042 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); |
2047 | rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), | 2043 | rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), |
2048 | GFP_KERNEL); | 2044 | GFP_KERNEL); |
2049 | if (unlikely(!rects)) { | 2045 | if (unlikely(!rects)) |
2050 | ret = -ENOMEM; | 2046 | return -ENOMEM; |
2051 | goto out_unlock; | ||
2052 | } | ||
2053 | 2047 | ||
2054 | user_rects = (void __user *)(unsigned long)arg->rects; | 2048 | user_rects = (void __user *)(unsigned long)arg->rects; |
2055 | ret = copy_from_user(rects, user_rects, rects_size); | 2049 | ret = copy_from_user(rects, user_rects, rects_size); |
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2074 | 2068 | ||
2075 | out_free: | 2069 | out_free: |
2076 | kfree(rects); | 2070 | kfree(rects); |
2077 | out_unlock: | ||
2078 | ttm_read_unlock(&dev_priv->reservation_sem); | ||
2079 | return ret; | 2071 | return ret; |
2080 | } | 2072 | } |
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c index b61d6be97602..3ddfb3d0b64d 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/ipu-v3/ipu-di.c | |||
@@ -459,6 +459,8 @@ static void ipu_di_config_clock(struct ipu_di *di, | |||
459 | 459 | ||
460 | clkrate = clk_get_rate(di->clk_ipu); | 460 | clkrate = clk_get_rate(di->clk_ipu); |
461 | div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock); | 461 | div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock); |
462 | if (div == 0) | ||
463 | div = 1; | ||
462 | rate = clkrate / div; | 464 | rate = clkrate / div; |
463 | 465 | ||
464 | error = rate / (sig->mode.pixelclock / 1000); | 466 | error = rate / (sig->mode.pixelclock / 1000); |
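DIV_ROUND_CLOSEST() returns 0 whenever the requested pixel clock is more than twice the IPU clock, and the result is used as a divisor on the next line, so the ipu-di.c hunk clamps it to 1. A small sketch of the guard:

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST */

static unsigned long example_pick_rate(unsigned long parent_rate,
				       unsigned long target_rate)
{
	unsigned long div = DIV_ROUND_CLOSEST(parent_rate, target_rate);

	if (div == 0)		/* target faster than parent: avoid divide-by-zero */
		div = 1;

	return parent_rate / div;
}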
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index db4fb6e1cc5b..56ce8c2b5530 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1872,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1872 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, | 1872 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, |
1873 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) }, | 1873 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) }, |
1874 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) }, | 1874 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) }, |
1875 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) }, | ||
1875 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) }, | 1876 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) }, |
1876 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, | 1877 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, |
1877 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, | 1878 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, |
@@ -1926,6 +1927,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1926 | #endif | 1927 | #endif |
1927 | #if IS_ENABLED(CONFIG_HID_SAITEK) | 1928 | #if IS_ENABLED(CONFIG_HID_SAITEK) |
1928 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) }, | 1929 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) }, |
1930 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) }, | ||
1929 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) }, | 1931 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) }, |
1930 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, | 1932 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, |
1931 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, | 1933 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, |
@@ -1957,6 +1959,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1957 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, | 1959 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, |
1958 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, | 1960 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, |
1959 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, | 1961 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, |
1962 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, | ||
1960 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, | 1963 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, |
1961 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, | 1964 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, |
1962 | { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, | 1965 | { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 46edb4d3ed28..9c4786759f16 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -586,6 +586,7 @@ | |||
586 | #define USB_VENDOR_ID_LOGITECH 0x046d | 586 | #define USB_VENDOR_ID_LOGITECH 0x046d |
587 | #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e | 587 | #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e |
588 | #define USB_DEVICE_ID_LOGITECH_T651 0xb00c | 588 | #define USB_DEVICE_ID_LOGITECH_T651 0xb00c |
589 | #define USB_DEVICE_ID_LOGITECH_C077 0xc007 | ||
589 | #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 | 590 | #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 |
590 | #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 | 591 | #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 |
591 | #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f | 592 | #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f |
@@ -654,6 +655,7 @@ | |||
654 | #define USB_DEVICE_ID_MS_LK6K 0x00f9 | 655 | #define USB_DEVICE_ID_MS_LK6K 0x00f9 |
655 | #define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701 | 656 | #define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701 |
656 | #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 | 657 | #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 |
658 | #define USB_DEVICE_ID_MS_NE7K 0x071d | ||
657 | #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 | 659 | #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 |
658 | #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c | 660 | #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c |
659 | #define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799 | 661 | #define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799 |
@@ -802,6 +804,7 @@ | |||
802 | #define USB_VENDOR_ID_SAITEK 0x06a3 | 804 | #define USB_VENDOR_ID_SAITEK 0x06a3 |
803 | #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 | 805 | #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 |
804 | #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 | 806 | #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 |
807 | #define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb | ||
805 | #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 | 808 | #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 |
806 | #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 | 809 | #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 |
807 | 810 | ||
@@ -896,6 +899,7 @@ | |||
896 | #define USB_VENDOR_ID_TIVO 0x150a | 899 | #define USB_VENDOR_ID_TIVO 0x150a |
897 | #define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 | 900 | #define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 |
898 | #define USB_DEVICE_ID_TIVO_SLIDE 0x1201 | 901 | #define USB_DEVICE_ID_TIVO_SLIDE 0x1201 |
902 | #define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203 | ||
899 | 903 | ||
900 | #define USB_VENDOR_ID_TOPSEED 0x0766 | 904 | #define USB_VENDOR_ID_TOPSEED 0x0766 |
901 | #define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 | 905 | #define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 |
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index fbaea6eb882e..af935eb198c9 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c | |||
@@ -264,6 +264,8 @@ static const struct hid_device_id ms_devices[] = { | |||
264 | .driver_data = MS_ERGONOMY }, | 264 | .driver_data = MS_ERGONOMY }, |
265 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP), | 265 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP), |
266 | .driver_data = MS_ERGONOMY }, | 266 | .driver_data = MS_ERGONOMY }, |
267 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K), | ||
268 | .driver_data = MS_ERGONOMY }, | ||
267 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K), | 269 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K), |
268 | .driver_data = MS_ERGONOMY | MS_RDESC }, | 270 | .driver_data = MS_ERGONOMY | MS_RDESC }, |
269 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB), | 271 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB), |
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index 5632c54eadf0..a014f21275d8 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c | |||
@@ -177,6 +177,8 @@ static int saitek_event(struct hid_device *hdev, struct hid_field *field, | |||
177 | static const struct hid_device_id saitek_devices[] = { | 177 | static const struct hid_device_id saitek_devices[] = { |
178 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000), | 178 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000), |
179 | .driver_data = SAITEK_FIX_PS1000 }, | 179 | .driver_data = SAITEK_FIX_PS1000 }, |
180 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD), | ||
181 | .driver_data = SAITEK_RELEASE_MODE_RAT7 }, | ||
180 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), | 182 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), |
181 | .driver_data = SAITEK_RELEASE_MODE_RAT7 }, | 183 | .driver_data = SAITEK_RELEASE_MODE_RAT7 }, |
182 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9), | 184 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9), |
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 6a58b6c723aa..e54ce1097e2c 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c | |||
@@ -135,8 +135,9 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback( | |||
135 | { | 135 | { |
136 | struct hid_sensor_hub_callbacks_list *callback; | 136 | struct hid_sensor_hub_callbacks_list *callback; |
137 | struct sensor_hub_data *pdata = hid_get_drvdata(hdev); | 137 | struct sensor_hub_data *pdata = hid_get_drvdata(hdev); |
138 | unsigned long flags; | ||
138 | 139 | ||
139 | spin_lock(&pdata->dyn_callback_lock); | 140 | spin_lock_irqsave(&pdata->dyn_callback_lock, flags); |
140 | list_for_each_entry(callback, &pdata->dyn_callback_list, list) | 141 | list_for_each_entry(callback, &pdata->dyn_callback_list, list) |
141 | if (callback->usage_id == usage_id && | 142 | if (callback->usage_id == usage_id && |
142 | (collection_index >= | 143 | (collection_index >= |
@@ -145,10 +146,11 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback( | |||
145 | callback->hsdev->end_collection_index)) { | 146 | callback->hsdev->end_collection_index)) { |
146 | *priv = callback->priv; | 147 | *priv = callback->priv; |
147 | *hsdev = callback->hsdev; | 148 | *hsdev = callback->hsdev; |
148 | spin_unlock(&pdata->dyn_callback_lock); | 149 | spin_unlock_irqrestore(&pdata->dyn_callback_lock, |
150 | flags); | ||
149 | return callback->usage_callback; | 151 | return callback->usage_callback; |
150 | } | 152 | } |
151 | spin_unlock(&pdata->dyn_callback_lock); | 153 | spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags); |
152 | 154 | ||
153 | return NULL; | 155 | return NULL; |
154 | } | 156 | } |
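The hid-sensor-hub.c hunk converts a plain spin_lock() to spin_lock_irqsave(), presumably because the callback list can also be walked from a context that runs with interrupts disabled; holding the lock with interrupts enabled in that case risks a deadlock. A minimal sketch of the pattern with a hypothetical lookup list:

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

struct example_entry {
	struct list_head node;
	unsigned int usage_id;
};

static struct example_entry *example_find(unsigned int usage_id)
{
	struct example_entry *e, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_for_each_entry(e, &example_list, node) {
		if (e->usage_id == usage_id) {
			found = e;
			break;
		}
	}
	spin_unlock_irqrestore(&example_lock, flags);

	return found;
}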
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 31e9d2561106..1896c019e302 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c | |||
@@ -804,7 +804,7 @@ union sixaxis_output_report_01 { | |||
804 | #define DS4_REPORT_0x81_SIZE 7 | 804 | #define DS4_REPORT_0x81_SIZE 7 |
805 | #define SIXAXIS_REPORT_0xF2_SIZE 18 | 805 | #define SIXAXIS_REPORT_0xF2_SIZE 18 |
806 | 806 | ||
807 | static spinlock_t sony_dev_list_lock; | 807 | static DEFINE_SPINLOCK(sony_dev_list_lock); |
808 | static LIST_HEAD(sony_device_list); | 808 | static LIST_HEAD(sony_device_list); |
809 | static DEFINE_IDA(sony_device_id_allocator); | 809 | static DEFINE_IDA(sony_device_id_allocator); |
810 | 810 | ||
@@ -1944,6 +1944,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
1944 | return -ENOMEM; | 1944 | return -ENOMEM; |
1945 | } | 1945 | } |
1946 | 1946 | ||
1947 | spin_lock_init(&sc->lock); | ||
1948 | |||
1947 | sc->quirks = quirks; | 1949 | sc->quirks = quirks; |
1948 | hid_set_drvdata(hdev, sc); | 1950 | hid_set_drvdata(hdev, sc); |
1949 | sc->hdev = hdev; | 1951 | sc->hdev = hdev; |
@@ -2147,8 +2149,8 @@ static void __exit sony_exit(void) | |||
2147 | { | 2149 | { |
2148 | dbg_hid("Sony:%s\n", __func__); | 2150 | dbg_hid("Sony:%s\n", __func__); |
2149 | 2151 | ||
2150 | ida_destroy(&sony_device_id_allocator); | ||
2151 | hid_unregister_driver(&sony_driver); | 2152 | hid_unregister_driver(&sony_driver); |
2153 | ida_destroy(&sony_device_id_allocator); | ||
2152 | } | 2154 | } |
2153 | module_init(sony_init); | 2155 | module_init(sony_init); |
2154 | module_exit(sony_exit); | 2156 | module_exit(sony_exit); |
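The hid-sony.c hunks fix two lifetime details: a file-scope spinlock_t that was never initialized becomes DEFINE_SPINLOCK(), the per-device lock gains a spin_lock_init() in probe, and module exit now unregisters the driver before destroying the ID allocator, matching the reverse of init order. A short sketch of the two spinlock initialization styles (hypothetical names):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_global_lock);	/* static lock: compile-time init */

struct example_dev {
	spinlock_t lock;			/* per-instance lock */
};

static int example_probe(struct example_dev *dev)
{
	spin_lock_init(&dev->lock);		/* dynamic locks need explicit init */
	return 0;
}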
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c index d790d8d71f7f..d98696927453 100644 --- a/drivers/hid/hid-tivo.c +++ b/drivers/hid/hid-tivo.c | |||
@@ -64,6 +64,7 @@ static const struct hid_device_id tivo_devices[] = { | |||
64 | /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ | 64 | /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ |
65 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, | 65 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, |
66 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, | 66 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, |
67 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, | ||
67 | { } | 68 | { } |
68 | }; | 69 | }; |
69 | MODULE_DEVICE_TABLE(hid, tivo_devices); | 70 | MODULE_DEVICE_TABLE(hid, tivo_devices); |
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index d43e967e7533..36053f33d6d9 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
@@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client) | |||
370 | static void i2c_hid_get_input(struct i2c_hid *ihid) | 370 | static void i2c_hid_get_input(struct i2c_hid *ihid) |
371 | { | 371 | { |
372 | int ret, ret_size; | 372 | int ret, ret_size; |
373 | int size = ihid->bufsize; | 373 | int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); |
374 | |||
375 | if (size > ihid->bufsize) | ||
376 | size = ihid->bufsize; | ||
374 | 377 | ||
375 | ret = i2c_master_recv(ihid->client, ihid->inbuf, size); | 378 | ret = i2c_master_recv(ihid->client, ihid->inbuf, size); |
376 | if (ret != size) { | 379 | if (ret != size) { |
@@ -785,7 +788,7 @@ static int i2c_hid_init_irq(struct i2c_client *client) | |||
785 | dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq); | 788 | dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq); |
786 | 789 | ||
787 | ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq, | 790 | ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq, |
788 | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, | 791 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, |
789 | client->name, ihid); | 792 | client->name, ihid); |
790 | if (ret < 0) { | 793 | if (ret < 0) { |
791 | dev_warn(&client->dev, | 794 | dev_warn(&client->dev, |
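Two defensive changes in i2c-hid: the input read now uses the device-advertised wMaxInputLength, clamped to the size of the buffer the driver actually allocated, and the interrupt is requested level-triggered (IRQF_TRIGGER_LOW), which matches a level-active interrupt line and avoids losing a report that arrives while the line is already asserted. A sketch of the length clamp with a hypothetical device structure:

#include <linux/i2c.h>
#include <linux/types.h>

struct example_dev {
	struct i2c_client *client;
	char *inbuf;			/* allocation of bufsize bytes */
	unsigned int bufsize;
	__le16 max_input_length;	/* length field copied from the device */
};

static int example_read_input(struct example_dev *dev)
{
	unsigned int size = le16_to_cpu(dev->max_input_length);

	/* never trust a device-reported length past the buffer we own */
	if (size > dev->bufsize)
		size = dev->bufsize;

	return i2c_master_recv(dev->client, dev->inbuf, size);
}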
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 9be99a67bfe2..a82127753461 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -78,6 +78,7 @@ static const struct hid_blacklist { | |||
78 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, | 78 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, |
79 | { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, | 79 | { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, |
80 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, | 80 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, |
81 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, | ||
81 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, | 82 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, |
82 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, | 83 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, |
83 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, | 84 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 1a6507999a65..bbe32d66e500 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
@@ -551,9 +551,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
551 | (features->type == CINTIQ && !(data[1] & 0x40))) | 551 | (features->type == CINTIQ && !(data[1] & 0x40))) |
552 | return 1; | 552 | return 1; |
553 | 553 | ||
554 | if (features->quirks & WACOM_QUIRK_MULTI_INPUT) | 554 | if (wacom->shared) { |
555 | wacom->shared->stylus_in_proximity = true; | 555 | wacom->shared->stylus_in_proximity = true; |
556 | 556 | ||
557 | if (wacom->shared->touch_down) | ||
558 | return 1; | ||
559 | } | ||
560 | |||
557 | /* in Range while exiting */ | 561 | /* in Range while exiting */ |
558 | if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) { | 562 | if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) { |
559 | input_report_key(input, BTN_TOUCH, 0); | 563 | input_report_key(input, BTN_TOUCH, 0); |
@@ -778,6 +782,11 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) | |||
778 | input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4])); | 782 | input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4])); |
779 | input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6])); | 783 | input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6])); |
780 | input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8])); | 784 | input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8])); |
785 | if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) { | ||
786 | input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); | ||
787 | } else { | ||
788 | input_report_abs(input, ABS_MISC, 0); | ||
789 | } | ||
781 | } else if (features->type == CINTIQ_HYBRID) { | 790 | } else if (features->type == CINTIQ_HYBRID) { |
782 | /* | 791 | /* |
783 | * Do not send hardware buttons under Android. They | 792 | * Do not send hardware buttons under Android. They |
@@ -1038,27 +1047,28 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom) | |||
1038 | struct input_dev *input = wacom->input; | 1047 | struct input_dev *input = wacom->input; |
1039 | unsigned char *data = wacom->data; | 1048 | unsigned char *data = wacom->data; |
1040 | int i; | 1049 | int i; |
1041 | int current_num_contacts = 0; | 1050 | int current_num_contacts = data[61]; |
1042 | int contacts_to_send = 0; | 1051 | int contacts_to_send = 0; |
1043 | int num_contacts_left = 4; /* maximum contacts per packet */ | 1052 | int num_contacts_left = 4; /* maximum contacts per packet */ |
1044 | int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; | 1053 | int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; |
1045 | int y_offset = 2; | 1054 | int y_offset = 2; |
1055 | static int contact_with_no_pen_down_count = 0; | ||
1046 | 1056 | ||
1047 | if (wacom->features.type == WACOM_27QHDT) { | 1057 | if (wacom->features.type == WACOM_27QHDT) { |
1048 | current_num_contacts = data[63]; | 1058 | current_num_contacts = data[63]; |
1049 | num_contacts_left = 10; | 1059 | num_contacts_left = 10; |
1050 | byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; | 1060 | byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; |
1051 | y_offset = 0; | 1061 | y_offset = 0; |
1052 | } else { | ||
1053 | current_num_contacts = data[61]; | ||
1054 | } | 1062 | } |
1055 | 1063 | ||
1056 | /* | 1064 | /* |
1057 | * First packet resets the counter since only the first | 1065 | * First packet resets the counter since only the first |
1058 | * packet in series will have non-zero current_num_contacts. | 1066 | * packet in series will have non-zero current_num_contacts. |
1059 | */ | 1067 | */ |
1060 | if (current_num_contacts) | 1068 | if (current_num_contacts) { |
1061 | wacom->num_contacts_left = current_num_contacts; | 1069 | wacom->num_contacts_left = current_num_contacts; |
1070 | contact_with_no_pen_down_count = 0; | ||
1071 | } | ||
1062 | 1072 | ||
1063 | contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); | 1073 | contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); |
1064 | 1074 | ||
@@ -1091,15 +1101,16 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom) | |||
1091 | input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); | 1101 | input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); |
1092 | input_report_abs(input, ABS_MT_ORIENTATION, w > h); | 1102 | input_report_abs(input, ABS_MT_ORIENTATION, w > h); |
1093 | } | 1103 | } |
1104 | contact_with_no_pen_down_count++; | ||
1094 | } | 1105 | } |
1095 | } | 1106 | } |
1096 | input_mt_report_pointer_emulation(input, true); | 1107 | input_mt_report_pointer_emulation(input, true); |
1097 | 1108 | ||
1098 | wacom->num_contacts_left -= contacts_to_send; | 1109 | wacom->num_contacts_left -= contacts_to_send; |
1099 | if (wacom->num_contacts_left <= 0) | 1110 | if (wacom->num_contacts_left <= 0) { |
1100 | wacom->num_contacts_left = 0; | 1111 | wacom->num_contacts_left = 0; |
1101 | 1112 | wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); | |
1102 | wacom->shared->touch_down = (wacom->num_contacts_left > 0); | 1113 | } |
1103 | return 1; | 1114 | return 1; |
1104 | } | 1115 | } |
1105 | 1116 | ||
@@ -1111,6 +1122,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom) | |||
1111 | int current_num_contacts = data[2]; | 1122 | int current_num_contacts = data[2]; |
1112 | int contacts_to_send = 0; | 1123 | int contacts_to_send = 0; |
1113 | int x_offset = 0; | 1124 | int x_offset = 0; |
1125 | static int contact_with_no_pen_down_count = 0; | ||
1114 | 1126 | ||
1115 | /* MTTPC does not support Height and Width */ | 1127 | /* MTTPC does not support Height and Width */ |
1116 | if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) | 1128 | if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) |
@@ -1120,8 +1132,10 @@ static int wacom_mt_touch(struct wacom_wac *wacom) | |||
1120 | * First packet resets the counter since only the first | 1132 | * First packet resets the counter since only the first |
1121 | * packet in series will have non-zero current_num_contacts. | 1133 | * packet in series will have non-zero current_num_contacts. |
1122 | */ | 1134 | */ |
1123 | if (current_num_contacts) | 1135 | if (current_num_contacts) { |
1124 | wacom->num_contacts_left = current_num_contacts; | 1136 | wacom->num_contacts_left = current_num_contacts; |
1137 | contact_with_no_pen_down_count = 0; | ||
1138 | } | ||
1125 | 1139 | ||
1126 | /* There are at most 5 contacts per packet */ | 1140 | /* There are at most 5 contacts per packet */ |
1127 | contacts_to_send = min(5, wacom->num_contacts_left); | 1141 | contacts_to_send = min(5, wacom->num_contacts_left); |
@@ -1142,15 +1156,16 @@ static int wacom_mt_touch(struct wacom_wac *wacom) | |||
1142 | int y = get_unaligned_le16(&data[offset + x_offset + 9]); | 1156 | int y = get_unaligned_le16(&data[offset + x_offset + 9]); |
1143 | input_report_abs(input, ABS_MT_POSITION_X, x); | 1157 | input_report_abs(input, ABS_MT_POSITION_X, x); |
1144 | input_report_abs(input, ABS_MT_POSITION_Y, y); | 1158 | input_report_abs(input, ABS_MT_POSITION_Y, y); |
1159 | contact_with_no_pen_down_count++; | ||
1145 | } | 1160 | } |
1146 | } | 1161 | } |
1147 | input_mt_report_pointer_emulation(input, true); | 1162 | input_mt_report_pointer_emulation(input, true); |
1148 | 1163 | ||
1149 | wacom->num_contacts_left -= contacts_to_send; | 1164 | wacom->num_contacts_left -= contacts_to_send; |
1150 | if (wacom->num_contacts_left < 0) | 1165 | if (wacom->num_contacts_left <= 0) { |
1151 | wacom->num_contacts_left = 0; | 1166 | wacom->num_contacts_left = 0; |
1152 | 1167 | wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); | |
1153 | wacom->shared->touch_down = (wacom->num_contacts_left > 0); | 1168 | } |
1154 | return 1; | 1169 | return 1; |
1155 | } | 1170 | } |
1156 | 1171 | ||
@@ -1188,29 +1203,25 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len) | |||
1188 | { | 1203 | { |
1189 | unsigned char *data = wacom->data; | 1204 | unsigned char *data = wacom->data; |
1190 | struct input_dev *input = wacom->input; | 1205 | struct input_dev *input = wacom->input; |
1191 | bool prox; | 1206 | bool prox = !wacom->shared->stylus_in_proximity; |
1192 | int x = 0, y = 0; | 1207 | int x = 0, y = 0; |
1193 | 1208 | ||
1194 | if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) | 1209 | if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) |
1195 | return 0; | 1210 | return 0; |
1196 | 1211 | ||
1197 | if (!wacom->shared->stylus_in_proximity) { | 1212 | if (len == WACOM_PKGLEN_TPC1FG) { |
1198 | if (len == WACOM_PKGLEN_TPC1FG) { | 1213 | prox = prox && (data[0] & 0x01); |
1199 | prox = data[0] & 0x01; | 1214 | x = get_unaligned_le16(&data[1]); |
1200 | x = get_unaligned_le16(&data[1]); | 1215 | y = get_unaligned_le16(&data[3]); |
1201 | y = get_unaligned_le16(&data[3]); | 1216 | } else if (len == WACOM_PKGLEN_TPC1FG_B) { |
1202 | } else if (len == WACOM_PKGLEN_TPC1FG_B) { | 1217 | prox = prox && (data[2] & 0x01); |
1203 | prox = data[2] & 0x01; | 1218 | x = get_unaligned_le16(&data[3]); |
1204 | x = get_unaligned_le16(&data[3]); | 1219 | y = get_unaligned_le16(&data[5]); |
1205 | y = get_unaligned_le16(&data[5]); | 1220 | } else { |
1206 | } else { | 1221 | prox = prox && (data[1] & 0x01); |
1207 | prox = data[1] & 0x01; | 1222 | x = le16_to_cpup((__le16 *)&data[2]); |
1208 | x = le16_to_cpup((__le16 *)&data[2]); | 1223 | y = le16_to_cpup((__le16 *)&data[4]); |
1209 | y = le16_to_cpup((__le16 *)&data[4]); | 1224 | } |
1210 | } | ||
1211 | } else | ||
1212 | /* force touch out when pen is in prox */ | ||
1213 | prox = 0; | ||
1214 | 1225 | ||
1215 | if (prox) { | 1226 | if (prox) { |
1216 | input_report_abs(input, ABS_X, x); | 1227 | input_report_abs(input, ABS_X, x); |
@@ -1608,6 +1619,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) | |||
1608 | struct input_dev *pad_input = wacom->pad_input; | 1619 | struct input_dev *pad_input = wacom->pad_input; |
1609 | unsigned char *data = wacom->data; | 1620 | unsigned char *data = wacom->data; |
1610 | int i; | 1621 | int i; |
1622 | int contact_with_no_pen_down_count = 0; | ||
1611 | 1623 | ||
1612 | if (data[0] != 0x02) | 1624 | if (data[0] != 0x02) |
1613 | return 0; | 1625 | return 0; |
@@ -1635,6 +1647,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) | |||
1635 | } | 1647 | } |
1636 | input_report_abs(input, ABS_MT_POSITION_X, x); | 1648 | input_report_abs(input, ABS_MT_POSITION_X, x); |
1637 | input_report_abs(input, ABS_MT_POSITION_Y, y); | 1649 | input_report_abs(input, ABS_MT_POSITION_Y, y); |
1650 | contact_with_no_pen_down_count++; | ||
1638 | } | 1651 | } |
1639 | } | 1652 | } |
1640 | 1653 | ||
@@ -1644,11 +1657,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) | |||
1644 | input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); | 1657 | input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); |
1645 | input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); | 1658 | input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); |
1646 | input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); | 1659 | input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); |
1660 | wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); | ||
1647 | 1661 | ||
1648 | return 1; | 1662 | return 1; |
1649 | } | 1663 | } |
1650 | 1664 | ||
1651 | static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) | 1665 | static int wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data, int last_touch_count) |
1652 | { | 1666 | { |
1653 | struct wacom_features *features = &wacom->features; | 1667 | struct wacom_features *features = &wacom->features; |
1654 | struct input_dev *input = wacom->input; | 1668 | struct input_dev *input = wacom->input; |
@@ -1656,7 +1670,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) | |||
1656 | int slot = input_mt_get_slot_by_key(input, data[0]); | 1670 | int slot = input_mt_get_slot_by_key(input, data[0]); |
1657 | 1671 | ||
1658 | if (slot < 0) | 1672 | if (slot < 0) |
1659 | return; | 1673 | return 0; |
1660 | 1674 | ||
1661 | touch = touch && !wacom->shared->stylus_in_proximity; | 1675 | touch = touch && !wacom->shared->stylus_in_proximity; |
1662 | 1676 | ||
@@ -1688,7 +1702,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) | |||
1688 | input_report_abs(input, ABS_MT_POSITION_Y, y); | 1702 | input_report_abs(input, ABS_MT_POSITION_Y, y); |
1689 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); | 1703 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); |
1690 | input_report_abs(input, ABS_MT_TOUCH_MINOR, height); | 1704 | input_report_abs(input, ABS_MT_TOUCH_MINOR, height); |
1705 | last_touch_count++; | ||
1691 | } | 1706 | } |
1707 | return last_touch_count; | ||
1692 | } | 1708 | } |
1693 | 1709 | ||
1694 | static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) | 1710 | static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) |
@@ -1713,6 +1729,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) | |||
1713 | unsigned char *data = wacom->data; | 1729 | unsigned char *data = wacom->data; |
1714 | int count = data[1] & 0x07; | 1730 | int count = data[1] & 0x07; |
1715 | int i; | 1731 | int i; |
1732 | int contact_with_no_pen_down_count = 0; | ||
1716 | 1733 | ||
1717 | if (data[0] != 0x02) | 1734 | if (data[0] != 0x02) |
1718 | return 0; | 1735 | return 0; |
@@ -1723,12 +1740,15 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) | |||
1723 | int msg_id = data[offset]; | 1740 | int msg_id = data[offset]; |
1724 | 1741 | ||
1725 | if (msg_id >= 2 && msg_id <= 17) | 1742 | if (msg_id >= 2 && msg_id <= 17) |
1726 | wacom_bpt3_touch_msg(wacom, data + offset); | 1743 | contact_with_no_pen_down_count = |
1744 | wacom_bpt3_touch_msg(wacom, data + offset, | ||
1745 | contact_with_no_pen_down_count); | ||
1727 | else if (msg_id == 128) | 1746 | else if (msg_id == 128) |
1728 | wacom_bpt3_button_msg(wacom, data + offset); | 1747 | wacom_bpt3_button_msg(wacom, data + offset); |
1729 | 1748 | ||
1730 | } | 1749 | } |
1731 | input_mt_report_pointer_emulation(input, true); | 1750 | input_mt_report_pointer_emulation(input, true); |
1751 | wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); | ||
1732 | 1752 | ||
1733 | return 1; | 1753 | return 1; |
1734 | } | 1754 | } |
@@ -1754,6 +1774,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom) | |||
1754 | return 0; | 1774 | return 0; |
1755 | } | 1775 | } |
1756 | 1776 | ||
1777 | if (wacom->shared->touch_down) | ||
1778 | return 0; | ||
1779 | |||
1757 | prox = (data[1] & 0x20) == 0x20; | 1780 | prox = (data[1] & 0x20) == 0x20; |
1758 | 1781 | ||
1759 | /* | 1782 | /* |
@@ -2725,9 +2748,9 @@ static const struct wacom_features wacom_features_0xF6 = | |||
2725 | .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10, | 2748 | .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10, |
2726 | .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; | 2749 | .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; |
2727 | static const struct wacom_features wacom_features_0x32A = | 2750 | static const struct wacom_features wacom_features_0x32A = |
2728 | { "Wacom Cintiq 27QHD", 119740, 67520, 2047, | 2751 | { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63, |
2729 | 63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, | 2752 | WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, |
2730 | WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; | 2753 | WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; |
2731 | static const struct wacom_features wacom_features_0x32B = | 2754 | static const struct wacom_features wacom_features_0x32B = |
2732 | { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63, | 2755 | { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63, |
2733 | WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, | 2756 | WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, |
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index bce4e9ff21bf..6c99ee7bafa3 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c | |||
@@ -147,6 +147,9 @@ static int ads7828_probe(struct i2c_client *client, | |||
147 | &ads2830_regmap_config); | 147 | &ads2830_regmap_config); |
148 | } | 148 | } |
149 | 149 | ||
150 | if (IS_ERR(data->regmap)) | ||
151 | return PTR_ERR(data->regmap); | ||
152 | |||
150 | data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; | 153 | data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; |
151 | if (!diff_input) | 154 | if (!diff_input) |
152 | data->cmd_byte |= ADS7828_CMD_SD_SE; | 155 | data->cmd_byte |= ADS7828_CMD_SD_SE; |
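devm_regmap_init_i2c() reports failure through an error pointer, not NULL, so the ads7828 probe hunk adds the IS_ERR()/PTR_ERR() check before the regmap is used. A minimal sketch of that convention:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static int example_probe(struct i2c_client *client,
			 const struct regmap_config *cfg)
{
	struct regmap *map = devm_regmap_init_i2c(client, cfg);

	if (IS_ERR(map))		/* error pointer, never NULL on failure */
		return PTR_ERR(map);

	/* ... use the regmap ... */
	return 0;
}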
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c index 5f1ff4cc5c34..7d7ae97476e2 100644 --- a/drivers/i2c/busses/i2c-designware-baytrail.c +++ b/drivers/i2c/busses/i2c-designware-baytrail.c | |||
@@ -17,27 +17,31 @@ | |||
17 | #include <linux/acpi.h> | 17 | #include <linux/acpi.h> |
18 | #include <linux/i2c.h> | 18 | #include <linux/i2c.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | |||
20 | #include <asm/iosf_mbi.h> | 21 | #include <asm/iosf_mbi.h> |
22 | |||
21 | #include "i2c-designware-core.h" | 23 | #include "i2c-designware-core.h" |
22 | 24 | ||
23 | #define SEMAPHORE_TIMEOUT 100 | 25 | #define SEMAPHORE_TIMEOUT 100 |
24 | #define PUNIT_SEMAPHORE 0x7 | 26 | #define PUNIT_SEMAPHORE 0x7 |
27 | #define PUNIT_SEMAPHORE_BIT BIT(0) | ||
28 | #define PUNIT_SEMAPHORE_ACQUIRE BIT(1) | ||
25 | 29 | ||
26 | static unsigned long acquired; | 30 | static unsigned long acquired; |
27 | 31 | ||
28 | static int get_sem(struct device *dev, u32 *sem) | 32 | static int get_sem(struct device *dev, u32 *sem) |
29 | { | 33 | { |
30 | u32 reg_val; | 34 | u32 data; |
31 | int ret; | 35 | int ret; |
32 | 36 | ||
33 | ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE, | 37 | ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE, |
34 | ®_val); | 38 | &data); |
35 | if (ret) { | 39 | if (ret) { |
36 | dev_err(dev, "iosf failed to read punit semaphore\n"); | 40 | dev_err(dev, "iosf failed to read punit semaphore\n"); |
37 | return ret; | 41 | return ret; |
38 | } | 42 | } |
39 | 43 | ||
40 | *sem = reg_val & 0x1; | 44 | *sem = data & PUNIT_SEMAPHORE_BIT; |
41 | 45 | ||
42 | return 0; | 46 | return 0; |
43 | } | 47 | } |
@@ -52,27 +56,29 @@ static void reset_semaphore(struct device *dev) | |||
52 | return; | 56 | return; |
53 | } | 57 | } |
54 | 58 | ||
55 | data = data & 0xfffffffe; | 59 | data &= ~PUNIT_SEMAPHORE_BIT; |
56 | if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, | 60 | if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, |
57 | PUNIT_SEMAPHORE, data)) | 61 | PUNIT_SEMAPHORE, data)) |
58 | dev_err(dev, "iosf failed to reset punit semaphore during write\n"); | 62 | dev_err(dev, "iosf failed to reset punit semaphore during write\n"); |
59 | } | 63 | } |
60 | 64 | ||
61 | int baytrail_i2c_acquire(struct dw_i2c_dev *dev) | 65 | static int baytrail_i2c_acquire(struct dw_i2c_dev *dev) |
62 | { | 66 | { |
63 | u32 sem = 0; | 67 | u32 sem; |
64 | int ret; | 68 | int ret; |
65 | unsigned long start, end; | 69 | unsigned long start, end; |
66 | 70 | ||
71 | might_sleep(); | ||
72 | |||
67 | if (!dev || !dev->dev) | 73 | if (!dev || !dev->dev) |
68 | return -ENODEV; | 74 | return -ENODEV; |
69 | 75 | ||
70 | if (!dev->acquire_lock) | 76 | if (!dev->release_lock) |
71 | return 0; | 77 | return 0; |
72 | 78 | ||
73 | /* host driver writes 0x2 to side band semaphore register */ | 79 | /* host driver writes to side band semaphore register */ |
74 | ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, | 80 | ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, |
75 | PUNIT_SEMAPHORE, 0x2); | 81 | PUNIT_SEMAPHORE, PUNIT_SEMAPHORE_ACQUIRE); |
76 | if (ret) { | 82 | if (ret) { |
77 | dev_err(dev->dev, "iosf punit semaphore request failed\n"); | 83 | dev_err(dev->dev, "iosf punit semaphore request failed\n"); |
78 | return ret; | 84 | return ret; |
@@ -81,7 +87,7 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev) | |||
81 | /* host driver waits for bit 0 to be set in semaphore register */ | 87 | /* host driver waits for bit 0 to be set in semaphore register */ |
82 | start = jiffies; | 88 | start = jiffies; |
83 | end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT); | 89 | end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT); |
84 | while (!time_after(jiffies, end)) { | 90 | do { |
85 | ret = get_sem(dev->dev, &sem); | 91 | ret = get_sem(dev->dev, &sem); |
86 | if (!ret && sem) { | 92 | if (!ret && sem) { |
87 | acquired = jiffies; | 93 | acquired = jiffies; |
@@ -91,14 +97,14 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev) | |||
91 | } | 97 | } |
92 | 98 | ||
93 | usleep_range(1000, 2000); | 99 | usleep_range(1000, 2000); |
94 | } | 100 | } while (time_before(jiffies, end)); |
95 | 101 | ||
96 | dev_err(dev->dev, "punit semaphore timed out, resetting\n"); | 102 | dev_err(dev->dev, "punit semaphore timed out, resetting\n"); |
97 | reset_semaphore(dev->dev); | 103 | reset_semaphore(dev->dev); |
98 | 104 | ||
99 | ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, | 105 | ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, |
100 | PUNIT_SEMAPHORE, &sem); | 106 | PUNIT_SEMAPHORE, &sem); |
101 | if (!ret) | 107 | if (ret) |
102 | dev_err(dev->dev, "iosf failed to read punit semaphore\n"); | 108 | dev_err(dev->dev, "iosf failed to read punit semaphore\n"); |
103 | else | 109 | else |
104 | dev_err(dev->dev, "PUNIT SEM: %d\n", sem); | 110 | dev_err(dev->dev, "PUNIT SEM: %d\n", sem); |
@@ -107,9 +113,8 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev) | |||
107 | 113 | ||
108 | return -ETIMEDOUT; | 114 | return -ETIMEDOUT; |
109 | } | 115 | } |
110 | EXPORT_SYMBOL(baytrail_i2c_acquire); | ||
111 | 116 | ||
112 | void baytrail_i2c_release(struct dw_i2c_dev *dev) | 117 | static void baytrail_i2c_release(struct dw_i2c_dev *dev) |
113 | { | 118 | { |
114 | if (!dev || !dev->dev) | 119 | if (!dev || !dev->dev) |
115 | return; | 120 | return; |
@@ -121,7 +126,6 @@ void baytrail_i2c_release(struct dw_i2c_dev *dev) | |||
121 | dev_dbg(dev->dev, "punit semaphore held for %ums\n", | 126 | dev_dbg(dev->dev, "punit semaphore held for %ums\n", |
122 | jiffies_to_msecs(jiffies - acquired)); | 127 | jiffies_to_msecs(jiffies - acquired)); |
123 | } | 128 | } |
124 | EXPORT_SYMBOL(baytrail_i2c_release); | ||
125 | 129 | ||
126 | int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) | 130 | int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) |
127 | { | 131 | { |
@@ -137,7 +141,6 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) | |||
137 | return 0; | 141 | return 0; |
138 | 142 | ||
139 | status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); | 143 | status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); |
140 | |||
141 | if (ACPI_FAILURE(status)) | 144 | if (ACPI_FAILURE(status)) |
142 | return 0; | 145 | return 0; |
143 | 146 | ||
@@ -153,7 +156,6 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) | |||
153 | 156 | ||
154 | return 0; | 157 | return 0; |
155 | } | 158 | } |
156 | EXPORT_SYMBOL(i2c_dw_eval_lock_support); | ||
157 | 159 | ||
158 | MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); | 160 | MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); |
159 | MODULE_DESCRIPTION("Baytrail I2C Semaphore driver"); | 161 | MODULE_DESCRIPTION("Baytrail I2C Semaphore driver"); |
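Besides making the helpers static and naming the semaphore bits, the baytrail hunk restructures the poll as a do/while on time_before() and fixes the inverted check after the timeout (dev_err on ret != 0 rather than ret == 0). The do/while form guarantees at least one read of the semaphore even if the caller was delayed past the deadline before the first iteration, and the jiffies helpers are wrap-safe. A sketch of the idiom, with example_read_status() and EXAMPLE_READY assumed:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_wait_ready(struct example_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(100);
	u32 status;
	int ret;

	do {
		ret = example_read_status(dev, &status);
		if (!ret && (status & EXAMPLE_READY))
			return 0;

		usleep_range(1000, 2000);	/* sleeping context required */
	} while (time_before(jiffies, end));	/* wrap-safe deadline check */

	return -ETIMEDOUT;
}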
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 210cf4874cb7..edf274cabe81 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev) | |||
679 | status = driver->remove(client); | 679 | status = driver->remove(client); |
680 | } | 680 | } |
681 | 681 | ||
682 | if (dev->of_node) | ||
683 | irq_dispose_mapping(client->irq); | ||
684 | |||
685 | dev_pm_domain_detach(&client->dev, true); | 682 | dev_pm_domain_detach(&client->dev, true); |
686 | return status; | 683 | return status; |
687 | } | 684 | } |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 1793aea4a7d2..6eb738ca6d2f 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -1793,11 +1793,11 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) | |||
1793 | tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN, | 1793 | tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN, |
1794 | IDETAPE_DSC_RW_MAX); | 1794 | IDETAPE_DSC_RW_MAX); |
1795 | printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " | 1795 | printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " |
1796 | "%lums tDSC%s\n", | 1796 | "%ums tDSC%s\n", |
1797 | drive->name, tape->name, *(u16 *)&tape->caps[14], | 1797 | drive->name, tape->name, *(u16 *)&tape->caps[14], |
1798 | (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, | 1798 | (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, |
1799 | tape->buffer_size / 1024, | 1799 | tape->buffer_size / 1024, |
1800 | tape->best_dsc_rw_freq * 1000 / HZ, | 1800 | jiffies_to_msecs(tape->best_dsc_rw_freq), |
1801 | (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : ""); | 1801 | (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : ""); |
1802 | 1802 | ||
1803 | ide_proc_register_driver(drive, tape->driver); | 1803 | ide_proc_register_driver(drive, tape->driver); |
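The ide-tape change replaces the open-coded jiffies-to-milliseconds conversion (best_dsc_rw_freq * 1000 / HZ) with jiffies_to_msecs() and fixes the format specifier to %u, matching the unsigned int that helper returns. A minimal sketch, assuming a value kept in jiffies:

    #include <linux/jiffies.h>
    #include <linux/printk.h>

    static void report_interval(unsigned long interval_jiffies)
    {
            /* jiffies_to_msecs() returns unsigned int, hence the %u specifier */
            pr_info("interval is %ums\n", jiffies_to_msecs(interval_jiffies));
    }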
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 1096da327130..75c6d2103e07 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c | |||
@@ -659,7 +659,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p) | |||
659 | 659 | ||
660 | mutex_lock(&data->mutex); | 660 | mutex_lock(&data->mutex); |
661 | 661 | ||
662 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 662 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
663 | indio_dev->masklength) { | 663 | indio_dev->masklength) { |
664 | ret = bma180_get_data_reg(data, bit); | 664 | ret = bma180_get_data_reg(data, bit); |
665 | if (ret < 0) { | 665 | if (ret < 0) { |
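This bma180 hunk, and the matching ones in bmc150, kxcjk-1013, at91_adc, ti_am335x, bmg160, kmx61 and sx9500 further down, all switch the buffer handler from indio_dev->buffer->scan_mask to indio_dev->active_scan_mask, the mask the IIO core actually put into effect after matching the request against the available scan masks. A minimal sketch of the loop shape these handlers share (example_read_channel() is a hypothetical helper, not any driver's function):

    static irqreturn_t example_trigger_handler(int irq, void *p)
    {
            struct iio_poll_func *pf = p;
            struct iio_dev *indio_dev = pf->indio_dev;
            s16 buf[8];
            int bit, i = 0;

            /* iterate only the channels the core enabled for this capture */
            for_each_set_bit(bit, indio_dev->active_scan_mask,
                             indio_dev->masklength)
                    buf[i++] = example_read_channel(indio_dev, bit);

            iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
            iio_trigger_notify_done(indio_dev->trig);
            return IRQ_HANDLED;
    }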
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c index 066d0c04072c..75567fd457dc 100644 --- a/drivers/iio/accel/bmc150-accel.c +++ b/drivers/iio/accel/bmc150-accel.c | |||
@@ -168,14 +168,14 @@ static const struct { | |||
168 | int val; | 168 | int val; |
169 | int val2; | 169 | int val2; |
170 | u8 bw_bits; | 170 | u8 bw_bits; |
171 | } bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08}, | 171 | } bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08}, |
172 | {15, 630000, 0x09}, | 172 | {31, 260000, 0x09}, |
173 | {31, 250000, 0x0A}, | 173 | {62, 500000, 0x0A}, |
174 | {62, 500000, 0x0B}, | 174 | {125, 0, 0x0B}, |
175 | {125, 0, 0x0C}, | 175 | {250, 0, 0x0C}, |
176 | {250, 0, 0x0D}, | 176 | {500, 0, 0x0D}, |
177 | {500, 0, 0x0E}, | 177 | {1000, 0, 0x0E}, |
178 | {1000, 0, 0x0F} }; | 178 | {2000, 0, 0x0F} }; |
179 | 179 | ||
180 | static const struct { | 180 | static const struct { |
181 | int bw_bits; | 181 | int bw_bits; |
@@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev, | |||
840 | } | 840 | } |
841 | 841 | ||
842 | static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( | 842 | static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( |
843 | "7.810000 15.630000 31.250000 62.500000 125 250 500 1000"); | 843 | "15.620000 31.260000 62.50000 125 250 500 1000 2000"); |
844 | 844 | ||
845 | static struct attribute *bmc150_accel_attributes[] = { | 845 | static struct attribute *bmc150_accel_attributes[] = { |
846 | &iio_const_attr_sampling_frequency_available.dev_attr.attr, | 846 | &iio_const_attr_sampling_frequency_available.dev_attr.attr, |
@@ -986,7 +986,7 @@ static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p) | |||
986 | int bit, ret, i = 0; | 986 | int bit, ret, i = 0; |
987 | 987 | ||
988 | mutex_lock(&data->mutex); | 988 | mutex_lock(&data->mutex); |
989 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 989 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
990 | indio_dev->masklength) { | 990 | indio_dev->masklength) { |
991 | ret = i2c_smbus_read_word_data(data->client, | 991 | ret = i2c_smbus_read_word_data(data->client, |
992 | BMC150_ACCEL_AXIS_TO_REG(bit)); | 992 | BMC150_ACCEL_AXIS_TO_REG(bit)); |
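The corrected bmc150 table uses the usual IIO split of a fractional rate into an integer part (val) and a micro-unit part (val2), so 15.62 Hz is stored as {15, 620000, 0x08} and the sysfs "available" string is simply those pairs printed back out. A small sketch of how such an entry is returned from read_raw(), with an illustrative table lookup (samp_freq_table and data->bw are assumptions, not the driver's names):

    case IIO_CHAN_INFO_SAMP_FREQ:
            /* {15, 620000} is reported to userspace as 15.620000 Hz */
            *val = samp_freq_table[data->bw].val;
            *val2 = samp_freq_table[data->bw].val2;
            return IIO_VAL_INT_PLUS_MICRO;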
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 567de269cc00..1a6379525fa4 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c | |||
@@ -956,7 +956,7 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p) | |||
956 | 956 | ||
957 | mutex_lock(&data->mutex); | 957 | mutex_lock(&data->mutex); |
958 | 958 | ||
959 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 959 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
960 | indio_dev->masklength) { | 960 | indio_dev->masklength) { |
961 | ret = kxcjk1013_get_acc_reg(data, bit); | 961 | ret = kxcjk1013_get_acc_reg(data, bit); |
962 | if (ret < 0) { | 962 | if (ret < 0) { |
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 202daf889be2..46379b1fb25b 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig | |||
@@ -137,7 +137,8 @@ config AXP288_ADC | |||
137 | 137 | ||
138 | config CC10001_ADC | 138 | config CC10001_ADC |
139 | tristate "Cosmic Circuits 10001 ADC driver" | 139 | tristate "Cosmic Circuits 10001 ADC driver" |
140 | depends on HAS_IOMEM || HAVE_CLK || REGULATOR | 140 | depends on HAVE_CLK || REGULATOR |
141 | depends on HAS_IOMEM | ||
141 | select IIO_BUFFER | 142 | select IIO_BUFFER |
142 | select IIO_TRIGGERED_BUFFER | 143 | select IIO_TRIGGERED_BUFFER |
143 | help | 144 | help |
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index ff61ae55dd3f..8a0eb4a04fb5 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c | |||
@@ -544,7 +544,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) | |||
544 | { | 544 | { |
545 | struct iio_dev *idev = iio_trigger_get_drvdata(trig); | 545 | struct iio_dev *idev = iio_trigger_get_drvdata(trig); |
546 | struct at91_adc_state *st = iio_priv(idev); | 546 | struct at91_adc_state *st = iio_priv(idev); |
547 | struct iio_buffer *buffer = idev->buffer; | ||
548 | struct at91_adc_reg_desc *reg = st->registers; | 547 | struct at91_adc_reg_desc *reg = st->registers; |
549 | u32 status = at91_adc_readl(st, reg->trigger_register); | 548 | u32 status = at91_adc_readl(st, reg->trigger_register); |
550 | int value; | 549 | int value; |
@@ -564,7 +563,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) | |||
564 | at91_adc_writel(st, reg->trigger_register, | 563 | at91_adc_writel(st, reg->trigger_register, |
565 | status | value); | 564 | status | value); |
566 | 565 | ||
567 | for_each_set_bit(bit, buffer->scan_mask, | 566 | for_each_set_bit(bit, idev->active_scan_mask, |
568 | st->num_channels) { | 567 | st->num_channels) { |
569 | struct iio_chan_spec const *chan = idev->channels + bit; | 568 | struct iio_chan_spec const *chan = idev->channels + bit; |
570 | at91_adc_writel(st, AT91_ADC_CHER, | 569 | at91_adc_writel(st, AT91_ADC_CHER, |
@@ -579,7 +578,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) | |||
579 | at91_adc_writel(st, reg->trigger_register, | 578 | at91_adc_writel(st, reg->trigger_register, |
580 | status & ~value); | 579 | status & ~value); |
581 | 580 | ||
582 | for_each_set_bit(bit, buffer->scan_mask, | 581 | for_each_set_bit(bit, idev->active_scan_mask, |
583 | st->num_channels) { | 582 | st->num_channels) { |
584 | struct iio_chan_spec const *chan = idev->channels + bit; | 583 | struct iio_chan_spec const *chan = idev->channels + bit; |
585 | at91_adc_writel(st, AT91_ADC_CHDR, | 584 | at91_adc_writel(st, AT91_ADC_CHDR, |
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index 51672256072b..b96c636470ef 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c | |||
@@ -58,20 +58,11 @@ | |||
58 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ | 58 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ |
59 | } | 59 | } |
60 | 60 | ||
61 | /* LSB is in nV to eliminate floating point */ | ||
62 | static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625}; | ||
63 | |||
64 | /* | ||
65 | * scales calculated as: | ||
66 | * rates_to_lsb[sample_rate] / (1 << pga); | ||
67 | * pga is 1 for 0, 2 | ||
68 | */ | ||
69 | |||
70 | static const int mcp3422_scales[4][4] = { | 61 | static const int mcp3422_scales[4][4] = { |
71 | { 1000000, 250000, 62500, 15625 }, | 62 | { 1000000, 500000, 250000, 125000 }, |
72 | { 500000 , 125000, 31250, 7812 }, | 63 | { 250000 , 125000, 62500 , 31250 }, |
73 | { 250000 , 62500 , 15625, 3906 }, | 64 | { 62500 , 31250 , 15625 , 7812 }, |
74 | { 125000 , 31250 , 7812 , 1953 } }; | 65 | { 15625 , 7812 , 3906 , 1953 } }; |
75 | 66 | ||
76 | /* Constant msleep times for data acquisitions */ | 67 | /* Constant msleep times for data acquisitions */ |
77 | static const int mcp3422_read_times[4] = { | 68 | static const int mcp3422_read_times[4] = { |
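The rewritten mcp3422 scale table follows the rule from the removed comment: each entry is the per-sample-rate LSB size divided by the PGA gain (1, 2, 4 or 8). Row 1, for instance, becomes 250000, 125000, 62500 and 31250, which is what the old table got wrong. A worked sketch of that relationship (the helper name is illustrative):

    /* LSB per sample-rate setting, in nV (values from the removed comment) */
    static const unsigned int rates_to_lsb[4] = { 1000000, 250000, 62500, 15625 };

    /*
     * scale[rate][pga] = rates_to_lsb[rate] / (1 << pga),
     * e.g. rate 1, pga 2 -> 250000 / 4 = 62500, matching the new table.
     */
    static unsigned int mcp3422_scale_sketch(int rate, int pga)
    {
            return rates_to_lsb[rate] >> pga;
    }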
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c index b9666f2f5e51..fabd24edc2a1 100644 --- a/drivers/iio/adc/qcom-spmi-iadc.c +++ b/drivers/iio/adc/qcom-spmi-iadc.c | |||
@@ -296,7 +296,8 @@ static int iadc_do_conversion(struct iadc_chip *iadc, int chan, u16 *data) | |||
296 | if (iadc->poll_eoc) { | 296 | if (iadc->poll_eoc) { |
297 | ret = iadc_poll_wait_eoc(iadc, wait); | 297 | ret = iadc_poll_wait_eoc(iadc, wait); |
298 | } else { | 298 | } else { |
299 | ret = wait_for_completion_timeout(&iadc->complete, wait); | 299 | ret = wait_for_completion_timeout(&iadc->complete, |
300 | usecs_to_jiffies(wait)); | ||
300 | if (!ret) | 301 | if (!ret) |
301 | ret = -ETIMEDOUT; | 302 | ret = -ETIMEDOUT; |
302 | else | 303 | else |
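The qcom-spmi-iadc fix matters because wait_for_completion_timeout() expects its timeout in jiffies; passing the raw microsecond budget made the effective timeout depend on HZ. A minimal sketch of the corrected call, with a hypothetical completion and timeout:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int wait_for_eoc(struct completion *done, unsigned int wait_us)
    {
            unsigned long left;

            /* convert the microsecond budget to jiffies before sleeping */
            left = wait_for_completion_timeout(done, usecs_to_jiffies(wait_us));

            return left ? 0 : -ETIMEDOUT;
    }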
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 2e5cc4409f78..a0e7161f040c 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c | |||
@@ -188,12 +188,11 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev) | |||
188 | static int tiadc_buffer_postenable(struct iio_dev *indio_dev) | 188 | static int tiadc_buffer_postenable(struct iio_dev *indio_dev) |
189 | { | 189 | { |
190 | struct tiadc_device *adc_dev = iio_priv(indio_dev); | 190 | struct tiadc_device *adc_dev = iio_priv(indio_dev); |
191 | struct iio_buffer *buffer = indio_dev->buffer; | ||
192 | unsigned int enb = 0; | 191 | unsigned int enb = 0; |
193 | u8 bit; | 192 | u8 bit; |
194 | 193 | ||
195 | tiadc_step_config(indio_dev); | 194 | tiadc_step_config(indio_dev); |
196 | for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels) | 195 | for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) |
197 | enb |= (get_adc_step_bit(adc_dev, bit) << 1); | 196 | enb |= (get_adc_step_bit(adc_dev, bit) << 1); |
198 | adc_dev->buffer_en_ch_steps = enb; | 197 | adc_dev->buffer_en_ch_steps = enb; |
199 | 198 | ||
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 8ec353c01d98..e63b8e76d4c3 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c | |||
@@ -141,9 +141,13 @@ struct vf610_adc { | |||
141 | struct regulator *vref; | 141 | struct regulator *vref; |
142 | struct vf610_adc_feature adc_feature; | 142 | struct vf610_adc_feature adc_feature; |
143 | 143 | ||
144 | u32 sample_freq_avail[5]; | ||
145 | |||
144 | struct completion completion; | 146 | struct completion completion; |
145 | }; | 147 | }; |
146 | 148 | ||
149 | static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 }; | ||
150 | |||
147 | #define VF610_ADC_CHAN(_idx, _chan_type) { \ | 151 | #define VF610_ADC_CHAN(_idx, _chan_type) { \ |
148 | .type = (_chan_type), \ | 152 | .type = (_chan_type), \ |
149 | .indexed = 1, \ | 153 | .indexed = 1, \ |
@@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = { | |||
180 | /* sentinel */ | 184 | /* sentinel */ |
181 | }; | 185 | }; |
182 | 186 | ||
183 | /* | 187 | static inline void vf610_adc_calculate_rates(struct vf610_adc *info) |
184 | * ADC sample frequency, unit is ADCK cycles. | 188 | { |
185 | * ADC clk source is ipg clock, which is the same as bus clock. | 189 | unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk); |
186 | * | 190 | int i; |
187 | * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder) | 191 | |
188 | * SFCAdder: fixed to 6 ADCK cycles | 192 | /* |
189 | * AverageNum: 1, 4, 8, 16, 32 samples for hardware average. | 193 | * Calculate ADC sample frequencies |
190 | * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode | 194 | * Sample time unit is ADCK cycles. ADCK clk source is ipg clock, |
191 | * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles | 195 | * which is the same as bus clock. |
192 | * | 196 | * |
193 | * By default, enable 12 bit resolution mode, clock source | 197 | * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder) |
194 | * set to ipg clock, So get below frequency group: | 198 | * SFCAdder: fixed to 6 ADCK cycles |
195 | */ | 199 | * AverageNum: 1, 4, 8, 16, 32 samples for hardware average. |
196 | static const u32 vf610_sample_freq_avail[5] = | 200 | * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode |
197 | {1941176, 559332, 286957, 145374, 73171}; | 201 | * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles |
202 | */ | ||
203 | adck_rate = ipg_rate / info->adc_feature.clk_div; | ||
204 | for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) | ||
205 | info->sample_freq_avail[i] = | ||
206 | adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3)); | ||
207 | } | ||
198 | 208 | ||
199 | static inline void vf610_adc_cfg_init(struct vf610_adc *info) | 209 | static inline void vf610_adc_cfg_init(struct vf610_adc *info) |
200 | { | 210 | { |
211 | struct vf610_adc_feature *adc_feature = &info->adc_feature; | ||
212 | |||
201 | /* set default Configuration for ADC controller */ | 213 | /* set default Configuration for ADC controller */ |
202 | info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET; | 214 | adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET; |
203 | info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET; | 215 | adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET; |
216 | |||
217 | adc_feature->calibration = true; | ||
218 | adc_feature->ovwren = true; | ||
219 | |||
220 | adc_feature->res_mode = 12; | ||
221 | adc_feature->sample_rate = 1; | ||
222 | adc_feature->lpm = true; | ||
204 | 223 | ||
205 | info->adc_feature.calibration = true; | 224 | /* Use a save ADCK which is below 20MHz on all devices */ |
205 | info->adc_feature.calibration = true; | 224 | /* Use a safe ADCK which is below 20MHz on all devices */
206 | info->adc_feature.ovwren = true; | 225 | adc_feature->clk_div = 8; |
207 | 226 | ||
208 | info->adc_feature.clk_div = 1; | 227 | vf610_adc_calculate_rates(info); |
209 | info->adc_feature.res_mode = 12; | ||
210 | info->adc_feature.sample_rate = 1; | ||
211 | info->adc_feature.lpm = true; | ||
212 | } | 228 | } |
213 | 229 | ||
214 | static void vf610_adc_cfg_post_set(struct vf610_adc *info) | 230 | static void vf610_adc_cfg_post_set(struct vf610_adc *info) |
@@ -290,12 +306,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info) | |||
290 | 306 | ||
291 | cfg_data = readl(info->regs + VF610_REG_ADC_CFG); | 307 | cfg_data = readl(info->regs + VF610_REG_ADC_CFG); |
292 | 308 | ||
293 | /* low power configuration */ | ||
294 | cfg_data &= ~VF610_ADC_ADLPC_EN; | 309 | cfg_data &= ~VF610_ADC_ADLPC_EN; |
295 | if (adc_feature->lpm) | 310 | if (adc_feature->lpm) |
296 | cfg_data |= VF610_ADC_ADLPC_EN; | 311 | cfg_data |= VF610_ADC_ADLPC_EN; |
297 | 312 | ||
298 | /* disable high speed */ | ||
299 | cfg_data &= ~VF610_ADC_ADHSC_EN; | 313 | cfg_data &= ~VF610_ADC_ADHSC_EN; |
300 | 314 | ||
301 | writel(cfg_data, info->regs + VF610_REG_ADC_CFG); | 315 | writel(cfg_data, info->regs + VF610_REG_ADC_CFG); |
@@ -435,10 +449,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id) | |||
435 | return IRQ_HANDLED; | 449 | return IRQ_HANDLED; |
436 | } | 450 | } |
437 | 451 | ||
438 | static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171"); | 452 | static ssize_t vf610_show_samp_freq_avail(struct device *dev, |
453 | struct device_attribute *attr, char *buf) | ||
454 | { | ||
455 | struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev)); | ||
456 | size_t len = 0; | ||
457 | int i; | ||
458 | |||
459 | for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++) | ||
460 | len += scnprintf(buf + len, PAGE_SIZE - len, | ||
461 | "%u ", info->sample_freq_avail[i]); | ||
462 | |||
463 | /* replace trailing space by newline */ | ||
464 | buf[len - 1] = '\n'; | ||
465 | |||
466 | return len; | ||
467 | } | ||
468 | |||
469 | static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail); | ||
439 | 470 | ||
440 | static struct attribute *vf610_attributes[] = { | 471 | static struct attribute *vf610_attributes[] = { |
441 | &iio_const_attr_sampling_frequency_available.dev_attr.attr, | 472 | &iio_dev_attr_sampling_frequency_available.dev_attr.attr, |
442 | NULL | 473 | NULL |
443 | }; | 474 | }; |
444 | 475 | ||
@@ -502,7 +533,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev, | |||
502 | return IIO_VAL_FRACTIONAL_LOG2; | 533 | return IIO_VAL_FRACTIONAL_LOG2; |
503 | 534 | ||
504 | case IIO_CHAN_INFO_SAMP_FREQ: | 535 | case IIO_CHAN_INFO_SAMP_FREQ: |
505 | *val = vf610_sample_freq_avail[info->adc_feature.sample_rate]; | 536 | *val = info->sample_freq_avail[info->adc_feature.sample_rate]; |
506 | *val2 = 0; | 537 | *val2 = 0; |
507 | return IIO_VAL_INT; | 538 | return IIO_VAL_INT; |
508 | 539 | ||
@@ -525,9 +556,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev, | |||
525 | switch (mask) { | 556 | switch (mask) { |
526 | case IIO_CHAN_INFO_SAMP_FREQ: | 557 | case IIO_CHAN_INFO_SAMP_FREQ: |
527 | for (i = 0; | 558 | for (i = 0; |
528 | i < ARRAY_SIZE(vf610_sample_freq_avail); | 559 | i < ARRAY_SIZE(info->sample_freq_avail); |
529 | i++) | 560 | i++) |
530 | if (val == vf610_sample_freq_avail[i]) { | 561 | if (val == info->sample_freq_avail[i]) { |
531 | info->adc_feature.sample_rate = i; | 562 | info->adc_feature.sample_rate = i; |
532 | vf610_adc_sample_set(info); | 563 | vf610_adc_sample_set(info); |
533 | return 0; | 564 | return 0; |
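The vf610 conversion replaces the hard-coded frequency table with values derived from the real ipg clock, using the formula in the comment: rate = adck_rate / (SFCAdder + AverageNum * (BCT + LSTAdder)) with SFCAdder = 6, BCT = 25 and LSTAdder = 3 ADCK cycles. A worked sketch, assuming an illustrative 66 MHz ipg clock and the new divider of 8 (the clock value is an assumption, not from the patch):

    static const unsigned int hw_avgs[] = { 1, 4, 8, 16, 32 };

    /*
     * With adck_rate = 66000000 / 8 = 8250000 Hz this yields roughly
     * 242647, 69915, 35869, 18171 and 9146 Hz for 1..32 hardware averages.
     */
    static void calc_rates_sketch(unsigned long ipg_rate, unsigned int clk_div,
                                  unsigned int rates[5])
    {
            unsigned long adck_rate = ipg_rate / clk_div;
            int i;

            for (i = 0; i < 5; i++)
                    rates[i] = adck_rate / (6 + hw_avgs[i] * (25 + 3));
    }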
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c index 52d70435f5a1..55a90082a29b 100644 --- a/drivers/iio/common/ssp_sensors/ssp_dev.c +++ b/drivers/iio/common/ssp_sensors/ssp_dev.c | |||
@@ -640,6 +640,7 @@ static int ssp_remove(struct spi_device *spi) | |||
640 | return 0; | 640 | return 0; |
641 | } | 641 | } |
642 | 642 | ||
643 | #ifdef CONFIG_PM_SLEEP | ||
643 | static int ssp_suspend(struct device *dev) | 644 | static int ssp_suspend(struct device *dev) |
644 | { | 645 | { |
645 | int ret; | 646 | int ret; |
@@ -688,6 +689,7 @@ static int ssp_resume(struct device *dev) | |||
688 | 689 | ||
689 | return 0; | 690 | return 0; |
690 | } | 691 | } |
692 | #endif /* CONFIG_PM_SLEEP */ | ||
691 | 693 | ||
692 | static const struct dev_pm_ops ssp_pm_ops = { | 694 | static const struct dev_pm_ops ssp_pm_ops = { |
693 | SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume) | 695 | SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume) |
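The ssp_dev hunk guards the suspend/resume callbacks with CONFIG_PM_SLEEP because SET_SYSTEM_SLEEP_PM_OPS() expands to nothing when that option is off, which would otherwise leave the two functions defined but unused and trigger a compiler warning. A minimal sketch of the pattern with placeholder names (marking the callbacks __maybe_unused is the common alternative):

    #ifdef CONFIG_PM_SLEEP
    static int foo_suspend(struct device *dev)
    {
            /* quiesce the device */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            /* bring the device back up */
            return 0;
    }
    #endif /* CONFIG_PM_SLEEP */

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);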
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c index f57562aa396f..15c73e20272d 100644 --- a/drivers/iio/dac/ad5686.c +++ b/drivers/iio/dac/ad5686.c | |||
@@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi) | |||
322 | st = iio_priv(indio_dev); | 322 | st = iio_priv(indio_dev); |
323 | spi_set_drvdata(spi, indio_dev); | 323 | spi_set_drvdata(spi, indio_dev); |
324 | 324 | ||
325 | st->reg = devm_regulator_get(&spi->dev, "vcc"); | 325 | st->reg = devm_regulator_get_optional(&spi->dev, "vcc"); |
326 | if (!IS_ERR(st->reg)) { | 326 | if (!IS_ERR(st->reg)) { |
327 | ret = regulator_enable(st->reg); | 327 | ret = regulator_enable(st->reg); |
328 | if (ret) | 328 | if (ret) |
diff --git a/drivers/iio/gyro/bmg160.c b/drivers/iio/gyro/bmg160.c index 60451b328242..ccf3ea7e1afa 100644 --- a/drivers/iio/gyro/bmg160.c +++ b/drivers/iio/gyro/bmg160.c | |||
@@ -822,7 +822,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p) | |||
822 | int bit, ret, i = 0; | 822 | int bit, ret, i = 0; |
823 | 823 | ||
824 | mutex_lock(&data->mutex); | 824 | mutex_lock(&data->mutex); |
825 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 825 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
826 | indio_dev->masklength) { | 826 | indio_dev->masklength) { |
827 | ret = i2c_smbus_read_word_data(data->client, | 827 | ret = i2c_smbus_read_word_data(data->client, |
828 | BMG160_AXIS_TO_REG(bit)); | 828 | BMG160_AXIS_TO_REG(bit)); |
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c index 623c145d8a97..7d79a1ac5f5f 100644 --- a/drivers/iio/humidity/dht11.c +++ b/drivers/iio/humidity/dht11.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/wait.h> | 29 | #include <linux/wait.h> |
30 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
31 | #include <linux/completion.h> | 31 | #include <linux/completion.h> |
32 | #include <linux/mutex.h> | ||
32 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
33 | #include <linux/gpio.h> | 34 | #include <linux/gpio.h> |
34 | #include <linux/of_gpio.h> | 35 | #include <linux/of_gpio.h> |
@@ -39,8 +40,12 @@ | |||
39 | 40 | ||
40 | #define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */ | 41 | #define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */ |
41 | 42 | ||
42 | #define DHT11_EDGES_PREAMBLE 4 | 43 | #define DHT11_EDGES_PREAMBLE 2 |
43 | #define DHT11_BITS_PER_READ 40 | 44 | #define DHT11_BITS_PER_READ 40 |
45 | /* | ||
46 | * Note that when reading the sensor actually 84 edges are detected, but | ||
47 | * since the last edge is not significant, we only store 83: | ||
48 | */ | ||
44 | #define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1) | 49 | #define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1) |
45 | 50 | ||
46 | /* Data transmission timing (nano seconds) */ | 51 | /* Data transmission timing (nano seconds) */ |
@@ -57,6 +62,7 @@ struct dht11 { | |||
57 | int irq; | 62 | int irq; |
58 | 63 | ||
59 | struct completion completion; | 64 | struct completion completion; |
65 | struct mutex lock; | ||
60 | 66 | ||
61 | s64 timestamp; | 67 | s64 timestamp; |
62 | int temperature; | 68 | int temperature; |
@@ -88,7 +94,7 @@ static int dht11_decode(struct dht11 *dht11, int offset) | |||
88 | unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum; | 94 | unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum; |
89 | 95 | ||
90 | /* Calculate timestamp resolution */ | 96 | /* Calculate timestamp resolution */ |
91 | for (i = 0; i < dht11->num_edges; ++i) { | 97 | for (i = 1; i < dht11->num_edges; ++i) { |
92 | t = dht11->edges[i].ts - dht11->edges[i-1].ts; | 98 | t = dht11->edges[i].ts - dht11->edges[i-1].ts; |
93 | if (t > 0 && t < timeres) | 99 | if (t > 0 && t < timeres) |
94 | timeres = t; | 100 | timeres = t; |
@@ -138,6 +144,27 @@ static int dht11_decode(struct dht11 *dht11, int offset) | |||
138 | return 0; | 144 | return 0; |
139 | } | 145 | } |
140 | 146 | ||
147 | /* | ||
148 | * IRQ handler called on GPIO edges | ||
149 | */ | ||
150 | static irqreturn_t dht11_handle_irq(int irq, void *data) | ||
151 | { | ||
152 | struct iio_dev *iio = data; | ||
153 | struct dht11 *dht11 = iio_priv(iio); | ||
154 | |||
155 | /* TODO: Consider making the handler safe for IRQ sharing */ | ||
156 | if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) { | ||
157 | dht11->edges[dht11->num_edges].ts = iio_get_time_ns(); | ||
158 | dht11->edges[dht11->num_edges++].value = | ||
159 | gpio_get_value(dht11->gpio); | ||
160 | |||
161 | if (dht11->num_edges >= DHT11_EDGES_PER_READ) | ||
162 | complete(&dht11->completion); | ||
163 | } | ||
164 | |||
165 | return IRQ_HANDLED; | ||
166 | } | ||
167 | |||
141 | static int dht11_read_raw(struct iio_dev *iio_dev, | 168 | static int dht11_read_raw(struct iio_dev *iio_dev, |
142 | const struct iio_chan_spec *chan, | 169 | const struct iio_chan_spec *chan, |
143 | int *val, int *val2, long m) | 170 | int *val, int *val2, long m) |
@@ -145,6 +172,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev, | |||
145 | struct dht11 *dht11 = iio_priv(iio_dev); | 172 | struct dht11 *dht11 = iio_priv(iio_dev); |
146 | int ret; | 173 | int ret; |
147 | 174 | ||
175 | mutex_lock(&dht11->lock); | ||
148 | if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) { | 176 | if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) { |
149 | reinit_completion(&dht11->completion); | 177 | reinit_completion(&dht11->completion); |
150 | 178 | ||
@@ -157,8 +185,17 @@ static int dht11_read_raw(struct iio_dev *iio_dev, | |||
157 | if (ret) | 185 | if (ret) |
158 | goto err; | 186 | goto err; |
159 | 187 | ||
188 | ret = request_irq(dht11->irq, dht11_handle_irq, | ||
189 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
190 | iio_dev->name, iio_dev); | ||
191 | if (ret) | ||
192 | goto err; | ||
193 | |||
160 | ret = wait_for_completion_killable_timeout(&dht11->completion, | 194 | ret = wait_for_completion_killable_timeout(&dht11->completion, |
161 | HZ); | 195 | HZ); |
196 | |||
197 | free_irq(dht11->irq, iio_dev); | ||
198 | |||
162 | if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) { | 199 | if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) { |
163 | dev_err(&iio_dev->dev, | 200 | dev_err(&iio_dev->dev, |
164 | "Only %d signal edges detected\n", | 201 | "Only %d signal edges detected\n", |
@@ -185,6 +222,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev, | |||
185 | ret = -EINVAL; | 222 | ret = -EINVAL; |
186 | err: | 223 | err: |
187 | dht11->num_edges = -1; | 224 | dht11->num_edges = -1; |
225 | mutex_unlock(&dht11->lock); | ||
188 | return ret; | 226 | return ret; |
189 | } | 227 | } |
190 | 228 | ||
@@ -193,27 +231,6 @@ static const struct iio_info dht11_iio_info = { | |||
193 | .read_raw = dht11_read_raw, | 231 | .read_raw = dht11_read_raw, |
194 | }; | 232 | }; |
195 | 233 | ||
196 | /* | ||
197 | * IRQ handler called on GPIO edges | ||
198 | */ | ||
199 | static irqreturn_t dht11_handle_irq(int irq, void *data) | ||
200 | { | ||
201 | struct iio_dev *iio = data; | ||
202 | struct dht11 *dht11 = iio_priv(iio); | ||
203 | |||
204 | /* TODO: Consider making the handler safe for IRQ sharing */ | ||
205 | if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) { | ||
206 | dht11->edges[dht11->num_edges].ts = iio_get_time_ns(); | ||
207 | dht11->edges[dht11->num_edges++].value = | ||
208 | gpio_get_value(dht11->gpio); | ||
209 | |||
210 | if (dht11->num_edges >= DHT11_EDGES_PER_READ) | ||
211 | complete(&dht11->completion); | ||
212 | } | ||
213 | |||
214 | return IRQ_HANDLED; | ||
215 | } | ||
216 | |||
217 | static const struct iio_chan_spec dht11_chan_spec[] = { | 234 | static const struct iio_chan_spec dht11_chan_spec[] = { |
218 | { .type = IIO_TEMP, | 235 | { .type = IIO_TEMP, |
219 | .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), }, | 236 | .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), }, |
@@ -256,11 +273,6 @@ static int dht11_probe(struct platform_device *pdev) | |||
256 | dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio); | 273 | dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio); |
257 | return -EINVAL; | 274 | return -EINVAL; |
258 | } | 275 | } |
259 | ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq, | ||
260 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
261 | pdev->name, iio); | ||
262 | if (ret) | ||
263 | return ret; | ||
264 | 276 | ||
265 | dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1; | 277 | dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1; |
266 | dht11->num_edges = -1; | 278 | dht11->num_edges = -1; |
@@ -268,6 +280,7 @@ static int dht11_probe(struct platform_device *pdev) | |||
268 | platform_set_drvdata(pdev, iio); | 280 | platform_set_drvdata(pdev, iio); |
269 | 281 | ||
270 | init_completion(&dht11->completion); | 282 | init_completion(&dht11->completion); |
283 | mutex_init(&dht11->lock); | ||
271 | iio->name = pdev->name; | 284 | iio->name = pdev->name; |
272 | iio->dev.parent = &pdev->dev; | 285 | iio->dev.parent = &pdev->dev; |
273 | iio->info = &dht11_iio_info; | 286 | iio->info = &dht11_iio_info; |
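Two things change in dht11: concurrent reads are now serialized by a mutex, and the GPIO interrupt is requested only for the duration of a measurement rather than for the lifetime of the device, so edges arriving outside a read can no longer disturb the edge buffer. A stripped-down sketch of that shape (not the driver's exact flow):

    static int dht11_measure_sketch(struct dht11 *dht11, struct iio_dev *iio_dev)
    {
            long t;
            int ret;

            mutex_lock(&dht11->lock);

            reinit_completion(&dht11->completion);
            dht11->num_edges = 0;

            /* only listen for edges while a conversion is running */
            ret = request_irq(dht11->irq, dht11_handle_irq,
                              IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                              iio_dev->name, iio_dev);
            if (ret)
                    goto out;

            t = wait_for_completion_killable_timeout(&dht11->completion, HZ);
            if (t == 0)
                    ret = -ETIMEDOUT;
            else if (t < 0)
                    ret = t;

            free_irq(dht11->irq, iio_dev);
    out:
            mutex_unlock(&dht11->lock);
            return ret;
    }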
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c index b54164677b89..fa3b809aff5e 100644 --- a/drivers/iio/humidity/si7020.c +++ b/drivers/iio/humidity/si7020.c | |||
@@ -45,12 +45,12 @@ static int si7020_read_raw(struct iio_dev *indio_dev, | |||
45 | struct iio_chan_spec const *chan, int *val, | 45 | struct iio_chan_spec const *chan, int *val, |
46 | int *val2, long mask) | 46 | int *val2, long mask) |
47 | { | 47 | { |
48 | struct i2c_client *client = iio_priv(indio_dev); | 48 | struct i2c_client **client = iio_priv(indio_dev); |
49 | int ret; | 49 | int ret; |
50 | 50 | ||
51 | switch (mask) { | 51 | switch (mask) { |
52 | case IIO_CHAN_INFO_RAW: | 52 | case IIO_CHAN_INFO_RAW: |
53 | ret = i2c_smbus_read_word_data(client, | 53 | ret = i2c_smbus_read_word_data(*client, |
54 | chan->type == IIO_TEMP ? | 54 | chan->type == IIO_TEMP ? |
55 | SI7020CMD_TEMP_HOLD : | 55 | SI7020CMD_TEMP_HOLD : |
56 | SI7020CMD_RH_HOLD); | 56 | SI7020CMD_RH_HOLD); |
@@ -126,7 +126,7 @@ static int si7020_probe(struct i2c_client *client, | |||
126 | /* Wait the maximum power-up time after software reset. */ | 126 | /* Wait the maximum power-up time after software reset. */ |
127 | msleep(15); | 127 | msleep(15); |
128 | 128 | ||
129 | indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*client)); | 129 | indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); |
130 | if (!indio_dev) | 130 | if (!indio_dev) |
131 | return -ENOMEM; | 131 | return -ENOMEM; |
132 | 132 | ||
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index b70873de04ea..fa795dcd5f75 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/list.h> | 26 | #include <linux/list.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/debugfs.h> | 28 | #include <linux/debugfs.h> |
29 | #include <linux/bitops.h> | ||
29 | 30 | ||
30 | #include <linux/iio/iio.h> | 31 | #include <linux/iio/iio.h> |
31 | #include <linux/iio/sysfs.h> | 32 | #include <linux/iio/sysfs.h> |
@@ -414,7 +415,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, | |||
414 | mutex_unlock(&indio_dev->mlock); | 415 | mutex_unlock(&indio_dev->mlock); |
415 | if (ret) | 416 | if (ret) |
416 | return ret; | 417 | return ret; |
417 | val16 = ((val16 & 0xFFF) << 4) >> 4; | 418 | val16 = sign_extend32(val16, 11); |
418 | *val = val16; | 419 | *val = val16; |
419 | return IIO_VAL_INT; | 420 | return IIO_VAL_INT; |
420 | case IIO_CHAN_INFO_OFFSET: | 421 | case IIO_CHAN_INFO_OFFSET: |
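sign_extend32(val16, 11) in the adis16400 hunk replaces the shift-up/shift-down expression, which was subject to integer promotion and so never actually extended the sign: it treats bit 11 as the sign bit of the 12-bit field and extends it into a signed 32-bit value. A worked check (values are illustrative):

    #include <linux/bitops.h>

    /*
     * sign_extend32(0xFFF, 11) == -1 and sign_extend32(0x7FF, 11) == 2047:
     * bit 11 is taken as the sign bit of the 12-bit field.
     */
    static int decode_12bit(u16 raw)
    {
            return sign_extend32(raw & 0xFFF, 11);
    }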
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index e0017c22bb9c..f53e9a803a0e 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c | |||
@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) | |||
60 | iio_trigger_set_drvdata(adis->trig, adis); | 60 | iio_trigger_set_drvdata(adis->trig, adis); |
61 | ret = iio_trigger_register(adis->trig); | 61 | ret = iio_trigger_register(adis->trig); |
62 | 62 | ||
63 | indio_dev->trig = adis->trig; | 63 | indio_dev->trig = iio_trigger_get(adis->trig); |
64 | if (ret) | 64 | if (ret) |
65 | goto error_free_irq; | 65 | goto error_free_irq; |
66 | 66 | ||
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index f73e60b7a796..ef76afe2643c 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | |||
@@ -410,42 +410,46 @@ error_read_raw: | |||
410 | } | 410 | } |
411 | } | 411 | } |
412 | 412 | ||
413 | static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr) | 413 | static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val) |
414 | { | 414 | { |
415 | int result; | 415 | int result, i; |
416 | u8 d; | 416 | u8 d; |
417 | 417 | ||
418 | if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM) | 418 | for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) { |
419 | return -EINVAL; | 419 | if (gyro_scale_6050[i] == val) { |
420 | if (fsr == st->chip_config.fsr) | 420 | d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); |
421 | return 0; | 421 | result = inv_mpu6050_write_reg(st, |
422 | st->reg->gyro_config, d); | ||
423 | if (result) | ||
424 | return result; | ||
422 | 425 | ||
423 | d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); | 426 | st->chip_config.fsr = i; |
424 | result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d); | 427 | return 0; |
425 | if (result) | 428 | } |
426 | return result; | 429 | } |
427 | st->chip_config.fsr = fsr; | ||
428 | 430 | ||
429 | return 0; | 431 | return -EINVAL; |
430 | } | 432 | } |
431 | 433 | ||
432 | static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs) | 434 | static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) |
433 | { | 435 | { |
434 | int result; | 436 | int result, i; |
435 | u8 d; | 437 | u8 d; |
436 | 438 | ||
437 | if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM) | 439 | for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) { |
438 | return -EINVAL; | 440 | if (accel_scale[i] == val) { |
439 | if (fs == st->chip_config.accl_fs) | 441 | d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); |
440 | return 0; | 442 | result = inv_mpu6050_write_reg(st, |
443 | st->reg->accl_config, d); | ||
444 | if (result) | ||
445 | return result; | ||
441 | 446 | ||
442 | d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); | 447 | st->chip_config.accl_fs = i; |
443 | result = inv_mpu6050_write_reg(st, st->reg->accl_config, d); | 448 | return 0; |
444 | if (result) | 449 | } |
445 | return result; | 450 | } |
446 | st->chip_config.accl_fs = fs; | ||
447 | 451 | ||
448 | return 0; | 452 | return -EINVAL; |
449 | } | 453 | } |
450 | 454 | ||
451 | static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, | 455 | static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, |
@@ -471,10 +475,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, | |||
471 | case IIO_CHAN_INFO_SCALE: | 475 | case IIO_CHAN_INFO_SCALE: |
472 | switch (chan->type) { | 476 | switch (chan->type) { |
473 | case IIO_ANGL_VEL: | 477 | case IIO_ANGL_VEL: |
474 | result = inv_mpu6050_write_fsr(st, val); | 478 | result = inv_mpu6050_write_gyro_scale(st, val2); |
475 | break; | 479 | break; |
476 | case IIO_ACCEL: | 480 | case IIO_ACCEL: |
477 | result = inv_mpu6050_write_accel_fs(st, val); | 481 | result = inv_mpu6050_write_accel_scale(st, val2); |
478 | break; | 482 | break; |
479 | default: | 483 | default: |
480 | result = -EINVAL; | 484 | result = -EINVAL; |
@@ -780,7 +784,11 @@ static int inv_mpu_probe(struct i2c_client *client, | |||
780 | 784 | ||
781 | i2c_set_clientdata(client, indio_dev); | 785 | i2c_set_clientdata(client, indio_dev); |
782 | indio_dev->dev.parent = &client->dev; | 786 | indio_dev->dev.parent = &client->dev; |
783 | indio_dev->name = id->name; | 787 | /* id will be NULL when enumerated via ACPI */ |
788 | if (id) | ||
789 | indio_dev->name = (char *)id->name; | ||
790 | else | ||
791 | indio_dev->name = (char *)dev_name(&client->dev); | ||
784 | indio_dev->channels = inv_mpu_channels; | 792 | indio_dev->channels = inv_mpu_channels; |
785 | indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); | 793 | indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); |
786 | 794 | ||
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 0cd306a72a6e..ba27e277511f 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | |||
@@ -24,6 +24,16 @@ | |||
24 | #include <linux/poll.h> | 24 | #include <linux/poll.h> |
25 | #include "inv_mpu_iio.h" | 25 | #include "inv_mpu_iio.h" |
26 | 26 | ||
27 | static void inv_clear_kfifo(struct inv_mpu6050_state *st) | ||
28 | { | ||
29 | unsigned long flags; | ||
30 | |||
31 | /* take the spin lock sem to avoid interrupt kick in */ | ||
32 | spin_lock_irqsave(&st->time_stamp_lock, flags); | ||
33 | kfifo_reset(&st->timestamps); | ||
34 | spin_unlock_irqrestore(&st->time_stamp_lock, flags); | ||
35 | } | ||
36 | |||
27 | int inv_reset_fifo(struct iio_dev *indio_dev) | 37 | int inv_reset_fifo(struct iio_dev *indio_dev) |
28 | { | 38 | { |
29 | int result; | 39 | int result; |
@@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev) | |||
50 | INV_MPU6050_BIT_FIFO_RST); | 60 | INV_MPU6050_BIT_FIFO_RST); |
51 | if (result) | 61 | if (result) |
52 | goto reset_fifo_fail; | 62 | goto reset_fifo_fail; |
63 | |||
64 | /* clear timestamps fifo */ | ||
65 | inv_clear_kfifo(st); | ||
66 | |||
53 | /* enable interrupt */ | 67 | /* enable interrupt */ |
54 | if (st->chip_config.accl_fifo_enable || | 68 | if (st->chip_config.accl_fifo_enable || |
55 | st->chip_config.gyro_fifo_enable) { | 69 | st->chip_config.gyro_fifo_enable) { |
@@ -83,16 +97,6 @@ reset_fifo_fail: | |||
83 | return result; | 97 | return result; |
84 | } | 98 | } |
85 | 99 | ||
86 | static void inv_clear_kfifo(struct inv_mpu6050_state *st) | ||
87 | { | ||
88 | unsigned long flags; | ||
89 | |||
90 | /* take the spin lock sem to avoid interrupt kick in */ | ||
91 | spin_lock_irqsave(&st->time_stamp_lock, flags); | ||
92 | kfifo_reset(&st->timestamps); | ||
93 | spin_unlock_irqrestore(&st->time_stamp_lock, flags); | ||
94 | } | ||
95 | |||
96 | /** | 100 | /** |
97 | * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt. | 101 | * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt. |
98 | */ | 102 | */ |
@@ -184,7 +188,6 @@ end_session: | |||
184 | flush_fifo: | 188 | flush_fifo: |
185 | /* Flush HW and SW FIFOs. */ | 189 | /* Flush HW and SW FIFOs. */ |
186 | inv_reset_fifo(indio_dev); | 190 | inv_reset_fifo(indio_dev); |
187 | inv_clear_kfifo(st); | ||
188 | mutex_unlock(&indio_dev->mlock); | 191 | mutex_unlock(&indio_dev->mlock); |
189 | iio_trigger_notify_done(indio_dev->trig); | 192 | iio_trigger_notify_done(indio_dev->trig); |
190 | 193 | ||
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index 5cc3692acf37..b3a36376c719 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c | |||
@@ -1227,7 +1227,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p) | |||
1227 | base = KMX61_MAG_XOUT_L; | 1227 | base = KMX61_MAG_XOUT_L; |
1228 | 1228 | ||
1229 | mutex_lock(&data->lock); | 1229 | mutex_lock(&data->lock); |
1230 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 1230 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
1231 | indio_dev->masklength) { | 1231 | indio_dev->masklength) { |
1232 | ret = kmx61_read_measurement(data, base, bit); | 1232 | ret = kmx61_read_measurement(data, base, bit); |
1233 | if (ret < 0) { | 1233 | if (ret < 0) { |
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index aaba9d3d980e..4df97f650e44 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c | |||
@@ -847,8 +847,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, | |||
847 | * @attr_list: List of IIO device attributes | 847 | * @attr_list: List of IIO device attributes |
848 | * | 848 | * |
849 | * This function frees the memory allocated for each of the IIO device | 849 | * This function frees the memory allocated for each of the IIO device |
850 | * attributes in the list. Note: if you want to reuse the list after calling | 850 | * attributes in the list. |
851 | * this function you have to reinitialize it using INIT_LIST_HEAD(). | ||
852 | */ | 851 | */ |
853 | void iio_free_chan_devattr_list(struct list_head *attr_list) | 852 | void iio_free_chan_devattr_list(struct list_head *attr_list) |
854 | { | 853 | { |
@@ -856,6 +855,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list) | |||
856 | 855 | ||
857 | list_for_each_entry_safe(p, n, attr_list, l) { | 856 | list_for_each_entry_safe(p, n, attr_list, l) { |
858 | kfree(p->dev_attr.attr.name); | 857 | kfree(p->dev_attr.attr.name); |
858 | list_del(&p->l); | ||
859 | kfree(p); | 859 | kfree(p); |
860 | } | 860 | } |
861 | } | 861 | } |
@@ -936,6 +936,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) | |||
936 | 936 | ||
937 | iio_free_chan_devattr_list(&indio_dev->channel_attr_list); | 937 | iio_free_chan_devattr_list(&indio_dev->channel_attr_list); |
938 | kfree(indio_dev->chan_attr_group.attrs); | 938 | kfree(indio_dev->chan_attr_group.attrs); |
939 | indio_dev->chan_attr_group.attrs = NULL; | ||
939 | } | 940 | } |
940 | 941 | ||
941 | static void iio_dev_release(struct device *device) | 942 | static void iio_dev_release(struct device *device) |
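The industrialio-core changes pair a list_del() with each kfree() in iio_free_chan_devattr_list(), so the list head is a valid empty list again once the walk completes; that is what lets the comment drop the old "reinitialize with INIT_LIST_HEAD() before reuse" caveat. A minimal sketch of the pattern with a hypothetical entry type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct attr_entry {
            struct list_head l;
            char *name;
    };

    static void free_attr_list(struct list_head *attr_list)
    {
            struct attr_entry *p, *n;

            /* the _safe walk caches the next node, so the current one may be freed */
            list_for_each_entry_safe(p, n, attr_list, l) {
                    kfree(p->name);
                    list_del(&p->l);        /* unlink before freeing; the head stays usable */
                    kfree(p);
            }
    }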
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index a4b397048f71..a99692ba91bc 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c | |||
@@ -500,6 +500,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev) | |||
500 | error_free_setup_event_lines: | 500 | error_free_setup_event_lines: |
501 | iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); | 501 | iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); |
502 | kfree(indio_dev->event_interface); | 502 | kfree(indio_dev->event_interface); |
503 | indio_dev->event_interface = NULL; | ||
503 | return ret; | 504 | return ret; |
504 | } | 505 | } |
505 | 506 | ||
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index ae68c64bdad3..a224afd6380c 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig | |||
@@ -73,6 +73,7 @@ config CM36651 | |||
73 | config GP2AP020A00F | 73 | config GP2AP020A00F |
74 | tristate "Sharp GP2AP020A00F Proximity/ALS sensor" | 74 | tristate "Sharp GP2AP020A00F Proximity/ALS sensor" |
75 | depends on I2C | 75 | depends on I2C |
76 | select REGMAP_I2C | ||
76 | select IIO_BUFFER | 77 | select IIO_BUFFER |
77 | select IIO_TRIGGERED_BUFFER | 78 | select IIO_TRIGGERED_BUFFER |
78 | select IRQ_WORK | 79 | select IRQ_WORK |
@@ -126,6 +127,7 @@ config HID_SENSOR_PROX | |||
126 | config JSA1212 | 127 | config JSA1212 |
127 | tristate "JSA1212 ALS and proximity sensor driver" | 128 | tristate "JSA1212 ALS and proximity sensor driver" |
128 | depends on I2C | 129 | depends on I2C |
130 | select REGMAP_I2C | ||
129 | help | 131 | help |
130 | Say Y here if you want to build a IIO driver for JSA1212 | 132 | Say Y here if you want to build a IIO driver for JSA1212 |
131 | proximity & ALS sensor device. | 133 | proximity & ALS sensor device. |
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index 4c7a4c52dd06..a5d6de72c523 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig | |||
@@ -18,6 +18,8 @@ config AK8975 | |||
18 | 18 | ||
19 | config AK09911 | 19 | config AK09911 |
20 | tristate "Asahi Kasei AK09911 3-axis Compass" | 20 | tristate "Asahi Kasei AK09911 3-axis Compass" |
21 | depends on I2C | ||
22 | depends on GPIOLIB | ||
21 | select AK8975 | 23 | select AK8975 |
22 | help | 24 | help |
23 | Deprecated: AK09911 is now supported by AK8975 driver. | 25 | Deprecated: AK09911 is now supported by AK8975 driver. |
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index 74dff4e4a11a..89fca3a70750 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c | |||
@@ -494,7 +494,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private) | |||
494 | 494 | ||
495 | mutex_lock(&data->mutex); | 495 | mutex_lock(&data->mutex); |
496 | 496 | ||
497 | for_each_set_bit(bit, indio_dev->buffer->scan_mask, | 497 | for_each_set_bit(bit, indio_dev->active_scan_mask, |
498 | indio_dev->masklength) { | 498 | indio_dev->masklength) { |
499 | ret = sx9500_read_proximity(data, &indio_dev->channels[bit], | 499 | ret = sx9500_read_proximity(data, &indio_dev->channels[bit], |
500 | &val); | 500 | &val); |
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index aec7a6aa2951..8c014b5dab4c 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
99 | if (dmasync) | 99 | if (dmasync) |
100 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | 100 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); |
101 | 101 | ||
102 | /* | ||
103 | * If the combination of the addr and size requested for this memory | ||
104 | * region causes an integer overflow, return error. | ||
105 | */ | ||
106 | if ((PAGE_ALIGN(addr + size) <= size) || | ||
107 | (PAGE_ALIGN(addr + size) <= addr)) | ||
108 | return ERR_PTR(-EINVAL); | ||
109 | |||
102 | if (!can_do_mlock()) | 110 | if (!can_do_mlock()) |
103 | return ERR_PTR(-EPERM); | 111 | return ERR_PTR(-EPERM); |
104 | 112 | ||
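The ib_umem_get() check rejects (addr, size) pairs whose page-aligned end wraps the address space: if addr + size overflows, the aligned sum ends up numerically no larger than size or addr, so one of the two comparisons fires before any pages are pinned. A worked example on a 64-bit address space with 4 KiB pages (numbers are illustrative): addr = 0xfffffffffffff000 and size = 0x2000 give addr + size = 0x1000 after wrap-around, and PAGE_ALIGN(0x1000) = 0x1000 is <= size, so the request fails with -EINVAL instead of mapping a bogus region.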
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index c7619716c31d..59040265e361 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -64,6 +64,14 @@ enum { | |||
64 | #define GUID_TBL_BLK_NUM_ENTRIES 8 | 64 | #define GUID_TBL_BLK_NUM_ENTRIES 8 |
65 | #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) | 65 | #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) |
66 | 66 | ||
67 | /* Counters should be saturate once they reach their maximum value */ | ||
68 | #define ASSIGN_32BIT_COUNTER(counter, value) do {\ | ||
69 | if ((value) > U32_MAX) \ | ||
70 | counter = cpu_to_be32(U32_MAX); \ | ||
71 | else \ | ||
72 | counter = cpu_to_be32(value); \ | ||
73 | } while (0) | ||
74 | |||
67 | struct mlx4_mad_rcv_buf { | 75 | struct mlx4_mad_rcv_buf { |
68 | struct ib_grh grh; | 76 | struct ib_grh grh; |
69 | u8 payload[256]; | 77 | u8 payload[256]; |
@@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | |||
806 | static void edit_counter(struct mlx4_counter *cnt, | 814 | static void edit_counter(struct mlx4_counter *cnt, |
807 | struct ib_pma_portcounters *pma_cnt) | 815 | struct ib_pma_portcounters *pma_cnt) |
808 | { | 816 | { |
809 | pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); | 817 | ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, |
810 | pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); | 818 | (be64_to_cpu(cnt->tx_bytes) >> 2)); |
811 | pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); | 819 | ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, |
812 | pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); | 820 | (be64_to_cpu(cnt->rx_bytes) >> 2)); |
821 | ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, | ||
822 | be64_to_cpu(cnt->tx_frames)); | ||
823 | ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, | ||
824 | be64_to_cpu(cnt->rx_frames)); | ||
813 | } | 825 | } |
814 | 826 | ||
815 | static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | 827 | static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ac6e2b710ea6..b972c0b41799 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -2697,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work) | |||
2697 | spin_lock_bh(&ibdev->iboe.lock); | 2697 | spin_lock_bh(&ibdev->iboe.lock); |
2698 | for (i = 0; i < MLX4_MAX_PORTS; ++i) { | 2698 | for (i = 0; i < MLX4_MAX_PORTS; ++i) { |
2699 | struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; | 2699 | struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; |
2700 | enum ib_port_state curr_port_state; | ||
2700 | 2701 | ||
2701 | enum ib_port_state curr_port_state = | 2702 | if (!curr_netdev) |
2703 | continue; | ||
2704 | |||
2705 | curr_port_state = | ||
2702 | (netif_running(curr_netdev) && | 2706 | (netif_running(curr_netdev) && |
2703 | netif_carrier_ok(curr_netdev)) ? | 2707 | netif_carrier_ok(curr_netdev)) ? |
2704 | IB_PORT_ACTIVE : IB_PORT_DOWN; | 2708 | IB_PORT_ACTIVE : IB_PORT_DOWN; |
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c index 8ff612d160b0..563932500ff1 100644 --- a/drivers/input/keyboard/tc3589x-keypad.c +++ b/drivers/input/keyboard/tc3589x-keypad.c | |||
@@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev) | |||
411 | 411 | ||
412 | input_set_drvdata(input, keypad); | 412 | input_set_drvdata(input, keypad); |
413 | 413 | ||
414 | error = request_threaded_irq(irq, NULL, | 414 | error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq, |
415 | tc3589x_keypad_irq, plat->irqtype, | 415 | plat->irqtype | IRQF_ONESHOT, |
416 | "tc3589x-keypad", keypad); | 416 | "tc3589x-keypad", keypad); |
417 | if (error < 0) { | 417 | if (error < 0) { |
418 | dev_err(&pdev->dev, | 418 | dev_err(&pdev->dev, |
419 | "Could not allocate irq %d,error %d\n", | 419 | "Could not allocate irq %d,error %d\n", |
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 59d4dcddf6de..98228773a111 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c | |||
@@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c, | |||
187 | idev->private = m; | 187 | idev->private = m; |
188 | idev->input->name = MMA8450_DRV_NAME; | 188 | idev->input->name = MMA8450_DRV_NAME; |
189 | idev->input->id.bustype = BUS_I2C; | 189 | idev->input->id.bustype = BUS_I2C; |
190 | idev->input->dev.parent = &c->dev; | ||
190 | idev->poll = mma8450_poll; | 191 | idev->poll = mma8450_poll; |
191 | idev->poll_interval = POLL_INTERVAL; | 192 | idev->poll_interval = POLL_INTERVAL; |
192 | idev->poll_interval_max = POLL_INTERVAL_MAX; | 193 | idev->poll_interval_max = POLL_INTERVAL_MAX; |
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index d28726a0ef85..27bcdbc950c9 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
@@ -1154,10 +1154,28 @@ out: | |||
1154 | mutex_unlock(&alps_mutex); | 1154 | mutex_unlock(&alps_mutex); |
1155 | } | 1155 | } |
1156 | 1156 | ||
1157 | static void alps_report_bare_ps2_packet(struct input_dev *dev, | 1157 | static void alps_report_bare_ps2_packet(struct psmouse *psmouse, |
1158 | unsigned char packet[], | 1158 | unsigned char packet[], |
1159 | bool report_buttons) | 1159 | bool report_buttons) |
1160 | { | 1160 | { |
1161 | struct alps_data *priv = psmouse->private; | ||
1162 | struct input_dev *dev; | ||
1163 | |||
1164 | /* Figure out which device to use to report the bare packet */ | ||
1165 | if (priv->proto_version == ALPS_PROTO_V2 && | ||
1166 | (priv->flags & ALPS_DUALPOINT)) { | ||
1167 | /* On V2 devices the DualPoint Stick reports bare packets */ | ||
1168 | dev = priv->dev2; | ||
1169 | } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) { | ||
1170 | /* Register dev3 mouse if we received PS/2 packet first time */ | ||
1171 | if (!IS_ERR(priv->dev3)) | ||
1172 | psmouse_queue_work(psmouse, &priv->dev3_register_work, | ||
1173 | 0); | ||
1174 | return; | ||
1175 | } else { | ||
1176 | dev = priv->dev3; | ||
1177 | } | ||
1178 | |||
1161 | if (report_buttons) | 1179 | if (report_buttons) |
1162 | alps_report_buttons(dev, NULL, | 1180 | alps_report_buttons(dev, NULL, |
1163 | packet[0] & 1, packet[0] & 2, packet[0] & 4); | 1181 | packet[0] & 1, packet[0] & 2, packet[0] & 4); |
@@ -1232,8 +1250,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse) | |||
1232 | * de-synchronization. | 1250 | * de-synchronization. |
1233 | */ | 1251 | */ |
1234 | 1252 | ||
1235 | alps_report_bare_ps2_packet(priv->dev2, | 1253 | alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3], |
1236 | &psmouse->packet[3], false); | 1254 | false); |
1237 | 1255 | ||
1238 | /* | 1256 | /* |
1239 | * Continue with the standard ALPS protocol handling, | 1257 | * Continue with the standard ALPS protocol handling, |
@@ -1289,18 +1307,9 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) | |||
1289 | * properly we only do this if the device is fully synchronized. | 1307 | * properly we only do this if the device is fully synchronized. |
1290 | */ | 1308 | */ |
1291 | if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { | 1309 | if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { |
1292 | |||
1293 | /* Register dev3 mouse if we received PS/2 packet first time */ | ||
1294 | if (unlikely(!priv->dev3)) | ||
1295 | psmouse_queue_work(psmouse, | ||
1296 | &priv->dev3_register_work, 0); | ||
1297 | |||
1298 | if (psmouse->pktcnt == 3) { | 1310 | if (psmouse->pktcnt == 3) { |
1299 | /* Once dev3 mouse device is registered report data */ | 1311 | alps_report_bare_ps2_packet(psmouse, psmouse->packet, |
1300 | if (likely(!IS_ERR_OR_NULL(priv->dev3))) | 1312 | true); |
1301 | alps_report_bare_ps2_packet(priv->dev3, | ||
1302 | psmouse->packet, | ||
1303 | true); | ||
1304 | return PSMOUSE_FULL_PACKET; | 1313 | return PSMOUSE_FULL_PACKET; |
1305 | } | 1314 | } |
1306 | return PSMOUSE_GOOD_DATA; | 1315 | return PSMOUSE_GOOD_DATA; |
@@ -2281,10 +2290,12 @@ static int alps_set_protocol(struct psmouse *psmouse, | |||
2281 | priv->set_abs_params = alps_set_abs_params_mt; | 2290 | priv->set_abs_params = alps_set_abs_params_mt; |
2282 | priv->nibble_commands = alps_v3_nibble_commands; | 2291 | priv->nibble_commands = alps_v3_nibble_commands; |
2283 | priv->addr_command = PSMOUSE_CMD_RESET_WRAP; | 2292 | priv->addr_command = PSMOUSE_CMD_RESET_WRAP; |
2284 | priv->x_max = 1360; | ||
2285 | priv->y_max = 660; | ||
2286 | priv->x_bits = 23; | 2293 | priv->x_bits = 23; |
2287 | priv->y_bits = 12; | 2294 | priv->y_bits = 12; |
2295 | |||
2296 | if (alps_dolphin_get_device_area(psmouse, priv)) | ||
2297 | return -EIO; | ||
2298 | |||
2288 | break; | 2299 | break; |
2289 | 2300 | ||
2290 | case ALPS_PROTO_V6: | 2301 | case ALPS_PROTO_V6: |
@@ -2303,9 +2314,8 @@ static int alps_set_protocol(struct psmouse *psmouse, | |||
2303 | priv->set_abs_params = alps_set_abs_params_mt; | 2314 | priv->set_abs_params = alps_set_abs_params_mt; |
2304 | priv->nibble_commands = alps_v3_nibble_commands; | 2315 | priv->nibble_commands = alps_v3_nibble_commands; |
2305 | priv->addr_command = PSMOUSE_CMD_RESET_WRAP; | 2316 | priv->addr_command = PSMOUSE_CMD_RESET_WRAP; |
2306 | 2317 | priv->x_max = 0xfff; | |
2307 | if (alps_dolphin_get_device_area(psmouse, priv)) | 2318 | priv->y_max = 0x7ff; |
2308 | return -EIO; | ||
2309 | 2319 | ||
2310 | if (priv->fw_ver[1] != 0xba) | 2320 | if (priv->fw_ver[1] != 0xba) |
2311 | priv->flags |= ALPS_BUTTONPAD; | 2321 | priv->flags |= ALPS_BUTTONPAD; |
@@ -2605,8 +2615,10 @@ int alps_detect(struct psmouse *psmouse, bool set_properties) | |||
2605 | return -ENOMEM; | 2615 | return -ENOMEM; |
2606 | 2616 | ||
2607 | error = alps_identify(psmouse, priv); | 2617 | error = alps_identify(psmouse, priv); |
2608 | if (error) | 2618 | if (error) { |
2619 | kfree(priv); | ||
2609 | return error; | 2620 | return error; |
2621 | } | ||
2610 | 2622 | ||
2611 | if (set_properties) { | 2623 | if (set_properties) { |
2612 | psmouse->vendor = "ALPS"; | 2624 | psmouse->vendor = "ALPS"; |
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c index 77e9d70a986b..1e2291c378fe 100644 --- a/drivers/input/mouse/cyapa_gen3.c +++ b/drivers/input/mouse/cyapa_gen3.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/input/mt.h> | 20 | #include <linux/input/mt.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/unaligned/access_ok.h> | 23 | #include <asm/unaligned.h> |
24 | #include "cyapa.h" | 24 | #include "cyapa.h" |
25 | 25 | ||
26 | 26 | ||
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c index ddf5393a1180..5b611dd71e79 100644 --- a/drivers/input/mouse/cyapa_gen5.c +++ b/drivers/input/mouse/cyapa_gen5.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/unaligned/access_ok.h> | 20 | #include <asm/unaligned.h> |
21 | #include <linux/crc-itu-t.h> | 21 | #include <linux/crc-itu-t.h> |
22 | #include "cyapa.h" | 22 | #include "cyapa.h" |
23 | 23 | ||
@@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa, | |||
1926 | electrodes_tx = cyapa->electrodes_x; | 1926 | electrodes_tx = cyapa->electrodes_x; |
1927 | max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & | 1927 | max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & |
1928 | ~7u) * electrodes_tx; | 1928 | ~7u) * electrodes_tx; |
1929 | } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) { | 1929 | } else { |
1930 | offset = 2; | 1930 | offset = 2; |
1931 | max_element_cnt = cyapa->electrodes_x + | 1931 | max_element_cnt = cyapa->electrodes_x + |
1932 | cyapa->electrodes_y; | 1932 | cyapa->electrodes_y; |
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c index 757f78a94aec..23d259416f2f 100644 --- a/drivers/input/mouse/focaltech.c +++ b/drivers/input/mouse/focaltech.c | |||
@@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse) | |||
67 | 67 | ||
68 | #define FOC_MAX_FINGERS 5 | 68 | #define FOC_MAX_FINGERS 5 |
69 | 69 | ||
70 | #define FOC_MAX_X 2431 | ||
71 | #define FOC_MAX_Y 1663 | ||
72 | |||
73 | /* | 70 | /* |
74 | * Current state of a single finger on the touchpad. | 71 | * Current state of a single finger on the touchpad. |
75 | */ | 72 | */ |
@@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse) | |||
129 | input_mt_slot(dev, i); | 126 | input_mt_slot(dev, i); |
130 | input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); | 127 | input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); |
131 | if (active) { | 128 | if (active) { |
132 | input_report_abs(dev, ABS_MT_POSITION_X, finger->x); | 129 | unsigned int clamped_x, clamped_y; |
130 | /* | ||
131 | * The touchpad might report invalid data, so we clamp | ||
132 | * the resulting values so that we do not confuse | ||
133 | * userspace. | ||
134 | */ | ||
135 | clamped_x = clamp(finger->x, 0U, priv->x_max); | ||
136 | clamped_y = clamp(finger->y, 0U, priv->y_max); | ||
137 | input_report_abs(dev, ABS_MT_POSITION_X, clamped_x); | ||
133 | input_report_abs(dev, ABS_MT_POSITION_Y, | 138 | input_report_abs(dev, ABS_MT_POSITION_Y, |
134 | FOC_MAX_Y - finger->y); | 139 | priv->y_max - clamped_y); |
135 | } | 140 | } |
136 | } | 141 | } |
137 | input_mt_report_pointer_emulation(dev, true); | 142 | input_mt_report_pointer_emulation(dev, true); |
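
The clamping added above is easy to sanity-check outside the driver. Below is a minimal stand-alone sketch (illustrative only, not driver code): it clamps a raw coordinate into [0, max] and reports y_max - y the same way the hunk does. The 2431/1663 limits are simply the values the removed FOC_MAX_X/FOC_MAX_Y constants carried; the driver itself now takes the real limits from focaltech_read_size().

#include <stdio.h>

/* example limits only; the driver reads the real ones from the device */
#define EXAMPLE_X_MAX 2431u
#define EXAMPLE_Y_MAX 1663u

static unsigned int clamp_uint(unsigned int val, unsigned int lo, unsigned int hi)
{
	if (val < lo)
		return lo;
	if (val > hi)
		return hi;
	return val;
}

int main(void)
{
	unsigned int raw_x = 5000, raw_y = 1700;	/* deliberately out of range */
	unsigned int x = clamp_uint(raw_x, 0, EXAMPLE_X_MAX);
	unsigned int y = clamp_uint(raw_y, 0, EXAMPLE_Y_MAX);

	/* the driver reports y_max - y, so the Y axis is inverted */
	printf("reported: x=%u y=%u\n", x, EXAMPLE_Y_MAX - y);
	return 0;
}
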
@@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse, | |||
180 | 185 | ||
181 | state->pressed = (packet[0] >> 4) & 1; | 186 | state->pressed = (packet[0] >> 4) & 1; |
182 | 187 | ||
183 | /* | ||
184 | * packet[5] contains some kind of tool size in the most | ||
185 | * significant nibble. 0xff is a special value (latching) that | ||
186 | * signals a large contact area. | ||
187 | */ | ||
188 | if (packet[5] == 0xff) { | ||
189 | state->fingers[finger].valid = false; | ||
190 | return; | ||
191 | } | ||
192 | |||
193 | state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; | 188 | state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; |
194 | state->fingers[finger].y = (packet[3] << 8) | packet[4]; | 189 | state->fingers[finger].y = (packet[3] << 8) | packet[4]; |
195 | state->fingers[finger].valid = true; | 190 | state->fingers[finger].valid = true; |
@@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse) | |||
381 | 376 | ||
382 | return 0; | 377 | return 0; |
383 | } | 378 | } |
379 | |||
380 | void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution) | ||
381 | { | ||
382 | /* not supported yet */ | ||
383 | } | ||
384 | |||
385 | static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate) | ||
386 | { | ||
387 | /* not supported yet */ | ||
388 | } | ||
389 | |||
390 | static void focaltech_set_scale(struct psmouse *psmouse, | ||
391 | enum psmouse_scale scale) | ||
392 | { | ||
393 | /* not supported yet */ | ||
394 | } | ||
395 | |||
384 | int focaltech_init(struct psmouse *psmouse) | 396 | int focaltech_init(struct psmouse *psmouse) |
385 | { | 397 | { |
386 | struct focaltech_data *priv; | 398 | struct focaltech_data *priv; |
@@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse) | |||
415 | psmouse->cleanup = focaltech_reset; | 427 | psmouse->cleanup = focaltech_reset; |
416 | /* resync is not supported yet */ | 428 | /* resync is not supported yet */ |
417 | psmouse->resync_time = 0; | 429 | psmouse->resync_time = 0; |
430 | /* | ||
431 | * rate/resolution/scale changes are not supported yet, and | ||
432 | * the generic implementations of these functions seem to | ||
433 | * confuse some touchpads | ||
434 | */ | ||
435 | psmouse->set_resolution = focaltech_set_resolution; | ||
436 | psmouse->set_rate = focaltech_set_rate; | ||
437 | psmouse->set_scale = focaltech_set_scale; | ||
418 | 438 | ||
419 | return 0; | 439 | return 0; |
420 | 440 | ||
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 4ccd01d7a48d..8bc61237bc1b 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
@@ -454,6 +454,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate) | |||
454 | } | 454 | } |
455 | 455 | ||
456 | /* | 456 | /* |
457 | * Here we set the mouse scaling. | ||
458 | */ | ||
459 | |||
460 | static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale) | ||
461 | { | ||
462 | ps2_command(&psmouse->ps2dev, NULL, | ||
463 | scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 : | ||
464 | PSMOUSE_CMD_SETSCALE11); | ||
465 | } | ||
466 | |||
467 | /* | ||
457 | * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. | 468 | * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. |
458 | */ | 469 | */ |
459 | 470 | ||
@@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse) | |||
689 | 700 | ||
690 | psmouse->set_rate = psmouse_set_rate; | 701 | psmouse->set_rate = psmouse_set_rate; |
691 | psmouse->set_resolution = psmouse_set_resolution; | 702 | psmouse->set_resolution = psmouse_set_resolution; |
703 | psmouse->set_scale = psmouse_set_scale; | ||
692 | psmouse->poll = psmouse_poll; | 704 | psmouse->poll = psmouse_poll; |
693 | psmouse->protocol_handler = psmouse_process_byte; | 705 | psmouse->protocol_handler = psmouse_process_byte; |
694 | psmouse->pktsize = 3; | 706 | psmouse->pktsize = 3; |
@@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse) | |||
1160 | if (psmouse_max_proto != PSMOUSE_PS2) { | 1172 | if (psmouse_max_proto != PSMOUSE_PS2) { |
1161 | psmouse->set_rate(psmouse, psmouse->rate); | 1173 | psmouse->set_rate(psmouse, psmouse->rate); |
1162 | psmouse->set_resolution(psmouse, psmouse->resolution); | 1174 | psmouse->set_resolution(psmouse, psmouse->resolution); |
1163 | ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); | 1175 | psmouse->set_scale(psmouse, PSMOUSE_SCALE11); |
1164 | } | 1176 | } |
1165 | } | 1177 | } |
1166 | 1178 | ||
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h index c2ff137ecbdb..d02e1bdc9ae4 100644 --- a/drivers/input/mouse/psmouse.h +++ b/drivers/input/mouse/psmouse.h | |||
@@ -36,6 +36,11 @@ typedef enum { | |||
36 | PSMOUSE_FULL_PACKET | 36 | PSMOUSE_FULL_PACKET |
37 | } psmouse_ret_t; | 37 | } psmouse_ret_t; |
38 | 38 | ||
39 | enum psmouse_scale { | ||
40 | PSMOUSE_SCALE11, | ||
41 | PSMOUSE_SCALE21 | ||
42 | }; | ||
43 | |||
39 | struct psmouse { | 44 | struct psmouse { |
40 | void *private; | 45 | void *private; |
41 | struct input_dev *dev; | 46 | struct input_dev *dev; |
@@ -67,6 +72,7 @@ struct psmouse { | |||
67 | psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); | 72 | psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); |
68 | void (*set_rate)(struct psmouse *psmouse, unsigned int rate); | 73 | void (*set_rate)(struct psmouse *psmouse, unsigned int rate); |
69 | void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); | 74 | void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); |
75 | void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale); | ||
70 | 76 | ||
71 | int (*reconnect)(struct psmouse *psmouse); | 77 | int (*reconnect)(struct psmouse *psmouse); |
72 | void (*disconnect)(struct psmouse *psmouse); | 78 | void (*disconnect)(struct psmouse *psmouse); |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index f2cceb6493a0..3b06c8a360b6 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -67,9 +67,6 @@ | |||
67 | #define X_MAX_POSITIVE 8176 | 67 | #define X_MAX_POSITIVE 8176 |
68 | #define Y_MAX_POSITIVE 8176 | 68 | #define Y_MAX_POSITIVE 8176 |
69 | 69 | ||
70 | /* maximum ABS_MT_POSITION displacement (in mm) */ | ||
71 | #define DMAX 10 | ||
72 | |||
73 | /***************************************************************************** | 70 | /***************************************************************************** |
74 | * Stuff we need even when we do not want native Synaptics support | 71 | * Stuff we need even when we do not want native Synaptics support |
75 | ****************************************************************************/ | 72 | ****************************************************************************/ |
@@ -123,32 +120,46 @@ void synaptics_reset(struct psmouse *psmouse) | |||
123 | 120 | ||
124 | static bool cr48_profile_sensor; | 121 | static bool cr48_profile_sensor; |
125 | 122 | ||
123 | #define ANY_BOARD_ID 0 | ||
126 | struct min_max_quirk { | 124 | struct min_max_quirk { |
127 | const char * const *pnp_ids; | 125 | const char * const *pnp_ids; |
126 | struct { | ||
127 | unsigned long int min, max; | ||
128 | } board_id; | ||
128 | int x_min, x_max, y_min, y_max; | 129 | int x_min, x_max, y_min, y_max; |
129 | }; | 130 | }; |
130 | 131 | ||
131 | static const struct min_max_quirk min_max_pnpid_table[] = { | 132 | static const struct min_max_quirk min_max_pnpid_table[] = { |
132 | { | 133 | { |
133 | (const char * const []){"LEN0033", NULL}, | 134 | (const char * const []){"LEN0033", NULL}, |
135 | {ANY_BOARD_ID, ANY_BOARD_ID}, | ||
134 | 1024, 5052, 2258, 4832 | 136 | 1024, 5052, 2258, 4832 |
135 | }, | 137 | }, |
136 | { | 138 | { |
137 | (const char * const []){"LEN0035", "LEN0042", NULL}, | 139 | (const char * const []){"LEN0042", NULL}, |
140 | {ANY_BOARD_ID, ANY_BOARD_ID}, | ||
138 | 1232, 5710, 1156, 4696 | 141 | 1232, 5710, 1156, 4696 |
139 | }, | 142 | }, |
140 | { | 143 | { |
141 | (const char * const []){"LEN0034", "LEN0036", "LEN0037", | 144 | (const char * const []){"LEN0034", "LEN0036", "LEN0037", |
142 | "LEN0039", "LEN2002", "LEN2004", | 145 | "LEN0039", "LEN2002", "LEN2004", |
143 | NULL}, | 146 | NULL}, |
147 | {ANY_BOARD_ID, 2961}, | ||
144 | 1024, 5112, 2024, 4832 | 148 | 1024, 5112, 2024, 4832 |
145 | }, | 149 | }, |
146 | { | 150 | { |
147 | (const char * const []){"LEN2001", NULL}, | 151 | (const char * const []){"LEN2001", NULL}, |
152 | {ANY_BOARD_ID, ANY_BOARD_ID}, | ||
148 | 1024, 5022, 2508, 4832 | 153 | 1024, 5022, 2508, 4832 |
149 | }, | 154 | }, |
150 | { | 155 | { |
151 | (const char * const []){"LEN2006", NULL}, | 156 | (const char * const []){"LEN2006", NULL}, |
157 | {2691, 2691}, | ||
158 | 1024, 5045, 2457, 4832 | ||
159 | }, | ||
160 | { | ||
161 | (const char * const []){"LEN2006", NULL}, | ||
162 | {ANY_BOARD_ID, ANY_BOARD_ID}, | ||
152 | 1264, 5675, 1171, 4688 | 163 | 1264, 5675, 1171, 4688 |
153 | }, | 164 | }, |
154 | { } | 165 | { } |
@@ -175,9 +186,7 @@ static const char * const topbuttonpad_pnp_ids[] = { | |||
175 | "LEN0041", | 186 | "LEN0041", |
176 | "LEN0042", /* Yoga */ | 187 | "LEN0042", /* Yoga */ |
177 | "LEN0045", | 188 | "LEN0045", |
178 | "LEN0046", | ||
179 | "LEN0047", | 189 | "LEN0047", |
180 | "LEN0048", | ||
181 | "LEN0049", | 190 | "LEN0049", |
182 | "LEN2000", | 191 | "LEN2000", |
183 | "LEN2001", /* Edge E431 */ | 192 | "LEN2001", /* Edge E431 */ |
@@ -185,7 +194,7 @@ static const char * const topbuttonpad_pnp_ids[] = { | |||
185 | "LEN2003", | 194 | "LEN2003", |
186 | "LEN2004", /* L440 */ | 195 | "LEN2004", /* L440 */ |
187 | "LEN2005", | 196 | "LEN2005", |
188 | "LEN2006", | 197 | "LEN2006", /* Edge E440/E540 */ |
189 | "LEN2007", | 198 | "LEN2007", |
190 | "LEN2008", | 199 | "LEN2008", |
191 | "LEN2009", | 200 | "LEN2009", |
@@ -235,18 +244,39 @@ static int synaptics_model_id(struct psmouse *psmouse) | |||
235 | return 0; | 244 | return 0; |
236 | } | 245 | } |
237 | 246 | ||
247 | static int synaptics_more_extended_queries(struct psmouse *psmouse) | ||
248 | { | ||
249 | struct synaptics_data *priv = psmouse->private; | ||
250 | unsigned char buf[3]; | ||
251 | |||
252 | if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf)) | ||
253 | return -1; | ||
254 | |||
255 | priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2]; | ||
256 | |||
257 | return 0; | ||
258 | } | ||
259 | |||
238 | /* | 260 | /* |
239 | * Read the board id from the touchpad | 261 | * Read the board id and the "More Extended Queries" from the touchpad |
240 | * The board id is encoded in the "QUERY MODES" response | 262 | * The board id is encoded in the "QUERY MODES" response |
241 | */ | 263 | */ |
242 | static int synaptics_board_id(struct psmouse *psmouse) | 264 | static int synaptics_query_modes(struct psmouse *psmouse) |
243 | { | 265 | { |
244 | struct synaptics_data *priv = psmouse->private; | 266 | struct synaptics_data *priv = psmouse->private; |
245 | unsigned char bid[3]; | 267 | unsigned char bid[3]; |
246 | 268 | ||
269 | /* firmwares prior 7.5 have no board_id encoded */ | ||
270 | if (SYN_ID_FULL(priv->identity) < 0x705) | ||
271 | return 0; | ||
272 | |||
247 | if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) | 273 | if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) |
248 | return -1; | 274 | return -1; |
249 | priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; | 275 | priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; |
276 | |||
277 | if (SYN_MEXT_CAP_BIT(bid[0])) | ||
278 | return synaptics_more_extended_queries(psmouse); | ||
279 | |||
250 | return 0; | 280 | return 0; |
251 | } | 281 | } |
252 | 282 | ||
@@ -346,7 +376,6 @@ static int synaptics_resolution(struct psmouse *psmouse) | |||
346 | { | 376 | { |
347 | struct synaptics_data *priv = psmouse->private; | 377 | struct synaptics_data *priv = psmouse->private; |
348 | unsigned char resp[3]; | 378 | unsigned char resp[3]; |
349 | int i; | ||
350 | 379 | ||
351 | if (SYN_ID_MAJOR(priv->identity) < 4) | 380 | if (SYN_ID_MAJOR(priv->identity) < 4) |
352 | return 0; | 381 | return 0; |
@@ -358,17 +387,6 @@ static int synaptics_resolution(struct psmouse *psmouse) | |||
358 | } | 387 | } |
359 | } | 388 | } |
360 | 389 | ||
361 | for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) { | ||
362 | if (psmouse_matches_pnp_id(psmouse, | ||
363 | min_max_pnpid_table[i].pnp_ids)) { | ||
364 | priv->x_min = min_max_pnpid_table[i].x_min; | ||
365 | priv->x_max = min_max_pnpid_table[i].x_max; | ||
366 | priv->y_min = min_max_pnpid_table[i].y_min; | ||
367 | priv->y_max = min_max_pnpid_table[i].y_max; | ||
368 | return 0; | ||
369 | } | ||
370 | } | ||
371 | |||
372 | if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && | 390 | if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && |
373 | SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { | 391 | SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { |
374 | if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) { | 392 | if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) { |
@@ -377,23 +395,69 @@ static int synaptics_resolution(struct psmouse *psmouse) | |||
377 | } else { | 395 | } else { |
378 | priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); | 396 | priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); |
379 | priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); | 397 | priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); |
398 | psmouse_info(psmouse, | ||
399 | "queried max coordinates: x [..%d], y [..%d]\n", | ||
400 | priv->x_max, priv->y_max); | ||
380 | } | 401 | } |
381 | } | 402 | } |
382 | 403 | ||
383 | if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 && | 404 | if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) && |
384 | SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) { | 405 | (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 || |
406 | /* | ||
407 | * Firmware v8.1 does not report the proper number of extended | ||
408 | * capabilities, but has been proven to report correct min | ||
409 | * coordinates. | ||
410 | */ | ||
411 | SYN_ID_FULL(priv->identity) == 0x801)) { | ||
385 | if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { | 412 | if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { |
386 | psmouse_warn(psmouse, | 413 | psmouse_warn(psmouse, |
387 | "device claims to have min coordinates query, but I'm not able to read it.\n"); | 414 | "device claims to have min coordinates query, but I'm not able to read it.\n"); |
388 | } else { | 415 | } else { |
389 | priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); | 416 | priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); |
390 | priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); | 417 | priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); |
418 | psmouse_info(psmouse, | ||
419 | "queried min coordinates: x [%d..], y [%d..]\n", | ||
420 | priv->x_min, priv->y_min); | ||
391 | } | 421 | } |
392 | } | 422 | } |
393 | 423 | ||
394 | return 0; | 424 | return 0; |
395 | } | 425 | } |
396 | 426 | ||
427 | /* | ||
428 | * Apply quirk(s) if the hardware matches | ||
429 | */ | ||
430 | |||
431 | static void synaptics_apply_quirks(struct psmouse *psmouse) | ||
432 | { | ||
433 | struct synaptics_data *priv = psmouse->private; | ||
434 | int i; | ||
435 | |||
436 | for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) { | ||
437 | if (!psmouse_matches_pnp_id(psmouse, | ||
438 | min_max_pnpid_table[i].pnp_ids)) | ||
439 | continue; | ||
440 | |||
441 | if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID && | ||
442 | priv->board_id < min_max_pnpid_table[i].board_id.min) | ||
443 | continue; | ||
444 | |||
445 | if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID && | ||
446 | priv->board_id > min_max_pnpid_table[i].board_id.max) | ||
447 | continue; | ||
448 | |||
449 | priv->x_min = min_max_pnpid_table[i].x_min; | ||
450 | priv->x_max = min_max_pnpid_table[i].x_max; | ||
451 | priv->y_min = min_max_pnpid_table[i].y_min; | ||
452 | priv->y_max = min_max_pnpid_table[i].y_max; | ||
453 | psmouse_info(psmouse, | ||
454 | "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n", | ||
455 | priv->x_min, priv->x_max, | ||
456 | priv->y_min, priv->y_max); | ||
457 | break; | ||
458 | } | ||
459 | } | ||
460 | |||
397 | static int synaptics_query_hardware(struct psmouse *psmouse) | 461 | static int synaptics_query_hardware(struct psmouse *psmouse) |
398 | { | 462 | { |
399 | if (synaptics_identify(psmouse)) | 463 | if (synaptics_identify(psmouse)) |
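
The interplay of the PnP ID match and the new board_id bounds in synaptics_apply_quirks() can be shown with a short stand-alone sketch (user-space C, illustrative only): an entry applies when the PnP ID matches and the board id lies within [min, max], with ANY_BOARD_ID (0) acting as a wildcard on either bound, and the first matching entry wins. The two LEN2006 rows mirror the table entries added above; matching a single string is a simplification of psmouse_matches_pnp_id().

#include <stdio.h>
#include <string.h>

#define ANY_BOARD_ID 0

struct quirk {
	const char *pnp_id;			/* single ID, for simplicity */
	unsigned long board_min, board_max;
	int x_min, x_max, y_min, y_max;
};

/* trimmed example rows mirroring the shape of min_max_pnpid_table[] */
static const struct quirk quirks[] = {
	{ "LEN2006", 2691, 2691, 1024, 5045, 2457, 4832 },
	{ "LEN2006", ANY_BOARD_ID, ANY_BOARD_ID, 1264, 5675, 1171, 4688 },
	{ NULL }
};

static const struct quirk *find_quirk(const char *pnp_id, unsigned long board_id)
{
	const struct quirk *q;

	for (q = quirks; q->pnp_id; q++) {
		if (strcmp(q->pnp_id, pnp_id))
			continue;
		if (q->board_min != ANY_BOARD_ID && board_id < q->board_min)
			continue;
		if (q->board_max != ANY_BOARD_ID && board_id > q->board_max)
			continue;
		return q;		/* first match wins, as in the driver */
	}
	return NULL;
}

int main(void)
{
	/* board 2691 hits the specific row; any other board id hits the catch-all */
	const struct quirk *q = find_quirk("LEN2006", 2691);

	if (q)
		printf("x [%d..%d], y [%d..%d]\n",
		       q->x_min, q->x_max, q->y_min, q->y_max);
	return 0;
}
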
@@ -402,13 +466,15 @@ static int synaptics_query_hardware(struct psmouse *psmouse) | |||
402 | return -1; | 466 | return -1; |
403 | if (synaptics_firmware_id(psmouse)) | 467 | if (synaptics_firmware_id(psmouse)) |
404 | return -1; | 468 | return -1; |
405 | if (synaptics_board_id(psmouse)) | 469 | if (synaptics_query_modes(psmouse)) |
406 | return -1; | 470 | return -1; |
407 | if (synaptics_capability(psmouse)) | 471 | if (synaptics_capability(psmouse)) |
408 | return -1; | 472 | return -1; |
409 | if (synaptics_resolution(psmouse)) | 473 | if (synaptics_resolution(psmouse)) |
410 | return -1; | 474 | return -1; |
411 | 475 | ||
476 | synaptics_apply_quirks(psmouse); | ||
477 | |||
412 | return 0; | 478 | return 0; |
413 | } | 479 | } |
414 | 480 | ||
@@ -516,18 +582,22 @@ static int synaptics_is_pt_packet(unsigned char *buf) | |||
516 | return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; | 582 | return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; |
517 | } | 583 | } |
518 | 584 | ||
519 | static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet) | 585 | static void synaptics_pass_pt_packet(struct psmouse *psmouse, |
586 | struct serio *ptport, | ||
587 | unsigned char *packet) | ||
520 | { | 588 | { |
589 | struct synaptics_data *priv = psmouse->private; | ||
521 | struct psmouse *child = serio_get_drvdata(ptport); | 590 | struct psmouse *child = serio_get_drvdata(ptport); |
522 | 591 | ||
523 | if (child && child->state == PSMOUSE_ACTIVATED) { | 592 | if (child && child->state == PSMOUSE_ACTIVATED) { |
524 | serio_interrupt(ptport, packet[1], 0); | 593 | serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0); |
525 | serio_interrupt(ptport, packet[4], 0); | 594 | serio_interrupt(ptport, packet[4], 0); |
526 | serio_interrupt(ptport, packet[5], 0); | 595 | serio_interrupt(ptport, packet[5], 0); |
527 | if (child->pktsize == 4) | 596 | if (child->pktsize == 4) |
528 | serio_interrupt(ptport, packet[2], 0); | 597 | serio_interrupt(ptport, packet[2], 0); |
529 | } else | 598 | } else { |
530 | serio_interrupt(ptport, packet[1], 0); | 599 | serio_interrupt(ptport, packet[1], 0); |
600 | } | ||
531 | } | 601 | } |
532 | 602 | ||
533 | static void synaptics_pt_activate(struct psmouse *psmouse) | 603 | static void synaptics_pt_activate(struct psmouse *psmouse) |
@@ -605,6 +675,18 @@ static void synaptics_parse_agm(const unsigned char buf[], | |||
605 | } | 675 | } |
606 | } | 676 | } |
607 | 677 | ||
678 | static void synaptics_parse_ext_buttons(const unsigned char buf[], | ||
679 | struct synaptics_data *priv, | ||
680 | struct synaptics_hw_state *hw) | ||
681 | { | ||
682 | unsigned int ext_bits = | ||
683 | (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1; | ||
684 | unsigned int ext_mask = GENMASK(ext_bits - 1, 0); | ||
685 | |||
686 | hw->ext_buttons = buf[4] & ext_mask; | ||
687 | hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits; | ||
688 | } | ||
689 | |||
608 | static bool is_forcepad; | 690 | static bool is_forcepad; |
609 | 691 | ||
610 | static int synaptics_parse_hw_state(const unsigned char buf[], | 692 | static int synaptics_parse_hw_state(const unsigned char buf[], |
@@ -691,28 +773,9 @@ static int synaptics_parse_hw_state(const unsigned char buf[], | |||
691 | hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; | 773 | hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; |
692 | } | 774 | } |
693 | 775 | ||
694 | if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && | 776 | if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 && |
695 | ((buf[0] ^ buf[3]) & 0x02)) { | 777 | ((buf[0] ^ buf[3]) & 0x02)) { |
696 | switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { | 778 | synaptics_parse_ext_buttons(buf, priv, hw); |
697 | default: | ||
698 | /* | ||
699 | * if nExtBtn is greater than 8 it should be | ||
700 | * considered invalid and treated as 0 | ||
701 | */ | ||
702 | break; | ||
703 | case 8: | ||
704 | hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0; | ||
705 | hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0; | ||
706 | case 6: | ||
707 | hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0; | ||
708 | hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0; | ||
709 | case 4: | ||
710 | hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0; | ||
711 | hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0; | ||
712 | case 2: | ||
713 | hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0; | ||
714 | hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0; | ||
715 | } | ||
716 | } | 779 | } |
717 | } else { | 780 | } else { |
718 | hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); | 781 | hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); |
@@ -774,12 +837,54 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev, | |||
774 | } | 837 | } |
775 | } | 838 | } |
776 | 839 | ||
840 | static void synaptics_report_ext_buttons(struct psmouse *psmouse, | ||
841 | const struct synaptics_hw_state *hw) | ||
842 | { | ||
843 | struct input_dev *dev = psmouse->dev; | ||
844 | struct synaptics_data *priv = psmouse->private; | ||
845 | int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1; | ||
846 | char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; | ||
847 | int i; | ||
848 | |||
849 | if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap)) | ||
850 | return; | ||
851 | |||
852 | /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */ | ||
853 | if (SYN_ID_FULL(priv->identity) == 0x801 && | ||
854 | !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02)) | ||
855 | return; | ||
856 | |||
857 | if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) { | ||
858 | for (i = 0; i < ext_bits; i++) { | ||
859 | input_report_key(dev, BTN_0 + 2 * i, | ||
860 | hw->ext_buttons & (1 << i)); | ||
861 | input_report_key(dev, BTN_1 + 2 * i, | ||
862 | hw->ext_buttons & (1 << (i + ext_bits))); | ||
863 | } | ||
864 | return; | ||
865 | } | ||
866 | |||
867 | /* | ||
868 | * This generation of touchpads has the trackstick buttons | ||
869 | * physically wired to the touchpad. Re-route them through | ||
870 | * the pass-through interface. | ||
871 | */ | ||
872 | if (!priv->pt_port) | ||
873 | return; | ||
874 | |||
875 | /* The trackstick expects at most 3 buttons */ | ||
876 | priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons) | | ||
877 | SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 | | ||
878 | SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2; | ||
879 | |||
880 | synaptics_pass_pt_packet(psmouse, priv->pt_port, buf); | ||
881 | } | ||
882 | |||
777 | static void synaptics_report_buttons(struct psmouse *psmouse, | 883 | static void synaptics_report_buttons(struct psmouse *psmouse, |
778 | const struct synaptics_hw_state *hw) | 884 | const struct synaptics_hw_state *hw) |
779 | { | 885 | { |
780 | struct input_dev *dev = psmouse->dev; | 886 | struct input_dev *dev = psmouse->dev; |
781 | struct synaptics_data *priv = psmouse->private; | 887 | struct synaptics_data *priv = psmouse->private; |
782 | int i; | ||
783 | 888 | ||
784 | input_report_key(dev, BTN_LEFT, hw->left); | 889 | input_report_key(dev, BTN_LEFT, hw->left); |
785 | input_report_key(dev, BTN_RIGHT, hw->right); | 890 | input_report_key(dev, BTN_RIGHT, hw->right); |
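
The bit packing performed by synaptics_parse_ext_buttons() and consumed by synaptics_report_ext_buttons() is compact enough to demonstrate on its own. In the illustrative sketch below (example values only; the driver takes the button count from SYN_CAP_MULTI_BUTTON_NO() and the bytes from the packet), ext_bits = (N + 1) / 2 for N extra buttons, the low ext_bits of packet byte 4 drive the even-numbered buttons (BTN_0, BTN_2, ...) and the low ext_bits of byte 5, shifted up by ext_bits, drive the odd-numbered ones.

#include <stdio.h>

/* same mask as GENMASK(ext_bits - 1, 0) in the kernel */
static unsigned int low_mask(unsigned int bits)
{
	return (1u << bits) - 1;
}

int main(void)
{
	unsigned int n_buttons = 4;	/* example SYN_CAP_MULTI_BUTTON_NO() value */
	unsigned int ext_bits = (n_buttons + 1) >> 1;
	unsigned int ext_mask = low_mask(ext_bits);
	unsigned char buf4 = 0x01, buf5 = 0x02;	/* sample packet bytes 4 and 5 */
	unsigned int ext_buttons, i;

	/* packing done by synaptics_parse_ext_buttons() */
	ext_buttons = buf4 & ext_mask;
	ext_buttons |= (buf5 & ext_mask) << ext_bits;

	/* unpacking done by synaptics_report_ext_buttons() */
	for (i = 0; i < ext_bits; i++) {
		printf("BTN_%u: %d\n", 2 * i, !!(ext_buttons & (1u << i)));
		printf("BTN_%u: %d\n", 2 * i + 1,
		       !!(ext_buttons & (1u << (i + ext_bits))));
	}
	return 0;
}
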
@@ -792,8 +897,7 @@ static void synaptics_report_buttons(struct psmouse *psmouse, | |||
792 | input_report_key(dev, BTN_BACK, hw->down); | 897 | input_report_key(dev, BTN_BACK, hw->down); |
793 | } | 898 | } |
794 | 899 | ||
795 | for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) | 900 | synaptics_report_ext_buttons(psmouse, hw); |
796 | input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i)); | ||
797 | } | 901 | } |
798 | 902 | ||
799 | static void synaptics_report_mt_data(struct psmouse *psmouse, | 903 | static void synaptics_report_mt_data(struct psmouse *psmouse, |
@@ -813,7 +917,7 @@ static void synaptics_report_mt_data(struct psmouse *psmouse, | |||
813 | pos[i].y = synaptics_invert_y(hw[i]->y); | 917 | pos[i].y = synaptics_invert_y(hw[i]->y); |
814 | } | 918 | } |
815 | 919 | ||
816 | input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res); | 920 | input_mt_assign_slots(dev, slot, pos, nsemi, 0); |
817 | 921 | ||
818 | for (i = 0; i < nsemi; i++) { | 922 | for (i = 0; i < nsemi; i++) { |
819 | input_mt_slot(dev, slot[i]); | 923 | input_mt_slot(dev, slot[i]); |
@@ -1014,7 +1118,8 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse) | |||
1014 | if (SYN_CAP_PASS_THROUGH(priv->capabilities) && | 1118 | if (SYN_CAP_PASS_THROUGH(priv->capabilities) && |
1015 | synaptics_is_pt_packet(psmouse->packet)) { | 1119 | synaptics_is_pt_packet(psmouse->packet)) { |
1016 | if (priv->pt_port) | 1120 | if (priv->pt_port) |
1017 | synaptics_pass_pt_packet(priv->pt_port, psmouse->packet); | 1121 | synaptics_pass_pt_packet(psmouse, priv->pt_port, |
1122 | psmouse->packet); | ||
1018 | } else | 1123 | } else |
1019 | synaptics_process_packet(psmouse); | 1124 | synaptics_process_packet(psmouse); |
1020 | 1125 | ||
@@ -1116,8 +1221,9 @@ static void set_input_params(struct psmouse *psmouse, | |||
1116 | __set_bit(BTN_BACK, dev->keybit); | 1221 | __set_bit(BTN_BACK, dev->keybit); |
1117 | } | 1222 | } |
1118 | 1223 | ||
1119 | for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) | 1224 | if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) |
1120 | __set_bit(BTN_0 + i, dev->keybit); | 1225 | for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) |
1226 | __set_bit(BTN_0 + i, dev->keybit); | ||
1121 | 1227 | ||
1122 | __clear_bit(EV_REL, dev->evbit); | 1228 | __clear_bit(EV_REL, dev->evbit); |
1123 | __clear_bit(REL_X, dev->relbit); | 1229 | __clear_bit(REL_X, dev->relbit); |
@@ -1125,7 +1231,8 @@ static void set_input_params(struct psmouse *psmouse, | |||
1125 | 1231 | ||
1126 | if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { | 1232 | if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { |
1127 | __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); | 1233 | __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); |
1128 | if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids)) | 1234 | if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && |
1235 | !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) | ||
1129 | __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); | 1236 | __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); |
1130 | /* Clickpads report only left button */ | 1237 | /* Clickpads report only left button */ |
1131 | __clear_bit(BTN_RIGHT, dev->keybit); | 1238 | __clear_bit(BTN_RIGHT, dev->keybit); |
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index aedc3299b14e..ee4bd0d12b26 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #define SYN_QUE_EXT_CAPAB_0C 0x0c | 22 | #define SYN_QUE_EXT_CAPAB_0C 0x0c |
23 | #define SYN_QUE_EXT_MAX_COORDS 0x0d | 23 | #define SYN_QUE_EXT_MAX_COORDS 0x0d |
24 | #define SYN_QUE_EXT_MIN_COORDS 0x0f | 24 | #define SYN_QUE_EXT_MIN_COORDS 0x0f |
25 | #define SYN_QUE_MEXT_CAPAB_10 0x10 | ||
25 | 26 | ||
26 | /* synaptics modes */ | 27 | /* synaptics modes */ |
27 | #define SYN_BIT_ABSOLUTE_MODE (1 << 7) | 28 | #define SYN_BIT_ABSOLUTE_MODE (1 << 7) |
@@ -53,6 +54,7 @@ | |||
53 | #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) | 54 | #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) |
54 | #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) | 55 | #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) |
55 | #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) | 56 | #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) |
57 | #define SYN_MEXT_CAP_BIT(m) ((m) & (1 << 1)) | ||
56 | 58 | ||
57 | /* | 59 | /* |
58 | * The following describes response for the 0x0c query. | 60 | * The following describes response for the 0x0c query. |
@@ -89,6 +91,30 @@ | |||
89 | #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) | 91 | #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) |
90 | #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) | 92 | #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) |
91 | 93 | ||
94 | /* | ||
95 | * The following describes the response for the 0x10 query. | ||
96 | * | ||
97 | * byte mask name meaning | ||
98 | * ---- ---- ------- ------------ | ||
99 | * 1 0x01 ext buttons are stick buttons exported in the extended | ||
100 | * capability are actually meant to be used | ||
101 | * by the trackstick (pass-through). | ||
102 | * 1 0x02 SecurePad the touchpad is a SecurePad, so it | ||
103 | * contains a built-in fingerprint reader. | ||
104 | * 1 0xe0 more ext count how many more extended queries are | ||
105 | * available after this one. | ||
106 | * 2 0xff SecurePad width the width of the SecurePad fingerprint | ||
107 | * reader. | ||
108 | * 3 0xff SecurePad height the height of the SecurePad fingerprint | ||
109 | * reader. | ||
110 | */ | ||
111 | #define SYN_CAP_EXT_BUTTONS_STICK(ex10) ((ex10) & 0x010000) | ||
112 | #define SYN_CAP_SECUREPAD(ex10) ((ex10) & 0x020000) | ||
113 | |||
114 | #define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01)) | ||
115 | #define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02)) | ||
116 | #define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04)) | ||
117 | |||
92 | /* synaptics modes query bits */ | 118 | /* synaptics modes query bits */ |
93 | #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) | 119 | #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) |
94 | #define SYN_MODE_RATE(m) ((m) & (1 << 6)) | 120 | #define SYN_MODE_RATE(m) ((m) & (1 << 6)) |
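
A quick stand-alone decode makes the new 0x10 ("more extended") query concrete. The packing below matches synaptics_more_extended_queries() in the synaptics.c hunk earlier in this diff, and the macros are the ones defined above; the three response bytes are invented for illustration.

#include <stdio.h>

/* same definitions as in the hunk above */
#define SYN_CAP_EXT_BUTTONS_STICK(ex10)	((ex10) & 0x010000)
#define SYN_CAP_SECUREPAD(ex10)		((ex10) & 0x020000)

int main(void)
{
	/* invented response bytes for the 0x10 query */
	unsigned char buf[3] = { 0x03, 0x40, 0x30 };
	unsigned long ext_cap_10;

	/* same packing as synaptics_more_extended_queries() */
	ext_cap_10 = (buf[0] << 16) | (buf[1] << 8) | buf[2];

	printf("stick buttons routed via pass-through: %s\n",
	       SYN_CAP_EXT_BUTTONS_STICK(ext_cap_10) ? "yes" : "no");
	printf("SecurePad present: %s\n",
	       SYN_CAP_SECUREPAD(ext_cap_10) ? "yes" : "no");
	if (SYN_CAP_SECUREPAD(ext_cap_10))
		printf("fingerprint reader: %d x %d\n", buf[1], buf[2]);
	return 0;
}
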
@@ -143,6 +169,7 @@ struct synaptics_data { | |||
143 | unsigned long int capabilities; /* Capabilities */ | 169 | unsigned long int capabilities; /* Capabilities */ |
144 | unsigned long int ext_cap; /* Extended Capabilities */ | 170 | unsigned long int ext_cap; /* Extended Capabilities */ |
145 | unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ | 171 | unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ |
172 | unsigned long int ext_cap_10; /* Ext Caps from 0x10 query */ | ||
146 | unsigned long int identity; /* Identification */ | 173 | unsigned long int identity; /* Identification */ |
147 | unsigned int x_res, y_res; /* X/Y resolution in units/mm */ | 174 | unsigned int x_res, y_res; /* X/Y resolution in units/mm */ |
148 | unsigned int x_max, y_max; /* Max coordinates (from FW) */ | 175 | unsigned int x_max, y_max; /* Max coordinates (from FW) */ |
@@ -156,6 +183,7 @@ struct synaptics_data { | |||
156 | bool disable_gesture; /* disable gestures */ | 183 | bool disable_gesture; /* disable gestures */ |
157 | 184 | ||
158 | struct serio *pt_port; /* Pass-through serio port */ | 185 | struct serio *pt_port; /* Pass-through serio port */ |
186 | unsigned char pt_buttons; /* Pass-through buttons */ | ||
159 | 187 | ||
160 | /* | 188 | /* |
161 | * Last received Advanced Gesture Mode (AGM) packet. An AGM packet | 189 | * Last received Advanced Gesture Mode (AGM) packet. An AGM packet |
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 58917525126e..6261fd6d7c3c 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig | |||
@@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I | |||
943 | tristate "Allwinner sun4i resistive touchscreen controller support" | 943 | tristate "Allwinner sun4i resistive touchscreen controller support" |
944 | depends on ARCH_SUNXI || COMPILE_TEST | 944 | depends on ARCH_SUNXI || COMPILE_TEST |
945 | depends on HWMON | 945 | depends on HWMON |
946 | depends on THERMAL || !THERMAL_OF | ||
946 | help | 947 | help |
947 | This selects support for the resistive touchscreen controller | 948 | This selects support for the resistive touchscreen controller |
948 | found on Allwinner sunxi SoCs. | 949 | found on Allwinner sunxi SoCs. |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index baa0d9786f50..1ae4e547b419 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE | |||
23 | config IOMMU_IO_PGTABLE_LPAE | 23 | config IOMMU_IO_PGTABLE_LPAE |
24 | bool "ARMv7/v8 Long Descriptor Format" | 24 | bool "ARMv7/v8 Long Descriptor Format" |
25 | select IOMMU_IO_PGTABLE | 25 | select IOMMU_IO_PGTABLE |
26 | depends on ARM || ARM64 || COMPILE_TEST | ||
26 | help | 27 | help |
27 | Enable support for the ARM long descriptor pagetable format. | 28 | Enable support for the ARM long descriptor pagetable format. |
28 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page | 29 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page |
@@ -63,6 +64,7 @@ config MSM_IOMMU | |||
63 | bool "MSM IOMMU Support" | 64 | bool "MSM IOMMU Support" |
64 | depends on ARM | 65 | depends on ARM |
65 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST | 66 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST |
67 | depends on BROKEN | ||
66 | select IOMMU_API | 68 | select IOMMU_API |
67 | help | 69 | help |
68 | Support for the IOMMUs found on certain Qualcomm SOCs. | 70 | Support for the IOMMUs found on certain Qualcomm SOCs. |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index fc13dd56953e..a3adde6519f0 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -1288,10 +1288,13 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | |||
1288 | return 0; | 1288 | return 0; |
1289 | 1289 | ||
1290 | spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); | 1290 | spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); |
1291 | if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS) | 1291 | if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && |
1292 | smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { | ||
1292 | ret = arm_smmu_iova_to_phys_hard(domain, iova); | 1293 | ret = arm_smmu_iova_to_phys_hard(domain, iova); |
1293 | else | 1294 | } else { |
1294 | ret = ops->iova_to_phys(ops, iova); | 1295 | ret = ops->iova_to_phys(ops, iova); |
1296 | } | ||
1297 | |||
1295 | spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); | 1298 | spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); |
1296 | 1299 | ||
1297 | return ret; | 1300 | return ret; |
@@ -1556,7 +1559,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1556 | return -ENODEV; | 1559 | return -ENODEV; |
1557 | } | 1560 | } |
1558 | 1561 | ||
1559 | if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) { | 1562 | if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) { |
1560 | smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; | 1563 | smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; |
1561 | dev_notice(smmu->dev, "\taddress translation ops\n"); | 1564 | dev_notice(smmu->dev, "\taddress translation ops\n"); |
1562 | } | 1565 | } |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 7ce52737c7a1..dc14fec4ede1 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
@@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = { | |||
1186 | 1186 | ||
1187 | static int __init exynos_iommu_init(void) | 1187 | static int __init exynos_iommu_init(void) |
1188 | { | 1188 | { |
1189 | struct device_node *np; | ||
1189 | int ret; | 1190 | int ret; |
1190 | 1191 | ||
1192 | np = of_find_matching_node(NULL, sysmmu_of_match); | ||
1193 | if (!np) | ||
1194 | return 0; | ||
1195 | |||
1196 | of_node_put(np); | ||
1197 | |||
1191 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", | 1198 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", |
1192 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); | 1199 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); |
1193 | if (!lv2table_kmem_cache) { | 1200 | if (!lv2table_kmem_cache) { |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index ae4c1a854e57..2d1e05bdbb53 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -1742,9 +1742,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
1742 | 1742 | ||
1743 | static void domain_exit(struct dmar_domain *domain) | 1743 | static void domain_exit(struct dmar_domain *domain) |
1744 | { | 1744 | { |
1745 | struct dmar_drhd_unit *drhd; | ||
1746 | struct intel_iommu *iommu; | ||
1747 | struct page *freelist = NULL; | 1745 | struct page *freelist = NULL; |
1746 | int i; | ||
1748 | 1747 | ||
1749 | /* Domain 0 is reserved, so dont process it */ | 1748 | /* Domain 0 is reserved, so dont process it */ |
1750 | if (!domain) | 1749 | if (!domain) |
@@ -1764,8 +1763,8 @@ static void domain_exit(struct dmar_domain *domain) | |||
1764 | 1763 | ||
1765 | /* clear attached or cached domains */ | 1764 | /* clear attached or cached domains */ |
1766 | rcu_read_lock(); | 1765 | rcu_read_lock(); |
1767 | for_each_active_iommu(iommu, drhd) | 1766 | for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) |
1768 | iommu_detach_domain(domain, iommu); | 1767 | iommu_detach_domain(domain, g_iommus[i]); |
1769 | rcu_read_unlock(); | 1768 | rcu_read_unlock(); |
1770 | 1769 | ||
1771 | dma_free_pagelist(freelist); | 1770 | dma_free_pagelist(freelist); |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 5a500edf00cc..b610a8dee238 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
@@ -56,7 +56,8 @@ | |||
56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ | 56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ |
57 | * (d)->bits_per_level) + (d)->pg_shift) | 57 | * (d)->bits_per_level) + (d)->pg_shift) |
58 | 58 | ||
59 | #define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) | 59 | #define ARM_LPAE_PAGES_PER_PGD(d) \ |
60 | DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) | ||
60 | 61 | ||
61 | /* | 62 | /* |
62 | * Calculate the index at level l used to map virtual address a using the | 63 | * Calculate the index at level l used to map virtual address a using the |
@@ -66,7 +67,7 @@ | |||
66 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) | 67 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) |
67 | 68 | ||
68 | #define ARM_LPAE_LVL_IDX(a,l,d) \ | 69 | #define ARM_LPAE_LVL_IDX(a,l,d) \ |
69 | (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ | 70 | (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ |
70 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) | 71 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) |
71 | 72 | ||
72 | /* Calculate the block/page mapping size at level l for pagetable in d. */ | 73 | /* Calculate the block/page mapping size at level l for pagetable in d. */ |
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 10186cac7716..bc39bdf7b99b 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
@@ -851,6 +851,7 @@ static int ipmmu_remove(struct platform_device *pdev) | |||
851 | 851 | ||
852 | static const struct of_device_id ipmmu_of_ids[] = { | 852 | static const struct of_device_id ipmmu_of_ids[] = { |
853 | { .compatible = "renesas,ipmmu-vmsa", }, | 853 | { .compatible = "renesas,ipmmu-vmsa", }, |
854 | { } | ||
854 | }; | 855 | }; |
855 | 856 | ||
856 | static struct platform_driver ipmmu_driver = { | 857 | static struct platform_driver ipmmu_driver = { |
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index f59f857b702e..a4ba851825c2 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
@@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void) | |||
1376 | struct kmem_cache *p; | 1376 | struct kmem_cache *p; |
1377 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | 1377 | const unsigned long flags = SLAB_HWCACHE_ALIGN; |
1378 | size_t align = 1 << 10; /* L2 pagetable alignment */ | 1378 | size_t align = 1 << 10; /* L2 pagetable alignment */ |
1379 | struct device_node *np; | ||
1380 | |||
1381 | np = of_find_matching_node(NULL, omap_iommu_of_match); | ||
1382 | if (!np) | ||
1383 | return 0; | ||
1384 | |||
1385 | of_node_put(np); | ||
1379 | 1386 | ||
1380 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, | 1387 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, |
1381 | iopte_cachep_ctor); | 1388 | iopte_cachep_ctor); |
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 6a8b1ec4a48a..9f74fddcd304 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
@@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = { | |||
1015 | 1015 | ||
1016 | static int __init rk_iommu_init(void) | 1016 | static int __init rk_iommu_init(void) |
1017 | { | 1017 | { |
1018 | struct device_node *np; | ||
1018 | int ret; | 1019 | int ret; |
1019 | 1020 | ||
1021 | np = of_find_matching_node(NULL, rk_iommu_dt_ids); | ||
1022 | if (!np) | ||
1023 | return 0; | ||
1024 | |||
1025 | of_node_put(np); | ||
1026 | |||
1020 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); | 1027 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); |
1021 | if (ret) | 1028 | if (ret) |
1022 | return ret; | 1029 | return ret; |
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 463c235acbdc..4387dae14e45 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c | |||
@@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base; | |||
69 | static void __iomem *main_int_base; | 69 | static void __iomem *main_int_base; |
70 | static struct irq_domain *armada_370_xp_mpic_domain; | 70 | static struct irq_domain *armada_370_xp_mpic_domain; |
71 | static u32 doorbell_mask_reg; | 71 | static u32 doorbell_mask_reg; |
72 | static int parent_irq; | ||
72 | #ifdef CONFIG_PCI_MSI | 73 | #ifdef CONFIG_PCI_MSI |
73 | static struct irq_domain *armada_370_xp_msi_domain; | 74 | static struct irq_domain *armada_370_xp_msi_domain; |
74 | static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); | 75 | static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); |
@@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, | |||
356 | { | 357 | { |
357 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 358 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
358 | armada_xp_mpic_smp_cpu_init(); | 359 | armada_xp_mpic_smp_cpu_init(); |
360 | |||
359 | return NOTIFY_OK; | 361 | return NOTIFY_OK; |
360 | } | 362 | } |
361 | 363 | ||
@@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = { | |||
364 | .priority = 100, | 366 | .priority = 100, |
365 | }; | 367 | }; |
366 | 368 | ||
369 | static int mpic_cascaded_secondary_init(struct notifier_block *nfb, | ||
370 | unsigned long action, void *hcpu) | ||
371 | { | ||
372 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | ||
373 | enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); | ||
374 | |||
375 | return NOTIFY_OK; | ||
376 | } | ||
377 | |||
378 | static struct notifier_block mpic_cascaded_cpu_notifier = { | ||
379 | .notifier_call = mpic_cascaded_secondary_init, | ||
380 | .priority = 100, | ||
381 | }; | ||
382 | |||
367 | #endif /* CONFIG_SMP */ | 383 | #endif /* CONFIG_SMP */ |
368 | 384 | ||
369 | static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { | 385 | static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { |
@@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
539 | struct device_node *parent) | 555 | struct device_node *parent) |
540 | { | 556 | { |
541 | struct resource main_int_res, per_cpu_int_res; | 557 | struct resource main_int_res, per_cpu_int_res; |
542 | int parent_irq, nr_irqs, i; | 558 | int nr_irqs, i; |
543 | u32 control; | 559 | u32 control; |
544 | 560 | ||
545 | BUG_ON(of_address_to_resource(node, 0, &main_int_res)); | 561 | BUG_ON(of_address_to_resource(node, 0, &main_int_res)); |
@@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
587 | register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); | 603 | register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); |
588 | #endif | 604 | #endif |
589 | } else { | 605 | } else { |
606 | #ifdef CONFIG_SMP | ||
607 | register_cpu_notifier(&mpic_cascaded_cpu_notifier); | ||
608 | #endif | ||
590 | irq_set_chained_handler(parent_irq, | 609 | irq_set_chained_handler(parent_irq, |
591 | armada_370_xp_mpic_handle_cascade_irq); | 610 | armada_370_xp_mpic_handle_cascade_irq); |
592 | } | 611 | } |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d8996bdf0f61..9687f8afebff 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -169,7 +169,7 @@ static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) | |||
169 | 169 | ||
170 | static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) | 170 | static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) |
171 | { | 171 | { |
172 | cmd->raw_cmd[0] &= ~(0xffffUL << 32); | 172 | cmd->raw_cmd[0] &= BIT_ULL(32) - 1; |
173 | cmd->raw_cmd[0] |= ((u64)devid) << 32; | 173 | cmd->raw_cmd[0] |= ((u64)devid) << 32; |
174 | } | 174 | } |
175 | 175 | ||
@@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its, | |||
416 | { | 416 | { |
417 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; | 417 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; |
418 | struct its_collection *sync_col; | 418 | struct its_collection *sync_col; |
419 | unsigned long flags; | ||
419 | 420 | ||
420 | raw_spin_lock(&its->lock); | 421 | raw_spin_lock_irqsave(&its->lock, flags); |
421 | 422 | ||
422 | cmd = its_allocate_entry(its); | 423 | cmd = its_allocate_entry(its); |
423 | if (!cmd) { /* We're soooooo screewed... */ | 424 | if (!cmd) { /* We're soooooo screewed... */ |
424 | pr_err_ratelimited("ITS can't allocate, dropping command\n"); | 425 | pr_err_ratelimited("ITS can't allocate, dropping command\n"); |
425 | raw_spin_unlock(&its->lock); | 426 | raw_spin_unlock_irqrestore(&its->lock, flags); |
426 | return; | 427 | return; |
427 | } | 428 | } |
428 | sync_col = builder(cmd, desc); | 429 | sync_col = builder(cmd, desc); |
@@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its, | |||
442 | 443 | ||
443 | post: | 444 | post: |
444 | next_cmd = its_post_commands(its); | 445 | next_cmd = its_post_commands(its); |
445 | raw_spin_unlock(&its->lock); | 446 | raw_spin_unlock_irqrestore(&its->lock, flags); |
446 | 447 | ||
447 | its_wait_for_range_completion(its, cmd, next_cmd); | 448 | its_wait_for_range_completion(its, cmd, next_cmd); |
448 | } | 449 | } |
@@ -799,21 +800,44 @@ static int its_alloc_tables(struct its_node *its) | |||
799 | { | 800 | { |
800 | int err; | 801 | int err; |
801 | int i; | 802 | int i; |
802 | int psz = PAGE_SIZE; | 803 | int psz = SZ_64K; |
803 | u64 shr = GITS_BASER_InnerShareable; | 804 | u64 shr = GITS_BASER_InnerShareable; |
805 | u64 cache = GITS_BASER_WaWb; | ||
804 | 806 | ||
805 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | 807 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
806 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); | 808 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); |
807 | u64 type = GITS_BASER_TYPE(val); | 809 | u64 type = GITS_BASER_TYPE(val); |
808 | u64 entry_size = GITS_BASER_ENTRY_SIZE(val); | 810 | u64 entry_size = GITS_BASER_ENTRY_SIZE(val); |
811 | int order = get_order(psz); | ||
812 | int alloc_size; | ||
809 | u64 tmp; | 813 | u64 tmp; |
810 | void *base; | 814 | void *base; |
811 | 815 | ||
812 | if (type == GITS_BASER_TYPE_NONE) | 816 | if (type == GITS_BASER_TYPE_NONE) |
813 | continue; | 817 | continue; |
814 | 818 | ||
815 | /* We're lazy and only allocate a single page for now */ | 819 | /* |
816 | base = (void *)get_zeroed_page(GFP_KERNEL); | 820 | * Allocate as many entries as required to fit the |
821 | * range of device IDs that the ITS can grok... The ID | ||
822 | * space being incredibly sparse, this results in a | ||
823 | * massive waste of memory. | ||
824 | * | ||
825 | * For other tables, only allocate a single page. | ||
826 | */ | ||
827 | if (type == GITS_BASER_TYPE_DEVICE) { | ||
828 | u64 typer = readq_relaxed(its->base + GITS_TYPER); | ||
829 | u32 ids = GITS_TYPER_DEVBITS(typer); | ||
830 | |||
831 | order = get_order((1UL << ids) * entry_size); | ||
832 | if (order >= MAX_ORDER) { | ||
833 | order = MAX_ORDER - 1; | ||
834 | pr_warn("%s: Device Table too large, reduce its page order to %u\n", | ||
835 | its->msi_chip.of_node->full_name, order); | ||
836 | } | ||
837 | } | ||
838 | |||
839 | alloc_size = (1 << order) * PAGE_SIZE; | ||
840 | base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | ||
817 | if (!base) { | 841 | if (!base) { |
818 | err = -ENOMEM; | 842 | err = -ENOMEM; |
819 | goto out_free; | 843 | goto out_free; |
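
The Device-table sizing above reduces to one calculation: (1 << DEVBITS) entries of entry_size bytes, rounded up to a power-of-two number of pages and capped at MAX_ORDER. The stand-alone sketch below redoes that arithmetic with a minimal user-space stand-in for get_order() and 4 KiB pages; the 17 device ID bits and 8-byte entry size are made-up example figures rather than values read from GITS_TYPER.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define MAX_ORDER	11	/* typical kernel default */

/* minimal stand-in for the kernel's get_order(): smallest power-of-two page count covering size */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned int ids = 17;		/* example device ID width from GITS_TYPER */
	unsigned long entry_size = 8;	/* example bytes per Device-table entry */
	int order = get_order((1UL << ids) * entry_size);

	if (order >= MAX_ORDER)
		order = MAX_ORDER - 1;

	printf("device IDs: %lu, table size: %lu KiB (order %d)\n",
	       1UL << ids, ((1UL << order) * PAGE_SIZE) / 1024, order);
	return 0;
}
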
@@ -825,7 +849,7 @@ retry_baser: | |||
825 | val = (virt_to_phys(base) | | 849 | val = (virt_to_phys(base) | |
826 | (type << GITS_BASER_TYPE_SHIFT) | | 850 | (type << GITS_BASER_TYPE_SHIFT) | |
827 | ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | | 851 | ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | |
828 | GITS_BASER_WaWb | | 852 | cache | |
829 | shr | | 853 | shr | |
830 | GITS_BASER_VALID); | 854 | GITS_BASER_VALID); |
831 | 855 | ||
@@ -841,7 +865,7 @@ retry_baser: | |||
841 | break; | 865 | break; |
842 | } | 866 | } |
843 | 867 | ||
844 | val |= (PAGE_SIZE / psz) - 1; | 868 | val |= (alloc_size / psz) - 1; |
845 | 869 | ||
846 | writeq_relaxed(val, its->base + GITS_BASER + i * 8); | 870 | writeq_relaxed(val, its->base + GITS_BASER + i * 8); |
847 | tmp = readq_relaxed(its->base + GITS_BASER + i * 8); | 871 | tmp = readq_relaxed(its->base + GITS_BASER + i * 8); |
@@ -851,9 +875,12 @@ retry_baser: | |||
851 | * Shareability didn't stick. Just use | 875 | * Shareability didn't stick. Just use |
852 | * whatever the read reported, which is likely | 876 | * whatever the read reported, which is likely |
853 | * to be the only thing this redistributor | 877 | * to be the only thing this redistributor |
854 | * supports. | 878 | * supports. If that's zero, make it |
879 | * non-cacheable as well. | ||
855 | */ | 880 | */ |
856 | shr = tmp & GITS_BASER_SHAREABILITY_MASK; | 881 | shr = tmp & GITS_BASER_SHAREABILITY_MASK; |
882 | if (!shr) | ||
883 | cache = GITS_BASER_nC; | ||
857 | goto retry_baser; | 884 | goto retry_baser; |
858 | } | 885 | } |
859 | 886 | ||
@@ -882,7 +909,7 @@ retry_baser: | |||
882 | } | 909 | } |
883 | 910 | ||
884 | pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", | 911 | pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", |
885 | (int)(PAGE_SIZE / entry_size), | 912 | (int)(alloc_size / entry_size), |
886 | its_base_type_string[type], | 913 | its_base_type_string[type], |
887 | (unsigned long)virt_to_phys(base), | 914 | (unsigned long)virt_to_phys(base), |
888 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); | 915 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); |
@@ -957,16 +984,39 @@ static void its_cpu_init_lpis(void) | |||
957 | tmp = readq_relaxed(rbase + GICR_PROPBASER); | 984 | tmp = readq_relaxed(rbase + GICR_PROPBASER); |
958 | 985 | ||
959 | if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { | 986 | if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { |
987 | if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { | ||
988 | /* | ||
989 | * The HW reports non-shareable, we must | ||
990 | * remove the cacheability attributes as | ||
991 | * well. | ||
992 | */ | ||
993 | val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | | ||
994 | GICR_PROPBASER_CACHEABILITY_MASK); | ||
995 | val |= GICR_PROPBASER_nC; | ||
996 | writeq_relaxed(val, rbase + GICR_PROPBASER); | ||
997 | } | ||
960 | pr_info_once("GIC: using cache flushing for LPI property table\n"); | 998 | pr_info_once("GIC: using cache flushing for LPI property table\n"); |
961 | gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; | 999 | gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; |
962 | } | 1000 | } |
963 | 1001 | ||
964 | /* set PENDBASE */ | 1002 | /* set PENDBASE */ |
965 | val = (page_to_phys(pend_page) | | 1003 | val = (page_to_phys(pend_page) | |
966 | GICR_PROPBASER_InnerShareable | | 1004 | GICR_PENDBASER_InnerShareable | |
967 | GICR_PROPBASER_WaWb); | 1005 | GICR_PENDBASER_WaWb); |
968 | 1006 | ||
969 | writeq_relaxed(val, rbase + GICR_PENDBASER); | 1007 | writeq_relaxed(val, rbase + GICR_PENDBASER); |
1008 | tmp = readq_relaxed(rbase + GICR_PENDBASER); | ||
1009 | |||
1010 | if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { | ||
1011 | /* | ||
1012 | * The HW reports non-shareable, we must remove the | ||
1013 | * cacheability attributes as well. | ||
1014 | */ | ||
1015 | val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | | ||
1016 | GICR_PENDBASER_CACHEABILITY_MASK); | ||
1017 | val |= GICR_PENDBASER_nC; | ||
1018 | writeq_relaxed(val, rbase + GICR_PENDBASER); | ||
1019 | } | ||
970 | 1020 | ||
971 | /* Enable LPIs */ | 1021 | /* Enable LPIs */ |
972 | val = readl_relaxed(rbase + GICR_CTLR); | 1022 | val = readl_relaxed(rbase + GICR_CTLR); |
@@ -1003,7 +1053,7 @@ static void its_cpu_init_collection(void) | |||
1003 | * This ITS wants a linear CPU number. | 1053 | * This ITS wants a linear CPU number. |
1004 | */ | 1054 | */ |
1005 | target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); | 1055 | target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); |
1006 | target = GICR_TYPER_CPU_NUMBER(target); | 1056 | target = GICR_TYPER_CPU_NUMBER(target) << 16; |
1007 | } | 1057 | } |
1008 | 1058 | ||
1009 | /* Perform collection mapping */ | 1059 | /* Perform collection mapping */ |
@@ -1020,8 +1070,9 @@ static void its_cpu_init_collection(void) | |||
1020 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | 1070 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) |
1021 | { | 1071 | { |
1022 | struct its_device *its_dev = NULL, *tmp; | 1072 | struct its_device *its_dev = NULL, *tmp; |
1073 | unsigned long flags; | ||
1023 | 1074 | ||
1024 | raw_spin_lock(&its->lock); | 1075 | raw_spin_lock_irqsave(&its->lock, flags); |
1025 | 1076 | ||
1026 | list_for_each_entry(tmp, &its->its_device_list, entry) { | 1077 | list_for_each_entry(tmp, &its->its_device_list, entry) { |
1027 | if (tmp->device_id == dev_id) { | 1078 | if (tmp->device_id == dev_id) { |
@@ -1030,7 +1081,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | |||
1030 | } | 1081 | } |
1031 | } | 1082 | } |
1032 | 1083 | ||
1033 | raw_spin_unlock(&its->lock); | 1084 | raw_spin_unlock_irqrestore(&its->lock, flags); |
1034 | 1085 | ||
1035 | return its_dev; | 1086 | return its_dev; |
1036 | } | 1087 | } |
@@ -1040,6 +1091,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1040 | { | 1091 | { |
1041 | struct its_device *dev; | 1092 | struct its_device *dev; |
1042 | unsigned long *lpi_map; | 1093 | unsigned long *lpi_map; |
1094 | unsigned long flags; | ||
1043 | void *itt; | 1095 | void *itt; |
1044 | int lpi_base; | 1096 | int lpi_base; |
1045 | int nr_lpis; | 1097 | int nr_lpis; |
@@ -1056,7 +1108,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1056 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); | 1108 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); |
1057 | sz = nr_ites * its->ite_size; | 1109 | sz = nr_ites * its->ite_size; |
1058 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; | 1110 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
1059 | itt = kmalloc(sz, GFP_KERNEL); | 1111 | itt = kzalloc(sz, GFP_KERNEL); |
1060 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); | 1112 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); |
1061 | 1113 | ||
1062 | if (!dev || !itt || !lpi_map) { | 1114 | if (!dev || !itt || !lpi_map) { |
@@ -1075,9 +1127,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1075 | dev->device_id = dev_id; | 1127 | dev->device_id = dev_id; |
1076 | INIT_LIST_HEAD(&dev->entry); | 1128 | INIT_LIST_HEAD(&dev->entry); |
1077 | 1129 | ||
1078 | raw_spin_lock(&its->lock); | 1130 | raw_spin_lock_irqsave(&its->lock, flags); |
1079 | list_add(&dev->entry, &its->its_device_list); | 1131 | list_add(&dev->entry, &its->its_device_list); |
1080 | raw_spin_unlock(&its->lock); | 1132 | raw_spin_unlock_irqrestore(&its->lock, flags); |
1081 | 1133 | ||
1082 | /* Bind the device to the first possible CPU */ | 1134 | /* Bind the device to the first possible CPU */ |
1083 | cpu = cpumask_first(cpu_online_mask); | 1135 | cpu = cpumask_first(cpu_online_mask); |
@@ -1091,9 +1143,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1091 | 1143 | ||
1092 | static void its_free_device(struct its_device *its_dev) | 1144 | static void its_free_device(struct its_device *its_dev) |
1093 | { | 1145 | { |
1094 | raw_spin_lock(&its_dev->its->lock); | 1146 | unsigned long flags; |
1147 | |||
1148 | raw_spin_lock_irqsave(&its_dev->its->lock, flags); | ||
1095 | list_del(&its_dev->entry); | 1149 | list_del(&its_dev->entry); |
1096 | raw_spin_unlock(&its_dev->its->lock); | 1150 | raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); |
1097 | kfree(its_dev->itt); | 1151 | kfree(its_dev->itt); |
1098 | kfree(its_dev); | 1152 | kfree(its_dev); |
1099 | } | 1153 | } |
@@ -1112,31 +1166,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) | |||
1112 | return 0; | 1166 | return 0; |
1113 | } | 1167 | } |
1114 | 1168 | ||
1169 | struct its_pci_alias { | ||
1170 | struct pci_dev *pdev; | ||
1171 | u32 dev_id; | ||
1172 | u32 count; | ||
1173 | }; | ||
1174 | |||
1175 | static int its_pci_msi_vec_count(struct pci_dev *pdev) | ||
1176 | { | ||
1177 | int msi, msix; | ||
1178 | |||
1179 | msi = max(pci_msi_vec_count(pdev), 0); | ||
1180 | msix = max(pci_msix_vec_count(pdev), 0); | ||
1181 | |||
1182 | return max(msi, msix); | ||
1183 | } | ||
1184 | |||
1185 | static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) | ||
1186 | { | ||
1187 | struct its_pci_alias *dev_alias = data; | ||
1188 | |||
1189 | dev_alias->dev_id = alias; | ||
1190 | if (pdev != dev_alias->pdev) | ||
1191 | dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); | ||
1192 | |||
1193 | return 0; | ||
1194 | } | ||
1195 | |||
1115 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | 1196 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, |
1116 | int nvec, msi_alloc_info_t *info) | 1197 | int nvec, msi_alloc_info_t *info) |
1117 | { | 1198 | { |
1118 | struct pci_dev *pdev; | 1199 | struct pci_dev *pdev; |
1119 | struct its_node *its; | 1200 | struct its_node *its; |
1120 | u32 dev_id; | ||
1121 | struct its_device *its_dev; | 1201 | struct its_device *its_dev; |
1202 | struct its_pci_alias dev_alias; | ||
1122 | 1203 | ||
1123 | if (!dev_is_pci(dev)) | 1204 | if (!dev_is_pci(dev)) |
1124 | return -EINVAL; | 1205 | return -EINVAL; |
1125 | 1206 | ||
1126 | pdev = to_pci_dev(dev); | 1207 | pdev = to_pci_dev(dev); |
1127 | dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); | 1208 | dev_alias.pdev = pdev; |
1209 | dev_alias.count = nvec; | ||
1210 | |||
1211 | pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); | ||
1128 | its = domain->parent->host_data; | 1212 | its = domain->parent->host_data; |
1129 | 1213 | ||
1130 | its_dev = its_find_device(its, dev_id); | 1214 | its_dev = its_find_device(its, dev_alias.dev_id); |
1131 | if (WARN_ON(its_dev)) | 1215 | if (its_dev) { |
1132 | return -EINVAL; | 1216 | /* |
1217 | * We already have seen this ID, probably through | ||
1218 | * another alias (PCI bridge of some sort). No need to | ||
1219 | * create the device. | ||
1220 | */ | ||
1221 | dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id); | ||
1222 | goto out; | ||
1223 | } | ||
1133 | 1224 | ||
1134 | its_dev = its_create_device(its, dev_id, nvec); | 1225 | its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count); |
1135 | if (!its_dev) | 1226 | if (!its_dev) |
1136 | return -ENOMEM; | 1227 | return -ENOMEM; |
1137 | 1228 | ||
1138 | dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); | 1229 | dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", |
1139 | 1230 | dev_alias.count, ilog2(dev_alias.count)); | |
1231 | out: | ||
1140 | info->scratchpad[0].ptr = its_dev; | 1232 | info->scratchpad[0].ptr = its_dev; |
1141 | info->scratchpad[1].ptr = dev; | 1233 | info->scratchpad[1].ptr = dev; |
1142 | return 0; | 1234 | return 0; |
@@ -1255,6 +1347,34 @@ static const struct irq_domain_ops its_domain_ops = { | |||
1255 | .deactivate = its_irq_domain_deactivate, | 1347 | .deactivate = its_irq_domain_deactivate, |
1256 | }; | 1348 | }; |
1257 | 1349 | ||
1350 | static int its_force_quiescent(void __iomem *base) | ||
1351 | { | ||
1352 | u32 count = 1000000; /* 1s */ | ||
1353 | u32 val; | ||
1354 | |||
1355 | val = readl_relaxed(base + GITS_CTLR); | ||
1356 | if (val & GITS_CTLR_QUIESCENT) | ||
1357 | return 0; | ||
1358 | |||
1359 | /* Disable the generation of all interrupts to this ITS */ | ||
1360 | val &= ~GITS_CTLR_ENABLE; | ||
1361 | writel_relaxed(val, base + GITS_CTLR); | ||
1362 | |||
1363 | /* Poll GITS_CTLR and wait until ITS becomes quiescent */ | ||
1364 | while (1) { | ||
1365 | val = readl_relaxed(base + GITS_CTLR); | ||
1366 | if (val & GITS_CTLR_QUIESCENT) | ||
1367 | return 0; | ||
1368 | |||
1369 | count--; | ||
1370 | if (!count) | ||
1371 | return -EBUSY; | ||
1372 | |||
1373 | cpu_relax(); | ||
1374 | udelay(1); | ||
1375 | } | ||
1376 | } | ||
1377 | |||
1258 | static int its_probe(struct device_node *node, struct irq_domain *parent) | 1378 | static int its_probe(struct device_node *node, struct irq_domain *parent) |
1259 | { | 1379 | { |
1260 | struct resource res; | 1380 | struct resource res; |
@@ -1283,6 +1403,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) | |||
1283 | goto out_unmap; | 1403 | goto out_unmap; |
1284 | } | 1404 | } |
1285 | 1405 | ||
1406 | err = its_force_quiescent(its_base); | ||
1407 | if (err) { | ||
1408 | pr_warn("%s: failed to quiesce, giving up\n", | ||
1409 | node->full_name); | ||
1410 | goto out_unmap; | ||
1411 | } | ||
1412 | |||
1286 | pr_info("ITS: %s\n", node->full_name); | 1413 | pr_info("ITS: %s\n", node->full_name); |
1287 | 1414 | ||
1288 | its = kzalloc(sizeof(*its), GFP_KERNEL); | 1415 | its = kzalloc(sizeof(*its), GFP_KERNEL); |
@@ -1322,14 +1449,26 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) | |||
1322 | 1449 | ||
1323 | writeq_relaxed(baser, its->base + GITS_CBASER); | 1450 | writeq_relaxed(baser, its->base + GITS_CBASER); |
1324 | tmp = readq_relaxed(its->base + GITS_CBASER); | 1451 | tmp = readq_relaxed(its->base + GITS_CBASER); |
1325 | writeq_relaxed(0, its->base + GITS_CWRITER); | ||
1326 | writel_relaxed(1, its->base + GITS_CTLR); | ||
1327 | 1452 | ||
1328 | if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { | 1453 | if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { |
1454 | if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { | ||
1455 | /* | ||
1456 | * The HW reports non-shareable, we must | ||
1457 | * remove the cacheability attributes as | ||
1458 | * well. | ||
1459 | */ | ||
1460 | baser &= ~(GITS_CBASER_SHAREABILITY_MASK | | ||
1461 | GITS_CBASER_CACHEABILITY_MASK); | ||
1462 | baser |= GITS_CBASER_nC; | ||
1463 | writeq_relaxed(baser, its->base + GITS_CBASER); | ||
1464 | } | ||
1329 | pr_info("ITS: using cache flushing for cmd queue\n"); | 1465 | pr_info("ITS: using cache flushing for cmd queue\n"); |
1330 | its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; | 1466 | its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; |
1331 | } | 1467 | } |
1332 | 1468 | ||
1469 | writeq_relaxed(0, its->base + GITS_CWRITER); | ||
1470 | writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); | ||
1471 | |||
1333 | if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { | 1472 | if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { |
1334 | its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); | 1473 | its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); |
1335 | if (!its->domain) { | 1474 | if (!its->domain) { |
@@ -1382,12 +1521,11 @@ static bool gic_rdists_supports_plpis(void) | |||
1382 | 1521 | ||
1383 | int its_cpu_init(void) | 1522 | int its_cpu_init(void) |
1384 | { | 1523 | { |
1385 | if (!gic_rdists_supports_plpis()) { | ||
1386 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | ||
1387 | return -ENXIO; | ||
1388 | } | ||
1389 | |||
1390 | if (!list_empty(&its_nodes)) { | 1524 | if (!list_empty(&its_nodes)) { |
1525 | if (!gic_rdists_supports_plpis()) { | ||
1526 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | ||
1527 | return -ENXIO; | ||
1528 | } | ||
1391 | its_cpu_init_lpis(); | 1529 | its_cpu_init_lpis(); |
1392 | its_cpu_init_collection(); | 1530 | its_cpu_init_collection(); |
1393 | } | 1531 | } |
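Note on the its_force_quiescent() hunk above: it follows a common disable-then-poll idiom — clear the enable bit, then poll a status bit with a bounded budget (~1s here) before giving up. A minimal sketch of that idiom follows; the register offset and bit names (DEMO_*) are hypothetical placeholders, not the real GITS layout.

#include <linux/io.h>
#include <linux/delay.h>

/* Hypothetical register and bits, for illustration only. */
#define DEMO_CTLR		0x0
#define DEMO_CTLR_ENABLE	(1U << 0)
#define DEMO_CTLR_QUIESCENT	(1U << 31)

static int demo_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* ~1s at one poll per microsecond */
	u32 val;

	val = readl_relaxed(base + DEMO_CTLR);
	if (val & DEMO_CTLR_QUIESCENT)
		return 0;

	/* Stop the block generating new work before waiting for it to drain. */
	writel_relaxed(val & ~DEMO_CTLR_ENABLE, base + DEMO_CTLR);

	while (count--) {
		if (readl_relaxed(base + DEMO_CTLR) & DEMO_CTLR_QUIESCENT)
			return 0;
		udelay(1);
	}

	return -EBUSY;
}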
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1c6dea2fbc34..fd8850def1b8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, | |||
466 | tlist |= 1 << (mpidr & 0xf); | 466 | tlist |= 1 << (mpidr & 0xf); |
467 | 467 | ||
468 | cpu = cpumask_next(cpu, mask); | 468 | cpu = cpumask_next(cpu, mask); |
469 | if (cpu == nr_cpu_ids) | 469 | if (cpu >= nr_cpu_ids) |
470 | goto out; | 470 | goto out; |
471 | 471 | ||
472 | mpidr = cpu_logical_map(cpu); | 472 | mpidr = cpu_logical_map(cpu); |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 4634cf7d0ec3..471e1cdc1933 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d) | |||
154 | static void gic_mask_irq(struct irq_data *d) | 154 | static void gic_mask_irq(struct irq_data *d) |
155 | { | 155 | { |
156 | u32 mask = 1 << (gic_irq(d) % 32); | 156 | u32 mask = 1 << (gic_irq(d) % 32); |
157 | unsigned long flags; | ||
157 | 158 | ||
158 | raw_spin_lock(&irq_controller_lock); | 159 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
159 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); | 160 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); |
160 | if (gic_arch_extn.irq_mask) | 161 | if (gic_arch_extn.irq_mask) |
161 | gic_arch_extn.irq_mask(d); | 162 | gic_arch_extn.irq_mask(d); |
162 | raw_spin_unlock(&irq_controller_lock); | 163 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
163 | } | 164 | } |
164 | 165 | ||
165 | static void gic_unmask_irq(struct irq_data *d) | 166 | static void gic_unmask_irq(struct irq_data *d) |
166 | { | 167 | { |
167 | u32 mask = 1 << (gic_irq(d) % 32); | 168 | u32 mask = 1 << (gic_irq(d) % 32); |
169 | unsigned long flags; | ||
168 | 170 | ||
169 | raw_spin_lock(&irq_controller_lock); | 171 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
170 | if (gic_arch_extn.irq_unmask) | 172 | if (gic_arch_extn.irq_unmask) |
171 | gic_arch_extn.irq_unmask(d); | 173 | gic_arch_extn.irq_unmask(d); |
172 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); | 174 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); |
173 | raw_spin_unlock(&irq_controller_lock); | 175 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
174 | } | 176 | } |
175 | 177 | ||
176 | static void gic_eoi_irq(struct irq_data *d) | 178 | static void gic_eoi_irq(struct irq_data *d) |
@@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
188 | { | 190 | { |
189 | void __iomem *base = gic_dist_base(d); | 191 | void __iomem *base = gic_dist_base(d); |
190 | unsigned int gicirq = gic_irq(d); | 192 | unsigned int gicirq = gic_irq(d); |
193 | unsigned long flags; | ||
191 | int ret; | 194 | int ret; |
192 | 195 | ||
193 | /* Interrupt configuration for SGIs can't be changed */ | 196 | /* Interrupt configuration for SGIs can't be changed */ |
@@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
199 | type != IRQ_TYPE_EDGE_RISING) | 202 | type != IRQ_TYPE_EDGE_RISING) |
200 | return -EINVAL; | 203 | return -EINVAL; |
201 | 204 | ||
202 | raw_spin_lock(&irq_controller_lock); | 205 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
203 | 206 | ||
204 | if (gic_arch_extn.irq_set_type) | 207 | if (gic_arch_extn.irq_set_type) |
205 | gic_arch_extn.irq_set_type(d, type); | 208 | gic_arch_extn.irq_set_type(d, type); |
206 | 209 | ||
207 | ret = gic_configure_irq(gicirq, type, base, NULL); | 210 | ret = gic_configure_irq(gicirq, type, base, NULL); |
208 | 211 | ||
209 | raw_spin_unlock(&irq_controller_lock); | 212 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
210 | 213 | ||
211 | return ret; | 214 | return ret; |
212 | } | 215 | } |
@@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
227 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | 230 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
228 | unsigned int cpu, shift = (gic_irq(d) % 4) * 8; | 231 | unsigned int cpu, shift = (gic_irq(d) % 4) * 8; |
229 | u32 val, mask, bit; | 232 | u32 val, mask, bit; |
233 | unsigned long flags; | ||
230 | 234 | ||
231 | if (!force) | 235 | if (!force) |
232 | cpu = cpumask_any_and(mask_val, cpu_online_mask); | 236 | cpu = cpumask_any_and(mask_val, cpu_online_mask); |
@@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
236 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) | 240 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) |
237 | return -EINVAL; | 241 | return -EINVAL; |
238 | 242 | ||
239 | raw_spin_lock(&irq_controller_lock); | 243 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
240 | mask = 0xff << shift; | 244 | mask = 0xff << shift; |
241 | bit = gic_cpu_map[cpu] << shift; | 245 | bit = gic_cpu_map[cpu] << shift; |
242 | val = readl_relaxed(reg) & ~mask; | 246 | val = readl_relaxed(reg) & ~mask; |
243 | writel_relaxed(val | bit, reg); | 247 | writel_relaxed(val | bit, reg); |
244 | raw_spin_unlock(&irq_controller_lock); | 248 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
245 | 249 | ||
246 | return IRQ_SET_MASK_OK; | 250 | return IRQ_SET_MASK_OK; |
247 | } | 251 | } |
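The irq-gic.c hunks above all make the same conversion: raw_spin_lock() becomes raw_spin_lock_irqsave(), so the lock can be taken from thread context without deadlocking against an interrupt handler that acquires it too. A minimal sketch of the pattern; the lock name and the shadow variable standing in for the MMIO write are illustrative only.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static unsigned int demo_shadow;

/* Safe from any context: interrupts are disabled while the lock is held. */
static void demo_update(unsigned int mask)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_shadow |= mask;		/* stand-in for the register update */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}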
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 3c92780bda09..ff48da61c94c 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c | |||
@@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc) | |||
1755 | enable_hwirq(hc); | 1755 | enable_hwirq(hc); |
1756 | spin_unlock_irqrestore(&hc->lock, flags); | 1756 | spin_unlock_irqrestore(&hc->lock, flags); |
1757 | /* Timeout 80ms */ | 1757 | /* Timeout 80ms */ |
1758 | current->state = TASK_UNINTERRUPTIBLE; | 1758 | set_current_state(TASK_UNINTERRUPTIBLE); |
1759 | schedule_timeout((80 * HZ) / 1000); | 1759 | schedule_timeout((80 * HZ) / 1000); |
1760 | printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", | 1760 | printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", |
1761 | hc->irq, hc->irqcnt); | 1761 | hc->irq, hc->irqcnt); |
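The hfcpci.c fix replaces a direct assignment to current->state with set_current_state(), which carries the memory barrier the scheduler relies on; an open-coded store can race with a concurrent wakeup. A sketch of the corrected 80ms sleep, assuming process context (the function name is illustrative):

#include <linux/sched.h>
#include <linux/jiffies.h>

static void demo_wait_80ms(void)
{
	/*
	 * set_current_state() provides the barrier the scheduler needs;
	 * writing current->state directly does not.  (msleep(80) would be
	 * the simpler choice in new code.)
	 */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(80));
}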
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c index 6a7447c304ac..358a574d9e8b 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/isdn/icn/icn.c | |||
@@ -1609,7 +1609,7 @@ icn_setup(char *line) | |||
1609 | if (ints[0] > 1) | 1609 | if (ints[0] > 1) |
1610 | membase = (unsigned long)ints[2]; | 1610 | membase = (unsigned long)ints[2]; |
1611 | if (str && *str) { | 1611 | if (str && *str) { |
1612 | strcpy(sid, str); | 1612 | strlcpy(sid, str, sizeof(sid)); |
1613 | icn_id = sid; | 1613 | icn_id = sid; |
1614 | if ((p = strchr(sid, ','))) { | 1614 | if ((p = strchr(sid, ','))) { |
1615 | *p++ = 0; | 1615 | *p++ = 0; |
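The icn.c change swaps an unbounded strcpy() for strlcpy(), which truncates at the destination size and always NUL-terminates. A minimal sketch; the buffer name and size are illustrative, not the driver's actual layout.

#include <linux/string.h>

static char demo_id[20];

static void demo_set_id(const char *str)
{
	/* Copies at most sizeof(demo_id) - 1 bytes and NUL-terminates. */
	strlcpy(demo_id, str, sizeof(demo_id));
}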
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig index ee035ec4526b..169172d2ba05 100644 --- a/drivers/lguest/Kconfig +++ b/drivers/lguest/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config LGUEST | 1 | config LGUEST |
2 | tristate "Linux hypervisor example code" | 2 | tristate "Linux hypervisor example code" |
3 | depends on X86_32 && EVENTFD && TTY | 3 | depends on X86_32 && EVENTFD && TTY && PCI_DIRECT |
4 | select HVC_DRIVER | 4 | select HVC_DRIVER |
5 | ---help--- | 5 | ---help--- |
6 | This is a very simple module which allows you to run | 6 | This is a very simple module which allows you to run |
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 37de0173b6d2..74adcd2c967e 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
289 | struct request_queue *q = bdev_get_queue(where->bdev); | 289 | struct request_queue *q = bdev_get_queue(where->bdev); |
290 | unsigned short logical_block_size = queue_logical_block_size(q); | 290 | unsigned short logical_block_size = queue_logical_block_size(q); |
291 | sector_t num_sectors; | 291 | sector_t num_sectors; |
292 | unsigned int uninitialized_var(special_cmd_max_sectors); | ||
292 | 293 | ||
293 | /* Reject unsupported discard requests */ | 294 | /* |
294 | if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { | 295 | * Reject unsupported discard and write same requests. |
296 | */ | ||
297 | if (rw & REQ_DISCARD) | ||
298 | special_cmd_max_sectors = q->limits.max_discard_sectors; | ||
299 | else if (rw & REQ_WRITE_SAME) | ||
300 | special_cmd_max_sectors = q->limits.max_write_same_sectors; | ||
301 | if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { | ||
295 | dec_count(io, region, -EOPNOTSUPP); | 302 | dec_count(io, region, -EOPNOTSUPP); |
296 | return; | 303 | return; |
297 | } | 304 | } |
@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
317 | store_io_and_region_in_bio(bio, io, region); | 324 | store_io_and_region_in_bio(bio, io, region); |
318 | 325 | ||
319 | if (rw & REQ_DISCARD) { | 326 | if (rw & REQ_DISCARD) { |
320 | num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); | 327 | num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); |
321 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; | 328 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
322 | remaining -= num_sectors; | 329 | remaining -= num_sectors; |
323 | } else if (rw & REQ_WRITE_SAME) { | 330 | } else if (rw & REQ_WRITE_SAME) { |
@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
326 | */ | 333 | */ |
327 | dp->get_page(dp, &page, &len, &offset); | 334 | dp->get_page(dp, &page, &len, &offset); |
328 | bio_add_page(bio, page, logical_block_size, offset); | 335 | bio_add_page(bio, page, logical_block_size, offset); |
329 | num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); | 336 | num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); |
330 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; | 337 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
331 | 338 | ||
332 | offset = 0; | 339 | offset = 0; |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 8b204ae216ab..f83a0f3fc365 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/log2.h> | 20 | #include <linux/log2.h> |
21 | #include <linux/dm-kcopyd.h> | 21 | #include <linux/dm-kcopyd.h> |
22 | 22 | ||
23 | #include "dm.h" | ||
24 | |||
23 | #include "dm-exception-store.h" | 25 | #include "dm-exception-store.h" |
24 | 26 | ||
25 | #define DM_MSG_PREFIX "snapshots" | 27 | #define DM_MSG_PREFIX "snapshots" |
@@ -291,12 +293,23 @@ struct origin { | |||
291 | }; | 293 | }; |
292 | 294 | ||
293 | /* | 295 | /* |
296 | * This structure is allocated for each origin target | ||
297 | */ | ||
298 | struct dm_origin { | ||
299 | struct dm_dev *dev; | ||
300 | struct dm_target *ti; | ||
301 | unsigned split_boundary; | ||
302 | struct list_head hash_list; | ||
303 | }; | ||
304 | |||
305 | /* | ||
294 | * Size of the hash table for origin volumes. If we make this | 306 | * Size of the hash table for origin volumes. If we make this |
295 | * the size of the minors list then it should be nearly perfect | 307 | * the size of the minors list then it should be nearly perfect |
296 | */ | 308 | */ |
297 | #define ORIGIN_HASH_SIZE 256 | 309 | #define ORIGIN_HASH_SIZE 256 |
298 | #define ORIGIN_MASK 0xFF | 310 | #define ORIGIN_MASK 0xFF |
299 | static struct list_head *_origins; | 311 | static struct list_head *_origins; |
312 | static struct list_head *_dm_origins; | ||
300 | static struct rw_semaphore _origins_lock; | 313 | static struct rw_semaphore _origins_lock; |
301 | 314 | ||
302 | static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); | 315 | static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); |
@@ -310,12 +323,22 @@ static int init_origin_hash(void) | |||
310 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | 323 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), |
311 | GFP_KERNEL); | 324 | GFP_KERNEL); |
312 | if (!_origins) { | 325 | if (!_origins) { |
313 | DMERR("unable to allocate memory"); | 326 | DMERR("unable to allocate memory for _origins"); |
314 | return -ENOMEM; | 327 | return -ENOMEM; |
315 | } | 328 | } |
316 | |||
317 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | 329 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) |
318 | INIT_LIST_HEAD(_origins + i); | 330 | INIT_LIST_HEAD(_origins + i); |
331 | |||
332 | _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | ||
333 | GFP_KERNEL); | ||
334 | if (!_dm_origins) { | ||
335 | DMERR("unable to allocate memory for _dm_origins"); | ||
336 | kfree(_origins); | ||
337 | return -ENOMEM; | ||
338 | } | ||
339 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | ||
340 | INIT_LIST_HEAD(_dm_origins + i); | ||
341 | |||
319 | init_rwsem(&_origins_lock); | 342 | init_rwsem(&_origins_lock); |
320 | 343 | ||
321 | return 0; | 344 | return 0; |
@@ -324,6 +347,7 @@ static int init_origin_hash(void) | |||
324 | static void exit_origin_hash(void) | 347 | static void exit_origin_hash(void) |
325 | { | 348 | { |
326 | kfree(_origins); | 349 | kfree(_origins); |
350 | kfree(_dm_origins); | ||
327 | } | 351 | } |
328 | 352 | ||
329 | static unsigned origin_hash(struct block_device *bdev) | 353 | static unsigned origin_hash(struct block_device *bdev) |
@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o) | |||
350 | list_add_tail(&o->hash_list, sl); | 374 | list_add_tail(&o->hash_list, sl); |
351 | } | 375 | } |
352 | 376 | ||
377 | static struct dm_origin *__lookup_dm_origin(struct block_device *origin) | ||
378 | { | ||
379 | struct list_head *ol; | ||
380 | struct dm_origin *o; | ||
381 | |||
382 | ol = &_dm_origins[origin_hash(origin)]; | ||
383 | list_for_each_entry (o, ol, hash_list) | ||
384 | if (bdev_equal(o->dev->bdev, origin)) | ||
385 | return o; | ||
386 | |||
387 | return NULL; | ||
388 | } | ||
389 | |||
390 | static void __insert_dm_origin(struct dm_origin *o) | ||
391 | { | ||
392 | struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)]; | ||
393 | list_add_tail(&o->hash_list, sl); | ||
394 | } | ||
395 | |||
396 | static void __remove_dm_origin(struct dm_origin *o) | ||
397 | { | ||
398 | list_del(&o->hash_list); | ||
399 | } | ||
400 | |||
353 | /* | 401 | /* |
354 | * _origins_lock must be held when calling this function. | 402 | * _origins_lock must be held when calling this function. |
355 | * Returns number of snapshots registered using the supplied cow device, plus: | 403 | * Returns number of snapshots registered using the supplied cow device, plus: |
@@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti) | |||
1840 | static void snapshot_resume(struct dm_target *ti) | 1888 | static void snapshot_resume(struct dm_target *ti) |
1841 | { | 1889 | { |
1842 | struct dm_snapshot *s = ti->private; | 1890 | struct dm_snapshot *s = ti->private; |
1843 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; | 1891 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL; |
1892 | struct dm_origin *o; | ||
1893 | struct mapped_device *origin_md = NULL; | ||
1894 | bool must_restart_merging = false; | ||
1844 | 1895 | ||
1845 | down_read(&_origins_lock); | 1896 | down_read(&_origins_lock); |
1897 | |||
1898 | o = __lookup_dm_origin(s->origin->bdev); | ||
1899 | if (o) | ||
1900 | origin_md = dm_table_get_md(o->ti->table); | ||
1901 | if (!origin_md) { | ||
1902 | (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging); | ||
1903 | if (snap_merging) | ||
1904 | origin_md = dm_table_get_md(snap_merging->ti->table); | ||
1905 | } | ||
1906 | if (origin_md == dm_table_get_md(ti->table)) | ||
1907 | origin_md = NULL; | ||
1908 | if (origin_md) { | ||
1909 | if (dm_hold(origin_md)) | ||
1910 | origin_md = NULL; | ||
1911 | } | ||
1912 | |||
1913 | up_read(&_origins_lock); | ||
1914 | |||
1915 | if (origin_md) { | ||
1916 | dm_internal_suspend_fast(origin_md); | ||
1917 | if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) { | ||
1918 | must_restart_merging = true; | ||
1919 | stop_merge(snap_merging); | ||
1920 | } | ||
1921 | } | ||
1922 | |||
1923 | down_read(&_origins_lock); | ||
1924 | |||
1846 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); | 1925 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); |
1847 | if (snap_src && snap_dest) { | 1926 | if (snap_src && snap_dest) { |
1848 | down_write(&snap_src->lock); | 1927 | down_write(&snap_src->lock); |
@@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti) | |||
1851 | up_write(&snap_dest->lock); | 1930 | up_write(&snap_dest->lock); |
1852 | up_write(&snap_src->lock); | 1931 | up_write(&snap_src->lock); |
1853 | } | 1932 | } |
1933 | |||
1854 | up_read(&_origins_lock); | 1934 | up_read(&_origins_lock); |
1855 | 1935 | ||
1936 | if (origin_md) { | ||
1937 | if (must_restart_merging) | ||
1938 | start_merge(snap_merging); | ||
1939 | dm_internal_resume_fast(origin_md); | ||
1940 | dm_put(origin_md); | ||
1941 | } | ||
1942 | |||
1856 | /* Now we have correct chunk size, reregister */ | 1943 | /* Now we have correct chunk size, reregister */ |
1857 | reregister_snapshot(s); | 1944 | reregister_snapshot(s); |
1858 | 1945 | ||
@@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap, | |||
2133 | * Origin: maps a linear range of a device, with hooks for snapshotting. | 2220 | * Origin: maps a linear range of a device, with hooks for snapshotting. |
2134 | */ | 2221 | */ |
2135 | 2222 | ||
2136 | struct dm_origin { | ||
2137 | struct dm_dev *dev; | ||
2138 | unsigned split_boundary; | ||
2139 | }; | ||
2140 | |||
2141 | /* | 2223 | /* |
2142 | * Construct an origin mapping: <dev_path> | 2224 | * Construct an origin mapping: <dev_path> |
2143 | * The context for an origin is merely a 'struct dm_dev *' | 2225 | * The context for an origin is merely a 'struct dm_dev *' |
@@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
2166 | goto bad_open; | 2248 | goto bad_open; |
2167 | } | 2249 | } |
2168 | 2250 | ||
2251 | o->ti = ti; | ||
2169 | ti->private = o; | 2252 | ti->private = o; |
2170 | ti->num_flush_bios = 1; | 2253 | ti->num_flush_bios = 1; |
2171 | 2254 | ||
@@ -2180,6 +2263,7 @@ bad_alloc: | |||
2180 | static void origin_dtr(struct dm_target *ti) | 2263 | static void origin_dtr(struct dm_target *ti) |
2181 | { | 2264 | { |
2182 | struct dm_origin *o = ti->private; | 2265 | struct dm_origin *o = ti->private; |
2266 | |||
2183 | dm_put_device(ti, o->dev); | 2267 | dm_put_device(ti, o->dev); |
2184 | kfree(o); | 2268 | kfree(o); |
2185 | } | 2269 | } |
@@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti) | |||
2216 | struct dm_origin *o = ti->private; | 2300 | struct dm_origin *o = ti->private; |
2217 | 2301 | ||
2218 | o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); | 2302 | o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); |
2303 | |||
2304 | down_write(&_origins_lock); | ||
2305 | __insert_dm_origin(o); | ||
2306 | up_write(&_origins_lock); | ||
2307 | } | ||
2308 | |||
2309 | static void origin_postsuspend(struct dm_target *ti) | ||
2310 | { | ||
2311 | struct dm_origin *o = ti->private; | ||
2312 | |||
2313 | down_write(&_origins_lock); | ||
2314 | __remove_dm_origin(o); | ||
2315 | up_write(&_origins_lock); | ||
2219 | } | 2316 | } |
2220 | 2317 | ||
2221 | static void origin_status(struct dm_target *ti, status_type_t type, | 2318 | static void origin_status(struct dm_target *ti, status_type_t type, |
@@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti, | |||
2258 | 2355 | ||
2259 | static struct target_type origin_target = { | 2356 | static struct target_type origin_target = { |
2260 | .name = "snapshot-origin", | 2357 | .name = "snapshot-origin", |
2261 | .version = {1, 8, 1}, | 2358 | .version = {1, 9, 0}, |
2262 | .module = THIS_MODULE, | 2359 | .module = THIS_MODULE, |
2263 | .ctr = origin_ctr, | 2360 | .ctr = origin_ctr, |
2264 | .dtr = origin_dtr, | 2361 | .dtr = origin_dtr, |
2265 | .map = origin_map, | 2362 | .map = origin_map, |
2266 | .resume = origin_resume, | 2363 | .resume = origin_resume, |
2364 | .postsuspend = origin_postsuspend, | ||
2267 | .status = origin_status, | 2365 | .status = origin_status, |
2268 | .merge = origin_merge, | 2366 | .merge = origin_merge, |
2269 | .iterate_devices = origin_iterate_devices, | 2367 | .iterate_devices = origin_iterate_devices, |
@@ -2271,7 +2369,7 @@ static struct target_type origin_target = { | |||
2271 | 2369 | ||
2272 | static struct target_type snapshot_target = { | 2370 | static struct target_type snapshot_target = { |
2273 | .name = "snapshot", | 2371 | .name = "snapshot", |
2274 | .version = {1, 12, 0}, | 2372 | .version = {1, 13, 0}, |
2275 | .module = THIS_MODULE, | 2373 | .module = THIS_MODULE, |
2276 | .ctr = snapshot_ctr, | 2374 | .ctr = snapshot_ctr, |
2277 | .dtr = snapshot_dtr, | 2375 | .dtr = snapshot_dtr, |
@@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = { | |||
2285 | 2383 | ||
2286 | static struct target_type merge_target = { | 2384 | static struct target_type merge_target = { |
2287 | .name = dm_snapshot_merge_target_name, | 2385 | .name = dm_snapshot_merge_target_name, |
2288 | .version = {1, 2, 0}, | 2386 | .version = {1, 3, 0}, |
2289 | .module = THIS_MODULE, | 2387 | .module = THIS_MODULE, |
2290 | .ctr = snapshot_ctr, | 2388 | .ctr = snapshot_ctr, |
2291 | .dtr = snapshot_dtr, | 2389 | .dtr = snapshot_dtr, |
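The dm-snap.c change adds a second hash table (_dm_origins) built the same way as _origins: an array of list_head buckets indexed by a hash of the block device. A stripped-down sketch of that allocate-and-initialise pattern, with placeholder names (demo_*), assuming GFP_KERNEL context:

#include <linux/list.h>
#include <linux/slab.h>

#define DEMO_HASH_SIZE	256

static struct list_head *demo_buckets;

static int demo_hash_init(void)
{
	int i;

	demo_buckets = kmalloc(DEMO_HASH_SIZE * sizeof(struct list_head),
			       GFP_KERNEL);
	if (!demo_buckets)
		return -ENOMEM;

	for (i = 0; i < DEMO_HASH_SIZE; i++)
		INIT_LIST_HEAD(demo_buckets + i);

	return 0;
}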
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 654773cb1eee..921aafd12aee 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) | |||
2358 | return DM_MAPIO_REMAPPED; | 2358 | return DM_MAPIO_REMAPPED; |
2359 | 2359 | ||
2360 | case -ENODATA: | 2360 | case -ENODATA: |
2361 | if (get_pool_mode(tc->pool) == PM_READ_ONLY) { | ||
2362 | /* | ||
2363 | * This block isn't provisioned, and we have no way | ||
2364 | * of doing so. | ||
2365 | */ | ||
2366 | handle_unserviceable_bio(tc->pool, bio); | ||
2367 | cell_defer_no_holder(tc, virt_cell); | ||
2368 | return DM_MAPIO_SUBMITTED; | ||
2369 | } | ||
2370 | /* fall through */ | ||
2371 | |||
2372 | case -EWOULDBLOCK: | 2361 | case -EWOULDBLOCK: |
2373 | thin_defer_cell(tc, virt_cell); | 2362 | thin_defer_cell(tc, virt_cell); |
2374 | return DM_MAPIO_SUBMITTED; | 2363 | return DM_MAPIO_SUBMITTED; |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 73f28802dc7a..8001fe9e3434 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) | |||
433 | 433 | ||
434 | dm_get(md); | 434 | dm_get(md); |
435 | atomic_inc(&md->open_count); | 435 | atomic_inc(&md->open_count); |
436 | |||
437 | out: | 436 | out: |
438 | spin_unlock(&_minor_lock); | 437 | spin_unlock(&_minor_lock); |
439 | 438 | ||
@@ -442,16 +441,20 @@ out: | |||
442 | 441 | ||
443 | static void dm_blk_close(struct gendisk *disk, fmode_t mode) | 442 | static void dm_blk_close(struct gendisk *disk, fmode_t mode) |
444 | { | 443 | { |
445 | struct mapped_device *md = disk->private_data; | 444 | struct mapped_device *md; |
446 | 445 | ||
447 | spin_lock(&_minor_lock); | 446 | spin_lock(&_minor_lock); |
448 | 447 | ||
448 | md = disk->private_data; | ||
449 | if (WARN_ON(!md)) | ||
450 | goto out; | ||
451 | |||
449 | if (atomic_dec_and_test(&md->open_count) && | 452 | if (atomic_dec_and_test(&md->open_count) && |
450 | (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) | 453 | (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) |
451 | queue_work(deferred_remove_workqueue, &deferred_remove_work); | 454 | queue_work(deferred_remove_workqueue, &deferred_remove_work); |
452 | 455 | ||
453 | dm_put(md); | 456 | dm_put(md); |
454 | 457 | out: | |
455 | spin_unlock(&_minor_lock); | 458 | spin_unlock(&_minor_lock); |
456 | } | 459 | } |
457 | 460 | ||
@@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md) | |||
2241 | int minor = MINOR(disk_devt(md->disk)); | 2244 | int minor = MINOR(disk_devt(md->disk)); |
2242 | 2245 | ||
2243 | unlock_fs(md); | 2246 | unlock_fs(md); |
2244 | bdput(md->bdev); | ||
2245 | destroy_workqueue(md->wq); | 2247 | destroy_workqueue(md->wq); |
2246 | 2248 | ||
2247 | if (md->kworker_task) | 2249 | if (md->kworker_task) |
@@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md) | |||
2252 | mempool_destroy(md->rq_pool); | 2254 | mempool_destroy(md->rq_pool); |
2253 | if (md->bs) | 2255 | if (md->bs) |
2254 | bioset_free(md->bs); | 2256 | bioset_free(md->bs); |
2255 | blk_integrity_unregister(md->disk); | 2257 | |
2256 | del_gendisk(md->disk); | ||
2257 | cleanup_srcu_struct(&md->io_barrier); | 2258 | cleanup_srcu_struct(&md->io_barrier); |
2258 | free_table_devices(&md->table_devices); | 2259 | free_table_devices(&md->table_devices); |
2259 | free_minor(minor); | 2260 | dm_stats_cleanup(&md->stats); |
2260 | 2261 | ||
2261 | spin_lock(&_minor_lock); | 2262 | spin_lock(&_minor_lock); |
2262 | md->disk->private_data = NULL; | 2263 | md->disk->private_data = NULL; |
2263 | spin_unlock(&_minor_lock); | 2264 | spin_unlock(&_minor_lock); |
2264 | 2265 | if (blk_get_integrity(md->disk)) | |
2266 | blk_integrity_unregister(md->disk); | ||
2267 | del_gendisk(md->disk); | ||
2265 | put_disk(md->disk); | 2268 | put_disk(md->disk); |
2266 | blk_cleanup_queue(md->queue); | 2269 | blk_cleanup_queue(md->queue); |
2267 | dm_stats_cleanup(&md->stats); | 2270 | bdput(md->bdev); |
2271 | free_minor(minor); | ||
2272 | |||
2268 | module_put(THIS_MODULE); | 2273 | module_put(THIS_MODULE); |
2269 | kfree(md); | 2274 | kfree(md); |
2270 | } | 2275 | } |
@@ -2616,6 +2621,19 @@ void dm_get(struct mapped_device *md) | |||
2616 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); | 2621 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); |
2617 | } | 2622 | } |
2618 | 2623 | ||
2624 | int dm_hold(struct mapped_device *md) | ||
2625 | { | ||
2626 | spin_lock(&_minor_lock); | ||
2627 | if (test_bit(DMF_FREEING, &md->flags)) { | ||
2628 | spin_unlock(&_minor_lock); | ||
2629 | return -EBUSY; | ||
2630 | } | ||
2631 | dm_get(md); | ||
2632 | spin_unlock(&_minor_lock); | ||
2633 | return 0; | ||
2634 | } | ||
2635 | EXPORT_SYMBOL_GPL(dm_hold); | ||
2636 | |||
2619 | const char *dm_device_name(struct mapped_device *md) | 2637 | const char *dm_device_name(struct mapped_device *md) |
2620 | { | 2638 | { |
2621 | return md->name; | 2639 | return md->name; |
@@ -2629,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait) | |||
2629 | 2647 | ||
2630 | might_sleep(); | 2648 | might_sleep(); |
2631 | 2649 | ||
2632 | spin_lock(&_minor_lock); | ||
2633 | map = dm_get_live_table(md, &srcu_idx); | 2650 | map = dm_get_live_table(md, &srcu_idx); |
2651 | |||
2652 | spin_lock(&_minor_lock); | ||
2634 | idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); | 2653 | idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); |
2635 | set_bit(DMF_FREEING, &md->flags); | 2654 | set_bit(DMF_FREEING, &md->flags); |
2636 | spin_unlock(&_minor_lock); | 2655 | spin_unlock(&_minor_lock); |
@@ -2638,10 +2657,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait) | |||
2638 | if (dm_request_based(md)) | 2657 | if (dm_request_based(md)) |
2639 | flush_kthread_worker(&md->kworker); | 2658 | flush_kthread_worker(&md->kworker); |
2640 | 2659 | ||
2660 | /* | ||
2661 | * Take suspend_lock so that presuspend and postsuspend methods | ||
2662 | * do not race with internal suspend. | ||
2663 | */ | ||
2664 | mutex_lock(&md->suspend_lock); | ||
2641 | if (!dm_suspended_md(md)) { | 2665 | if (!dm_suspended_md(md)) { |
2642 | dm_table_presuspend_targets(map); | 2666 | dm_table_presuspend_targets(map); |
2643 | dm_table_postsuspend_targets(map); | 2667 | dm_table_postsuspend_targets(map); |
2644 | } | 2668 | } |
2669 | mutex_unlock(&md->suspend_lock); | ||
2645 | 2670 | ||
2646 | /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ | 2671 | /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ |
2647 | dm_put_live_table(md, srcu_idx); | 2672 | dm_put_live_table(md, srcu_idx); |
@@ -3115,6 +3140,7 @@ void dm_internal_suspend_fast(struct mapped_device *md) | |||
3115 | flush_workqueue(md->wq); | 3140 | flush_workqueue(md->wq); |
3116 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); | 3141 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); |
3117 | } | 3142 | } |
3143 | EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); | ||
3118 | 3144 | ||
3119 | void dm_internal_resume_fast(struct mapped_device *md) | 3145 | void dm_internal_resume_fast(struct mapped_device *md) |
3120 | { | 3146 | { |
@@ -3126,6 +3152,7 @@ void dm_internal_resume_fast(struct mapped_device *md) | |||
3126 | done: | 3152 | done: |
3127 | mutex_unlock(&md->suspend_lock); | 3153 | mutex_unlock(&md->suspend_lock); |
3128 | } | 3154 | } |
3155 | EXPORT_SYMBOL_GPL(dm_internal_resume_fast); | ||
3129 | 3156 | ||
3130 | /*----------------------------------------------------------------- | 3157 | /*----------------------------------------------------------------- |
3131 | * Event notification. | 3158 | * Event notification. |
diff --git a/drivers/md/md.c b/drivers/md/md.c index c8d2bac4e28b..717daad71fb1 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -2555,7 +2555,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) | |||
2555 | return err ? err : len; | 2555 | return err ? err : len; |
2556 | } | 2556 | } |
2557 | static struct rdev_sysfs_entry rdev_state = | 2557 | static struct rdev_sysfs_entry rdev_state = |
2558 | __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); | 2558 | __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); |
2559 | 2559 | ||
2560 | static ssize_t | 2560 | static ssize_t |
2561 | errors_show(struct md_rdev *rdev, char *page) | 2561 | errors_show(struct md_rdev *rdev, char *page) |
@@ -3638,7 +3638,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len) | |||
3638 | return err ?: len; | 3638 | return err ?: len; |
3639 | } | 3639 | } |
3640 | static struct md_sysfs_entry md_resync_start = | 3640 | static struct md_sysfs_entry md_resync_start = |
3641 | __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); | 3641 | __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, |
3642 | resync_start_show, resync_start_store); | ||
3642 | 3643 | ||
3643 | /* | 3644 | /* |
3644 | * The array state can be: | 3645 | * The array state can be: |
@@ -3851,7 +3852,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) | |||
3851 | return err ?: len; | 3852 | return err ?: len; |
3852 | } | 3853 | } |
3853 | static struct md_sysfs_entry md_array_state = | 3854 | static struct md_sysfs_entry md_array_state = |
3854 | __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); | 3855 | __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); |
3855 | 3856 | ||
3856 | static ssize_t | 3857 | static ssize_t |
3857 | max_corrected_read_errors_show(struct mddev *mddev, char *page) { | 3858 | max_corrected_read_errors_show(struct mddev *mddev, char *page) { |
@@ -4101,7 +4102,7 @@ out_unlock: | |||
4101 | } | 4102 | } |
4102 | 4103 | ||
4103 | static struct md_sysfs_entry md_metadata = | 4104 | static struct md_sysfs_entry md_metadata = |
4104 | __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); | 4105 | __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); |
4105 | 4106 | ||
4106 | static ssize_t | 4107 | static ssize_t |
4107 | action_show(struct mddev *mddev, char *page) | 4108 | action_show(struct mddev *mddev, char *page) |
@@ -4189,7 +4190,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) | |||
4189 | } | 4190 | } |
4190 | 4191 | ||
4191 | static struct md_sysfs_entry md_scan_mode = | 4192 | static struct md_sysfs_entry md_scan_mode = |
4192 | __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); | 4193 | __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); |
4193 | 4194 | ||
4194 | static ssize_t | 4195 | static ssize_t |
4195 | last_sync_action_show(struct mddev *mddev, char *page) | 4196 | last_sync_action_show(struct mddev *mddev, char *page) |
@@ -4335,7 +4336,8 @@ sync_completed_show(struct mddev *mddev, char *page) | |||
4335 | return sprintf(page, "%llu / %llu\n", resync, max_sectors); | 4336 | return sprintf(page, "%llu / %llu\n", resync, max_sectors); |
4336 | } | 4337 | } |
4337 | 4338 | ||
4338 | static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); | 4339 | static struct md_sysfs_entry md_sync_completed = |
4340 | __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL); | ||
4339 | 4341 | ||
4340 | static ssize_t | 4342 | static ssize_t |
4341 | min_sync_show(struct mddev *mddev, char *page) | 4343 | min_sync_show(struct mddev *mddev, char *page) |
@@ -5078,7 +5080,8 @@ int md_run(struct mddev *mddev) | |||
5078 | } | 5080 | } |
5079 | if (err) { | 5081 | if (err) { |
5080 | mddev_detach(mddev); | 5082 | mddev_detach(mddev); |
5081 | pers->free(mddev, mddev->private); | 5083 | if (mddev->private) |
5084 | pers->free(mddev, mddev->private); | ||
5082 | module_put(pers->owner); | 5085 | module_put(pers->owner); |
5083 | bitmap_destroy(mddev); | 5086 | bitmap_destroy(mddev); |
5084 | return err; | 5087 | return err; |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a13f738a7b39..3ed9f42ddca6 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -467,8 +467,6 @@ static int raid0_run(struct mddev *mddev) | |||
467 | dump_zones(mddev); | 467 | dump_zones(mddev); |
468 | 468 | ||
469 | ret = md_integrity_register(mddev); | 469 | ret = md_integrity_register(mddev); |
470 | if (ret) | ||
471 | raid0_free(mddev, conf); | ||
472 | 470 | ||
473 | return ret; | 471 | return ret; |
474 | } | 472 | } |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4153da5d4011..d34e238afa54 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect | |||
560 | if (test_bit(WriteMostly, &rdev->flags)) { | 560 | if (test_bit(WriteMostly, &rdev->flags)) { |
561 | /* Don't balance among write-mostly, just | 561 | /* Don't balance among write-mostly, just |
562 | * use the first as a last resort */ | 562 | * use the first as a last resort */ |
563 | if (best_disk < 0) { | 563 | if (best_dist_disk < 0) { |
564 | if (is_badblock(rdev, this_sector, sectors, | 564 | if (is_badblock(rdev, this_sector, sectors, |
565 | &first_bad, &bad_sectors)) { | 565 | &first_bad, &bad_sectors)) { |
566 | if (first_bad < this_sector) | 566 | if (first_bad < this_sector) |
@@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect | |||
569 | best_good_sectors = first_bad - this_sector; | 569 | best_good_sectors = first_bad - this_sector; |
570 | } else | 570 | } else |
571 | best_good_sectors = sectors; | 571 | best_good_sectors = sectors; |
572 | best_disk = disk; | 572 | best_dist_disk = disk; |
573 | best_pending_disk = disk; | ||
573 | } | 574 | } |
574 | continue; | 575 | continue; |
575 | } | 576 | } |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e75d48c0421a..cd2f96b2c572 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -5121,12 +5121,17 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int | |||
5121 | schedule_timeout_uninterruptible(1); | 5121 | schedule_timeout_uninterruptible(1); |
5122 | } | 5122 | } |
5123 | /* Need to check if array will still be degraded after recovery/resync | 5123 | /* Need to check if array will still be degraded after recovery/resync |
5124 | * We don't need to check the 'failed' flag as when that gets set, | 5124 | * Note in case of > 1 drive failures it's possible we're rebuilding |
5125 | * recovery aborts. | 5125 | * one drive while leaving another faulty drive in array. |
5126 | */ | 5126 | */ |
5127 | for (i = 0; i < conf->raid_disks; i++) | 5127 | rcu_read_lock(); |
5128 | if (conf->disks[i].rdev == NULL) | 5128 | for (i = 0; i < conf->raid_disks; i++) { |
5129 | struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); | ||
5130 | |||
5131 | if (rdev == NULL || test_bit(Faulty, &rdev->flags)) | ||
5129 | still_degraded = 1; | 5132 | still_degraded = 1; |
5133 | } | ||
5134 | rcu_read_unlock(); | ||
5130 | 5135 | ||
5131 | bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); | 5136 | bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); |
5132 | 5137 | ||
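The raid5.c hunk scans conf->disks[] under rcu_read_lock() and snapshots each rdev pointer with ACCESS_ONCE() so a concurrent hot-remove cannot change the pointer between the NULL check and the flag test. A stripped-down sketch of that read-side pattern; the structures are placeholders, not the md data model.

#include <linux/rcupdate.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct demo_dev;			/* opaque, managed by the removal path */

struct demo_slot {
	struct demo_dev *dev;		/* cleared concurrently on hot-remove */
};

static bool demo_any_empty(struct demo_slot *slots, int nr)
{
	bool empty = false;
	int i;

	rcu_read_lock();
	for (i = 0; i < nr; i++) {
		struct demo_dev *dev = ACCESS_ONCE(slots[i].dev);

		if (!dev)
			empty = true;
	}
	rcu_read_unlock();

	return empty;
}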
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index 5d2d8f45b4b6..67faa8d6950e 100644 --- a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c | |||
@@ -1240,7 +1240,7 @@ static int rtl2832_probe(struct i2c_client *client, | |||
1240 | dev->regmap_config.max_register = 5 * 0x100, | 1240 | dev->regmap_config.max_register = 5 * 0x100, |
1241 | dev->regmap_config.ranges = regmap_range_cfg, | 1241 | dev->regmap_config.ranges = regmap_range_cfg, |
1242 | dev->regmap_config.num_ranges = ARRAY_SIZE(regmap_range_cfg), | 1242 | dev->regmap_config.num_ranges = ARRAY_SIZE(regmap_range_cfg), |
1243 | dev->regmap_config.cache_type = REGCACHE_RBTREE, | 1243 | dev->regmap_config.cache_type = REGCACHE_NONE, |
1244 | dev->regmap = regmap_init(&client->dev, ®map_bus, client, | 1244 | dev->regmap = regmap_init(&client->dev, ®map_bus, client, |
1245 | &dev->regmap_config); | 1245 | &dev->regmap_config); |
1246 | if (IS_ERR(dev->regmap)) { | 1246 | if (IS_ERR(dev->regmap)) { |
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index e4901a503c73..63c0ee5d0bf5 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c | |||
@@ -1339,14 +1339,13 @@ static int vidioc_querycap(struct file *file, void *priv, | |||
1339 | strlcpy(cap->driver, dev->name, sizeof(cap->driver)); | 1339 | strlcpy(cap->driver, dev->name, sizeof(cap->driver)); |
1340 | strlcpy(cap->card, cx23885_boards[tsport->dev->board].name, | 1340 | strlcpy(cap->card, cx23885_boards[tsport->dev->board].name, |
1341 | sizeof(cap->card)); | 1341 | sizeof(cap->card)); |
1342 | sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci)); | 1342 | sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci)); |
1343 | cap->capabilities = | 1343 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | |
1344 | V4L2_CAP_VIDEO_CAPTURE | | 1344 | V4L2_CAP_STREAMING; |
1345 | V4L2_CAP_READWRITE | | ||
1346 | V4L2_CAP_STREAMING | | ||
1347 | 0; | ||
1348 | if (dev->tuner_type != TUNER_ABSENT) | 1345 | if (dev->tuner_type != TUNER_ABSENT) |
1349 | cap->capabilities |= V4L2_CAP_TUNER; | 1346 | cap->device_caps |= V4L2_CAP_TUNER; |
1347 | cap->capabilities = cap->device_caps | V4L2_CAP_VBI_CAPTURE | | ||
1348 | V4L2_CAP_AUDIO | V4L2_CAP_DEVICE_CAPS; | ||
1350 | 1349 | ||
1351 | return 0; | 1350 | return 0; |
1352 | } | 1351 | } |
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index 12f7452edce3..a92ff4249d10 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c | |||
@@ -1845,6 +1845,9 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) | |||
1845 | struct s5p_jpeg_addr jpeg_addr; | 1845 | struct s5p_jpeg_addr jpeg_addr; |
1846 | u32 pix_size, padding_bytes = 0; | 1846 | u32 pix_size, padding_bytes = 0; |
1847 | 1847 | ||
1848 | jpeg_addr.cb = 0; | ||
1849 | jpeg_addr.cr = 0; | ||
1850 | |||
1848 | pix_size = ctx->cap_q.w * ctx->cap_q.h; | 1851 | pix_size = ctx->cap_q.w * ctx->cap_q.h; |
1849 | 1852 | ||
1850 | if (ctx->mode == S5P_JPEG_ENCODE) { | 1853 | if (ctx->mode == S5P_JPEG_ENCODE) { |
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c index e8c2cad93962..0974b9a7a584 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c | |||
@@ -20,7 +20,7 @@ | |||
20 | 20 | ||
21 | void exynos3250_jpeg_reset(void __iomem *regs) | 21 | void exynos3250_jpeg_reset(void __iomem *regs) |
22 | { | 22 | { |
23 | u32 reg = 0; | 23 | u32 reg = 1; |
24 | int count = 1000; | 24 | int count = 1000; |
25 | 25 | ||
26 | writel(1, regs + EXYNOS3250_SW_RESET); | 26 | writel(1, regs + EXYNOS3250_SW_RESET); |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 8e44a59d8ec2..98374e8bad3e 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c | |||
@@ -833,6 +833,7 @@ static int s5p_mfc_open(struct file *file) | |||
833 | q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | 833 | q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
834 | q->io_modes = VB2_MMAP; | 834 | q->io_modes = VB2_MMAP; |
835 | q->drv_priv = &ctx->fh; | 835 | q->drv_priv = &ctx->fh; |
836 | q->lock = &dev->mfc_mutex; | ||
836 | if (vdev == dev->vfd_dec) { | 837 | if (vdev == dev->vfd_dec) { |
837 | q->io_modes = VB2_MMAP; | 838 | q->io_modes = VB2_MMAP; |
838 | q->ops = get_dec_queue_ops(); | 839 | q->ops = get_dec_queue_ops(); |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h index 15f7663dd9f5..24262bbb1a35 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | /* Offset base used to differentiate between CAPTURE and OUTPUT | 30 | /* Offset base used to differentiate between CAPTURE and OUTPUT |
31 | * while mmaping */ | 31 | * while mmaping */ |
32 | #define DST_QUEUE_OFF_BASE (TASK_SIZE / 2) | 32 | #define DST_QUEUE_OFF_BASE (1 << 30) |
33 | 33 | ||
34 | #define MFC_BANK1_ALLOC_CTX 0 | 34 | #define MFC_BANK1_ALLOC_CTX 0 |
35 | #define MFC_BANK2_ALLOC_CTX 1 | 35 | #define MFC_BANK2_ALLOC_CTX 1 |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h index de2b8c69daa5..22dfb3effda8 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h | |||
@@ -302,7 +302,7 @@ struct s5p_mfc_hw_ops { | |||
302 | void (*write_info)(struct s5p_mfc_ctx *ctx, unsigned int data, | 302 | void (*write_info)(struct s5p_mfc_ctx *ctx, unsigned int data, |
303 | unsigned int ofs); | 303 | unsigned int ofs); |
304 | unsigned int (*read_info)(struct s5p_mfc_ctx *ctx, | 304 | unsigned int (*read_info)(struct s5p_mfc_ctx *ctx, |
305 | unsigned int ofs); | 305 | unsigned long ofs); |
306 | int (*get_dspl_y_adr)(struct s5p_mfc_dev *dev); | 306 | int (*get_dspl_y_adr)(struct s5p_mfc_dev *dev); |
307 | int (*get_dec_y_adr)(struct s5p_mfc_dev *dev); | 307 | int (*get_dec_y_adr)(struct s5p_mfc_dev *dev); |
308 | int (*get_dspl_status)(struct s5p_mfc_dev *dev); | 308 | int (*get_dspl_status)(struct s5p_mfc_dev *dev); |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c index 0c4fcf2dfd09..b09bcd140491 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c | |||
@@ -263,15 +263,15 @@ static void s5p_mfc_release_dev_context_buffer_v5(struct s5p_mfc_dev *dev) | |||
263 | static void s5p_mfc_write_info_v5(struct s5p_mfc_ctx *ctx, unsigned int data, | 263 | static void s5p_mfc_write_info_v5(struct s5p_mfc_ctx *ctx, unsigned int data, |
264 | unsigned int ofs) | 264 | unsigned int ofs) |
265 | { | 265 | { |
266 | writel(data, (volatile void __iomem *)(ctx->shm.virt + ofs)); | 266 | writel(data, (void *)(ctx->shm.virt + ofs)); |
267 | wmb(); | 267 | wmb(); |
268 | } | 268 | } |
269 | 269 | ||
270 | static unsigned int s5p_mfc_read_info_v5(struct s5p_mfc_ctx *ctx, | 270 | static unsigned int s5p_mfc_read_info_v5(struct s5p_mfc_ctx *ctx, |
271 | unsigned int ofs) | 271 | unsigned long ofs) |
272 | { | 272 | { |
273 | rmb(); | 273 | rmb(); |
274 | return readl((volatile void __iomem *)(ctx->shm.virt + ofs)); | 274 | return readl((void *)(ctx->shm.virt + ofs)); |
275 | } | 275 | } |
276 | 276 | ||
277 | static void s5p_mfc_dec_calc_dpb_size_v5(struct s5p_mfc_ctx *ctx) | 277 | static void s5p_mfc_dec_calc_dpb_size_v5(struct s5p_mfc_ctx *ctx) |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index d826c58b5d53..cefad184fe96 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c | |||
@@ -1852,17 +1852,17 @@ static void s5p_mfc_write_info_v6(struct s5p_mfc_ctx *ctx, unsigned int data, | |||
1852 | unsigned int ofs) | 1852 | unsigned int ofs) |
1853 | { | 1853 | { |
1854 | s5p_mfc_clock_on(); | 1854 | s5p_mfc_clock_on(); |
1855 | writel(data, (volatile void __iomem *)((unsigned long)ofs)); | 1855 | writel(data, (void *)((unsigned long)ofs)); |
1856 | s5p_mfc_clock_off(); | 1856 | s5p_mfc_clock_off(); |
1857 | } | 1857 | } |
1858 | 1858 | ||
1859 | static unsigned int | 1859 | static unsigned int |
1860 | s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned int ofs) | 1860 | s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned long ofs) |
1861 | { | 1861 | { |
1862 | int ret; | 1862 | int ret; |
1863 | 1863 | ||
1864 | s5p_mfc_clock_on(); | 1864 | s5p_mfc_clock_on(); |
1865 | ret = readl((volatile void __iomem *)((unsigned long)ofs)); | 1865 | ret = readl((void *)ofs); |
1866 | s5p_mfc_clock_off(); | 1866 | s5p_mfc_clock_off(); |
1867 | 1867 | ||
1868 | return ret; | 1868 | return ret; |
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig index 5a1835dd65e8..697aaed42486 100644 --- a/drivers/media/platform/s5p-tv/Kconfig +++ b/drivers/media/platform/s5p-tv/Kconfig | |||
@@ -20,6 +20,7 @@ if VIDEO_SAMSUNG_S5P_TV | |||
20 | config VIDEO_SAMSUNG_S5P_HDMI | 20 | config VIDEO_SAMSUNG_S5P_HDMI |
21 | tristate "Samsung HDMI Driver" | 21 | tristate "Samsung HDMI Driver" |
22 | depends on VIDEO_V4L2 | 22 | depends on VIDEO_V4L2 |
23 | depends on I2C | ||
23 | depends on VIDEO_SAMSUNG_S5P_TV | 24 | depends on VIDEO_SAMSUNG_S5P_TV |
24 | select VIDEO_SAMSUNG_S5P_HDMIPHY | 25 | select VIDEO_SAMSUNG_S5P_HDMIPHY |
25 | help | 26 | help |
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c index a901b6248557..2554f3719b9e 100644 --- a/drivers/media/platform/sh_veu.c +++ b/drivers/media/platform/sh_veu.c | |||
@@ -1158,6 +1158,7 @@ static int sh_veu_probe(struct platform_device *pdev) | |||
1158 | } | 1158 | } |
1159 | 1159 | ||
1160 | *vdev = sh_veu_videodev; | 1160 | *vdev = sh_veu_videodev; |
1161 | vdev->v4l2_dev = &veu->v4l2_dev; | ||
1161 | spin_lock_init(&veu->lock); | 1162 | spin_lock_init(&veu->lock); |
1162 | mutex_init(&veu->fop_lock); | 1163 | mutex_init(&veu->fop_lock); |
1163 | vdev->lock = &veu->fop_lock; | 1164 | vdev->lock = &veu->fop_lock; |
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c index 8526bf5c8429..c835beb2a1a8 100644 --- a/drivers/media/platform/soc_camera/atmel-isi.c +++ b/drivers/media/platform/soc_camera/atmel-isi.c | |||
@@ -843,6 +843,8 @@ static int isi_camera_set_bus_param(struct soc_camera_device *icd) | |||
843 | if (isi->pdata.full_mode) | 843 | if (isi->pdata.full_mode) |
844 | cfg1 |= ISI_CFG1_FULL_MODE; | 844 | cfg1 |= ISI_CFG1_FULL_MODE; |
845 | 845 | ||
846 | cfg1 |= ISI_CFG1_THMASK_BEATS_16; | ||
847 | |||
846 | isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS); | 848 | isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS); |
847 | isi_writel(isi, ISI_CFG1, cfg1); | 849 | isi_writel(isi, ISI_CFG1, cfg1); |
848 | 850 | ||
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index cee7b56f8404..66634b469c98 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c | |||
@@ -1665,7 +1665,7 @@ eclkreg: | |||
1665 | eaddpdev: | 1665 | eaddpdev: |
1666 | platform_device_put(sasc->pdev); | 1666 | platform_device_put(sasc->pdev); |
1667 | eallocpdev: | 1667 | eallocpdev: |
1668 | devm_kfree(ici->v4l2_dev.dev, sasc); | 1668 | devm_kfree(ici->v4l2_dev.dev, info); |
1669 | dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret); | 1669 | dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret); |
1670 | 1670 | ||
1671 | return ret; | 1671 | return ret; |
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 77dcfdf547ac..87fc0fe29ebd 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c | |||
@@ -780,8 +780,6 @@ static int rtl2832u_frontend_callback(void *adapter_priv, int component, | |||
780 | case TUNER_RTL2832_TUA9001: | 780 | case TUNER_RTL2832_TUA9001: |
781 | return rtl2832u_tua9001_tuner_callback(d, cmd, arg); | 781 | return rtl2832u_tua9001_tuner_callback(d, cmd, arg); |
782 | } | 782 | } |
783 | default: | ||
784 | return -EINVAL; | ||
785 | } | 783 | } |
786 | 784 | ||
787 | return 0; | 785 | return 0; |
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig index 60af3b167f3b..3fd94fe7e1eb 100644 --- a/drivers/media/usb/gspca/Kconfig +++ b/drivers/media/usb/gspca/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | menuconfig USB_GSPCA | 1 | menuconfig USB_GSPCA |
2 | tristate "GSPCA based webcams" | 2 | tristate "GSPCA based webcams" |
3 | depends on VIDEO_V4L2 | 3 | depends on VIDEO_V4L2 |
4 | depends on INPUT || INPUT=n | ||
4 | default m | 5 | default m |
5 | ---help--- | 6 | ---help--- |
6 | Say Y here if you want to enable selecting webcams based | 7 | Say Y here if you want to enable selecting webcams based |
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index bc08a829bc13..cc16e76a2493 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
@@ -3230,18 +3230,13 @@ int vb2_thread_stop(struct vb2_queue *q) | |||
3230 | 3230 | ||
3231 | if (threadio == NULL) | 3231 | if (threadio == NULL) |
3232 | return 0; | 3232 | return 0; |
3233 | call_void_qop(q, wait_finish, q); | ||
3234 | threadio->stop = true; | 3233 | threadio->stop = true; |
3235 | vb2_internal_streamoff(q, q->type); | 3234 | /* Wake up all pending sleeps in the thread */ |
3236 | call_void_qop(q, wait_prepare, q); | 3235 | vb2_queue_error(q); |
3237 | err = kthread_stop(threadio->thread); | 3236 | err = kthread_stop(threadio->thread); |
3238 | q->fileio = NULL; | 3237 | __vb2_cleanup_fileio(q); |
3239 | fileio->req.count = 0; | ||
3240 | vb2_reqbufs(q, &fileio->req); | ||
3241 | kfree(fileio); | ||
3242 | threadio->thread = NULL; | 3238 | threadio->thread = NULL; |
3243 | kfree(threadio); | 3239 | kfree(threadio); |
3244 | q->fileio = NULL; | ||
3245 | q->threadio = NULL; | 3240 | q->threadio = NULL; |
3246 | return err; | 3241 | return err; |
3247 | } | 3242 | } |
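
The vb2_thread_stop() rework above follows the usual kthread shutdown pattern: set a stop flag, wake any pending sleep so the worker can observe it (here via vb2_queue_error()), reap the thread with kthread_stop(), and only then tear down the fileio state. A minimal stand-alone sketch of that pattern, with illustrative names rather than the vb2 internals:

#include <linux/kthread.h>
#include <linux/wait.h>

struct demo_ctx {
    wait_queue_head_t wq;   /* initialised elsewhere with init_waitqueue_head() */
    bool stop;
    bool have_work;
};

static int demo_thread(void *data)
{
    struct demo_ctx *ctx = data;

    while (!kthread_should_stop()) {
        /* sleep until there is work, a stop request, or kthread_stop() */
        wait_event_interruptible(ctx->wq,
                                 ctx->have_work || ctx->stop ||
                                 kthread_should_stop());
        if (ctx->stop)
            break;
        /* ... process one buffer ... */
        ctx->have_work = false;
    }
    return 0;
}

static void demo_stop(struct demo_ctx *ctx, struct task_struct *task)
{
    ctx->stop = true;                /* like threadio->stop = true */
    wake_up_interruptible(&ctx->wq); /* like vb2_queue_error(): wake all sleepers */
    kthread_stop(task);              /* join the worker */
    /* tear down queue/fileio state only after the thread is gone */
}
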
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index b481d20c8372..69e0483adfee 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c | |||
@@ -632,8 +632,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, | |||
632 | } | 632 | } |
633 | 633 | ||
634 | /* extract page list from userspace mapping */ | 634 | /* extract page list from userspace mapping */ |
635 | ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, | 635 | ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir); |
636 | dma_dir == DMA_FROM_DEVICE); | ||
637 | if (ret) { | 636 | if (ret) { |
638 | unsigned long pfn; | 637 | unsigned long pfn; |
639 | if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { | 638 | if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { |
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index f38ec424872e..5615522f8d62 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c | |||
@@ -739,7 +739,7 @@ static int __init kempld_init(void) | |||
739 | for (id = kempld_dmi_table; | 739 | for (id = kempld_dmi_table; |
740 | id->matches[0].slot != DMI_NONE; id++) | 740 | id->matches[0].slot != DMI_NONE; id++) |
741 | if (strstr(id->ident, force_device_id)) | 741 | if (strstr(id->ident, force_device_id)) |
742 | if (id->callback && id->callback(id)) | 742 | if (id->callback && !id->callback(id)) |
743 | break; | 743 | break; |
744 | if (id->matches[0].slot == DMI_NONE) | 744 | if (id->matches[0].slot == DMI_NONE) |
745 | return -ENODEV; | 745 | return -ENODEV; |
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c index ede50244f265..dbd907d7170e 100644 --- a/drivers/mfd/rtsx_usb.c +++ b/drivers/mfd/rtsx_usb.c | |||
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register); | |||
196 | int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) | 196 | int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) |
197 | { | 197 | { |
198 | u16 value; | 198 | u16 value; |
199 | u8 *buf; | ||
200 | int ret; | ||
199 | 201 | ||
200 | if (!data) | 202 | if (!data) |
201 | return -EINVAL; | 203 | return -EINVAL; |
202 | *data = 0; | 204 | |
205 | buf = kzalloc(sizeof(u8), GFP_KERNEL); | ||
206 | if (!buf) | ||
207 | return -ENOMEM; | ||
203 | 208 | ||
204 | addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; | 209 | addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; |
205 | value = swab16(addr); | 210 | value = swab16(addr); |
206 | 211 | ||
207 | return usb_control_msg(ucr->pusb_dev, | 212 | ret = usb_control_msg(ucr->pusb_dev, |
208 | usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, | 213 | usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, |
209 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 214 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
210 | value, 0, data, 1, 100); | 215 | value, 0, buf, 1, 100); |
216 | *data = *buf; | ||
217 | |||
218 | kfree(buf); | ||
219 | return ret; | ||
211 | } | 220 | } |
212 | EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); | 221 | EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); |
213 | 222 | ||
@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status) | |||
288 | int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) | 297 | int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) |
289 | { | 298 | { |
290 | int ret; | 299 | int ret; |
300 | u16 *buf; | ||
291 | 301 | ||
292 | if (!status) | 302 | if (!status) |
293 | return -EINVAL; | 303 | return -EINVAL; |
294 | 304 | ||
295 | if (polling_pipe == 0) | 305 | if (polling_pipe == 0) { |
306 | buf = kzalloc(sizeof(u16), GFP_KERNEL); | ||
307 | if (!buf) | ||
308 | return -ENOMEM; | ||
309 | |||
296 | ret = usb_control_msg(ucr->pusb_dev, | 310 | ret = usb_control_msg(ucr->pusb_dev, |
297 | usb_rcvctrlpipe(ucr->pusb_dev, 0), | 311 | usb_rcvctrlpipe(ucr->pusb_dev, 0), |
298 | RTSX_USB_REQ_POLL, | 312 | RTSX_USB_REQ_POLL, |
299 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 313 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
300 | 0, 0, status, 2, 100); | 314 | 0, 0, buf, 2, 100); |
301 | else | 315 | *status = *buf; |
316 | |||
317 | kfree(buf); | ||
318 | } else { | ||
302 | ret = rtsx_usb_get_status_with_bulk(ucr, status); | 319 | ret = rtsx_usb_get_status_with_bulk(ucr, status); |
320 | } | ||
303 | 321 | ||
304 | /* usb_control_msg may return positive when success */ | 322 | /* usb_control_msg may return positive when success */ |
305 | if (ret < 0) | 323 | if (ret < 0) |
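
Both rtsx_usb hunks above replace a caller-supplied pointer with a small kzalloc'd bounce buffer before handing it to usb_control_msg(), since USB transfer buffers must be DMA-capable and the caller's storage may live on the stack. A reduced sketch of the same pattern; the request code and function name are placeholders:

#include <linux/slab.h>
#include <linux/usb.h>

static int demo_read_byte(struct usb_device *udev, u16 value, u8 *out)
{
    u8 *buf;
    int ret;

    buf = kzalloc(1, GFP_KERNEL);   /* DMA-safe bounce buffer */
    if (!buf)
        return -ENOMEM;

    ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                          0x01 /* bRequest, placeholder */,
                          USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                          value, 0, buf, 1, 100);
    if (ret >= 0)
        *out = *buf;                /* copy back to the caller's storage */

    kfree(buf);
    return ret < 0 ? ret : 0;
}
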
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 38552a31304a..65fed7146e9b 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c | |||
@@ -202,16 +202,17 @@ static void enclosure_remove_links(struct enclosure_component *cdev) | |||
202 | { | 202 | { |
203 | char name[ENCLOSURE_NAME_SIZE]; | 203 | char name[ENCLOSURE_NAME_SIZE]; |
204 | 204 | ||
205 | enclosure_link_name(cdev, name); | ||
206 | |||
205 | /* | 207 | /* |
206 | * In odd circumstances, like multipath devices, something else may | 208 | * In odd circumstances, like multipath devices, something else may |
207 | * already have removed the links, so check for this condition first. | 209 | * already have removed the links, so check for this condition first. |
208 | */ | 210 | */ |
209 | if (!cdev->dev->kobj.sd) | 211 | if (cdev->dev->kobj.sd) |
210 | return; | 212 | sysfs_remove_link(&cdev->dev->kobj, name); |
211 | 213 | ||
212 | enclosure_link_name(cdev, name); | 214 | if (cdev->cdev.kobj.sd) |
213 | sysfs_remove_link(&cdev->dev->kobj, name); | 215 | sysfs_remove_link(&cdev->cdev.kobj, "device"); |
214 | sysfs_remove_link(&cdev->cdev.kobj, "device"); | ||
215 | } | 216 | } |
216 | 217 | ||
217 | static int enclosure_add_links(struct enclosure_component *cdev) | 218 | static int enclosure_add_links(struct enclosure_component *cdev) |
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 9306219d5675..6ad049a08e4d 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
@@ -341,6 +341,8 @@ void mei_stop(struct mei_device *dev) | |||
341 | 341 | ||
342 | dev->dev_state = MEI_DEV_POWER_DOWN; | 342 | dev->dev_state = MEI_DEV_POWER_DOWN; |
343 | mei_reset(dev); | 343 | mei_reset(dev); |
344 | /* move device to disabled state unconditionally */ | ||
345 | dev->dev_state = MEI_DEV_DISABLED; | ||
344 | 346 | ||
345 | mutex_unlock(&dev->device_lock); | 347 | mutex_unlock(&dev->device_lock); |
346 | 348 | ||
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c index e9f1d8d84613..c53f14a7ce54 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c | |||
@@ -124,7 +124,7 @@ int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev) | |||
124 | PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) { | 124 | PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) { |
125 | ret = PTR_ERR(pwrseq->reset_gpios[i]); | 125 | ret = PTR_ERR(pwrseq->reset_gpios[i]); |
126 | 126 | ||
127 | while (--i) | 127 | while (i--) |
128 | gpiod_put(pwrseq->reset_gpios[i]); | 128 | gpiod_put(pwrseq->reset_gpios[i]); |
129 | 129 | ||
130 | goto clk_put; | 130 | goto clk_put; |
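
The pwrseq_simple fix above is the classic error-path unwind off-by-one: with "while (--i)" the descriptor at index 0 is never released, and a failure at i == 0 decrements to -1 and walks off the array. A reduced version of the corrected loop:

#include <linux/gpio/consumer.h>

static void demo_unwind(struct gpio_desc **descs, int i)
{
    /*
     * i is the index whose acquisition failed; indices 0..i-1 were
     * acquired and must be released.  "while (i--)" visits exactly
     * i-1 down to 0 and does nothing when i == 0.
     */
    while (i--)
        gpiod_put(descs[i]);
}
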
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 5b76a173cd95..5897d8d8fa5a 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -526,6 +526,7 @@ config MTD_NAND_SUNXI | |||
526 | 526 | ||
527 | config MTD_NAND_HISI504 | 527 | config MTD_NAND_HISI504 |
528 | tristate "Support for NAND controller on Hisilicon SoC Hip04" | 528 | tristate "Support for NAND controller on Hisilicon SoC Hip04" |
529 | depends on HAS_DMA | ||
529 | help | 530 | help |
530 | Enables support for NAND controller on Hisilicon SoC Hip04. | 531 | Enables support for NAND controller on Hisilicon SoC Hip04. |
531 | 532 | ||
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 96b0b1d27df1..10b1f7a4fe50 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | |||
480 | nand_writel(info, NDCR, ndcr | int_mask); | 480 | nand_writel(info, NDCR, ndcr | int_mask); |
481 | } | 481 | } |
482 | 482 | ||
483 | static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len) | ||
484 | { | ||
485 | if (info->ecc_bch) { | ||
486 | int timeout; | ||
487 | |||
488 | /* | ||
489 | * According to the datasheet, when reading from NDDB | ||
490 | * with BCH enabled, after each 32-byte read, we | ||
491 | * have to make sure that the NDSR.RDDREQ bit is set. | ||
492 | * | ||
493 | * Drain the FIFO 8 32 bits reads at a time, and skip | ||
494 | * the polling on the last read. | ||
495 | */ | ||
496 | while (len > 8) { | ||
497 | __raw_readsl(info->mmio_base + NDDB, data, 8); | ||
498 | |||
499 | for (timeout = 0; | ||
500 | !(nand_readl(info, NDSR) & NDSR_RDDREQ); | ||
501 | timeout++) { | ||
502 | if (timeout >= 5) { | ||
503 | dev_err(&info->pdev->dev, | ||
504 | "Timeout on RDDREQ while draining the FIFO\n"); | ||
505 | return; | ||
506 | } | ||
507 | |||
508 | mdelay(1); | ||
509 | } | ||
510 | |||
511 | data += 32; | ||
512 | len -= 8; | ||
513 | } | ||
514 | } | ||
515 | |||
516 | __raw_readsl(info->mmio_base + NDDB, data, len); | ||
517 | } | ||
518 | |||
483 | static void handle_data_pio(struct pxa3xx_nand_info *info) | 519 | static void handle_data_pio(struct pxa3xx_nand_info *info) |
484 | { | 520 | { |
485 | unsigned int do_bytes = min(info->data_size, info->chunk_size); | 521 | unsigned int do_bytes = min(info->data_size, info->chunk_size); |
@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) | |||
496 | DIV_ROUND_UP(info->oob_size, 4)); | 532 | DIV_ROUND_UP(info->oob_size, 4)); |
497 | break; | 533 | break; |
498 | case STATE_PIO_READING: | 534 | case STATE_PIO_READING: |
499 | __raw_readsl(info->mmio_base + NDDB, | 535 | drain_fifo(info, |
500 | info->data_buff + info->data_buff_pos, | 536 | info->data_buff + info->data_buff_pos, |
501 | DIV_ROUND_UP(do_bytes, 4)); | 537 | DIV_ROUND_UP(do_bytes, 4)); |
502 | 538 | ||
503 | if (info->oob_size > 0) | 539 | if (info->oob_size > 0) |
504 | __raw_readsl(info->mmio_base + NDDB, | 540 | drain_fifo(info, |
505 | info->oob_buff + info->oob_buff_pos, | 541 | info->oob_buff + info->oob_buff_pos, |
506 | DIV_ROUND_UP(info->oob_size, 4)); | 542 | DIV_ROUND_UP(info->oob_size, 4)); |
507 | break; | 543 | break; |
508 | default: | 544 | default: |
509 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, | 545 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, |
@@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev) | |||
1572 | int ret, irq, cs; | 1608 | int ret, irq, cs; |
1573 | 1609 | ||
1574 | pdata = dev_get_platdata(&pdev->dev); | 1610 | pdata = dev_get_platdata(&pdev->dev); |
1611 | if (pdata->num_cs <= 0) | ||
1612 | return -ENODEV; | ||
1575 | info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + | 1613 | info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + |
1576 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); | 1614 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); |
1577 | if (!info) | 1615 | if (!info) |
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index da4c79259f67..16e34b37d134 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c | |||
@@ -425,9 +425,10 @@ retry: | |||
425 | ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", | 425 | ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", |
426 | pnum, vol_id, lnum); | 426 | pnum, vol_id, lnum); |
427 | err = -EBADMSG; | 427 | err = -EBADMSG; |
428 | } else | 428 | } else { |
429 | err = -EINVAL; | 429 | err = -EINVAL; |
430 | ubi_ro_mode(ubi); | 430 | ubi_ro_mode(ubi); |
431 | } | ||
431 | } | 432 | } |
432 | goto out_free; | 433 | goto out_free; |
433 | } else if (err == UBI_IO_BITFLIPS) | 434 | } else if (err == UBI_IO_BITFLIPS) |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 84673ebcf428..df51d6025a90 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -157,7 +157,7 @@ config IPVLAN | |||
157 | making it transparent to the connected L2 switch. | 157 | making it transparent to the connected L2 switch. |
158 | 158 | ||
159 | Ipvlan devices can be added using the "ip" command from the | 159 | Ipvlan devices can be added using the "ip" command from the |
160 | iproute2 package starting with the iproute2-X.Y.ZZ release: | 160 | iproute2 package starting with the iproute2-3.19 release: |
161 | 161 | ||
162 | "ip link add link <main-dev> [ NAME ] type ipvlan" | 162 | "ip link add link <main-dev> [ NAME ] type ipvlan" |
163 | 163 | ||
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig index 4ce6ca5f3d36..dc6b78e5342f 100644 --- a/drivers/net/appletalk/Kconfig +++ b/drivers/net/appletalk/Kconfig | |||
@@ -40,7 +40,7 @@ config DEV_APPLETALK | |||
40 | 40 | ||
41 | config LTPC | 41 | config LTPC |
42 | tristate "Apple/Farallon LocalTalk PC support" | 42 | tristate "Apple/Farallon LocalTalk PC support" |
43 | depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API | 43 | depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS |
44 | help | 44 | help |
45 | This allows you to use the AppleTalk PC card to connect to LocalTalk | 45 | This allows you to use the AppleTalk PC card to connect to LocalTalk |
46 | networks. The card is also known as the Farallon PhoneNet PC card. | 46 | networks. The card is also known as the Farallon PhoneNet PC card. |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b979c265fc51..089a4028859d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3850,7 +3850,8 @@ static inline int bond_slave_override(struct bonding *bond, | |||
3850 | /* Find out if any slaves have the same mapping as this skb. */ | 3850 | /* Find out if any slaves have the same mapping as this skb. */ |
3851 | bond_for_each_slave_rcu(bond, slave, iter) { | 3851 | bond_for_each_slave_rcu(bond, slave, iter) { |
3852 | if (slave->queue_id == skb->queue_mapping) { | 3852 | if (slave->queue_id == skb->queue_mapping) { |
3853 | if (bond_slave_can_tx(slave)) { | 3853 | if (bond_slave_is_up(slave) && |
3854 | slave->link == BOND_LINK_UP) { | ||
3854 | bond_dev_queue_xmit(bond, skb, slave->dev); | 3855 | bond_dev_queue_xmit(bond, skb, slave->dev); |
3855 | return 0; | 3856 | return 0; |
3856 | } | 3857 | } |
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 98d73aab52fe..58808f651452 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig | |||
@@ -131,7 +131,7 @@ config CAN_RCAR | |||
131 | 131 | ||
132 | config CAN_XILINXCAN | 132 | config CAN_XILINXCAN |
133 | tristate "Xilinx CAN" | 133 | tristate "Xilinx CAN" |
134 | depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST | 134 | depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST |
135 | depends on COMMON_CLK && HAS_IOMEM | 135 | depends on COMMON_CLK && HAS_IOMEM |
136 | ---help--- | 136 | ---help--- |
137 | Xilinx CAN driver. This driver supports both soft AXI CAN IP and | 137 | Xilinx CAN driver. This driver supports both soft AXI CAN IP and |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3c82e02e3dae..b0f69248cb71 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
@@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) | |||
579 | skb->pkt_type = PACKET_BROADCAST; | 579 | skb->pkt_type = PACKET_BROADCAST; |
580 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 580 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
581 | 581 | ||
582 | skb_reset_mac_header(skb); | ||
583 | skb_reset_network_header(skb); | ||
584 | skb_reset_transport_header(skb); | ||
585 | |||
582 | can_skb_reserve(skb); | 586 | can_skb_reserve(skb); |
583 | can_skb_prv(skb)->ifindex = dev->ifindex; | 587 | can_skb_prv(skb)->ifindex = dev->ifindex; |
584 | 588 | ||
@@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, | |||
603 | skb->pkt_type = PACKET_BROADCAST; | 607 | skb->pkt_type = PACKET_BROADCAST; |
604 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 608 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
605 | 609 | ||
610 | skb_reset_mac_header(skb); | ||
611 | skb_reset_network_header(skb); | ||
612 | skb_reset_transport_header(skb); | ||
613 | |||
606 | can_skb_reserve(skb); | 614 | can_skb_reserve(skb); |
607 | can_skb_prv(skb)->ifindex = dev->ifindex; | 615 | can_skb_prv(skb)->ifindex = dev->ifindex; |
608 | 616 | ||
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 80c46ad4cee4..ad0a7e8c2c2b 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) | |||
592 | rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? | 592 | rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? |
593 | CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; | 593 | CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; |
594 | new_state = max(tx_state, rx_state); | 594 | new_state = max(tx_state, rx_state); |
595 | } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) { | 595 | } else { |
596 | __flexcan_get_berr_counter(dev, &bec); | 596 | __flexcan_get_berr_counter(dev, &bec); |
597 | new_state = CAN_STATE_ERROR_PASSIVE; | 597 | new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ? |
598 | CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF; | ||
598 | rx_state = bec.rxerr >= bec.txerr ? new_state : 0; | 599 | rx_state = bec.rxerr >= bec.txerr ? new_state : 0; |
599 | tx_state = bec.rxerr <= bec.txerr ? new_state : 0; | 600 | tx_state = bec.rxerr <= bec.txerr ? new_state : 0; |
600 | } else { | ||
601 | new_state = CAN_STATE_BUS_OFF; | ||
602 | } | 601 | } |
603 | 602 | ||
604 | /* state hasn't changed */ | 603 | /* state hasn't changed */ |
@@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev) | |||
1158 | const struct flexcan_devtype_data *devtype_data; | 1157 | const struct flexcan_devtype_data *devtype_data; |
1159 | struct net_device *dev; | 1158 | struct net_device *dev; |
1160 | struct flexcan_priv *priv; | 1159 | struct flexcan_priv *priv; |
1160 | struct regulator *reg_xceiver; | ||
1161 | struct resource *mem; | 1161 | struct resource *mem; |
1162 | struct clk *clk_ipg = NULL, *clk_per = NULL; | 1162 | struct clk *clk_ipg = NULL, *clk_per = NULL; |
1163 | void __iomem *base; | 1163 | void __iomem *base; |
1164 | int err, irq; | 1164 | int err, irq; |
1165 | u32 clock_freq = 0; | 1165 | u32 clock_freq = 0; |
1166 | 1166 | ||
1167 | reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); | ||
1168 | if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER) | ||
1169 | return -EPROBE_DEFER; | ||
1170 | else if (IS_ERR(reg_xceiver)) | ||
1171 | reg_xceiver = NULL; | ||
1172 | |||
1167 | if (pdev->dev.of_node) | 1173 | if (pdev->dev.of_node) |
1168 | of_property_read_u32(pdev->dev.of_node, | 1174 | of_property_read_u32(pdev->dev.of_node, |
1169 | "clock-frequency", &clock_freq); | 1175 | "clock-frequency", &clock_freq); |
@@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev) | |||
1224 | priv->pdata = dev_get_platdata(&pdev->dev); | 1230 | priv->pdata = dev_get_platdata(&pdev->dev); |
1225 | priv->devtype_data = devtype_data; | 1231 | priv->devtype_data = devtype_data; |
1226 | 1232 | ||
1227 | priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); | 1233 | priv->reg_xceiver = reg_xceiver; |
1228 | if (IS_ERR(priv->reg_xceiver)) | ||
1229 | priv->reg_xceiver = NULL; | ||
1230 | 1234 | ||
1231 | netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); | 1235 | netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); |
1232 | 1236 | ||
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 009acc8641fc..8b4d3e6875eb 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c | |||
@@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id * | |||
901 | } | 901 | } |
902 | 902 | ||
903 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 903 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
904 | if (!dev) | ||
905 | return -ENOMEM; | ||
904 | init_usb_anchor(&dev->rx_submitted); | 906 | init_usb_anchor(&dev->rx_submitted); |
905 | 907 | ||
906 | atomic_set(&dev->active_channels, 0); | 908 | atomic_set(&dev->active_channels, 0); |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 2928f7003041..57611fd91229 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -14,6 +14,8 @@ | |||
14 | * Copyright (C) 2015 Valeo S.A. | 14 | * Copyright (C) 2015 Valeo S.A. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/kernel.h> | ||
17 | #include <linux/completion.h> | 19 | #include <linux/completion.h> |
18 | #include <linux/module.h> | 20 | #include <linux/module.h> |
19 | #include <linux/netdevice.h> | 21 | #include <linux/netdevice.h> |
@@ -23,7 +25,6 @@ | |||
23 | #include <linux/can/dev.h> | 25 | #include <linux/can/dev.h> |
24 | #include <linux/can/error.h> | 26 | #include <linux/can/error.h> |
25 | 27 | ||
26 | #define MAX_TX_URBS 16 | ||
27 | #define MAX_RX_URBS 4 | 28 | #define MAX_RX_URBS 4 |
28 | #define START_TIMEOUT 1000 /* msecs */ | 29 | #define START_TIMEOUT 1000 /* msecs */ |
29 | #define STOP_TIMEOUT 1000 /* msecs */ | 30 | #define STOP_TIMEOUT 1000 /* msecs */ |
@@ -441,6 +442,7 @@ struct kvaser_usb_error_summary { | |||
441 | }; | 442 | }; |
442 | }; | 443 | }; |
443 | 444 | ||
445 | /* Context for an outstanding, not yet ACKed, transmission */ | ||
444 | struct kvaser_usb_tx_urb_context { | 446 | struct kvaser_usb_tx_urb_context { |
445 | struct kvaser_usb_net_priv *priv; | 447 | struct kvaser_usb_net_priv *priv; |
446 | u32 echo_index; | 448 | u32 echo_index; |
@@ -454,8 +456,13 @@ struct kvaser_usb { | |||
454 | struct usb_endpoint_descriptor *bulk_in, *bulk_out; | 456 | struct usb_endpoint_descriptor *bulk_in, *bulk_out; |
455 | struct usb_anchor rx_submitted; | 457 | struct usb_anchor rx_submitted; |
456 | 458 | ||
459 | /* @max_tx_urbs: Firmware-reported maximum number of outstanding, | ||
460 | * not yet ACKed, transmissions on this device. This value is | ||
461 | * also used as a sentinel for marking free tx contexts. | ||
462 | */ | ||
457 | u32 fw_version; | 463 | u32 fw_version; |
458 | unsigned int nchannels; | 464 | unsigned int nchannels; |
465 | unsigned int max_tx_urbs; | ||
459 | enum kvaser_usb_family family; | 466 | enum kvaser_usb_family family; |
460 | 467 | ||
461 | bool rxinitdone; | 468 | bool rxinitdone; |
@@ -465,18 +472,18 @@ struct kvaser_usb { | |||
465 | 472 | ||
466 | struct kvaser_usb_net_priv { | 473 | struct kvaser_usb_net_priv { |
467 | struct can_priv can; | 474 | struct can_priv can; |
468 | 475 | struct can_berr_counter bec; | |
469 | atomic_t active_tx_urbs; | ||
470 | struct usb_anchor tx_submitted; | ||
471 | struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; | ||
472 | |||
473 | struct completion start_comp, stop_comp; | ||
474 | 476 | ||
475 | struct kvaser_usb *dev; | 477 | struct kvaser_usb *dev; |
476 | struct net_device *netdev; | 478 | struct net_device *netdev; |
477 | int channel; | 479 | int channel; |
478 | 480 | ||
479 | struct can_berr_counter bec; | 481 | struct completion start_comp, stop_comp; |
482 | struct usb_anchor tx_submitted; | ||
483 | |||
484 | spinlock_t tx_contexts_lock; | ||
485 | int active_tx_contexts; | ||
486 | struct kvaser_usb_tx_urb_context tx_contexts[]; | ||
480 | }; | 487 | }; |
481 | 488 | ||
482 | static const struct usb_device_id kvaser_usb_table[] = { | 489 | static const struct usb_device_id kvaser_usb_table[] = { |
@@ -584,8 +591,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, | |||
584 | while (pos <= actual_len - MSG_HEADER_LEN) { | 591 | while (pos <= actual_len - MSG_HEADER_LEN) { |
585 | tmp = buf + pos; | 592 | tmp = buf + pos; |
586 | 593 | ||
587 | if (!tmp->len) | 594 | /* Handle messages crossing the USB endpoint max packet |
588 | break; | 595 | * size boundary. Check kvaser_usb_read_bulk_callback() |
596 | * for further details. | ||
597 | */ | ||
598 | if (tmp->len == 0) { | ||
599 | pos = round_up(pos, le16_to_cpu(dev->bulk_in-> | ||
600 | wMaxPacketSize)); | ||
601 | continue; | ||
602 | } | ||
589 | 603 | ||
590 | if (pos + tmp->len > actual_len) { | 604 | if (pos + tmp->len > actual_len) { |
591 | dev_err(dev->udev->dev.parent, | 605 | dev_err(dev->udev->dev.parent, |
@@ -647,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev) | |||
647 | switch (dev->family) { | 661 | switch (dev->family) { |
648 | case KVASER_LEAF: | 662 | case KVASER_LEAF: |
649 | dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version); | 663 | dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version); |
664 | dev->max_tx_urbs = | ||
665 | le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx); | ||
650 | break; | 666 | break; |
651 | case KVASER_USBCAN: | 667 | case KVASER_USBCAN: |
652 | dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version); | 668 | dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version); |
669 | dev->max_tx_urbs = | ||
670 | le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx); | ||
653 | break; | 671 | break; |
654 | } | 672 | } |
655 | 673 | ||
@@ -686,6 +704,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, | |||
686 | struct kvaser_usb_net_priv *priv; | 704 | struct kvaser_usb_net_priv *priv; |
687 | struct sk_buff *skb; | 705 | struct sk_buff *skb; |
688 | struct can_frame *cf; | 706 | struct can_frame *cf; |
707 | unsigned long flags; | ||
689 | u8 channel, tid; | 708 | u8 channel, tid; |
690 | 709 | ||
691 | channel = msg->u.tx_acknowledge_header.channel; | 710 | channel = msg->u.tx_acknowledge_header.channel; |
@@ -704,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, | |||
704 | 723 | ||
705 | stats = &priv->netdev->stats; | 724 | stats = &priv->netdev->stats; |
706 | 725 | ||
707 | context = &priv->tx_contexts[tid % MAX_TX_URBS]; | 726 | context = &priv->tx_contexts[tid % dev->max_tx_urbs]; |
708 | 727 | ||
709 | /* Sometimes the state change doesn't come after a bus-off event */ | 728 | /* Sometimes the state change doesn't come after a bus-off event */ |
710 | if (priv->can.restart_ms && | 729 | if (priv->can.restart_ms && |
@@ -729,12 +748,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, | |||
729 | 748 | ||
730 | stats->tx_packets++; | 749 | stats->tx_packets++; |
731 | stats->tx_bytes += context->dlc; | 750 | stats->tx_bytes += context->dlc; |
732 | can_get_echo_skb(priv->netdev, context->echo_index); | ||
733 | 751 | ||
734 | context->echo_index = MAX_TX_URBS; | 752 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); |
735 | atomic_dec(&priv->active_tx_urbs); | ||
736 | 753 | ||
754 | can_get_echo_skb(priv->netdev, context->echo_index); | ||
755 | context->echo_index = dev->max_tx_urbs; | ||
756 | --priv->active_tx_contexts; | ||
737 | netif_wake_queue(priv->netdev); | 757 | netif_wake_queue(priv->netdev); |
758 | |||
759 | spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); | ||
738 | } | 760 | } |
739 | 761 | ||
740 | static void kvaser_usb_simple_msg_callback(struct urb *urb) | 762 | static void kvaser_usb_simple_msg_callback(struct urb *urb) |
@@ -787,7 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, | |||
787 | netdev_err(netdev, "Error transmitting URB\n"); | 809 | netdev_err(netdev, "Error transmitting URB\n"); |
788 | usb_unanchor_urb(urb); | 810 | usb_unanchor_urb(urb); |
789 | usb_free_urb(urb); | 811 | usb_free_urb(urb); |
790 | kfree(buf); | ||
791 | return err; | 812 | return err; |
792 | } | 813 | } |
793 | 814 | ||
@@ -796,17 +817,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, | |||
796 | return 0; | 817 | return 0; |
797 | } | 818 | } |
798 | 819 | ||
799 | static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) | ||
800 | { | ||
801 | int i; | ||
802 | |||
803 | usb_kill_anchored_urbs(&priv->tx_submitted); | ||
804 | atomic_set(&priv->active_tx_urbs, 0); | ||
805 | |||
806 | for (i = 0; i < MAX_TX_URBS; i++) | ||
807 | priv->tx_contexts[i].echo_index = MAX_TX_URBS; | ||
808 | } | ||
809 | |||
810 | static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, | 820 | static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, |
811 | const struct kvaser_usb_error_summary *es, | 821 | const struct kvaser_usb_error_summary *es, |
812 | struct can_frame *cf) | 822 | struct can_frame *cf) |
@@ -1317,8 +1327,20 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
1317 | while (pos <= urb->actual_length - MSG_HEADER_LEN) { | 1327 | while (pos <= urb->actual_length - MSG_HEADER_LEN) { |
1318 | msg = urb->transfer_buffer + pos; | 1328 | msg = urb->transfer_buffer + pos; |
1319 | 1329 | ||
1320 | if (!msg->len) | 1330 | /* The Kvaser firmware can only read and write messages that |
1321 | break; | 1331 | * does not cross the USB's endpoint wMaxPacketSize boundary. |
1332 | * If a follow-up command crosses such boundary, firmware puts | ||
1333 | * a placeholder zero-length command in its place then aligns | ||
1334 | * the real command to the next max packet size. | ||
1335 | * | ||
1336 | * Handle such cases or we're going to miss a significant | ||
1337 | * number of events in case of a heavy rx load on the bus. | ||
1338 | */ | ||
1339 | if (msg->len == 0) { | ||
1340 | pos = round_up(pos, le16_to_cpu(dev->bulk_in-> | ||
1341 | wMaxPacketSize)); | ||
1342 | continue; | ||
1343 | } | ||
1322 | 1344 | ||
1323 | if (pos + msg->len > urb->actual_length) { | 1345 | if (pos + msg->len > urb->actual_length) { |
1324 | dev_err(dev->udev->dev.parent, "Format error\n"); | 1346 | dev_err(dev->udev->dev.parent, "Format error\n"); |
@@ -1326,7 +1348,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
1326 | } | 1348 | } |
1327 | 1349 | ||
1328 | kvaser_usb_handle_message(dev, msg); | 1350 | kvaser_usb_handle_message(dev, msg); |
1329 | |||
1330 | pos += msg->len; | 1351 | pos += msg->len; |
1331 | } | 1352 | } |
1332 | 1353 | ||
@@ -1498,6 +1519,26 @@ error: | |||
1498 | return err; | 1519 | return err; |
1499 | } | 1520 | } |
1500 | 1521 | ||
1522 | static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) | ||
1523 | { | ||
1524 | int i, max_tx_urbs; | ||
1525 | |||
1526 | max_tx_urbs = priv->dev->max_tx_urbs; | ||
1527 | |||
1528 | priv->active_tx_contexts = 0; | ||
1529 | for (i = 0; i < max_tx_urbs; i++) | ||
1530 | priv->tx_contexts[i].echo_index = max_tx_urbs; | ||
1531 | } | ||
1532 | |||
1533 | /* This method might sleep. Do not call it in the atomic context | ||
1534 | * of URB completions. | ||
1535 | */ | ||
1536 | static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) | ||
1537 | { | ||
1538 | usb_kill_anchored_urbs(&priv->tx_submitted); | ||
1539 | kvaser_usb_reset_tx_urb_contexts(priv); | ||
1540 | } | ||
1541 | |||
1501 | static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) | 1542 | static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) |
1502 | { | 1543 | { |
1503 | int i; | 1544 | int i; |
@@ -1615,9 +1656,9 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1615 | struct urb *urb; | 1656 | struct urb *urb; |
1616 | void *buf; | 1657 | void *buf; |
1617 | struct kvaser_msg *msg; | 1658 | struct kvaser_msg *msg; |
1618 | int i, err; | 1659 | int i, err, ret = NETDEV_TX_OK; |
1619 | int ret = NETDEV_TX_OK; | ||
1620 | u8 *msg_tx_can_flags = NULL; /* GCC */ | 1660 | u8 *msg_tx_can_flags = NULL; /* GCC */ |
1661 | unsigned long flags; | ||
1621 | 1662 | ||
1622 | if (can_dropped_invalid_skb(netdev, skb)) | 1663 | if (can_dropped_invalid_skb(netdev, skb)) |
1623 | return NETDEV_TX_OK; | 1664 | return NETDEV_TX_OK; |
@@ -1634,7 +1675,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1634 | if (!buf) { | 1675 | if (!buf) { |
1635 | stats->tx_dropped++; | 1676 | stats->tx_dropped++; |
1636 | dev_kfree_skb(skb); | 1677 | dev_kfree_skb(skb); |
1637 | goto nobufmem; | 1678 | goto freeurb; |
1638 | } | 1679 | } |
1639 | 1680 | ||
1640 | msg = buf; | 1681 | msg = buf; |
@@ -1671,22 +1712,32 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1671 | if (cf->can_id & CAN_RTR_FLAG) | 1712 | if (cf->can_id & CAN_RTR_FLAG) |
1672 | *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; | 1713 | *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; |
1673 | 1714 | ||
1674 | for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { | 1715 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); |
1675 | if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { | 1716 | for (i = 0; i < dev->max_tx_urbs; i++) { |
1717 | if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) { | ||
1676 | context = &priv->tx_contexts[i]; | 1718 | context = &priv->tx_contexts[i]; |
1719 | |||
1720 | context->echo_index = i; | ||
1721 | can_put_echo_skb(skb, netdev, context->echo_index); | ||
1722 | ++priv->active_tx_contexts; | ||
1723 | if (priv->active_tx_contexts >= dev->max_tx_urbs) | ||
1724 | netif_stop_queue(netdev); | ||
1725 | |||
1677 | break; | 1726 | break; |
1678 | } | 1727 | } |
1679 | } | 1728 | } |
1729 | spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); | ||
1680 | 1730 | ||
1681 | /* This should never happen; it implies a flow control bug */ | 1731 | /* This should never happen; it implies a flow control bug */ |
1682 | if (!context) { | 1732 | if (!context) { |
1683 | netdev_warn(netdev, "cannot find free context\n"); | 1733 | netdev_warn(netdev, "cannot find free context\n"); |
1734 | |||
1735 | kfree(buf); | ||
1684 | ret = NETDEV_TX_BUSY; | 1736 | ret = NETDEV_TX_BUSY; |
1685 | goto releasebuf; | 1737 | goto freeurb; |
1686 | } | 1738 | } |
1687 | 1739 | ||
1688 | context->priv = priv; | 1740 | context->priv = priv; |
1689 | context->echo_index = i; | ||
1690 | context->dlc = cf->can_dlc; | 1741 | context->dlc = cf->can_dlc; |
1691 | 1742 | ||
1692 | msg->u.tx_can.tid = context->echo_index; | 1743 | msg->u.tx_can.tid = context->echo_index; |
@@ -1698,18 +1749,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1698 | kvaser_usb_write_bulk_callback, context); | 1749 | kvaser_usb_write_bulk_callback, context); |
1699 | usb_anchor_urb(urb, &priv->tx_submitted); | 1750 | usb_anchor_urb(urb, &priv->tx_submitted); |
1700 | 1751 | ||
1701 | can_put_echo_skb(skb, netdev, context->echo_index); | ||
1702 | |||
1703 | atomic_inc(&priv->active_tx_urbs); | ||
1704 | |||
1705 | if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) | ||
1706 | netif_stop_queue(netdev); | ||
1707 | |||
1708 | err = usb_submit_urb(urb, GFP_ATOMIC); | 1752 | err = usb_submit_urb(urb, GFP_ATOMIC); |
1709 | if (unlikely(err)) { | 1753 | if (unlikely(err)) { |
1754 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); | ||
1755 | |||
1710 | can_free_echo_skb(netdev, context->echo_index); | 1756 | can_free_echo_skb(netdev, context->echo_index); |
1757 | context->echo_index = dev->max_tx_urbs; | ||
1758 | --priv->active_tx_contexts; | ||
1759 | netif_wake_queue(netdev); | ||
1760 | |||
1761 | spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); | ||
1711 | 1762 | ||
1712 | atomic_dec(&priv->active_tx_urbs); | ||
1713 | usb_unanchor_urb(urb); | 1763 | usb_unanchor_urb(urb); |
1714 | 1764 | ||
1715 | stats->tx_dropped++; | 1765 | stats->tx_dropped++; |
@@ -1719,16 +1769,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1719 | else | 1769 | else |
1720 | netdev_warn(netdev, "Failed tx_urb %d\n", err); | 1770 | netdev_warn(netdev, "Failed tx_urb %d\n", err); |
1721 | 1771 | ||
1722 | goto releasebuf; | 1772 | goto freeurb; |
1723 | } | 1773 | } |
1724 | 1774 | ||
1725 | usb_free_urb(urb); | 1775 | ret = NETDEV_TX_OK; |
1726 | |||
1727 | return NETDEV_TX_OK; | ||
1728 | 1776 | ||
1729 | releasebuf: | 1777 | freeurb: |
1730 | kfree(buf); | ||
1731 | nobufmem: | ||
1732 | usb_free_urb(urb); | 1778 | usb_free_urb(urb); |
1733 | return ret; | 1779 | return ret; |
1734 | } | 1780 | } |
@@ -1840,13 +1886,15 @@ static int kvaser_usb_init_one(struct usb_interface *intf, | |||
1840 | struct kvaser_usb *dev = usb_get_intfdata(intf); | 1886 | struct kvaser_usb *dev = usb_get_intfdata(intf); |
1841 | struct net_device *netdev; | 1887 | struct net_device *netdev; |
1842 | struct kvaser_usb_net_priv *priv; | 1888 | struct kvaser_usb_net_priv *priv; |
1843 | int i, err; | 1889 | int err; |
1844 | 1890 | ||
1845 | err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); | 1891 | err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); |
1846 | if (err) | 1892 | if (err) |
1847 | return err; | 1893 | return err; |
1848 | 1894 | ||
1849 | netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); | 1895 | netdev = alloc_candev(sizeof(*priv) + |
1896 | dev->max_tx_urbs * sizeof(*priv->tx_contexts), | ||
1897 | dev->max_tx_urbs); | ||
1850 | if (!netdev) { | 1898 | if (!netdev) { |
1851 | dev_err(&intf->dev, "Cannot alloc candev\n"); | 1899 | dev_err(&intf->dev, "Cannot alloc candev\n"); |
1852 | return -ENOMEM; | 1900 | return -ENOMEM; |
@@ -1854,19 +1902,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf, | |||
1854 | 1902 | ||
1855 | priv = netdev_priv(netdev); | 1903 | priv = netdev_priv(netdev); |
1856 | 1904 | ||
1905 | init_usb_anchor(&priv->tx_submitted); | ||
1857 | init_completion(&priv->start_comp); | 1906 | init_completion(&priv->start_comp); |
1858 | init_completion(&priv->stop_comp); | 1907 | init_completion(&priv->stop_comp); |
1859 | 1908 | ||
1860 | init_usb_anchor(&priv->tx_submitted); | ||
1861 | atomic_set(&priv->active_tx_urbs, 0); | ||
1862 | |||
1863 | for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) | ||
1864 | priv->tx_contexts[i].echo_index = MAX_TX_URBS; | ||
1865 | |||
1866 | priv->dev = dev; | 1909 | priv->dev = dev; |
1867 | priv->netdev = netdev; | 1910 | priv->netdev = netdev; |
1868 | priv->channel = channel; | 1911 | priv->channel = channel; |
1869 | 1912 | ||
1913 | spin_lock_init(&priv->tx_contexts_lock); | ||
1914 | kvaser_usb_reset_tx_urb_contexts(priv); | ||
1915 | |||
1870 | priv->can.state = CAN_STATE_STOPPED; | 1916 | priv->can.state = CAN_STATE_STOPPED; |
1871 | priv->can.clock.freq = CAN_USB_CLOCK; | 1917 | priv->can.clock.freq = CAN_USB_CLOCK; |
1872 | priv->can.bittiming_const = &kvaser_usb_bittiming_const; | 1918 | priv->can.bittiming_const = &kvaser_usb_bittiming_const; |
@@ -1976,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf, | |||
1976 | return err; | 2022 | return err; |
1977 | } | 2023 | } |
1978 | 2024 | ||
2025 | dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n", | ||
2026 | ((dev->fw_version >> 24) & 0xff), | ||
2027 | ((dev->fw_version >> 16) & 0xff), | ||
2028 | (dev->fw_version & 0xffff)); | ||
2029 | |||
2030 | dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); | ||
2031 | |||
1979 | err = kvaser_usb_get_card_info(dev); | 2032 | err = kvaser_usb_get_card_info(dev); |
1980 | if (err) { | 2033 | if (err) { |
1981 | dev_err(&intf->dev, | 2034 | dev_err(&intf->dev, |
@@ -1983,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf, | |||
1983 | return err; | 2036 | return err; |
1984 | } | 2037 | } |
1985 | 2038 | ||
1986 | dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n", | ||
1987 | ((dev->fw_version >> 24) & 0xff), | ||
1988 | ((dev->fw_version >> 16) & 0xff), | ||
1989 | (dev->fw_version & 0xffff)); | ||
1990 | |||
1991 | for (i = 0; i < dev->nchannels; i++) { | 2039 | for (i = 0; i < dev->nchannels; i++) { |
1992 | err = kvaser_usb_init_one(intf, id, i); | 2040 | err = kvaser_usb_init_one(intf, id, i); |
1993 | if (err) { | 2041 | if (err) { |
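
Several kvaser_usb hunks above hinge on the same parsing rule: a zero-length command in the bulk buffer is a firmware placeholder, and the real command resumes at the next wMaxPacketSize boundary, so the scan must round up instead of terminating. A simplified sketch of that loop with a hypothetical message layout; a placeholder never starts exactly on a boundary (it exists only because the next real command would cross one), so round_up() always advances:

#include <linux/kernel.h>

struct demo_msg {
    u8 len;         /* total message length, 0 marks a placeholder */
    u8 payload[];
};

static void demo_parse(void *buf, int actual_len, u16 max_packet_size)
{
    int pos = 0;

    while (pos + (int)sizeof(struct demo_msg) <= actual_len) {
        struct demo_msg *msg = buf + pos;

        if (msg->len == 0) {
            /* placeholder: the real command starts at the next boundary */
            pos = round_up(pos, max_packet_size);
            continue;
        }
        if (pos + msg->len > actual_len)
            break;  /* truncated buffer, format error */

        /* ... handle one message ... */
        pos += msg->len;
    }
}
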
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h index 1ba7c25002e1..e8fc4952c6b0 100644 --- a/drivers/net/can/usb/peak_usb/pcan_ucan.h +++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h | |||
@@ -26,8 +26,8 @@ | |||
26 | #define PUCAN_CMD_FILTER_STD 0x008 | 26 | #define PUCAN_CMD_FILTER_STD 0x008 |
27 | #define PUCAN_CMD_TX_ABORT 0x009 | 27 | #define PUCAN_CMD_TX_ABORT 0x009 |
28 | #define PUCAN_CMD_WR_ERR_CNT 0x00a | 28 | #define PUCAN_CMD_WR_ERR_CNT 0x00a |
29 | #define PUCAN_CMD_RX_FRAME_ENABLE 0x00b | 29 | #define PUCAN_CMD_SET_EN_OPTION 0x00b |
30 | #define PUCAN_CMD_RX_FRAME_DISABLE 0x00c | 30 | #define PUCAN_CMD_CLR_DIS_OPTION 0x00c |
31 | #define PUCAN_CMD_END_OF_COLLECTION 0x3ff | 31 | #define PUCAN_CMD_END_OF_COLLECTION 0x3ff |
32 | 32 | ||
33 | /* uCAN received messages list */ | 33 | /* uCAN received messages list */ |
@@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt { | |||
101 | u16 unused; | 101 | u16 unused; |
102 | }; | 102 | }; |
103 | 103 | ||
104 | /* uCAN RX_FRAME_ENABLE command fields */ | 104 | /* uCAN SET_EN/CLR_DIS _OPTION command fields */ |
105 | #define PUCAN_FLTEXT_ERROR 0x0001 | 105 | #define PUCAN_OPTION_ERROR 0x0001 |
106 | #define PUCAN_FLTEXT_BUSLOAD 0x0002 | 106 | #define PUCAN_OPTION_BUSLOAD 0x0002 |
107 | #define PUCAN_OPTION_CANDFDISO 0x0004 | ||
107 | 108 | ||
108 | struct __packed pucan_filter_ext { | 109 | struct __packed pucan_options { |
109 | __le16 opcode_channel; | 110 | __le16 opcode_channel; |
110 | 111 | ||
111 | __le16 ext_mask; | 112 | __le16 options; |
112 | u32 unused; | 113 | u32 unused; |
113 | }; | 114 | }; |
114 | 115 | ||
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 962c3f027383..a9221ad9f1a0 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |||
@@ -110,13 +110,13 @@ struct __packed pcan_ufd_led { | |||
110 | u8 unused[5]; | 110 | u8 unused[5]; |
111 | }; | 111 | }; |
112 | 112 | ||
113 | /* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */ | 113 | /* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */ |
114 | #define PCAN_UFD_FLTEXT_CALIBRATION 0x8000 | 114 | #define PCAN_UFD_FLTEXT_CALIBRATION 0x8000 |
115 | 115 | ||
116 | struct __packed pcan_ufd_filter_ext { | 116 | struct __packed pcan_ufd_options { |
117 | __le16 opcode_channel; | 117 | __le16 opcode_channel; |
118 | 118 | ||
119 | __le16 ext_mask; | 119 | __le16 ucan_mask; |
120 | u16 unused; | 120 | u16 unused; |
121 | __le16 usb_mask; | 121 | __le16 usb_mask; |
122 | }; | 122 | }; |
@@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf) | |||
251 | /* moves the pointer forward */ | 251 | /* moves the pointer forward */ |
252 | pc += sizeof(struct pucan_wr_err_cnt); | 252 | pc += sizeof(struct pucan_wr_err_cnt); |
253 | 253 | ||
254 | /* add command to switch from ISO to non-ISO mode, if fw allows it */ | ||
255 | if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) { | ||
256 | struct pucan_options *puo = (struct pucan_options *)pc; | ||
257 | |||
258 | puo->opcode_channel = | ||
259 | (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ? | ||
260 | pucan_cmd_opcode_channel(dev, | ||
261 | PUCAN_CMD_CLR_DIS_OPTION) : | ||
262 | pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION); | ||
263 | |||
264 | puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO); | ||
265 | |||
266 | /* to be sure that no other extended bits will be taken into | ||
267 | * account | ||
268 | */ | ||
269 | puo->unused = 0; | ||
270 | |||
271 | /* moves the pointer forward */ | ||
272 | pc += sizeof(struct pucan_options); | ||
273 | } | ||
274 | |||
254 | /* next, go back to operational mode */ | 275 | /* next, go back to operational mode */ |
255 | cmd = (struct pucan_command *)pc; | 276 | cmd = (struct pucan_command *)pc; |
256 | cmd->opcode_channel = pucan_cmd_opcode_channel(dev, | 277 | cmd->opcode_channel = pucan_cmd_opcode_channel(dev, |
@@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx, | |||
321 | return pcan_usb_fd_send_cmd(dev, cmd); | 342 | return pcan_usb_fd_send_cmd(dev, cmd); |
322 | } | 343 | } |
323 | 344 | ||
324 | /* set/unset notifications filter: | 345 | /* set/unset options |
325 | * | 346 | * |
326 | * onoff sets(1)/unset(0) notifications | 347 | * onoff set(1)/unset(0) options |
327 | * mask each bit defines a kind of notification to set/unset | 348 | * mask each bit defines a kind of options to set/unset |
328 | */ | 349 | */ |
329 | static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev, | 350 | static int pcan_usb_fd_set_options(struct peak_usb_device *dev, |
330 | bool onoff, u16 ext_mask, u16 usb_mask) | 351 | bool onoff, u16 ucan_mask, u16 usb_mask) |
331 | { | 352 | { |
332 | struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev); | 353 | struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev); |
333 | 354 | ||
334 | cmd->opcode_channel = pucan_cmd_opcode_channel(dev, | 355 | cmd->opcode_channel = pucan_cmd_opcode_channel(dev, |
335 | (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE : | 356 | (onoff) ? PUCAN_CMD_SET_EN_OPTION : |
336 | PUCAN_CMD_RX_FRAME_DISABLE); | 357 | PUCAN_CMD_CLR_DIS_OPTION); |
337 | 358 | ||
338 | cmd->ext_mask = cpu_to_le16(ext_mask); | 359 | cmd->ucan_mask = cpu_to_le16(ucan_mask); |
339 | cmd->usb_mask = cpu_to_le16(usb_mask); | 360 | cmd->usb_mask = cpu_to_le16(usb_mask); |
340 | 361 | ||
341 | /* send the command */ | 362 | /* send the command */ |
@@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev) | |||
770 | &pcan_usb_pro_fd); | 791 | &pcan_usb_pro_fd); |
771 | 792 | ||
772 | /* enable USB calibration messages */ | 793 | /* enable USB calibration messages */ |
773 | err = pcan_usb_fd_set_filter_ext(dev, 1, | 794 | err = pcan_usb_fd_set_options(dev, 1, |
774 | PUCAN_FLTEXT_ERROR, | 795 | PUCAN_OPTION_ERROR, |
775 | PCAN_UFD_FLTEXT_CALIBRATION); | 796 | PCAN_UFD_FLTEXT_CALIBRATION); |
776 | } | 797 | } |
777 | 798 | ||
778 | pdev->usb_if->dev_opened_count++; | 799 | pdev->usb_if->dev_opened_count++; |
@@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev) | |||
806 | 827 | ||
807 | /* turn off special msgs for that interface if no other dev opened */ | 828 | /* turn off special msgs for that interface if no other dev opened */ |
808 | if (pdev->usb_if->dev_opened_count == 1) | 829 | if (pdev->usb_if->dev_opened_count == 1) |
809 | pcan_usb_fd_set_filter_ext(dev, 0, | 830 | pcan_usb_fd_set_options(dev, 0, |
810 | PUCAN_FLTEXT_ERROR, | 831 | PUCAN_OPTION_ERROR, |
811 | PCAN_UFD_FLTEXT_CALIBRATION); | 832 | PCAN_UFD_FLTEXT_CALIBRATION); |
812 | pdev->usb_if->dev_opened_count--; | 833 | pdev->usb_if->dev_opened_count--; |
813 | 834 | ||
814 | return 0; | 835 | return 0; |
@@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) | |||
860 | pdev->usb_if->fw_info.fw_version[2], | 881 | pdev->usb_if->fw_info.fw_version[2], |
861 | dev->adapter->ctrl_count); | 882 | dev->adapter->ctrl_count); |
862 | 883 | ||
863 | /* the currently supported hw is non-ISO */ | 884 | /* check for ability to switch between ISO/non-ISO modes */ |
864 | dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO; | 885 | if (pdev->usb_if->fw_info.fw_version[0] >= 2) { |
886 | /* firmware >= 2.x supports ISO/non-ISO switching */ | ||
887 | dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; | ||
888 | } else { | ||
889 | /* firmware < 2.x only supports fixed(!) non-ISO */ | ||
890 | dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO; | ||
891 | } | ||
865 | 892 | ||
866 | /* tell the hardware the can driver is running */ | 893 | /* tell the hardware the can driver is running */ |
867 | err = pcan_usb_fd_drv_loaded(dev, 1); | 894 | err = pcan_usb_fd_drv_loaded(dev, 1); |
@@ -879,6 +906,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) | |||
879 | 906 | ||
880 | pdev->usb_if = ppdev->usb_if; | 907 | pdev->usb_if = ppdev->usb_if; |
881 | pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; | 908 | pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; |
909 | |||
910 | /* do a copy of the ctrlmode[_supported] too */ | ||
911 | dev->can.ctrlmode = ppdev->dev.can.ctrlmode; | ||
912 | dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; | ||
882 | } | 913 | } |
883 | 914 | ||
884 | pdev->usb_if->dev[dev->ctrl_idx] = dev; | 915 | pdev->usb_if->dev[dev->ctrl_idx] = dev; |
@@ -933,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev) | |||
933 | if (dev->ctrl_idx == 0) { | 964 | if (dev->ctrl_idx == 0) { |
934 | /* turn off calibration message if any device were opened */ | 965 | /* turn off calibration message if any device were opened */ |
935 | if (pdev->usb_if->dev_opened_count > 0) | 966 | if (pdev->usb_if->dev_opened_count > 0) |
936 | pcan_usb_fd_set_filter_ext(dev, 0, | 967 | pcan_usb_fd_set_options(dev, 0, |
937 | PUCAN_FLTEXT_ERROR, | 968 | PUCAN_OPTION_ERROR, |
938 | PCAN_UFD_FLTEXT_CALIBRATION); | 969 | PCAN_UFD_FLTEXT_CALIBRATION); |
939 | 970 | ||
940 | /* tell USB adapter that the driver is being unloaded */ | 971 | /* tell USB adapter that the driver is being unloaded */ |
941 | pcan_usb_fd_drv_loaded(dev, 0); | 972 | pcan_usb_fd_drv_loaded(dev, 0); |
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index ee9f650d5026..7b7053d3c5fa 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h | |||
@@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ | |||
105 | { \ | 105 | { \ |
106 | u32 indir, dir; \ | 106 | u32 indir, dir; \ |
107 | spin_lock(&priv->indir_lock); \ | 107 | spin_lock(&priv->indir_lock); \ |
108 | indir = reg_readl(priv, REG_DIR_DATA_READ); \ | ||
109 | dir = __raw_readl(priv->name + off); \ | 108 | dir = __raw_readl(priv->name + off); \ |
109 | indir = reg_readl(priv, REG_DIR_DATA_READ); \ | ||
110 | spin_unlock(&priv->indir_lock); \ | 110 | spin_unlock(&priv->indir_lock); \ |
111 | return (u64)indir << 32 | dir; \ | 111 | return (u64)indir << 32 | dir; \ |
112 | } \ | 112 | } \ |
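
The bcm_sf2 hunk above swaps the order of the two 32-bit reads that make up a 64-bit register value; the indirect upper word is assumed to be latched by the access to the direct lower word, so the lower word must be read first and both reads must stay under the same lock. A generic sketch with made-up register names:

#include <linux/io.h>
#include <linux/spinlock.h>

static u64 demo_read64(void __iomem *lo_reg, void __iomem *hi_reg,
                       spinlock_t *lock)
{
    u32 lo, hi;

    spin_lock(lock);
    lo = readl(lo_reg);     /* read the low word first; latches the high word */
    hi = readl(hi_reg);     /* read the latched high word */
    spin_unlock(lock);

    return (u64)hi << 32 | lo;
}
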
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 7769c05543f1..ec6eac1f8c95 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c | |||
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev) | |||
484 | link->open++; | 484 | link->open++; |
485 | 485 | ||
486 | info->link_status = 0x00; | 486 | info->link_status = 0x00; |
487 | init_timer(&info->watchdog); | 487 | setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); |
488 | info->watchdog.function = ei_watchdog; | 488 | mod_timer(&info->watchdog, jiffies + HZ); |
489 | info->watchdog.data = (u_long)dev; | ||
490 | info->watchdog.expires = jiffies + HZ; | ||
491 | add_timer(&info->watchdog); | ||
492 | 489 | ||
493 | return ax_open(dev); | 490 | return ax_open(dev); |
494 | } /* axnet_open */ | 491 | } /* axnet_open */ |
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 9fb7b9d4fd6c..2777289a26c0 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c | |||
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev) | |||
918 | 918 | ||
919 | info->phy_id = info->eth_phy; | 919 | info->phy_id = info->eth_phy; |
920 | info->link_status = 0x00; | 920 | info->link_status = 0x00; |
921 | init_timer(&info->watchdog); | 921 | setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); |
922 | info->watchdog.function = ei_watchdog; | 922 | mod_timer(&info->watchdog, jiffies + HZ); |
923 | info->watchdog.data = (u_long)dev; | ||
924 | info->watchdog.expires = jiffies + HZ; | ||
925 | add_timer(&info->watchdog); | ||
926 | 923 | ||
927 | return ei_open(dev); | 924 | return ei_open(dev); |
928 | } /* pcnet_open */ | 925 | } /* pcnet_open */ |
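Both 8390 PCMCIA drivers above make the same conversion from the four-step init_timer()/add_timer() sequence to setup_timer() plus mod_timer(). A self-contained sketch of the pattern, using illustrative my_priv/my_watchdog names rather than the drivers' own:

/* Sketch of the timer conversion, not the drivers' actual code. */
struct my_priv {
	struct timer_list watchdog;
};

static void my_watchdog(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct my_priv *priv = netdev_priv(dev);

	/* ... poll link state here ... */

	mod_timer(&priv->watchdog, jiffies + HZ);	/* re-arm in one second */
}

static int my_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* setup_timer() bundles init_timer() with the .function and .data
	 * assignments; mod_timer() sets .expires and arms the timer,
	 * replacing the explicit add_timer(). */
	setup_timer(&priv->watchdog, my_watchdog, (unsigned long)dev);
	mod_timer(&priv->watchdog, jiffies + HZ);
	return 0;
}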
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 760c72c6e2ac..6725dc00750b 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c | |||
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit) | |||
376 | u16 pktlength; | 376 | u16 pktlength; |
377 | u16 pktstatus; | 377 | u16 pktstatus; |
378 | 378 | ||
379 | while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) { | 379 | while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) && |
380 | (count < limit)) { | ||
380 | pktstatus = rxstatus >> 16; | 381 | pktstatus = rxstatus >> 16; |
381 | pktlength = rxstatus & 0xffff; | 382 | pktlength = rxstatus & 0xffff; |
382 | 383 | ||
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget) | |||
491 | struct altera_tse_private *priv = | 492 | struct altera_tse_private *priv = |
492 | container_of(napi, struct altera_tse_private, napi); | 493 | container_of(napi, struct altera_tse_private, napi); |
493 | int rxcomplete = 0; | 494 | int rxcomplete = 0; |
494 | int txcomplete = 0; | ||
495 | unsigned long int flags; | 495 | unsigned long int flags; |
496 | 496 | ||
497 | txcomplete = tse_tx_complete(priv); | 497 | tse_tx_complete(priv); |
498 | 498 | ||
499 | rxcomplete = tse_rx(priv, budget); | 499 | rxcomplete = tse_rx(priv, budget); |
500 | 500 | ||
501 | if (rxcomplete >= budget || txcomplete > 0) | 501 | if (rxcomplete < budget) { |
502 | return rxcomplete; | ||
503 | 502 | ||
504 | napi_gro_flush(napi, false); | 503 | napi_gro_flush(napi, false); |
505 | __napi_complete(napi); | 504 | __napi_complete(napi); |
506 | 505 | ||
507 | netdev_dbg(priv->dev, | 506 | netdev_dbg(priv->dev, |
508 | "NAPI Complete, did %d packets with budget %d\n", | 507 | "NAPI Complete, did %d packets with budget %d\n", |
509 | txcomplete+rxcomplete, budget); | 508 | rxcomplete, budget); |
510 | 509 | ||
511 | spin_lock_irqsave(&priv->rxdma_irq_lock, flags); | 510 | spin_lock_irqsave(&priv->rxdma_irq_lock, flags); |
512 | priv->dmaops->enable_rxirq(priv); | 511 | priv->dmaops->enable_rxirq(priv); |
513 | priv->dmaops->enable_txirq(priv); | 512 | priv->dmaops->enable_txirq(priv); |
514 | spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); | 513 | spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); |
515 | return rxcomplete + txcomplete; | 514 | } |
515 | return rxcomplete; | ||
516 | } | 516 | } |
517 | 517 | ||
518 | /* DMA TX & RX FIFO interrupt routing | 518 | /* DMA TX & RX FIFO interrupt routing |
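The reworked tse_poll() above completes NAPI and re-enables the DMA interrupts only when fewer packets than the budget were received, and Tx completions no longer count toward the returned work. A minimal sketch of that completion rule, with placeholder names (my_poll, my_rx, the irq helper) rather than the driver's own symbols:

/* Sketch of the NAPI completion rule used above. */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = my_rx(priv, budget);	/* only Rx work counts toward budget */

	if (done < budget) {
		/* Ring drained: leave polling mode and let the hardware
		 * interrupt us again. */
		napi_complete(napi);
		my_enable_rx_tx_irqs(priv);
	}

	/* Returning 'budget' keeps the device on the NAPI poll list. */
	return done;
}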
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id) | |||
521 | { | 521 | { |
522 | struct net_device *dev = dev_id; | 522 | struct net_device *dev = dev_id; |
523 | struct altera_tse_private *priv; | 523 | struct altera_tse_private *priv; |
524 | unsigned long int flags; | ||
525 | 524 | ||
526 | if (unlikely(!dev)) { | 525 | if (unlikely(!dev)) { |
527 | pr_err("%s: invalid dev pointer\n", __func__); | 526 | pr_err("%s: invalid dev pointer\n", __func__); |
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id) | |||
529 | } | 528 | } |
530 | priv = netdev_priv(dev); | 529 | priv = netdev_priv(dev); |
531 | 530 | ||
532 | /* turn off desc irqs and enable napi rx */ | 531 | spin_lock(&priv->rxdma_irq_lock); |
533 | spin_lock_irqsave(&priv->rxdma_irq_lock, flags); | 532 | /* reset IRQs */ |
533 | priv->dmaops->clear_rxirq(priv); | ||
534 | priv->dmaops->clear_txirq(priv); | ||
535 | spin_unlock(&priv->rxdma_irq_lock); | ||
534 | 536 | ||
535 | if (likely(napi_schedule_prep(&priv->napi))) { | 537 | if (likely(napi_schedule_prep(&priv->napi))) { |
538 | spin_lock(&priv->rxdma_irq_lock); | ||
536 | priv->dmaops->disable_rxirq(priv); | 539 | priv->dmaops->disable_rxirq(priv); |
537 | priv->dmaops->disable_txirq(priv); | 540 | priv->dmaops->disable_txirq(priv); |
541 | spin_unlock(&priv->rxdma_irq_lock); | ||
538 | __napi_schedule(&priv->napi); | 542 | __napi_schedule(&priv->napi); |
539 | } | 543 | } |
540 | 544 | ||
541 | /* reset IRQs */ | ||
542 | priv->dmaops->clear_rxirq(priv); | ||
543 | priv->dmaops->clear_txirq(priv); | ||
544 | |||
545 | spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); | ||
546 | 545 | ||
547 | return IRQ_HANDLED; | 546 | return IRQ_HANDLED; |
548 | } | 547 | } |
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev) | |||
1399 | } | 1398 | } |
1400 | 1399 | ||
1401 | if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", | 1400 | if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", |
1402 | &priv->rx_fifo_depth)) { | 1401 | &priv->tx_fifo_depth)) { |
1403 | dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); | 1402 | dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); |
1404 | ret = -ENXIO; | 1403 | ret = -ENXIO; |
1405 | goto err_free_netdev; | 1404 | goto err_free_netdev; |
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 11d6e6561df1..15a8190a6f75 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c | |||
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1543 | { | 1543 | { |
1544 | struct pcnet32_private *lp; | 1544 | struct pcnet32_private *lp; |
1545 | int i, media; | 1545 | int i, media; |
1546 | int fdx, mii, fset, dxsuflo; | 1546 | int fdx, mii, fset, dxsuflo, sram; |
1547 | int chip_version; | 1547 | int chip_version; |
1548 | char *chipname; | 1548 | char *chipname; |
1549 | struct net_device *dev; | 1549 | struct net_device *dev; |
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | /* initialize variables */ | 1582 | /* initialize variables */ |
1583 | fdx = mii = fset = dxsuflo = 0; | 1583 | fdx = mii = fset = dxsuflo = sram = 0; |
1584 | chip_version = (chip_version >> 12) & 0xffff; | 1584 | chip_version = (chip_version >> 12) & 0xffff; |
1585 | 1585 | ||
1586 | switch (chip_version) { | 1586 | switch (chip_version) { |
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1613 | chipname = "PCnet/FAST III 79C973"; /* PCI */ | 1613 | chipname = "PCnet/FAST III 79C973"; /* PCI */ |
1614 | fdx = 1; | 1614 | fdx = 1; |
1615 | mii = 1; | 1615 | mii = 1; |
1616 | sram = 1; | ||
1616 | break; | 1617 | break; |
1617 | case 0x2626: | 1618 | case 0x2626: |
1618 | chipname = "PCnet/Home 79C978"; /* PCI */ | 1619 | chipname = "PCnet/Home 79C978"; /* PCI */ |
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1636 | chipname = "PCnet/FAST III 79C975"; /* PCI */ | 1637 | chipname = "PCnet/FAST III 79C975"; /* PCI */ |
1637 | fdx = 1; | 1638 | fdx = 1; |
1638 | mii = 1; | 1639 | mii = 1; |
1640 | sram = 1; | ||
1639 | break; | 1641 | break; |
1640 | case 0x2628: | 1642 | case 0x2628: |
1641 | chipname = "PCnet/PRO 79C976"; | 1643 | chipname = "PCnet/PRO 79C976"; |
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1664 | dxsuflo = 1; | 1666 | dxsuflo = 1; |
1665 | } | 1667 | } |
1666 | 1668 | ||
1669 | /* | ||
1670 | * The Am79C973/Am79C975 controllers come with 12K of SRAM | ||
1671 | * which we can use for the Tx/Rx buffers but most importantly, | ||
1672 | * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid | ||
1673 | * Tx fifo underflows. | ||
1674 | */ | ||
1675 | if (sram) { | ||
1676 | /* | ||
1677 | * The SRAM is being configured in two steps. First we | ||
1678 | * set the SRAM size in the BCR25:SRAM_SIZE bits. According | ||
1679 | * to the datasheet, each bit corresponds to a 512-byte | ||
1680 | * page so we can have at most 24 pages. The SRAM_SIZE | ||
1681 | * holds the value of the upper 8 bits of the 16-bit SRAM size. | ||
1682 | * The low 8 bits start at 0x00 and end at 0xff. So the | ||
1683 | * address range is from 0x0000 up to 0x17ff. Therefore, | ||
1684 | * the SRAM_SIZE is set to 0x17. The next step is to set | ||
1685 | * the BCR26:SRAM_BND midway through so the Tx and Rx | ||
1686 | * buffers can share the SRAM equally. | ||
1687 | */ | ||
1688 | a->write_bcr(ioaddr, 25, 0x17); | ||
1689 | a->write_bcr(ioaddr, 26, 0xc); | ||
1690 | /* And finally enable the NOUFLO bit */ | ||
1691 | a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11)); | ||
1692 | } | ||
1693 | |||
1667 | dev = alloc_etherdev(sizeof(*lp)); | 1694 | dev = alloc_etherdev(sizeof(*lp)); |
1668 | if (!dev) { | 1695 | if (!dev) { |
1669 | ret = -ENOMEM; | 1696 | ret = -ENOMEM; |
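The comment block added above walks through the BCR25/BCR26/BCR18 sequence that carves up the on-chip SRAM and enables NOUFLO. As a worked illustration, here is the same sequence collected into one helper; pcnet32_setup_sram() is a hypothetical name, and the accessor struct is simply the one the surrounding code already uses:

/* Illustrative helper mirroring the register writes in the patch above. */
static void pcnet32_setup_sram(const struct pcnet32_access *a,
			       unsigned long ioaddr)
{
	/* BCR25: SRAM_SIZE = 0x17, sized as described in the comment above */
	a->write_bcr(ioaddr, 25, 0x17);
	/* BCR26: SRAM_BND = 0xc puts the Tx/Rx boundary midway */
	a->write_bcr(ioaddr, 26, 0xc);
	/* BCR18 bit 11 (NOUFLO): transmit only once a full frame is
	 * buffered, so the Tx FIFO cannot underflow */
	a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
}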
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index b93d4404d975..885b02b5be07 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) | |||
609 | } | 609 | } |
610 | } | 610 | } |
611 | 611 | ||
612 | static int xgbe_request_irqs(struct xgbe_prv_data *pdata) | ||
613 | { | ||
614 | struct xgbe_channel *channel; | ||
615 | struct net_device *netdev = pdata->netdev; | ||
616 | unsigned int i; | ||
617 | int ret; | ||
618 | |||
619 | ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, | ||
620 | netdev->name, pdata); | ||
621 | if (ret) { | ||
622 | netdev_alert(netdev, "error requesting irq %d\n", | ||
623 | pdata->dev_irq); | ||
624 | return ret; | ||
625 | } | ||
626 | |||
627 | if (!pdata->per_channel_irq) | ||
628 | return 0; | ||
629 | |||
630 | channel = pdata->channel; | ||
631 | for (i = 0; i < pdata->channel_count; i++, channel++) { | ||
632 | snprintf(channel->dma_irq_name, | ||
633 | sizeof(channel->dma_irq_name) - 1, | ||
634 | "%s-TxRx-%u", netdev_name(netdev), | ||
635 | channel->queue_index); | ||
636 | |||
637 | ret = devm_request_irq(pdata->dev, channel->dma_irq, | ||
638 | xgbe_dma_isr, 0, | ||
639 | channel->dma_irq_name, channel); | ||
640 | if (ret) { | ||
641 | netdev_alert(netdev, "error requesting irq %d\n", | ||
642 | channel->dma_irq); | ||
643 | goto err_irq; | ||
644 | } | ||
645 | } | ||
646 | |||
647 | return 0; | ||
648 | |||
649 | err_irq: | ||
650 | /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ | ||
651 | for (i--, channel--; i < pdata->channel_count; i--, channel--) | ||
652 | devm_free_irq(pdata->dev, channel->dma_irq, channel); | ||
653 | |||
654 | devm_free_irq(pdata->dev, pdata->dev_irq, pdata); | ||
655 | |||
656 | return ret; | ||
657 | } | ||
658 | |||
659 | static void xgbe_free_irqs(struct xgbe_prv_data *pdata) | ||
660 | { | ||
661 | struct xgbe_channel *channel; | ||
662 | unsigned int i; | ||
663 | |||
664 | devm_free_irq(pdata->dev, pdata->dev_irq, pdata); | ||
665 | |||
666 | if (!pdata->per_channel_irq) | ||
667 | return; | ||
668 | |||
669 | channel = pdata->channel; | ||
670 | for (i = 0; i < pdata->channel_count; i++, channel++) | ||
671 | devm_free_irq(pdata->dev, channel->dma_irq, channel); | ||
672 | } | ||
673 | |||
612 | void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) | 674 | void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) |
613 | { | 675 | { |
614 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | 676 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller) | |||
810 | return -EINVAL; | 872 | return -EINVAL; |
811 | } | 873 | } |
812 | 874 | ||
813 | phy_stop(pdata->phydev); | ||
814 | |||
815 | spin_lock_irqsave(&pdata->lock, flags); | 875 | spin_lock_irqsave(&pdata->lock, flags); |
816 | 876 | ||
817 | if (caller == XGMAC_DRIVER_CONTEXT) | 877 | if (caller == XGMAC_DRIVER_CONTEXT) |
818 | netif_device_detach(netdev); | 878 | netif_device_detach(netdev); |
819 | 879 | ||
820 | netif_tx_stop_all_queues(netdev); | 880 | netif_tx_stop_all_queues(netdev); |
821 | xgbe_napi_disable(pdata, 0); | ||
822 | 881 | ||
823 | /* Powerdown Tx/Rx */ | ||
824 | hw_if->powerdown_tx(pdata); | 882 | hw_if->powerdown_tx(pdata); |
825 | hw_if->powerdown_rx(pdata); | 883 | hw_if->powerdown_rx(pdata); |
826 | 884 | ||
885 | xgbe_napi_disable(pdata, 0); | ||
886 | |||
887 | phy_stop(pdata->phydev); | ||
888 | |||
827 | pdata->power_down = 1; | 889 | pdata->power_down = 1; |
828 | 890 | ||
829 | spin_unlock_irqrestore(&pdata->lock, flags); | 891 | spin_unlock_irqrestore(&pdata->lock, flags); |
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller) | |||
854 | 916 | ||
855 | phy_start(pdata->phydev); | 917 | phy_start(pdata->phydev); |
856 | 918 | ||
857 | /* Enable Tx/Rx */ | 919 | xgbe_napi_enable(pdata, 0); |
920 | |||
858 | hw_if->powerup_tx(pdata); | 921 | hw_if->powerup_tx(pdata); |
859 | hw_if->powerup_rx(pdata); | 922 | hw_if->powerup_rx(pdata); |
860 | 923 | ||
861 | if (caller == XGMAC_DRIVER_CONTEXT) | 924 | if (caller == XGMAC_DRIVER_CONTEXT) |
862 | netif_device_attach(netdev); | 925 | netif_device_attach(netdev); |
863 | 926 | ||
864 | xgbe_napi_enable(pdata, 0); | ||
865 | netif_tx_start_all_queues(netdev); | 927 | netif_tx_start_all_queues(netdev); |
866 | 928 | ||
867 | spin_unlock_irqrestore(&pdata->lock, flags); | 929 | spin_unlock_irqrestore(&pdata->lock, flags); |
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata) | |||
875 | { | 937 | { |
876 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | 938 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
877 | struct net_device *netdev = pdata->netdev; | 939 | struct net_device *netdev = pdata->netdev; |
940 | int ret; | ||
878 | 941 | ||
879 | DBGPR("-->xgbe_start\n"); | 942 | DBGPR("-->xgbe_start\n"); |
880 | 943 | ||
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata) | |||
884 | 947 | ||
885 | phy_start(pdata->phydev); | 948 | phy_start(pdata->phydev); |
886 | 949 | ||
950 | xgbe_napi_enable(pdata, 1); | ||
951 | |||
952 | ret = xgbe_request_irqs(pdata); | ||
953 | if (ret) | ||
954 | goto err_napi; | ||
955 | |||
887 | hw_if->enable_tx(pdata); | 956 | hw_if->enable_tx(pdata); |
888 | hw_if->enable_rx(pdata); | 957 | hw_if->enable_rx(pdata); |
889 | 958 | ||
890 | xgbe_init_tx_timers(pdata); | 959 | xgbe_init_tx_timers(pdata); |
891 | 960 | ||
892 | xgbe_napi_enable(pdata, 1); | ||
893 | netif_tx_start_all_queues(netdev); | 961 | netif_tx_start_all_queues(netdev); |
894 | 962 | ||
895 | DBGPR("<--xgbe_start\n"); | 963 | DBGPR("<--xgbe_start\n"); |
896 | 964 | ||
897 | return 0; | 965 | return 0; |
966 | |||
967 | err_napi: | ||
968 | xgbe_napi_disable(pdata, 1); | ||
969 | |||
970 | phy_stop(pdata->phydev); | ||
971 | |||
972 | hw_if->exit(pdata); | ||
973 | |||
974 | return ret; | ||
898 | } | 975 | } |
899 | 976 | ||
900 | static void xgbe_stop(struct xgbe_prv_data *pdata) | 977 | static void xgbe_stop(struct xgbe_prv_data *pdata) |
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) | |||
907 | 984 | ||
908 | DBGPR("-->xgbe_stop\n"); | 985 | DBGPR("-->xgbe_stop\n"); |
909 | 986 | ||
910 | phy_stop(pdata->phydev); | ||
911 | |||
912 | netif_tx_stop_all_queues(netdev); | 987 | netif_tx_stop_all_queues(netdev); |
913 | xgbe_napi_disable(pdata, 1); | ||
914 | 988 | ||
915 | xgbe_stop_tx_timers(pdata); | 989 | xgbe_stop_tx_timers(pdata); |
916 | 990 | ||
917 | hw_if->disable_tx(pdata); | 991 | hw_if->disable_tx(pdata); |
918 | hw_if->disable_rx(pdata); | 992 | hw_if->disable_rx(pdata); |
919 | 993 | ||
994 | xgbe_free_irqs(pdata); | ||
995 | |||
996 | xgbe_napi_disable(pdata, 1); | ||
997 | |||
998 | phy_stop(pdata->phydev); | ||
999 | |||
1000 | hw_if->exit(pdata); | ||
1001 | |||
920 | channel = pdata->channel; | 1002 | channel = pdata->channel; |
921 | for (i = 0; i < pdata->channel_count; i++, channel++) { | 1003 | for (i = 0; i < pdata->channel_count; i++, channel++) { |
922 | if (!channel->tx_ring) | 1004 | if (!channel->tx_ring) |
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) | |||
931 | 1013 | ||
932 | static void xgbe_restart_dev(struct xgbe_prv_data *pdata) | 1014 | static void xgbe_restart_dev(struct xgbe_prv_data *pdata) |
933 | { | 1015 | { |
934 | struct xgbe_channel *channel; | ||
935 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | ||
936 | unsigned int i; | ||
937 | |||
938 | DBGPR("-->xgbe_restart_dev\n"); | 1016 | DBGPR("-->xgbe_restart_dev\n"); |
939 | 1017 | ||
940 | /* If not running, "restart" will happen on open */ | 1018 | /* If not running, "restart" will happen on open */ |
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata) | |||
942 | return; | 1020 | return; |
943 | 1021 | ||
944 | xgbe_stop(pdata); | 1022 | xgbe_stop(pdata); |
945 | synchronize_irq(pdata->dev_irq); | ||
946 | if (pdata->per_channel_irq) { | ||
947 | channel = pdata->channel; | ||
948 | for (i = 0; i < pdata->channel_count; i++, channel++) | ||
949 | synchronize_irq(channel->dma_irq); | ||
950 | } | ||
951 | 1023 | ||
952 | xgbe_free_tx_data(pdata); | 1024 | xgbe_free_tx_data(pdata); |
953 | xgbe_free_rx_data(pdata); | 1025 | xgbe_free_rx_data(pdata); |
954 | 1026 | ||
955 | /* Issue software reset to device */ | ||
956 | hw_if->exit(pdata); | ||
957 | |||
958 | xgbe_start(pdata); | 1027 | xgbe_start(pdata); |
959 | 1028 | ||
960 | DBGPR("<--xgbe_restart_dev\n"); | 1029 | DBGPR("<--xgbe_restart_dev\n"); |
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, | |||
1283 | static int xgbe_open(struct net_device *netdev) | 1352 | static int xgbe_open(struct net_device *netdev) |
1284 | { | 1353 | { |
1285 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | 1354 | struct xgbe_prv_data *pdata = netdev_priv(netdev); |
1286 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | ||
1287 | struct xgbe_desc_if *desc_if = &pdata->desc_if; | 1355 | struct xgbe_desc_if *desc_if = &pdata->desc_if; |
1288 | struct xgbe_channel *channel = NULL; | ||
1289 | unsigned int i = 0; | ||
1290 | int ret; | 1356 | int ret; |
1291 | 1357 | ||
1292 | DBGPR("-->xgbe_open\n"); | 1358 | DBGPR("-->xgbe_open\n"); |
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev) | |||
1329 | INIT_WORK(&pdata->restart_work, xgbe_restart); | 1395 | INIT_WORK(&pdata->restart_work, xgbe_restart); |
1330 | INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); | 1396 | INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); |
1331 | 1397 | ||
1332 | /* Request interrupts */ | ||
1333 | ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, | ||
1334 | netdev->name, pdata); | ||
1335 | if (ret) { | ||
1336 | netdev_alert(netdev, "error requesting irq %d\n", | ||
1337 | pdata->dev_irq); | ||
1338 | goto err_rings; | ||
1339 | } | ||
1340 | |||
1341 | if (pdata->per_channel_irq) { | ||
1342 | channel = pdata->channel; | ||
1343 | for (i = 0; i < pdata->channel_count; i++, channel++) { | ||
1344 | snprintf(channel->dma_irq_name, | ||
1345 | sizeof(channel->dma_irq_name) - 1, | ||
1346 | "%s-TxRx-%u", netdev_name(netdev), | ||
1347 | channel->queue_index); | ||
1348 | |||
1349 | ret = devm_request_irq(pdata->dev, channel->dma_irq, | ||
1350 | xgbe_dma_isr, 0, | ||
1351 | channel->dma_irq_name, channel); | ||
1352 | if (ret) { | ||
1353 | netdev_alert(netdev, | ||
1354 | "error requesting irq %d\n", | ||
1355 | channel->dma_irq); | ||
1356 | goto err_irq; | ||
1357 | } | ||
1358 | } | ||
1359 | } | ||
1360 | |||
1361 | ret = xgbe_start(pdata); | 1398 | ret = xgbe_start(pdata); |
1362 | if (ret) | 1399 | if (ret) |
1363 | goto err_start; | 1400 | goto err_rings; |
1364 | 1401 | ||
1365 | DBGPR("<--xgbe_open\n"); | 1402 | DBGPR("<--xgbe_open\n"); |
1366 | 1403 | ||
1367 | return 0; | 1404 | return 0; |
1368 | 1405 | ||
1369 | err_start: | ||
1370 | hw_if->exit(pdata); | ||
1371 | |||
1372 | err_irq: | ||
1373 | if (pdata->per_channel_irq) { | ||
1374 | /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ | ||
1375 | for (i--, channel--; i < pdata->channel_count; i--, channel--) | ||
1376 | devm_free_irq(pdata->dev, channel->dma_irq, channel); | ||
1377 | } | ||
1378 | |||
1379 | devm_free_irq(pdata->dev, pdata->dev_irq, pdata); | ||
1380 | |||
1381 | err_rings: | 1406 | err_rings: |
1382 | desc_if->free_ring_resources(pdata); | 1407 | desc_if->free_ring_resources(pdata); |
1383 | 1408 | ||
@@ -1399,30 +1424,16 @@ err_phy_init: | |||
1399 | static int xgbe_close(struct net_device *netdev) | 1424 | static int xgbe_close(struct net_device *netdev) |
1400 | { | 1425 | { |
1401 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | 1426 | struct xgbe_prv_data *pdata = netdev_priv(netdev); |
1402 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | ||
1403 | struct xgbe_desc_if *desc_if = &pdata->desc_if; | 1427 | struct xgbe_desc_if *desc_if = &pdata->desc_if; |
1404 | struct xgbe_channel *channel; | ||
1405 | unsigned int i; | ||
1406 | 1428 | ||
1407 | DBGPR("-->xgbe_close\n"); | 1429 | DBGPR("-->xgbe_close\n"); |
1408 | 1430 | ||
1409 | /* Stop the device */ | 1431 | /* Stop the device */ |
1410 | xgbe_stop(pdata); | 1432 | xgbe_stop(pdata); |
1411 | 1433 | ||
1412 | /* Issue software reset to device */ | ||
1413 | hw_if->exit(pdata); | ||
1414 | |||
1415 | /* Free the ring descriptors and buffers */ | 1434 | /* Free the ring descriptors and buffers */ |
1416 | desc_if->free_ring_resources(pdata); | 1435 | desc_if->free_ring_resources(pdata); |
1417 | 1436 | ||
1418 | /* Release the interrupts */ | ||
1419 | devm_free_irq(pdata->dev, pdata->dev_irq, pdata); | ||
1420 | if (pdata->per_channel_irq) { | ||
1421 | channel = pdata->channel; | ||
1422 | for (i = 0; i < pdata->channel_count; i++, channel++) | ||
1423 | devm_free_irq(pdata->dev, channel->dma_irq, channel); | ||
1424 | } | ||
1425 | |||
1426 | /* Free the channel and ring structures */ | 1437 | /* Free the channel and ring structures */ |
1427 | xgbe_free_channels(pdata); | 1438 | xgbe_free_channels(pdata); |
1428 | 1439 | ||
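xgbe_request_irqs() above unwinds partially requested per-channel interrupts by walking backwards and relying on the unsigned index wrapping past zero to terminate the loop. A generic sketch of that request-all-or-free-all idiom, with placeholder names (my_request_all, irqs[], nr) that are not part of the driver:

/* Sketch of the reverse-unwind idiom used by xgbe_request_irqs(). */
static int my_request_all(struct device *dev, unsigned int *irqs,
			  unsigned int nr, irq_handler_t handler, void *ctx)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = devm_request_irq(dev, irqs[i], handler, 0, "my-irq", ctx);
		if (ret)
			goto err;
	}
	return 0;

err:
	/* 'i' is unsigned, so after freeing irqs[0] the decrement wraps
	 * to UINT_MAX and the loop condition ends the walk. */
	for (i--; i < nr; i--)
		devm_free_irq(dev, irqs[i], ctx);
	return ret;
}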
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 869d97fcf781..b927021c6c40 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) | |||
593 | if (!xgene_ring_mgr_init(pdata)) | 593 | if (!xgene_ring_mgr_init(pdata)) |
594 | return -ENODEV; | 594 | return -ENODEV; |
595 | 595 | ||
596 | if (!efi_enabled(EFI_BOOT)) { | 596 | if (pdata->clk) { |
597 | clk_prepare_enable(pdata->clk); | 597 | clk_prepare_enable(pdata->clk); |
598 | clk_disable_unprepare(pdata->clk); | 598 | clk_disable_unprepare(pdata->clk); |
599 | clk_prepare_enable(pdata->clk); | 599 | clk_prepare_enable(pdata->clk); |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 4de62b210c85..635a83be7e5e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev) | |||
1025 | #ifdef CONFIG_ACPI | 1025 | #ifdef CONFIG_ACPI |
1026 | static const struct acpi_device_id xgene_enet_acpi_match[] = { | 1026 | static const struct acpi_device_id xgene_enet_acpi_match[] = { |
1027 | { "APMC0D05", }, | 1027 | { "APMC0D05", }, |
1028 | { "APMC0D30", }, | ||
1029 | { "APMC0D31", }, | ||
1028 | { } | 1030 | { } |
1029 | }; | 1031 | }; |
1030 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); | 1032 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); |
@@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); | |||
1033 | #ifdef CONFIG_OF | 1035 | #ifdef CONFIG_OF |
1034 | static struct of_device_id xgene_enet_of_match[] = { | 1036 | static struct of_device_id xgene_enet_of_match[] = { |
1035 | {.compatible = "apm,xgene-enet",}, | 1037 | {.compatible = "apm,xgene-enet",}, |
1038 | {.compatible = "apm,xgene1-sgenet",}, | ||
1039 | {.compatible = "apm,xgene1-xgenet",}, | ||
1036 | {}, | 1040 | {}, |
1037 | }; | 1041 | }; |
1038 | 1042 | ||
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 21206d33b638..a7f2cc3e485e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c | |||
@@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) | |||
486 | { | 486 | { |
487 | struct bcm_enet_priv *priv; | 487 | struct bcm_enet_priv *priv; |
488 | struct net_device *dev; | 488 | struct net_device *dev; |
489 | int tx_work_done, rx_work_done; | 489 | int rx_work_done; |
490 | 490 | ||
491 | priv = container_of(napi, struct bcm_enet_priv, napi); | 491 | priv = container_of(napi, struct bcm_enet_priv, napi); |
492 | dev = priv->net_dev; | 492 | dev = priv->net_dev; |
@@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) | |||
498 | ENETDMAC_IR, priv->tx_chan); | 498 | ENETDMAC_IR, priv->tx_chan); |
499 | 499 | ||
500 | /* reclaim sent skb */ | 500 | /* reclaim sent skb */ |
501 | tx_work_done = bcm_enet_tx_reclaim(dev, 0); | 501 | bcm_enet_tx_reclaim(dev, 0); |
502 | 502 | ||
503 | spin_lock(&priv->rx_lock); | 503 | spin_lock(&priv->rx_lock); |
504 | rx_work_done = bcm_enet_receive_queue(dev, budget); | 504 | rx_work_done = bcm_enet_receive_queue(dev, budget); |
505 | spin_unlock(&priv->rx_lock); | 505 | spin_unlock(&priv->rx_lock); |
506 | 506 | ||
507 | if (rx_work_done >= budget || tx_work_done > 0) { | 507 | if (rx_work_done >= budget) { |
508 | /* rx/tx queue is not yet empty/clean */ | 508 | /* rx queue is not yet empty/clean */ |
509 | return rx_work_done; | 509 | return rx_work_done; |
510 | } | 510 | } |
511 | 511 | ||
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 5b308a4a4d0e..783543ad1fcf 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { | |||
274 | /* RBUF misc statistics */ | 274 | /* RBUF misc statistics */ |
275 | STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), | 275 | STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), |
276 | STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), | 276 | STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), |
277 | STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), | 277 | STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), |
278 | STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), | 278 | STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), |
279 | STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), | 279 | STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), |
280 | }; | 280 | }; |
281 | 281 | ||
282 | #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) | 282 | #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) |
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) | |||
345 | s = &bcm_sysport_gstrings_stats[i]; | 345 | s = &bcm_sysport_gstrings_stats[i]; |
346 | switch (s->type) { | 346 | switch (s->type) { |
347 | case BCM_SYSPORT_STAT_NETDEV: | 347 | case BCM_SYSPORT_STAT_NETDEV: |
348 | case BCM_SYSPORT_STAT_SOFT: | ||
348 | continue; | 349 | continue; |
349 | case BCM_SYSPORT_STAT_MIB_RX: | 350 | case BCM_SYSPORT_STAT_MIB_RX: |
350 | case BCM_SYSPORT_STAT_MIB_TX: | 351 | case BCM_SYSPORT_STAT_MIB_TX: |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index fc19417d82a5..7e3d87a88c76 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h | |||
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type { | |||
570 | BCM_SYSPORT_STAT_RUNT, | 570 | BCM_SYSPORT_STAT_RUNT, |
571 | BCM_SYSPORT_STAT_RXCHK, | 571 | BCM_SYSPORT_STAT_RXCHK, |
572 | BCM_SYSPORT_STAT_RBUF, | 572 | BCM_SYSPORT_STAT_RBUF, |
573 | BCM_SYSPORT_STAT_SOFT, | ||
573 | }; | 574 | }; |
574 | 575 | ||
575 | /* Macros to help define ethtool statistics */ | 576 | /* Macros to help define ethtool statistics */ |
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type { | |||
590 | #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) | 591 | #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) |
591 | #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) | 592 | #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) |
592 | #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) | 593 | #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) |
594 | #define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT) | ||
593 | 595 | ||
594 | #define STAT_RXCHK(str, m, ofs) { \ | 596 | #define STAT_RXCHK(str, m, ofs) { \ |
595 | .stat_string = str, \ | 597 | .stat_string = str, \ |
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 676ffe093180..0469f72c6e7e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
@@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, | |||
302 | slot->skb = skb; | 302 | slot->skb = skb; |
303 | slot->dma_addr = dma_addr; | 303 | slot->dma_addr = dma_addr; |
304 | 304 | ||
305 | if (slot->dma_addr & 0xC0000000) | ||
306 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
307 | |||
308 | return 0; | 305 | return 0; |
309 | } | 306 | } |
310 | 307 | ||
@@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
505 | ring->mmio_base); | 502 | ring->mmio_base); |
506 | goto err_dma_free; | 503 | goto err_dma_free; |
507 | } | 504 | } |
508 | if (ring->dma_base & 0xC0000000) | ||
509 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
510 | 505 | ||
511 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, | 506 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, |
512 | BGMAC_DMA_RING_TX); | 507 | BGMAC_DMA_RING_TX); |
@@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
536 | err = -ENOMEM; | 531 | err = -ENOMEM; |
537 | goto err_dma_free; | 532 | goto err_dma_free; |
538 | } | 533 | } |
539 | if (ring->dma_base & 0xC0000000) | ||
540 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
541 | 534 | ||
542 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, | 535 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, |
543 | BGMAC_DMA_RING_RX); | 536 | BGMAC_DMA_RING_RX); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 756053c028be..4085c4b31047 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -1811,7 +1811,7 @@ struct bnx2x { | |||
1811 | int stats_state; | 1811 | int stats_state; |
1812 | 1812 | ||
1813 | /* used for synchronization of concurrent threads statistics handling */ | 1813 | /* used for synchronization of concurrent threads statistics handling */ |
1814 | spinlock_t stats_lock; | 1814 | struct mutex stats_lock; |
1815 | 1815 | ||
1816 | /* used by dmae command loader */ | 1816 | /* used by dmae command loader */ |
1817 | struct dmae_command stats_dmae; | 1817 | struct dmae_command stats_dmae; |
@@ -1935,8 +1935,6 @@ struct bnx2x { | |||
1935 | 1935 | ||
1936 | int fp_array_size; | 1936 | int fp_array_size; |
1937 | u32 dump_preset_idx; | 1937 | u32 dump_preset_idx; |
1938 | bool stats_started; | ||
1939 | struct semaphore stats_sema; | ||
1940 | 1938 | ||
1941 | u8 phys_port_id[ETH_ALEN]; | 1939 | u8 phys_port_id[ETH_ALEN]; |
1942 | 1940 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..1ec635f54994 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -129,8 +129,8 @@ struct bnx2x_mac_vals { | |||
129 | u32 xmac_val; | 129 | u32 xmac_val; |
130 | u32 emac_addr; | 130 | u32 emac_addr; |
131 | u32 emac_val; | 131 | u32 emac_val; |
132 | u32 umac_addr; | 132 | u32 umac_addr[2]; |
133 | u32 umac_val; | 133 | u32 umac_val[2]; |
134 | u32 bmac_addr; | 134 | u32 bmac_addr; |
135 | u32 bmac_val[2]; | 135 | u32 bmac_val[2]; |
136 | }; | 136 | }; |
@@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp) | |||
7866 | return 0; | 7866 | return 0; |
7867 | } | 7867 | } |
7868 | 7868 | ||
7869 | /* previous driver DMAE transaction may have occurred when pre-boot stage ended | ||
7870 | * and boot began, or when kdump kernel was loaded. Either case would invalidate | ||
7871 | * the addresses of the transaction, resulting in was-error bit set in the pci | ||
7872 | * causing all hw-to-host pcie transactions to time out. If this happened we want | ||
7873 | * to clear the interrupt which detected this from the pglueb, as well as the | ||
7874 | * was-done bit. | ||
7875 | */ | ||
7876 | static void bnx2x_clean_pglue_errors(struct bnx2x *bp) | ||
7877 | { | ||
7878 | if (!CHIP_IS_E1x(bp)) | ||
7879 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, | ||
7880 | 1 << BP_ABS_FUNC(bp)); | ||
7881 | } | ||
7882 | |||
7869 | static int bnx2x_init_hw_func(struct bnx2x *bp) | 7883 | static int bnx2x_init_hw_func(struct bnx2x *bp) |
7870 | { | 7884 | { |
7871 | int port = BP_PORT(bp); | 7885 | int port = BP_PORT(bp); |
@@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
7958 | 7972 | ||
7959 | bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); | 7973 | bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); |
7960 | 7974 | ||
7961 | if (!CHIP_IS_E1x(bp)) | 7975 | bnx2x_clean_pglue_errors(bp); |
7962 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); | ||
7963 | 7976 | ||
7964 | bnx2x_init_block(bp, BLOCK_ATC, init_phase); | 7977 | bnx2x_init_block(bp, BLOCK_ATC, init_phase); |
7965 | bnx2x_init_block(bp, BLOCK_DMAE, init_phase); | 7978 | bnx2x_init_block(bp, BLOCK_DMAE, init_phase); |
@@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) | |||
10141 | return base + (BP_ABS_FUNC(bp)) * stride; | 10154 | return base + (BP_ABS_FUNC(bp)) * stride; |
10142 | } | 10155 | } |
10143 | 10156 | ||
10157 | static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp, | ||
10158 | u8 port, u32 reset_reg, | ||
10159 | struct bnx2x_mac_vals *vals) | ||
10160 | { | ||
10161 | u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; | ||
10162 | u32 base_addr; | ||
10163 | |||
10164 | if (!(mask & reset_reg)) | ||
10165 | return false; | ||
10166 | |||
10167 | BNX2X_DEV_INFO("Disable umac Rx %02x\n", port); | ||
10168 | base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; | ||
10169 | vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG; | ||
10170 | vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); | ||
10171 | REG_WR(bp, vals->umac_addr[port], 0); | ||
10172 | |||
10173 | return true; | ||
10174 | } | ||
10175 | |||
10144 | static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | 10176 | static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, |
10145 | struct bnx2x_mac_vals *vals) | 10177 | struct bnx2x_mac_vals *vals) |
10146 | { | 10178 | { |
@@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
10149 | u8 port = BP_PORT(bp); | 10181 | u8 port = BP_PORT(bp); |
10150 | 10182 | ||
10151 | /* reset addresses as they also mark which values were changed */ | 10183 | /* reset addresses as they also mark which values were changed */ |
10152 | vals->bmac_addr = 0; | 10184 | memset(vals, 0, sizeof(*vals)); |
10153 | vals->umac_addr = 0; | ||
10154 | vals->xmac_addr = 0; | ||
10155 | vals->emac_addr = 0; | ||
10156 | 10185 | ||
10157 | reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); | 10186 | reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); |
10158 | 10187 | ||
@@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, | |||
10201 | REG_WR(bp, vals->xmac_addr, 0); | 10230 | REG_WR(bp, vals->xmac_addr, 0); |
10202 | mac_stopped = true; | 10231 | mac_stopped = true; |
10203 | } | 10232 | } |
10204 | mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; | 10233 | |
10205 | if (mask & reset_reg) { | 10234 | mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0, |
10206 | BNX2X_DEV_INFO("Disable umac Rx\n"); | 10235 | reset_reg, vals); |
10207 | base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; | 10236 | mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1, |
10208 | vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; | 10237 | reset_reg, vals); |
10209 | vals->umac_val = REG_RD(bp, vals->umac_addr); | ||
10210 | REG_WR(bp, vals->umac_addr, 0); | ||
10211 | mac_stopped = true; | ||
10212 | } | ||
10213 | } | 10238 | } |
10214 | 10239 | ||
10215 | if (mac_stopped) | 10240 | if (mac_stopped) |
@@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
10505 | /* Close the MAC Rx to prevent BRB from filling up */ | 10530 | /* Close the MAC Rx to prevent BRB from filling up */ |
10506 | bnx2x_prev_unload_close_mac(bp, &mac_vals); | 10531 | bnx2x_prev_unload_close_mac(bp, &mac_vals); |
10507 | 10532 | ||
10508 | /* close LLH filters towards the BRB */ | 10533 | /* close LLH filters for both ports towards the BRB */ |
10534 | bnx2x_set_rx_filter(&bp->link_params, 0); | ||
10535 | bp->link_params.port ^= 1; | ||
10509 | bnx2x_set_rx_filter(&bp->link_params, 0); | 10536 | bnx2x_set_rx_filter(&bp->link_params, 0); |
10537 | bp->link_params.port ^= 1; | ||
10510 | 10538 | ||
10511 | /* Check if the UNDI driver was previously loaded */ | 10539 | /* Check if the UNDI driver was previously loaded */ |
10512 | if (bnx2x_prev_is_after_undi(bp)) { | 10540 | if (bnx2x_prev_is_after_undi(bp)) { |
@@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
10553 | 10581 | ||
10554 | if (mac_vals.xmac_addr) | 10582 | if (mac_vals.xmac_addr) |
10555 | REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); | 10583 | REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); |
10556 | if (mac_vals.umac_addr) | 10584 | if (mac_vals.umac_addr[0]) |
10557 | REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val); | 10585 | REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]); |
10586 | if (mac_vals.umac_addr[1]) | ||
10587 | REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]); | ||
10558 | if (mac_vals.emac_addr) | 10588 | if (mac_vals.emac_addr) |
10559 | REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); | 10589 | REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); |
10560 | if (mac_vals.bmac_addr) { | 10590 | if (mac_vals.bmac_addr) { |
@@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
10571 | return bnx2x_prev_mcp_done(bp); | 10601 | return bnx2x_prev_mcp_done(bp); |
10572 | } | 10602 | } |
10573 | 10603 | ||
10574 | /* previous driver DMAE transaction may have occurred when pre-boot stage ended | ||
10575 | * and boot began, or when kdump kernel was loaded. Either case would invalidate | ||
10576 | * the addresses of the transaction, resulting in was-error bit set in the pci | ||
10577 | * causing all hw-to-host pcie transactions to timeout. If this happened we want | ||
10578 | * to clear the interrupt which detected this from the pglueb and the was done | ||
10579 | * bit | ||
10580 | */ | ||
10581 | static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp) | ||
10582 | { | ||
10583 | if (!CHIP_IS_E1x(bp)) { | ||
10584 | u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); | ||
10585 | if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { | ||
10586 | DP(BNX2X_MSG_SP, | ||
10587 | "'was error' bit was found to be set in pglueb upon startup. Clearing\n"); | ||
10588 | REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, | ||
10589 | 1 << BP_FUNC(bp)); | ||
10590 | } | ||
10591 | } | ||
10592 | } | ||
10593 | |||
10594 | static int bnx2x_prev_unload(struct bnx2x *bp) | 10604 | static int bnx2x_prev_unload(struct bnx2x *bp) |
10595 | { | 10605 | { |
10596 | int time_counter = 10; | 10606 | int time_counter = 10; |
@@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) | |||
10600 | /* clear hw from errors which may have resulted from an interrupted | 10610 | /* clear hw from errors which may have resulted from an interrupted |
10601 | * dmae transaction. | 10611 | * dmae transaction. |
10602 | */ | 10612 | */ |
10603 | bnx2x_prev_interrupted_dmae(bp); | 10613 | bnx2x_clean_pglue_errors(bp); |
10604 | 10614 | ||
10605 | /* Release previously held locks */ | 10615 | /* Release previously held locks */ |
10606 | hw_lock_reg = (BP_FUNC(bp) <= 5) ? | 10616 | hw_lock_reg = (BP_FUNC(bp) <= 5) ? |
@@ -12037,9 +12047,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) | |||
12037 | mutex_init(&bp->port.phy_mutex); | 12047 | mutex_init(&bp->port.phy_mutex); |
12038 | mutex_init(&bp->fw_mb_mutex); | 12048 | mutex_init(&bp->fw_mb_mutex); |
12039 | mutex_init(&bp->drv_info_mutex); | 12049 | mutex_init(&bp->drv_info_mutex); |
12050 | mutex_init(&bp->stats_lock); | ||
12040 | bp->drv_info_mng_owner = false; | 12051 | bp->drv_info_mng_owner = false; |
12041 | spin_lock_init(&bp->stats_lock); | ||
12042 | sema_init(&bp->stats_sema, 1); | ||
12043 | 12052 | ||
12044 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 12053 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
12045 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); | 12054 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); |
@@ -12722,6 +12731,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, | |||
12722 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | 12731 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, |
12723 | PCICFG_VENDOR_ID_OFFSET); | 12732 | PCICFG_VENDOR_ID_OFFSET); |
12724 | 12733 | ||
12734 | /* Set PCIe reset type to fundamental for EEH recovery */ | ||
12735 | pdev->needs_freset = 1; | ||
12736 | |||
12725 | /* AER (Advanced Error reporting) configuration */ | 12737 | /* AER (Advanced Error reporting) configuration */ |
12726 | rc = pci_enable_pcie_error_reporting(pdev); | 12738 | rc = pci_enable_pcie_error_reporting(pdev); |
12727 | if (!rc) | 12739 | if (!rc) |
@@ -12766,7 +12778,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, | |||
12766 | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | | 12778 | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | |
12767 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | | 12779 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | |
12768 | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; | 12780 | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; |
12769 | if (!CHIP_IS_E1x(bp)) { | 12781 | if (!chip_is_e1x) { |
12770 | dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | | 12782 | dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | |
12771 | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; | 12783 | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; |
12772 | dev->hw_enc_features = | 12784 | dev->hw_enc_features = |
@@ -13665,9 +13677,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
13665 | cancel_delayed_work_sync(&bp->sp_task); | 13677 | cancel_delayed_work_sync(&bp->sp_task); |
13666 | cancel_delayed_work_sync(&bp->period_task); | 13678 | cancel_delayed_work_sync(&bp->period_task); |
13667 | 13679 | ||
13668 | spin_lock_bh(&bp->stats_lock); | 13680 | mutex_lock(&bp->stats_lock); |
13669 | bp->stats_state = STATS_STATE_DISABLED; | 13681 | bp->stats_state = STATS_STATE_DISABLED; |
13670 | spin_unlock_bh(&bp->stats_lock); | 13682 | mutex_unlock(&bp->stats_lock); |
13671 | 13683 | ||
13672 | bnx2x_save_statistics(bp); | 13684 | bnx2x_save_statistics(bp); |
13673 | 13685 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index e5aca2de1871..cfe3c7695455 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
2238 | 2238 | ||
2239 | cookie.vf = vf; | 2239 | cookie.vf = vf; |
2240 | cookie.state = VF_ACQUIRED; | 2240 | cookie.state = VF_ACQUIRED; |
2241 | bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); | 2241 | rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); |
2242 | if (rc) | ||
2243 | goto op_err; | ||
2242 | } | 2244 | } |
2243 | 2245 | ||
2244 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); | 2246 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index d1608297c773..800ab44a07ce 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | |||
@@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp) | |||
123 | */ | 123 | */ |
124 | static void bnx2x_storm_stats_post(struct bnx2x *bp) | 124 | static void bnx2x_storm_stats_post(struct bnx2x *bp) |
125 | { | 125 | { |
126 | if (!bp->stats_pending) { | 126 | int rc; |
127 | int rc; | ||
128 | 127 | ||
129 | spin_lock_bh(&bp->stats_lock); | 128 | if (bp->stats_pending) |
130 | 129 | return; | |
131 | if (bp->stats_pending) { | ||
132 | spin_unlock_bh(&bp->stats_lock); | ||
133 | return; | ||
134 | } | ||
135 | |||
136 | bp->fw_stats_req->hdr.drv_stats_counter = | ||
137 | cpu_to_le16(bp->stats_counter++); | ||
138 | 130 | ||
139 | DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", | 131 | bp->fw_stats_req->hdr.drv_stats_counter = |
140 | le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); | 132 | cpu_to_le16(bp->stats_counter++); |
141 | 133 | ||
142 | /* adjust the ramrod to include VF queues statistics */ | 134 | DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", |
143 | bnx2x_iov_adjust_stats_req(bp); | 135 | le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); |
144 | bnx2x_dp_stats(bp); | ||
145 | 136 | ||
146 | /* send FW stats ramrod */ | 137 | /* adjust the ramrod to include VF queues statistics */ |
147 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, | 138 | bnx2x_iov_adjust_stats_req(bp); |
148 | U64_HI(bp->fw_stats_req_mapping), | 139 | bnx2x_dp_stats(bp); |
149 | U64_LO(bp->fw_stats_req_mapping), | ||
150 | NONE_CONNECTION_TYPE); | ||
151 | if (rc == 0) | ||
152 | bp->stats_pending = 1; | ||
153 | 140 | ||
154 | spin_unlock_bh(&bp->stats_lock); | 141 | /* send FW stats ramrod */ |
155 | } | 142 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, |
143 | U64_HI(bp->fw_stats_req_mapping), | ||
144 | U64_LO(bp->fw_stats_req_mapping), | ||
145 | NONE_CONNECTION_TYPE); | ||
146 | if (rc == 0) | ||
147 | bp->stats_pending = 1; | ||
156 | } | 148 | } |
157 | 149 | ||
158 | static void bnx2x_hw_stats_post(struct bnx2x *bp) | 150 | static void bnx2x_hw_stats_post(struct bnx2x *bp) |
@@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp) | |||
221 | */ | 213 | */ |
222 | 214 | ||
223 | /* should be called under stats_sema */ | 215 | /* should be called under stats_sema */ |
224 | static void __bnx2x_stats_pmf_update(struct bnx2x *bp) | 216 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) |
225 | { | 217 | { |
226 | struct dmae_command *dmae; | 218 | struct dmae_command *dmae; |
227 | u32 opcode; | 219 | u32 opcode; |
@@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) | |||
519 | } | 511 | } |
520 | 512 | ||
521 | /* should be called under stats_sema */ | 513 | /* should be called under stats_sema */ |
522 | static void __bnx2x_stats_start(struct bnx2x *bp) | 514 | static void bnx2x_stats_start(struct bnx2x *bp) |
523 | { | 515 | { |
524 | if (IS_PF(bp)) { | 516 | if (IS_PF(bp)) { |
525 | if (bp->port.pmf) | 517 | if (bp->port.pmf) |
@@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp) | |||
531 | bnx2x_hw_stats_post(bp); | 523 | bnx2x_hw_stats_post(bp); |
532 | bnx2x_storm_stats_post(bp); | 524 | bnx2x_storm_stats_post(bp); |
533 | } | 525 | } |
534 | |||
535 | bp->stats_started = true; | ||
536 | } | ||
537 | |||
538 | static void bnx2x_stats_start(struct bnx2x *bp) | ||
539 | { | ||
540 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
541 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
542 | __bnx2x_stats_start(bp); | ||
543 | up(&bp->stats_sema); | ||
544 | } | 526 | } |
545 | 527 | ||
546 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) | 528 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) |
547 | { | 529 | { |
548 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
549 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
550 | bnx2x_stats_comp(bp); | 530 | bnx2x_stats_comp(bp); |
551 | __bnx2x_stats_pmf_update(bp); | 531 | bnx2x_stats_pmf_update(bp); |
552 | __bnx2x_stats_start(bp); | 532 | bnx2x_stats_start(bp); |
553 | up(&bp->stats_sema); | ||
554 | } | ||
555 | |||
556 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
557 | { | ||
558 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
559 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
560 | __bnx2x_stats_pmf_update(bp); | ||
561 | up(&bp->stats_sema); | ||
562 | } | 533 | } |
563 | 534 | ||
564 | static void bnx2x_stats_restart(struct bnx2x *bp) | 535 | static void bnx2x_stats_restart(struct bnx2x *bp) |
@@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp) | |||
568 | */ | 539 | */ |
569 | if (IS_VF(bp)) | 540 | if (IS_VF(bp)) |
570 | return; | 541 | return; |
571 | if (down_timeout(&bp->stats_sema, HZ/10)) | 542 | |
572 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
573 | bnx2x_stats_comp(bp); | 543 | bnx2x_stats_comp(bp); |
574 | __bnx2x_stats_start(bp); | 544 | bnx2x_stats_start(bp); |
575 | up(&bp->stats_sema); | ||
576 | } | 545 | } |
577 | 546 | ||
578 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) | 547 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) |
@@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1246 | { | 1215 | { |
1247 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | 1216 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); |
1248 | 1217 | ||
1249 | /* we run update from timer context, so give up | 1218 | if (bnx2x_edebug_stats_stopped(bp)) |
1250 | * if somebody is in the middle of transition | ||
1251 | */ | ||
1252 | if (down_trylock(&bp->stats_sema)) | ||
1253 | return; | 1219 | return; |
1254 | 1220 | ||
1255 | if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) | ||
1256 | goto out; | ||
1257 | |||
1258 | if (IS_PF(bp)) { | 1221 | if (IS_PF(bp)) { |
1259 | if (*stats_comp != DMAE_COMP_VAL) | 1222 | if (*stats_comp != DMAE_COMP_VAL) |
1260 | goto out; | 1223 | return; |
1261 | 1224 | ||
1262 | if (bp->port.pmf) | 1225 | if (bp->port.pmf) |
1263 | bnx2x_hw_stats_update(bp); | 1226 | bnx2x_hw_stats_update(bp); |
@@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1267 | BNX2X_ERR("storm stats were not updated for 3 times\n"); | 1230 | BNX2X_ERR("storm stats were not updated for 3 times\n"); |
1268 | bnx2x_panic(); | 1231 | bnx2x_panic(); |
1269 | } | 1232 | } |
1270 | goto out; | 1233 | return; |
1271 | } | 1234 | } |
1272 | } else { | 1235 | } else { |
1273 | /* vf doesn't collect HW statistics, and doesn't get completions | 1236 | /* vf doesn't collect HW statistics, and doesn't get completions |
@@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1281 | 1244 | ||
1282 | /* vf is done */ | 1245 | /* vf is done */ |
1283 | if (IS_VF(bp)) | 1246 | if (IS_VF(bp)) |
1284 | goto out; | 1247 | return; |
1285 | 1248 | ||
1286 | if (netif_msg_timer(bp)) { | 1249 | if (netif_msg_timer(bp)) { |
1287 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 1250 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
@@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1292 | 1255 | ||
1293 | bnx2x_hw_stats_post(bp); | 1256 | bnx2x_hw_stats_post(bp); |
1294 | bnx2x_storm_stats_post(bp); | 1257 | bnx2x_storm_stats_post(bp); |
1295 | |||
1296 | out: | ||
1297 | up(&bp->stats_sema); | ||
1298 | } | 1258 | } |
1299 | 1259 | ||
1300 | static void bnx2x_port_stats_stop(struct bnx2x *bp) | 1260 | static void bnx2x_port_stats_stop(struct bnx2x *bp) |
@@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp) | |||
1358 | 1318 | ||
1359 | static void bnx2x_stats_stop(struct bnx2x *bp) | 1319 | static void bnx2x_stats_stop(struct bnx2x *bp) |
1360 | { | 1320 | { |
1361 | int update = 0; | 1321 | bool update = false; |
1362 | |||
1363 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
1364 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
1365 | |||
1366 | bp->stats_started = false; | ||
1367 | 1322 | ||
1368 | bnx2x_stats_comp(bp); | 1323 | bnx2x_stats_comp(bp); |
1369 | 1324 | ||
@@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
1381 | bnx2x_hw_stats_post(bp); | 1336 | bnx2x_hw_stats_post(bp); |
1382 | bnx2x_stats_comp(bp); | 1337 | bnx2x_stats_comp(bp); |
1383 | } | 1338 | } |
1384 | |||
1385 | up(&bp->stats_sema); | ||
1386 | } | 1339 | } |
1387 | 1340 | ||
1388 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) | 1341 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) |
@@ -1410,18 +1363,28 @@ static const struct { | |||
1410 | 1363 | ||
1411 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | 1364 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) |
1412 | { | 1365 | { |
1413 | enum bnx2x_stats_state state; | 1366 | enum bnx2x_stats_state state = bp->stats_state; |
1414 | void (*action)(struct bnx2x *bp); | 1367 | |
1415 | if (unlikely(bp->panic)) | 1368 | if (unlikely(bp->panic)) |
1416 | return; | 1369 | return; |
1417 | 1370 | ||
1418 | spin_lock_bh(&bp->stats_lock); | 1371 | /* Statistics update run from timer context, and we don't want to stop |
1419 | state = bp->stats_state; | 1372 | * that context in case someone is in the middle of a transition. |
1373 | * For other events, wait a bit until lock is taken. | ||
1374 | */ | ||
1375 | if (!mutex_trylock(&bp->stats_lock)) { | ||
1376 | if (event == STATS_EVENT_UPDATE) | ||
1377 | return; | ||
1378 | |||
1379 | DP(BNX2X_MSG_STATS, | ||
1380 | "Unlikely stats' lock contention [event %d]\n", event); | ||
1381 | mutex_lock(&bp->stats_lock); | ||
1382 | } | ||
1383 | |||
1384 | bnx2x_stats_stm[state][event].action(bp); | ||
1420 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1385 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
1421 | action = bnx2x_stats_stm[state][event].action; | ||
1422 | spin_unlock_bh(&bp->stats_lock); | ||
1423 | 1386 | ||
1424 | action(bp); | 1387 | mutex_unlock(&bp->stats_lock); |
1425 | 1388 | ||
1426 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1389 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
1427 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1390 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
@@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, | |||
1998 | } | 1961 | } |
1999 | } | 1962 | } |
2000 | 1963 | ||
2001 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | 1964 | int bnx2x_stats_safe_exec(struct bnx2x *bp, |
2002 | void (func_to_exec)(void *cookie), | 1965 | void (func_to_exec)(void *cookie), |
2003 | void *cookie){ | 1966 | void *cookie) |
2004 | if (down_timeout(&bp->stats_sema, HZ/10)) | 1967 | { |
2005 | BNX2X_ERR("Unable to acquire stats lock\n"); | 1968 | int cnt = 10, rc = 0; |
1969 | |||
1970 | /* Wait for statistics to end [while blocking further requests], | ||
1971 | * then run supplied function 'safely'. | ||
1972 | */ | ||
1973 | mutex_lock(&bp->stats_lock); | ||
1974 | |||
2006 | bnx2x_stats_comp(bp); | 1975 | bnx2x_stats_comp(bp); |
1976 | while (bp->stats_pending && cnt--) | ||
1977 | if (bnx2x_storm_stats_update(bp)) | ||
1978 | usleep_range(1000, 2000); | ||
1979 | if (bp->stats_pending) { | ||
1980 | BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n"); | ||
1981 | rc = -EBUSY; | ||
1982 | goto out; | ||
1983 | } | ||
1984 | |||
2007 | func_to_exec(cookie); | 1985 | func_to_exec(cookie); |
2008 | __bnx2x_stats_start(bp); | 1986 | |
2009 | up(&bp->stats_sema); | 1987 | out: |
1988 | /* No need to restart statistics - if they're enabled, the timer | ||
1989 | * will restart the statistics. | ||
1990 | */ | ||
1991 | mutex_unlock(&bp->stats_lock); | ||
1992 | |||
1993 | return rc; | ||
2010 | } | 1994 | } |
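The reworked bnx2x_stats_safe_exec() above waits a bounded number of times for pending statistics to complete and returns -EBUSY instead of running the callback against stale counters. A small sketch of such a bounded wait, with a hypothetical pending flag that some other context (e.g. a completion handler) clears:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/types.h>

static int my_wait_for_stats(bool *pending)
{
	int tries = 10;

	/* Poll for roughly 10-20ms in total before giving up. */
	while (READ_ONCE(*pending) && tries--)
		usleep_range(1000, 2000);

	return READ_ONCE(*pending) ? -EBUSY : 0;
}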
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 2beceaefdeea..965539a9dabe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | |||
@@ -539,9 +539,9 @@ struct bnx2x; | |||
539 | void bnx2x_memset_stats(struct bnx2x *bp); | 539 | void bnx2x_memset_stats(struct bnx2x *bp); |
540 | void bnx2x_stats_init(struct bnx2x *bp); | 540 | void bnx2x_stats_init(struct bnx2x *bp); |
541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | 541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); |
542 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | 542 | int bnx2x_stats_safe_exec(struct bnx2x *bp, |
543 | void (func_to_exec)(void *cookie), | 543 | void (func_to_exec)(void *cookie), |
544 | void *cookie); | 544 | void *cookie); |
545 | 545 | ||
546 | /** | 546 | /** |
547 | * bnx2x_save_statistics - save statistics when unloading. | 547 | * bnx2x_save_statistics - save statistics when unloading. |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index ff83c46bc389..6befde61c203 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type { | |||
487 | BCMGENET_STAT_MIB_TX, | 487 | BCMGENET_STAT_MIB_TX, |
488 | BCMGENET_STAT_RUNT, | 488 | BCMGENET_STAT_RUNT, |
489 | BCMGENET_STAT_MISC, | 489 | BCMGENET_STAT_MISC, |
490 | BCMGENET_STAT_SOFT, | ||
490 | }; | 491 | }; |
491 | 492 | ||
492 | struct bcmgenet_stats { | 493 | struct bcmgenet_stats { |
@@ -515,6 +516,7 @@ struct bcmgenet_stats { | |||
515 | #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) | 516 | #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) |
516 | #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) | 517 | #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) |
517 | #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) | 518 | #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) |
519 | #define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) | ||
518 | 520 | ||
519 | #define STAT_GENET_MISC(str, m, offset) { \ | 521 | #define STAT_GENET_MISC(str, m, offset) { \ |
520 | .stat_string = str, \ | 522 | .stat_string = str, \ |
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { | |||
614 | UMAC_RBUF_OVFL_CNT), | 616 | UMAC_RBUF_OVFL_CNT), |
615 | STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), | 617 | STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), |
616 | STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), | 618 | STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), |
617 | STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), | 619 | STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), |
618 | STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), | 620 | STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), |
619 | STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), | 621 | STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), |
620 | }; | 622 | }; |
621 | 623 | ||
622 | #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) | 624 | #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) |
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) | |||
668 | s = &bcmgenet_gstrings_stats[i]; | 670 | s = &bcmgenet_gstrings_stats[i]; |
669 | switch (s->type) { | 671 | switch (s->type) { |
670 | case BCMGENET_STAT_NETDEV: | 672 | case BCMGENET_STAT_NETDEV: |
673 | case BCMGENET_STAT_SOFT: | ||
671 | continue; | 674 | continue; |
672 | case BCMGENET_STAT_MIB_RX: | 675 | case BCMGENET_STAT_MIB_RX: |
673 | case BCMGENET_STAT_MIB_TX: | 676 | case BCMGENET_STAT_MIB_TX: |
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, | |||
971 | } | 974 | } |
972 | 975 | ||
973 | /* Unlocked version of the reclaim routine */ | 976 | /* Unlocked version of the reclaim routine */ |
974 | static void __bcmgenet_tx_reclaim(struct net_device *dev, | 977 | static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, |
975 | struct bcmgenet_tx_ring *ring) | 978 | struct bcmgenet_tx_ring *ring) |
976 | { | 979 | { |
977 | struct bcmgenet_priv *priv = netdev_priv(dev); | 980 | struct bcmgenet_priv *priv = netdev_priv(dev); |
978 | int last_tx_cn, last_c_index, num_tx_bds; | 981 | int last_tx_cn, last_c_index, num_tx_bds; |
979 | struct enet_cb *tx_cb_ptr; | 982 | struct enet_cb *tx_cb_ptr; |
980 | struct netdev_queue *txq; | 983 | struct netdev_queue *txq; |
984 | unsigned int pkts_compl = 0; | ||
981 | unsigned int bds_compl; | 985 | unsigned int bds_compl; |
982 | unsigned int c_index; | 986 | unsigned int c_index; |
983 | 987 | ||
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
1005 | tx_cb_ptr = ring->cbs + last_c_index; | 1009 | tx_cb_ptr = ring->cbs + last_c_index; |
1006 | bds_compl = 0; | 1010 | bds_compl = 0; |
1007 | if (tx_cb_ptr->skb) { | 1011 | if (tx_cb_ptr->skb) { |
1012 | pkts_compl++; | ||
1008 | bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; | 1013 | bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; |
1009 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; | 1014 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; |
1010 | dma_unmap_single(&dev->dev, | 1015 | dma_unmap_single(&dev->dev, |
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
1028 | last_c_index &= (num_tx_bds - 1); | 1033 | last_c_index &= (num_tx_bds - 1); |
1029 | } | 1034 | } |
1030 | 1035 | ||
1031 | if (ring->free_bds > (MAX_SKB_FRAGS + 1)) | 1036 | if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { |
1032 | ring->int_disable(priv, ring); | 1037 | if (netif_tx_queue_stopped(txq)) |
1033 | 1038 | netif_tx_wake_queue(txq); | |
1034 | if (netif_tx_queue_stopped(txq)) | 1039 | } |
1035 | netif_tx_wake_queue(txq); | ||
1036 | 1040 | ||
1037 | ring->c_index = c_index; | 1041 | ring->c_index = c_index; |
1042 | |||
1043 | return pkts_compl; | ||
1038 | } | 1044 | } |
1039 | 1045 | ||
1040 | static void bcmgenet_tx_reclaim(struct net_device *dev, | 1046 | static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, |
1041 | struct bcmgenet_tx_ring *ring) | 1047 | struct bcmgenet_tx_ring *ring) |
1042 | { | 1048 | { |
1049 | unsigned int released; | ||
1043 | unsigned long flags; | 1050 | unsigned long flags; |
1044 | 1051 | ||
1045 | spin_lock_irqsave(&ring->lock, flags); | 1052 | spin_lock_irqsave(&ring->lock, flags); |
1046 | __bcmgenet_tx_reclaim(dev, ring); | 1053 | released = __bcmgenet_tx_reclaim(dev, ring); |
1047 | spin_unlock_irqrestore(&ring->lock, flags); | 1054 | spin_unlock_irqrestore(&ring->lock, flags); |
1055 | |||
1056 | return released; | ||
1057 | } | ||
1058 | |||
1059 | static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) | ||
1060 | { | ||
1061 | struct bcmgenet_tx_ring *ring = | ||
1062 | container_of(napi, struct bcmgenet_tx_ring, napi); | ||
1063 | unsigned int work_done = 0; | ||
1064 | |||
1065 | work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring); | ||
1066 | |||
1067 | if (work_done == 0) { | ||
1068 | napi_complete(napi); | ||
1069 | ring->int_enable(ring->priv, ring); | ||
1070 | |||
1071 | return 0; | ||
1072 | } | ||
1073 | |||
1074 | return budget; | ||
1048 | } | 1075 | } |
1049 | 1076 | ||
1050 | static void bcmgenet_tx_reclaim_all(struct net_device *dev) | 1077 | static void bcmgenet_tx_reclaim_all(struct net_device *dev) |
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1302 | bcmgenet_tdma_ring_writel(priv, ring->index, | 1329 | bcmgenet_tdma_ring_writel(priv, ring->index, |
1303 | ring->prod_index, TDMA_PROD_INDEX); | 1330 | ring->prod_index, TDMA_PROD_INDEX); |
1304 | 1331 | ||
1305 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { | 1332 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) |
1306 | netif_tx_stop_queue(txq); | 1333 | netif_tx_stop_queue(txq); |
1307 | ring->int_enable(priv, ring); | ||
1308 | } | ||
1309 | 1334 | ||
1310 | out: | 1335 | out: |
1311 | spin_unlock_irqrestore(&ring->lock, flags); | 1336 | spin_unlock_irqrestore(&ring->lock, flags); |
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv) | |||
1621 | struct device *kdev = &priv->pdev->dev; | 1646 | struct device *kdev = &priv->pdev->dev; |
1622 | int ret; | 1647 | int ret; |
1623 | u32 reg, cpu_mask_clear; | 1648 | u32 reg, cpu_mask_clear; |
1649 | int index; | ||
1624 | 1650 | ||
1625 | dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); | 1651 | dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); |
1626 | 1652 | ||
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv) | |||
1647 | 1673 | ||
1648 | bcmgenet_intr_disable(priv); | 1674 | bcmgenet_intr_disable(priv); |
1649 | 1675 | ||
1650 | cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; | 1676 | cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE; |
1651 | 1677 | ||
1652 | dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); | 1678 | dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); |
1653 | 1679 | ||
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv) | |||
1674 | 1700 | ||
1675 | bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); | 1701 | bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); |
1676 | 1702 | ||
1703 | for (index = 0; index < priv->hw_params->tx_queues; index++) | ||
1704 | bcmgenet_intrl2_1_writel(priv, (1 << index), | ||
1705 | INTRL2_CPU_MASK_CLEAR); | ||
1706 | |||
1677 | /* Enable rx/tx engine.*/ | 1707 | /* Enable rx/tx engine.*/ |
1678 | dev_dbg(kdev, "done init umac\n"); | 1708 | dev_dbg(kdev, "done init umac\n"); |
1679 | 1709 | ||
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, | |||
1693 | unsigned int first_bd; | 1723 | unsigned int first_bd; |
1694 | 1724 | ||
1695 | spin_lock_init(&ring->lock); | 1725 | spin_lock_init(&ring->lock); |
1726 | ring->priv = priv; | ||
1727 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); | ||
1696 | ring->index = index; | 1728 | ring->index = index; |
1697 | if (index == DESC_INDEX) { | 1729 | if (index == DESC_INDEX) { |
1698 | ring->queue = 0; | 1730 | ring->queue = 0; |
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, | |||
1738 | TDMA_WRITE_PTR); | 1770 | TDMA_WRITE_PTR); |
1739 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, | 1771 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, |
1740 | DMA_END_ADDR); | 1772 | DMA_END_ADDR); |
1773 | |||
1774 | napi_enable(&ring->napi); | ||
1775 | } | ||
1776 | |||
1777 | static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv, | ||
1778 | unsigned int index) | ||
1779 | { | ||
1780 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; | ||
1781 | |||
1782 | napi_disable(&ring->napi); | ||
1783 | netif_napi_del(&ring->napi); | ||
1741 | } | 1784 | } |
1742 | 1785 | ||
1743 | /* Initialize a RDMA ring */ | 1786 | /* Initialize a RDMA ring */ |
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | |||
1907 | return ret; | 1950 | return ret; |
1908 | } | 1951 | } |
1909 | 1952 | ||
1910 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | 1953 | static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
1911 | { | 1954 | { |
1912 | int i; | 1955 | int i; |
1913 | 1956 | ||
@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | |||
1926 | kfree(priv->tx_cbs); | 1969 | kfree(priv->tx_cbs); |
1927 | } | 1970 | } |
1928 | 1971 | ||
1972 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | ||
1973 | { | ||
1974 | int i; | ||
1975 | |||
1976 | bcmgenet_fini_tx_ring(priv, DESC_INDEX); | ||
1977 | |||
1978 | for (i = 0; i < priv->hw_params->tx_queues; i++) | ||
1979 | bcmgenet_fini_tx_ring(priv, i); | ||
1980 | |||
1981 | __bcmgenet_fini_dma(priv); | ||
1982 | } | ||
1983 | |||
1929 | /* init_edma: Initialize DMA control register */ | 1984 | /* init_edma: Initialize DMA control register */ |
1930 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | 1985 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) |
1931 | { | 1986 | { |
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | |||
1952 | priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), | 2007 | priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), |
1953 | GFP_KERNEL); | 2008 | GFP_KERNEL); |
1954 | if (!priv->tx_cbs) { | 2009 | if (!priv->tx_cbs) { |
1955 | bcmgenet_fini_dma(priv); | 2010 | __bcmgenet_fini_dma(priv); |
1956 | return -ENOMEM; | 2011 | return -ENOMEM; |
1957 | } | 2012 | } |
1958 | 2013 | ||
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget) | |||
1975 | struct bcmgenet_priv, napi); | 2030 | struct bcmgenet_priv, napi); |
1976 | unsigned int work_done; | 2031 | unsigned int work_done; |
1977 | 2032 | ||
1978 | /* tx reclaim */ | ||
1979 | bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); | ||
1980 | |||
1981 | work_done = bcmgenet_desc_rx(priv, budget); | 2033 | work_done = bcmgenet_desc_rx(priv, budget); |
1982 | 2034 | ||
1983 | /* Advancing our consumer index*/ | 2035 | /* Advancing our consumer index*/ |
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work) | |||
2022 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) | 2074 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) |
2023 | { | 2075 | { |
2024 | struct bcmgenet_priv *priv = dev_id; | 2076 | struct bcmgenet_priv *priv = dev_id; |
2077 | struct bcmgenet_tx_ring *ring; | ||
2025 | unsigned int index; | 2078 | unsigned int index; |
2026 | 2079 | ||
2027 | /* Save irq status for bottom-half processing. */ | 2080 | /* Save irq status for bottom-half processing. */ |
2028 | priv->irq1_stat = | 2081 | priv->irq1_stat = |
2029 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & | 2082 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & |
2030 | ~priv->int1_mask; | 2083 | ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); |
2031 | /* clear interrupts */ | 2084 | /* clear interrupts */ |
2032 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); | 2085 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); |
2033 | 2086 | ||
2034 | netif_dbg(priv, intr, priv->dev, | 2087 | netif_dbg(priv, intr, priv->dev, |
2035 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); | 2088 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); |
2089 | |||
2036 | /* Check the MBDONE interrupts. | 2090 | /* Check the MBDONE interrupts. |
2037 | * packet is done, reclaim descriptors | 2091 | * packet is done, reclaim descriptors |
2038 | */ | 2092 | */ |
2039 | if (priv->irq1_stat & 0x0000ffff) { | 2093 | for (index = 0; index < priv->hw_params->tx_queues; index++) { |
2040 | index = 0; | 2094 | if (!(priv->irq1_stat & BIT(index))) |
2041 | for (index = 0; index < 16; index++) { | 2095 | continue; |
2042 | if (priv->irq1_stat & (1 << index)) | 2096 | |
2043 | bcmgenet_tx_reclaim(priv->dev, | 2097 | ring = &priv->tx_rings[index]; |
2044 | &priv->tx_rings[index]); | 2098 | |
2099 | if (likely(napi_schedule_prep(&ring->napi))) { | ||
2100 | ring->int_disable(priv, ring); | ||
2101 | __napi_schedule(&ring->napi); | ||
2045 | } | 2102 | } |
2046 | } | 2103 | } |
2104 | |||
2047 | return IRQ_HANDLED; | 2105 | return IRQ_HANDLED; |
2048 | } | 2106 | } |
2049 | 2107 | ||
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) | |||
2075 | } | 2133 | } |
2076 | if (priv->irq0_stat & | 2134 | if (priv->irq0_stat & |
2077 | (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { | 2135 | (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { |
2078 | /* Tx reclaim */ | 2136 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX]; |
2079 | bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); | 2137 | |
2138 | if (likely(napi_schedule_prep(&ring->napi))) { | ||
2139 | ring->int_disable(priv, ring); | ||
2140 | __napi_schedule(&ring->napi); | ||
2141 | } | ||
2080 | } | 2142 | } |
2081 | if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | | 2143 | if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | |
2082 | UMAC_IRQ_PHY_DET_F | | 2144 | UMAC_IRQ_PHY_DET_F | |
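Taken together, the bcmgenet.c changes above move TX reclaim out of the shared RX NAPI handler and out of hard-IRQ context into one NAPI instance per TX ring: the ISR masks the ring's interrupt and schedules its NAPI, and the poll routine re-enables the interrupt only once the ring has been drained. A stripped-down sketch of that lifecycle, with invented helpers (my_reclaim, my_irq_enable/disable) standing in for the driver's ring callbacks:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct my_ring {
	struct napi_struct napi;
	struct net_device *dev;
};

/* Provided by the driver: my_reclaim() returns packets freed this pass,
 * my_irq_disable()/my_irq_enable() mask and unmask this ring's interrupt.
 */
static unsigned int my_reclaim(struct my_ring *ring);
static void my_irq_disable(struct my_ring *ring);
static void my_irq_enable(struct my_ring *ring);

static int my_tx_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);

	if (my_reclaim(ring) == 0) {
		/* Ring is empty: stop polling and unmask the interrupt. */
		napi_complete(napi);
		my_irq_enable(ring);
		return 0;
	}
	return budget;	/* more work pending, keep polling */
}

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_ring *ring = dev_id;

	if (likely(napi_schedule_prep(&ring->napi))) {
		my_irq_disable(ring);	/* masked until the poll drains the ring */
		__napi_schedule(&ring->napi);
	}
	return IRQ_HANDLED;
}

/* Setup/teardown mirror the init_tx_ring/fini_tx_ring hunks:
 *	netif_napi_add(dev, &ring->napi, my_tx_poll, 64);
 *	napi_enable(&ring->napi);
 *	...
 *	napi_disable(&ring->napi);
 *	netif_napi_del(&ring->napi);
 */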
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b36ddec0cc0a..0d370d168aee 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params { | |||
520 | 520 | ||
521 | struct bcmgenet_tx_ring { | 521 | struct bcmgenet_tx_ring { |
522 | spinlock_t lock; /* ring lock */ | 522 | spinlock_t lock; /* ring lock */ |
523 | struct napi_struct napi; /* NAPI per tx queue */ | ||
523 | unsigned int index; /* ring index */ | 524 | unsigned int index; /* ring index */ |
524 | unsigned int queue; /* queue index */ | 525 | unsigned int queue; /* queue index */ |
525 | struct enet_cb *cbs; /* tx ring buffer control block*/ | 526 | struct enet_cb *cbs; /* tx ring buffer control block*/ |
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring { | |||
534 | struct bcmgenet_tx_ring *); | 535 | struct bcmgenet_tx_ring *); |
535 | void (*int_disable)(struct bcmgenet_priv *priv, | 536 | void (*int_disable)(struct bcmgenet_priv *priv, |
536 | struct bcmgenet_tx_ring *); | 537 | struct bcmgenet_tx_ring *); |
538 | struct bcmgenet_priv *priv; | ||
537 | }; | 539 | }; |
538 | 540 | ||
539 | /* device context */ | 541 | /* device context */ |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 149a0d70c108..b97122926d3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | |||
@@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) | 73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) |
74 | return -EINVAL; | 74 | return -EINVAL; |
75 | 75 | ||
76 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
76 | if (wol->wolopts & WAKE_MAGICSECURE) { | 77 | if (wol->wolopts & WAKE_MAGICSECURE) { |
77 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), | 78 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), |
78 | UMAC_MPD_PW_MS); | 79 | UMAC_MPD_PW_MS); |
79 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), | 80 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), |
80 | UMAC_MPD_PW_LS); | 81 | UMAC_MPD_PW_LS); |
81 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
82 | reg |= MPD_PW_EN; | 82 | reg |= MPD_PW_EN; |
83 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | 83 | } else { |
84 | reg &= ~MPD_PW_EN; | ||
84 | } | 85 | } |
86 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
85 | 87 | ||
86 | /* Flag the device and relevant IRQ as wakeup capable */ | 88 | /* Flag the device and relevant IRQ as wakeup capable */ |
87 | if (wol->wolopts) { | 89 | if (wol->wolopts) { |
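The bcmgenet_wol.c fix above turns the MPD_PW_EN handling into a true read-modify-write: the control register is read once up front, the password-enable bit is set or cleared according to WAKE_MAGICSECURE, and the result is written back, so a stale enable from an earlier secure configuration can no longer survive a plain magic-packet request. The general shape of the pattern, with a placeholder register and bit name:

#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_PW_EN	BIT(5)	/* placeholder for the real MPD_PW_EN bit */

static void my_set_secure_wol(void __iomem *ctrl, bool secure)
{
	u32 reg = readl(ctrl);

	if (secure)
		reg |= MY_PW_EN;
	else
		reg &= ~MY_PW_EN;	/* always clear a previously latched enable */

	writel(reg, ctrl);
}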
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..81d41539fcba 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = { | |||
2113 | }; | 2113 | }; |
2114 | 2114 | ||
2115 | #if defined(CONFIG_OF) | 2115 | #if defined(CONFIG_OF) |
2116 | static struct macb_config pc302gem_config = { | 2116 | static const struct macb_config pc302gem_config = { |
2117 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 2117 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
2118 | .dma_burst_length = 16, | 2118 | .dma_burst_length = 16, |
2119 | }; | 2119 | }; |
2120 | 2120 | ||
2121 | static struct macb_config sama5d3_config = { | 2121 | static const struct macb_config sama5d3_config = { |
2122 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 2122 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
2123 | .dma_burst_length = 16, | 2123 | .dma_burst_length = 16, |
2124 | }; | 2124 | }; |
2125 | 2125 | ||
2126 | static struct macb_config sama5d4_config = { | 2126 | static const struct macb_config sama5d4_config = { |
2127 | .caps = 0, | 2127 | .caps = 0, |
2128 | .dma_burst_length = 4, | 2128 | .dma_burst_length = 4, |
2129 | }; | 2129 | }; |
@@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp) | |||
2154 | if (bp->pdev->dev.of_node) { | 2154 | if (bp->pdev->dev.of_node) { |
2155 | match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); | 2155 | match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); |
2156 | if (match && match->data) { | 2156 | if (match && match->data) { |
2157 | config = (const struct macb_config *)match->data; | 2157 | config = match->data; |
2158 | 2158 | ||
2159 | bp->caps = config->caps; | 2159 | bp->caps = config->caps; |
2160 | /* | 2160 | /* |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..ff85619a9732 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -351,7 +351,7 @@ | |||
351 | 351 | ||
352 | /* Bitfields in MID */ | 352 | /* Bitfields in MID */ |
353 | #define MACB_IDNUM_OFFSET 16 | 353 | #define MACB_IDNUM_OFFSET 16 |
354 | #define MACB_IDNUM_SIZE 16 | 354 | #define MACB_IDNUM_SIZE 12 |
355 | #define MACB_REV_OFFSET 0 | 355 | #define MACB_REV_OFFSET 0 |
356 | #define MACB_REV_SIZE 16 | 356 | #define MACB_REV_SIZE 16 |
357 | 357 | ||
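The macb.h change narrows MACB_IDNUM_SIZE to 12 bits, so the design ID is taken from bits 27:16 of the MID register rather than the whole upper half-word. A quick illustration of how an OFFSET/SIZE pair like this is used to extract a field (a generic reconstruction of the driver's MACB_BFEXT-style helper, not a quote of it):

#include <linux/types.h>

#define MY_IDNUM_OFFSET	16
#define MY_IDNUM_SIZE	12

static inline u32 my_bfext(u32 value, unsigned int offset, unsigned int size)
{
	return (value >> offset) & ((1u << size) - 1);
}

/* e.g. a MID value of 0x00020119 gives
 * my_bfext(0x00020119, MY_IDNUM_OFFSET, MY_IDNUM_SIZE) == 0x002,
 * i.e. only the 12-bit ID, with the reserved upper bits ignored.
 */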
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index 9062a8434246..c308429dd9c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | |||
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key) | |||
35 | } | 35 | } |
36 | 36 | ||
37 | static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, | 37 | static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, |
38 | int addr_len) | 38 | u8 v6) |
39 | { | 39 | { |
40 | return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) : | 40 | return v6 ? ipv6_clip_hash(ctbl, addr) : |
41 | ipv6_clip_hash(ctbl, addr); | 41 | ipv4_clip_hash(ctbl, addr); |
42 | } | 42 | } |
43 | 43 | ||
44 | static int clip6_get_mbox(const struct net_device *dev, | 44 | static int clip6_get_mbox(const struct net_device *dev, |
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) | |||
78 | struct clip_entry *ce, *cte; | 78 | struct clip_entry *ce, *cte; |
79 | u32 *addr = (u32 *)lip; | 79 | u32 *addr = (u32 *)lip; |
80 | int hash; | 80 | int hash; |
81 | int addr_len; | 81 | int ret = -1; |
82 | int ret = 0; | ||
83 | 82 | ||
84 | if (!ctbl) | 83 | if (!ctbl) |
85 | return 0; | 84 | return 0; |
86 | 85 | ||
87 | if (v6) | 86 | hash = clip_addr_hash(ctbl, addr, v6); |
88 | addr_len = 16; | ||
89 | else | ||
90 | addr_len = 4; | ||
91 | |||
92 | hash = clip_addr_hash(ctbl, addr, addr_len); | ||
93 | 87 | ||
94 | read_lock_bh(&ctbl->lock); | 88 | read_lock_bh(&ctbl->lock); |
95 | list_for_each_entry(cte, &ctbl->hash_list[hash], list) { | 89 | list_for_each_entry(cte, &ctbl->hash_list[hash], list) { |
96 | if (addr_len == cte->addr_len && | 90 | if (cte->addr6.sin6_family == AF_INET6 && v6) |
97 | memcmp(lip, cte->addr, cte->addr_len) == 0) { | 91 | ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, |
92 | sizeof(struct in6_addr)); | ||
93 | else if (cte->addr.sin_family == AF_INET && !v6) | ||
94 | ret = memcmp(lip, (char *)(&cte->addr.sin_addr), | ||
95 | sizeof(struct in_addr)); | ||
96 | if (!ret) { | ||
98 | ce = cte; | 97 | ce = cte; |
99 | read_unlock_bh(&ctbl->lock); | 98 | read_unlock_bh(&ctbl->lock); |
100 | goto found; | 99 | goto found; |
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) | |||
111 | spin_lock_init(&ce->lock); | 110 | spin_lock_init(&ce->lock); |
112 | atomic_set(&ce->refcnt, 0); | 111 | atomic_set(&ce->refcnt, 0); |
113 | atomic_dec(&ctbl->nfree); | 112 | atomic_dec(&ctbl->nfree); |
114 | ce->addr_len = addr_len; | ||
115 | memcpy(ce->addr, lip, addr_len); | ||
116 | list_add_tail(&ce->list, &ctbl->hash_list[hash]); | 113 | list_add_tail(&ce->list, &ctbl->hash_list[hash]); |
117 | if (v6) { | 114 | if (v6) { |
115 | ce->addr6.sin6_family = AF_INET6; | ||
116 | memcpy(ce->addr6.sin6_addr.s6_addr, | ||
117 | lip, sizeof(struct in6_addr)); | ||
118 | ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); | 118 | ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); |
119 | if (ret) { | 119 | if (ret) { |
120 | write_unlock_bh(&ctbl->lock); | 120 | write_unlock_bh(&ctbl->lock); |
121 | return ret; | 121 | return ret; |
122 | } | 122 | } |
123 | } else { | ||
124 | ce->addr.sin_family = AF_INET; | ||
125 | memcpy((char *)(&ce->addr.sin_addr), lip, | ||
126 | sizeof(struct in_addr)); | ||
123 | } | 127 | } |
124 | } else { | 128 | } else { |
125 | write_unlock_bh(&ctbl->lock); | 129 | write_unlock_bh(&ctbl->lock); |
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) | |||
140 | struct clip_entry *ce, *cte; | 144 | struct clip_entry *ce, *cte; |
141 | u32 *addr = (u32 *)lip; | 145 | u32 *addr = (u32 *)lip; |
142 | int hash; | 146 | int hash; |
143 | int addr_len; | 147 | int ret = -1; |
144 | |||
145 | if (v6) | ||
146 | addr_len = 16; | ||
147 | else | ||
148 | addr_len = 4; | ||
149 | 148 | ||
150 | hash = clip_addr_hash(ctbl, addr, addr_len); | 149 | hash = clip_addr_hash(ctbl, addr, v6); |
151 | 150 | ||
152 | read_lock_bh(&ctbl->lock); | 151 | read_lock_bh(&ctbl->lock); |
153 | list_for_each_entry(cte, &ctbl->hash_list[hash], list) { | 152 | list_for_each_entry(cte, &ctbl->hash_list[hash], list) { |
154 | if (addr_len == cte->addr_len && | 153 | if (cte->addr6.sin6_family == AF_INET6 && v6) |
155 | memcmp(lip, cte->addr, cte->addr_len) == 0) { | 154 | ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, |
155 | sizeof(struct in6_addr)); | ||
156 | else if (cte->addr.sin_family == AF_INET && !v6) | ||
157 | ret = memcmp(lip, (char *)(&cte->addr.sin_addr), | ||
158 | sizeof(struct in_addr)); | ||
159 | if (!ret) { | ||
156 | ce = cte; | 160 | ce = cte; |
157 | read_unlock_bh(&ctbl->lock); | 161 | read_unlock_bh(&ctbl->lock); |
158 | goto found; | 162 | goto found; |
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v) | |||
249 | for (i = 0 ; i < ctbl->clipt_size; ++i) { | 253 | for (i = 0 ; i < ctbl->clipt_size; ++i) { |
250 | list_for_each_entry(ce, &ctbl->hash_list[i], list) { | 254 | list_for_each_entry(ce, &ctbl->hash_list[i], list) { |
251 | ip[0] = '\0'; | 255 | ip[0] = '\0'; |
252 | if (ce->addr_len == 16) | 256 | sprintf(ip, "%pISc", &ce->addr); |
253 | sprintf(ip, "%pI6c", ce->addr); | ||
254 | else | ||
255 | sprintf(ip, "%pI4c", ce->addr); | ||
256 | seq_printf(seq, "%-25s %u\n", ip, | 257 | seq_printf(seq, "%-25s %u\n", ip, |
257 | atomic_read(&ce->refcnt)); | 258 | atomic_read(&ce->refcnt)); |
258 | } | 259 | } |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h index 2eaba0161cf8..35eb43c6bcbb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | |||
@@ -14,8 +14,10 @@ struct clip_entry { | |||
14 | spinlock_t lock; /* Hold while modifying clip reference */ | 14 | spinlock_t lock; /* Hold while modifying clip reference */ |
15 | atomic_t refcnt; | 15 | atomic_t refcnt; |
16 | struct list_head list; | 16 | struct list_head list; |
17 | u32 addr[4]; | 17 | union { |
18 | int addr_len; | 18 | struct sockaddr_in addr; |
19 | struct sockaddr_in6 addr6; | ||
20 | }; | ||
19 | }; | 21 | }; |
20 | 22 | ||
21 | struct clip_tbl { | 23 | struct clip_tbl { |
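Replacing the raw u32 addr[4] plus addr_len pair with a union of sockaddr_in/sockaddr_in6 lets each clip_entry carry its own address family, which is what allows the lookup paths above to match on sin_family/sin6_family and the debugfs dump to print both families with a single %pISc. A compact sketch of filling and matching such a union (names here are illustrative, not the driver's):

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/socket.h>
#include <linux/string.h>

union my_clip_addr {
	struct sockaddr_in  v4;
	struct sockaddr_in6 v6;
};

static void my_clip_fill(union my_clip_addr *a, const u32 *lip, bool ipv6)
{
	if (ipv6) {
		a->v6.sin6_family = AF_INET6;
		memcpy(a->v6.sin6_addr.s6_addr, lip, sizeof(struct in6_addr));
	} else {
		a->v4.sin_family = AF_INET;
		memcpy(&a->v4.sin_addr, lip, sizeof(struct in_addr));
	}
}

static bool my_clip_match(const union my_clip_addr *a, const u32 *lip, bool ipv6)
{
	if (ipv6 && a->v6.sin6_family == AF_INET6)
		return !memcmp(lip, a->v6.sin6_addr.s6_addr,
			       sizeof(struct in6_addr));
	if (!ipv6 && a->v4.sin_family == AF_INET)
		return !memcmp(lip, &a->v4.sin_addr, sizeof(struct in_addr));
	return false;
}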
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index d6cda17efe6e..c6ff4890d171 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
@@ -376,8 +376,6 @@ enum { | |||
376 | enum { | 376 | enum { |
377 | INGQ_EXTRAS = 2, /* firmware event queue and */ | 377 | INGQ_EXTRAS = 2, /* firmware event queue and */ |
378 | /* forwarded interrupts */ | 378 | /* forwarded interrupts */ |
379 | MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2 | ||
380 | + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES, | ||
381 | MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES | 379 | MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES |
382 | + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS, | 380 | + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS, |
383 | }; | 381 | }; |
@@ -616,11 +614,13 @@ struct sge { | |||
616 | unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */ | 614 | unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */ |
617 | 615 | ||
618 | unsigned int egr_start; | 616 | unsigned int egr_start; |
617 | unsigned int egr_sz; | ||
619 | unsigned int ingr_start; | 618 | unsigned int ingr_start; |
620 | void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ | 619 | unsigned int ingr_sz; |
621 | struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ | 620 | void **egr_map; /* qid->queue egress queue map */ |
622 | DECLARE_BITMAP(starving_fl, MAX_EGRQ); | 621 | struct sge_rspq **ingr_map; /* qid->queue ingress queue map */ |
623 | DECLARE_BITMAP(txq_maperr, MAX_EGRQ); | 622 | unsigned long *starving_fl; |
623 | unsigned long *txq_maperr; | ||
624 | struct timer_list rx_timer; /* refills starving FLs */ | 624 | struct timer_list rx_timer; /* refills starving FLs */ |
625 | struct timer_list tx_timer; /* checks Tx queues */ | 625 | struct timer_list tx_timer; /* checks Tx queues */ |
626 | }; | 626 | }; |
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); | |||
1103 | #define T4_MEMORY_WRITE 0 | 1103 | #define T4_MEMORY_WRITE 0 |
1104 | #define T4_MEMORY_READ 1 | 1104 | #define T4_MEMORY_READ 1 |
1105 | int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, | 1105 | int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, |
1106 | __be32 *buf, int dir); | 1106 | void *buf, int dir); |
1107 | static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, | 1107 | static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, |
1108 | u32 len, __be32 *buf) | 1108 | u32 len, __be32 *buf) |
1109 | { | 1109 | { |
@@ -1136,6 +1136,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, | |||
1136 | 1136 | ||
1137 | unsigned int qtimer_val(const struct adapter *adap, | 1137 | unsigned int qtimer_val(const struct adapter *adap, |
1138 | const struct sge_rspq *q); | 1138 | const struct sge_rspq *q); |
1139 | |||
1140 | int t4_init_devlog_params(struct adapter *adapter); | ||
1139 | int t4_init_sge_params(struct adapter *adapter); | 1141 | int t4_init_sge_params(struct adapter *adapter); |
1140 | int t4_init_tp_params(struct adapter *adap); | 1142 | int t4_init_tp_params(struct adapter *adap); |
1141 | int t4_filter_field_shift(const struct adapter *adap, int filter_sel); | 1143 | int t4_filter_field_shift(const struct adapter *adap, int filter_sel); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 78854ceb0870..dcb047945290 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | |||
@@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v) | |||
670 | "0.9375" }; | 670 | "0.9375" }; |
671 | 671 | ||
672 | int i; | 672 | int i; |
673 | u16 incr[NMTUS][NCCTRL_WIN]; | 673 | u16 (*incr)[NCCTRL_WIN]; |
674 | struct adapter *adap = seq->private; | 674 | struct adapter *adap = seq->private; |
675 | 675 | ||
676 | incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL); | ||
677 | if (!incr) | ||
678 | return -ENOMEM; | ||
679 | |||
676 | t4_read_cong_tbl(adap, incr); | 680 | t4_read_cong_tbl(adap, incr); |
677 | 681 | ||
678 | for (i = 0; i < NCCTRL_WIN; ++i) { | 682 | for (i = 0; i < NCCTRL_WIN; ++i) { |
@@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v) | |||
685 | adap->params.a_wnd[i], | 689 | adap->params.a_wnd[i], |
686 | dec_fac[adap->params.b_wnd[i]]); | 690 | dec_fac[adap->params.b_wnd[i]]); |
687 | } | 691 | } |
692 | |||
693 | kfree(incr); | ||
688 | return 0; | 694 | return 0; |
689 | } | 695 | } |
690 | 696 | ||
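The cctrl_tbl_show() change above moves the NMTUS x NCCTRL_WIN table off the kernel stack; keeping the pointer-to-array type means the existing two-dimensional indexing is untouched. A generic sketch of the same allocation pattern with made-up dimensions:

#include <linux/slab.h>
#include <linux/types.h>

#define MY_ROWS	16
#define MY_COLS	32

static int my_fill_table(void)
{
	u16 (*tbl)[MY_COLS];	/* pointer to an array of MY_COLS u16s */
	int i, j;

	tbl = kmalloc(sizeof(*tbl) * MY_ROWS, GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	/* tbl[i][j] indexes exactly like an on-stack u16 tbl[MY_ROWS][MY_COLS] */
	for (i = 0; i < MY_ROWS; i++)
		for (j = 0; j < MY_COLS; j++)
			tbl[i][j] = i * MY_COLS + j;

	kfree(tbl);
	return 0;
}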
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index a22cf932ca35..d92995138f7e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap) | |||
920 | { | 920 | { |
921 | int i; | 921 | int i; |
922 | 922 | ||
923 | for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { | 923 | for (i = 0; i < adap->sge.ingr_sz; i++) { |
924 | struct sge_rspq *q = adap->sge.ingr_map[i]; | 924 | struct sge_rspq *q = adap->sge.ingr_map[i]; |
925 | 925 | ||
926 | if (q && q->handler) { | 926 | if (q && q->handler) { |
@@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap) | |||
934 | } | 934 | } |
935 | } | 935 | } |
936 | 936 | ||
937 | /* Disable interrupt and napi handler */ | ||
938 | static void disable_interrupts(struct adapter *adap) | ||
939 | { | ||
940 | if (adap->flags & FULL_INIT_DONE) { | ||
941 | t4_intr_disable(adap); | ||
942 | if (adap->flags & USING_MSIX) { | ||
943 | free_msix_queue_irqs(adap); | ||
944 | free_irq(adap->msix_info[0].vec, adap); | ||
945 | } else { | ||
946 | free_irq(adap->pdev->irq, adap); | ||
947 | } | ||
948 | quiesce_rx(adap); | ||
949 | } | ||
950 | } | ||
951 | |||
937 | /* | 952 | /* |
938 | * Enable NAPI scheduling and interrupt generation for all Rx queues. | 953 | * Enable NAPI scheduling and interrupt generation for all Rx queues. |
939 | */ | 954 | */ |
@@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap) | |||
941 | { | 956 | { |
942 | int i; | 957 | int i; |
943 | 958 | ||
944 | for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { | 959 | for (i = 0; i < adap->sge.ingr_sz; i++) { |
945 | struct sge_rspq *q = adap->sge.ingr_map[i]; | 960 | struct sge_rspq *q = adap->sge.ingr_map[i]; |
946 | 961 | ||
947 | if (!q) | 962 | if (!q) |
@@ -970,8 +985,8 @@ static int setup_sge_queues(struct adapter *adap) | |||
970 | int err, msi_idx, i, j; | 985 | int err, msi_idx, i, j; |
971 | struct sge *s = &adap->sge; | 986 | struct sge *s = &adap->sge; |
972 | 987 | ||
973 | bitmap_zero(s->starving_fl, MAX_EGRQ); | 988 | bitmap_zero(s->starving_fl, s->egr_sz); |
974 | bitmap_zero(s->txq_maperr, MAX_EGRQ); | 989 | bitmap_zero(s->txq_maperr, s->egr_sz); |
975 | 990 | ||
976 | if (adap->flags & USING_MSIX) | 991 | if (adap->flags & USING_MSIX) |
977 | msi_idx = 1; /* vector 0 is for non-queue interrupts */ | 992 | msi_idx = 1; /* vector 0 is for non-queue interrupts */ |
@@ -983,6 +998,19 @@ static int setup_sge_queues(struct adapter *adap) | |||
983 | msi_idx = -((int)s->intrq.abs_id + 1); | 998 | msi_idx = -((int)s->intrq.abs_id + 1); |
984 | } | 999 | } |
985 | 1000 | ||
1001 | /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here, | ||
1002 | * don't forget to update the following which need to be | ||
1003 | * synchronized to any changes here. | ||
1004 | * | ||
1005 | * 1. The calculations of MAX_INGQ in cxgb4.h. | ||
1006 | * | ||
1007 | * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs | ||
1008 | * to accommodate any new/deleted Ingress Queues | ||
1009 | * which need MSI-X Vectors. | ||
1010 | * | ||
1011 | * 3. Update sge_qinfo_show() to include information on the | ||
1012 | * new/deleted queues. | ||
1013 | */ | ||
986 | err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], | 1014 | err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], |
987 | msi_idx, NULL, fwevtq_handler); | 1015 | msi_idx, NULL, fwevtq_handler); |
988 | if (err) { | 1016 | if (err) { |
@@ -4244,19 +4272,12 @@ static int cxgb_up(struct adapter *adap) | |||
4244 | 4272 | ||
4245 | static void cxgb_down(struct adapter *adapter) | 4273 | static void cxgb_down(struct adapter *adapter) |
4246 | { | 4274 | { |
4247 | t4_intr_disable(adapter); | ||
4248 | cancel_work_sync(&adapter->tid_release_task); | 4275 | cancel_work_sync(&adapter->tid_release_task); |
4249 | cancel_work_sync(&adapter->db_full_task); | 4276 | cancel_work_sync(&adapter->db_full_task); |
4250 | cancel_work_sync(&adapter->db_drop_task); | 4277 | cancel_work_sync(&adapter->db_drop_task); |
4251 | adapter->tid_release_task_busy = false; | 4278 | adapter->tid_release_task_busy = false; |
4252 | adapter->tid_release_head = NULL; | 4279 | adapter->tid_release_head = NULL; |
4253 | 4280 | ||
4254 | if (adapter->flags & USING_MSIX) { | ||
4255 | free_msix_queue_irqs(adapter); | ||
4256 | free_irq(adapter->msix_info[0].vec, adapter); | ||
4257 | } else | ||
4258 | free_irq(adapter->pdev->irq, adapter); | ||
4259 | quiesce_rx(adapter); | ||
4260 | t4_sge_stop(adapter); | 4281 | t4_sge_stop(adapter); |
4261 | t4_free_sge_resources(adapter); | 4282 | t4_free_sge_resources(adapter); |
4262 | adapter->flags &= ~FULL_INIT_DONE; | 4283 | adapter->flags &= ~FULL_INIT_DONE; |
@@ -4733,8 +4754,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) | |||
4733 | if (ret < 0) | 4754 | if (ret < 0) |
4734 | return ret; | 4755 | return ret; |
4735 | 4756 | ||
4736 | ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, | 4757 | ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64, |
4737 | 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); | 4758 | MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, |
4759 | FW_CMD_CAP_PF); | ||
4738 | if (ret < 0) | 4760 | if (ret < 0) |
4739 | return ret; | 4761 | return ret; |
4740 | 4762 | ||
@@ -5088,10 +5110,15 @@ static int adap_init0(struct adapter *adap) | |||
5088 | enum dev_state state; | 5110 | enum dev_state state; |
5089 | u32 params[7], val[7]; | 5111 | u32 params[7], val[7]; |
5090 | struct fw_caps_config_cmd caps_cmd; | 5112 | struct fw_caps_config_cmd caps_cmd; |
5091 | struct fw_devlog_cmd devlog_cmd; | ||
5092 | u32 devlog_meminfo; | ||
5093 | int reset = 1; | 5113 | int reset = 1; |
5094 | 5114 | ||
5115 | /* Grab Firmware Device Log parameters as early as possible so we have | ||
5116 | * access to them for debugging, etc. | ||
5117 | */ | ||
5118 | ret = t4_init_devlog_params(adap); | ||
5119 | if (ret < 0) | ||
5120 | return ret; | ||
5121 | |||
5095 | /* Contact FW, advertising Master capability */ | 5122 | /* Contact FW, advertising Master capability */ |
5096 | ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); | 5123 | ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); |
5097 | if (ret < 0) { | 5124 | if (ret < 0) { |
@@ -5169,30 +5196,6 @@ static int adap_init0(struct adapter *adap) | |||
5169 | if (ret < 0) | 5196 | if (ret < 0) |
5170 | goto bye; | 5197 | goto bye; |
5171 | 5198 | ||
5172 | /* Read firmware device log parameters. We really need to find a way | ||
5173 | * to get these parameters initialized with some default values (which | ||
5174 | * are likely to be correct) for the case where we either don't | ||
5175 | * attach to the firmware or it's crashed when we probe the adapter. | ||
5176 | * That way we'll still be able to perform early firmware startup | ||
5177 | * debugging ... If the request to get the Firmware's Device Log | ||
5178 | * parameters fails, we'll live so we don't make that a fatal error. | ||
5179 | */ | ||
5180 | memset(&devlog_cmd, 0, sizeof(devlog_cmd)); | ||
5181 | devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) | | ||
5182 | FW_CMD_REQUEST_F | FW_CMD_READ_F); | ||
5183 | devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd)); | ||
5184 | ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), | ||
5185 | &devlog_cmd); | ||
5186 | if (ret == 0) { | ||
5187 | devlog_meminfo = | ||
5188 | ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog); | ||
5189 | adap->params.devlog.memtype = | ||
5190 | FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo); | ||
5191 | adap->params.devlog.start = | ||
5192 | FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4; | ||
5193 | adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog); | ||
5194 | } | ||
5195 | |||
5196 | /* | 5199 | /* |
5197 | * Find out what ports are available to us. Note that we need to do | 5200 | * Find out what ports are available to us. Note that we need to do |
5198 | * this before calling adap_init0_no_config() since it needs nports | 5201 | * this before calling adap_init0_no_config() since it needs nports |
@@ -5293,6 +5296,51 @@ static int adap_init0(struct adapter *adap) | |||
5293 | adap->tids.nftids = val[4] - val[3] + 1; | 5296 | adap->tids.nftids = val[4] - val[3] + 1; |
5294 | adap->sge.ingr_start = val[5]; | 5297 | adap->sge.ingr_start = val[5]; |
5295 | 5298 | ||
5299 | /* qids (ingress/egress) returned from firmware can be anywhere | ||
5300 | * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END. | ||
5301 | * Hence driver needs to allocate memory for this range to | ||
5302 | * store the queue info. Get the highest IQFLINT/EQ index returned | ||
5303 | * in FW_EQ_*_CMD.alloc command. | ||
5304 | */ | ||
5305 | params[0] = FW_PARAM_PFVF(EQ_END); | ||
5306 | params[1] = FW_PARAM_PFVF(IQFLINT_END); | ||
5307 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); | ||
5308 | if (ret < 0) | ||
5309 | goto bye; | ||
5310 | adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; | ||
5311 | adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; | ||
5312 | |||
5313 | adap->sge.egr_map = kcalloc(adap->sge.egr_sz, | ||
5314 | sizeof(*adap->sge.egr_map), GFP_KERNEL); | ||
5315 | if (!adap->sge.egr_map) { | ||
5316 | ret = -ENOMEM; | ||
5317 | goto bye; | ||
5318 | } | ||
5319 | |||
5320 | adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, | ||
5321 | sizeof(*adap->sge.ingr_map), GFP_KERNEL); | ||
5322 | if (!adap->sge.ingr_map) { | ||
5323 | ret = -ENOMEM; | ||
5324 | goto bye; | ||
5325 | } | ||
5326 | |||
5327 | /* Allocate the memory for the various egress queue bitmaps | ||
5328 | * ie starving_fl and txq_maperr. | ||
5329 | */ | ||
5330 | adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), | ||
5331 | sizeof(long), GFP_KERNEL); | ||
5332 | if (!adap->sge.starving_fl) { | ||
5333 | ret = -ENOMEM; | ||
5334 | goto bye; | ||
5335 | } | ||
5336 | |||
5337 | adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), | ||
5338 | sizeof(long), GFP_KERNEL); | ||
5339 | if (!adap->sge.txq_maperr) { | ||
5340 | ret = -ENOMEM; | ||
5341 | goto bye; | ||
5342 | } | ||
5343 | |||
5296 | params[0] = FW_PARAM_PFVF(CLIP_START); | 5344 | params[0] = FW_PARAM_PFVF(CLIP_START); |
5297 | params[1] = FW_PARAM_PFVF(CLIP_END); | 5345 | params[1] = FW_PARAM_PFVF(CLIP_END); |
5298 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); | 5346 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); |
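Because newer firmware may return queue IDs anywhere between the *_START and *_END parameters queried above, the driver now sizes egr_map/ingr_map and the related bitmaps at probe time instead of relying on the old compile-time MAX_EGRQ bound. A minimal sketch of allocating and using a bitmap sized for a runtime-determined number of queues (hypothetical names):

#include <linux/slab.h>
#include <linux/bitmap.h>

static unsigned long *my_alloc_qid_bitmap(unsigned int nqueues)
{
	/* Each unsigned long holds BITS_PER_LONG bits, hence BITS_TO_LONGS(). */
	return kcalloc(BITS_TO_LONGS(nqueues), sizeof(unsigned long),
		       GFP_KERNEL);
}

/* Usage, mirroring the starving_fl/txq_maperr handling:
 *	map = my_alloc_qid_bitmap(egr_sz);
 *	if (!map)
 *		return -ENOMEM;
 *	bitmap_zero(map, egr_sz);
 *	set_bit(qid - egr_start, map);
 */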
@@ -5501,6 +5549,10 @@ static int adap_init0(struct adapter *adap) | |||
5501 | * happened to HW/FW, stop issuing commands. | 5549 | * happened to HW/FW, stop issuing commands. |
5502 | */ | 5550 | */ |
5503 | bye: | 5551 | bye: |
5552 | kfree(adap->sge.egr_map); | ||
5553 | kfree(adap->sge.ingr_map); | ||
5554 | kfree(adap->sge.starving_fl); | ||
5555 | kfree(adap->sge.txq_maperr); | ||
5504 | if (ret != -ETIMEDOUT && ret != -EIO) | 5556 | if (ret != -ETIMEDOUT && ret != -EIO) |
5505 | t4_fw_bye(adap, adap->mbox); | 5557 | t4_fw_bye(adap, adap->mbox); |
5506 | return ret; | 5558 | return ret; |
@@ -5528,6 +5580,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, | |||
5528 | netif_carrier_off(dev); | 5580 | netif_carrier_off(dev); |
5529 | } | 5581 | } |
5530 | spin_unlock(&adap->stats_lock); | 5582 | spin_unlock(&adap->stats_lock); |
5583 | disable_interrupts(adap); | ||
5531 | if (adap->flags & FULL_INIT_DONE) | 5584 | if (adap->flags & FULL_INIT_DONE) |
5532 | cxgb_down(adap); | 5585 | cxgb_down(adap); |
5533 | rtnl_unlock(); | 5586 | rtnl_unlock(); |
@@ -5912,6 +5965,10 @@ static void free_some_resources(struct adapter *adapter) | |||
5912 | 5965 | ||
5913 | t4_free_mem(adapter->l2t); | 5966 | t4_free_mem(adapter->l2t); |
5914 | t4_free_mem(adapter->tids.tid_tab); | 5967 | t4_free_mem(adapter->tids.tid_tab); |
5968 | kfree(adapter->sge.egr_map); | ||
5969 | kfree(adapter->sge.ingr_map); | ||
5970 | kfree(adapter->sge.starving_fl); | ||
5971 | kfree(adapter->sge.txq_maperr); | ||
5915 | disable_msi(adapter); | 5972 | disable_msi(adapter); |
5916 | 5973 | ||
5917 | for_each_port(adapter, i) | 5974 | for_each_port(adapter, i) |
@@ -6237,6 +6294,8 @@ static void remove_one(struct pci_dev *pdev) | |||
6237 | if (is_offload(adapter)) | 6294 | if (is_offload(adapter)) |
6238 | detach_ulds(adapter); | 6295 | detach_ulds(adapter); |
6239 | 6296 | ||
6297 | disable_interrupts(adapter); | ||
6298 | |||
6240 | for_each_port(adapter, i) | 6299 | for_each_port(adapter, i) |
6241 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) | 6300 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
6242 | unregister_netdev(adapter->port[i]); | 6301 | unregister_netdev(adapter->port[i]); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b4b9f6048fe7..b688b32c21fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -2171,7 +2171,7 @@ static void sge_rx_timer_cb(unsigned long data) | |||
2171 | struct adapter *adap = (struct adapter *)data; | 2171 | struct adapter *adap = (struct adapter *)data; |
2172 | struct sge *s = &adap->sge; | 2172 | struct sge *s = &adap->sge; |
2173 | 2173 | ||
2174 | for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) | 2174 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
2175 | for (m = s->starving_fl[i]; m; m &= m - 1) { | 2175 | for (m = s->starving_fl[i]; m; m &= m - 1) { |
2176 | struct sge_eth_rxq *rxq; | 2176 | struct sge_eth_rxq *rxq; |
2177 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; | 2177 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; |
@@ -2259,7 +2259,7 @@ static void sge_tx_timer_cb(unsigned long data) | |||
2259 | struct adapter *adap = (struct adapter *)data; | 2259 | struct adapter *adap = (struct adapter *)data; |
2260 | struct sge *s = &adap->sge; | 2260 | struct sge *s = &adap->sge; |
2261 | 2261 | ||
2262 | for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) | 2262 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
2263 | for (m = s->txq_maperr[i]; m; m &= m - 1) { | 2263 | for (m = s->txq_maperr[i]; m; m &= m - 1) { |
2264 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; | 2264 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; |
2265 | struct sge_ofld_txq *txq = s->egr_map[id]; | 2265 | struct sge_ofld_txq *txq = s->egr_map[id]; |
@@ -2741,7 +2741,8 @@ void t4_free_sge_resources(struct adapter *adap) | |||
2741 | free_rspq_fl(adap, &adap->sge.intrq, NULL); | 2741 | free_rspq_fl(adap, &adap->sge.intrq, NULL); |
2742 | 2742 | ||
2743 | /* clear the reverse egress queue map */ | 2743 | /* clear the reverse egress queue map */ |
2744 | memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); | 2744 | memset(adap->sge.egr_map, 0, |
2745 | adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); | ||
2745 | } | 2746 | } |
2746 | 2747 | ||
2747 | void t4_sge_start(struct adapter *adap) | 2748 | void t4_sge_start(struct adapter *adap) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4d643b65265e..ee394dc68303 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | |||
449 | * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC | 449 | * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC |
450 | * @addr: address within indicated memory type | 450 | * @addr: address within indicated memory type |
451 | * @len: amount of memory to transfer | 451 | * @len: amount of memory to transfer |
452 | * @buf: host memory buffer | 452 | * @hbuf: host memory buffer |
453 | * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) | 453 | * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) |
454 | * | 454 | * |
455 | * Reads/writes an [almost] arbitrary memory region in the firmware: the | 455 | * Reads/writes an [almost] arbitrary memory region in the firmware: the |
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | |||
460 | * caller's responsibility to perform appropriate byte order conversions. | 460 | * caller's responsibility to perform appropriate byte order conversions. |
461 | */ | 461 | */ |
462 | int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | 462 | int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, |
463 | u32 len, __be32 *buf, int dir) | 463 | u32 len, void *hbuf, int dir) |
464 | { | 464 | { |
465 | u32 pos, offset, resid, memoffset; | 465 | u32 pos, offset, resid, memoffset; |
466 | u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; | 466 | u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; |
467 | u32 *buf; | ||
467 | 468 | ||
468 | /* Argument sanity checks ... | 469 | /* Argument sanity checks ... |
469 | */ | 470 | */ |
470 | if (addr & 0x3) | 471 | if (addr & 0x3 || (uintptr_t)hbuf & 0x3) |
471 | return -EINVAL; | 472 | return -EINVAL; |
473 | buf = (u32 *)hbuf; | ||
472 | 474 | ||
473 | /* It's convenient to be able to handle lengths which aren't a | 475 | /* It's convenient to be able to handle lengths which aren't a |
474 | * multiple of 32-bits because we often end up transferring files to | 476 | * multiple of 32-bits because we often end up transferring files to |
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | |||
532 | 534 | ||
533 | /* Transfer data to/from the adapter as long as there's an integral | 535 | /* Transfer data to/from the adapter as long as there's an integral |
534 | * number of 32-bit transfers to complete. | 536 | * number of 32-bit transfers to complete. |
537 | * | ||
538 | * A note on Endianness issues: | ||
539 | * | ||
540 | * The "register" reads and writes below from/to the PCI-E Memory | ||
541 | * Window invoke the standard adapter Big-Endian to PCI-E Link | ||
542 | * Little-Endian "swizzle." As a result, if we have the following | ||
543 | * data in adapter memory: | ||
544 | * | ||
545 | * Memory: ... | b0 | b1 | b2 | b3 | ... | ||
546 | * Address: i+0 i+1 i+2 i+3 | ||
547 | * | ||
548 | * Then a read of the adapter memory via the PCI-E Memory Window | ||
549 | * will yield: | ||
550 | * | ||
551 | * x = readl(i) | ||
552 | * 31 0 | ||
553 | * [ b3 | b2 | b1 | b0 ] | ||
554 | * | ||
555 | * If this value is stored into local memory on a Little-Endian system | ||
556 | * it will show up correctly in local memory as: | ||
557 | * | ||
558 | * ( ..., b0, b1, b2, b3, ... ) | ||
559 | * | ||
560 | * But on a Big-Endian system, the store will show up in memory | ||
561 | * incorrectly swizzled as: | ||
562 | * | ||
563 | * ( ..., b3, b2, b1, b0, ... ) | ||
564 | * | ||
565 | * So we need to account for this in the reads and writes to the | ||
566 | * PCI-E Memory Window below by undoing the register read/write | ||
567 | * swizzles. | ||
535 | */ | 568 | */ |
536 | while (len > 0) { | 569 | while (len > 0) { |
537 | if (dir == T4_MEMORY_READ) | 570 | if (dir == T4_MEMORY_READ) |
538 | *buf++ = (__force __be32) t4_read_reg(adap, | 571 | *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap, |
539 | mem_base + offset); | 572 | mem_base + offset)); |
540 | else | 573 | else |
541 | t4_write_reg(adap, mem_base + offset, | 574 | t4_write_reg(adap, mem_base + offset, |
542 | (__force u32) *buf++); | 575 | (__force u32)cpu_to_le32(*buf++)); |
543 | offset += sizeof(__be32); | 576 | offset += sizeof(__be32); |
544 | len -= sizeof(__be32); | 577 | len -= sizeof(__be32); |
545 | 578 | ||
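The loop above undoes the memory-window byte swizzle explicitly: the window hands the adapter's big-endian byte stream to the host as a little-endian 32-bit value, so storing le32_to_cpu() of the raw register read (and writing back cpu_to_le32() of the host word) keeps the bytes in adapter order in host memory on both little- and big-endian machines. A tiny illustration of why the conversion is a no-op on little-endian hosts and a byte swap on big-endian ones:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Suppose the raw window read returned 0xb3b2b1b0, i.e. adapter bytes
 * b0 b1 b2 b3 packed LSB-first into the register value.
 */
static inline u32 my_undo_swizzle(u32 raw)
{
	/* Little-endian host: returns raw unchanged; storing it yields the
	 * byte sequence b0 b1 b2 b3 in memory.
	 * Big-endian host: swaps to 0xb0b1b2b3, so the stored bytes are
	 * again b0 b1 b2 b3.
	 */
	return le32_to_cpu((__force __le32)raw);
}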
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | |||
568 | */ | 601 | */ |
569 | if (resid) { | 602 | if (resid) { |
570 | union { | 603 | union { |
571 | __be32 word; | 604 | u32 word; |
572 | char byte[4]; | 605 | char byte[4]; |
573 | } last; | 606 | } last; |
574 | unsigned char *bp; | 607 | unsigned char *bp; |
575 | int i; | 608 | int i; |
576 | 609 | ||
577 | if (dir == T4_MEMORY_READ) { | 610 | if (dir == T4_MEMORY_READ) { |
578 | last.word = (__force __be32) t4_read_reg(adap, | 611 | last.word = le32_to_cpu( |
579 | mem_base + offset); | 612 | (__force __le32)t4_read_reg(adap, |
613 | mem_base + offset)); | ||
580 | for (bp = (unsigned char *)buf, i = resid; i < 4; i++) | 614 | for (bp = (unsigned char *)buf, i = resid; i < 4; i++) |
581 | bp[i] = last.byte[i]; | 615 | bp[i] = last.byte[i]; |
582 | } else { | 616 | } else { |
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | |||
584 | for (i = resid; i < 4; i++) | 618 | for (i = resid; i < 4; i++) |
585 | last.byte[i] = 0; | 619 | last.byte[i] = 0; |
586 | t4_write_reg(adap, mem_base + offset, | 620 | t4_write_reg(adap, mem_base + offset, |
587 | (__force u32) last.word); | 621 | (__force u32)cpu_to_le32(last.word)); |
588 | } | 622 | } |
589 | } | 623 | } |
590 | 624 | ||
@@ -1086,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, | |||
1086 | } | 1120 | } |
1087 | 1121 | ||
1088 | /* Installed successfully, update the cached header too. */ | 1122 | /* Installed successfully, update the cached header too. */ |
1089 | memcpy(card_fw, fs_fw, sizeof(*card_fw)); | 1123 | *card_fw = *fs_fw; |
1090 | card_fw_usable = 1; | 1124 | card_fw_usable = 1; |
1091 | *reset = 0; /* already reset as part of load_fw */ | 1125 | *reset = 0; /* already reset as part of load_fw */ |
1092 | } | 1126 | } |
@@ -4425,6 +4459,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, | |||
4425 | } | 4459 | } |
4426 | 4460 | ||
4427 | /** | 4461 | /** |
4462 | * t4_init_devlog_params - initialize adapter->params.devlog | ||
4463 | * @adap: the adapter | ||
4464 | * | ||
4465 | * Initialize various fields of the adapter's Firmware Device Log | ||
4466 | * Parameters structure. | ||
4467 | */ | ||
4468 | int t4_init_devlog_params(struct adapter *adap) | ||
4469 | { | ||
4470 | struct devlog_params *dparams = &adap->params.devlog; | ||
4471 | u32 pf_dparams; | ||
4472 | unsigned int devlog_meminfo; | ||
4473 | struct fw_devlog_cmd devlog_cmd; | ||
4474 | int ret; | ||
4475 | |||
4476 | /* If we're dealing with newer firmware, the Device Log Parameters | ||
4477 | * are stored in a designated register which allows us to access the | ||
4478 | * Device Log even if we can't talk to the firmware. | ||
4479 | */ | ||
4480 | pf_dparams = | ||
4481 | t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG)); | ||
4482 | if (pf_dparams) { | ||
4483 | unsigned int nentries, nentries128; | ||
4484 | |||
4485 | dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams); | ||
4486 | dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4; | ||
4487 | |||
4488 | nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams); | ||
4489 | nentries = (nentries128 + 1) * 128; | ||
4490 | dparams->size = nentries * sizeof(struct fw_devlog_e); | ||
4491 | |||
4492 | return 0; | ||
4493 | } | ||
4494 | |||
4495 | /* Otherwise, ask the firmware for its Device Log Parameters. | ||
4496 | */ | ||
4497 | memset(&devlog_cmd, 0, sizeof(devlog_cmd)); | ||
4498 | devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) | | ||
4499 | FW_CMD_REQUEST_F | FW_CMD_READ_F); | ||
4500 | devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd)); | ||
4501 | ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), | ||
4502 | &devlog_cmd); | ||
4503 | if (ret) | ||
4504 | return ret; | ||
4505 | |||
4506 | devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog); | ||
4507 | dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo); | ||
4508 | dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4; | ||
4509 | dparams->size = ntohl(devlog_cmd.memsize_devlog); | ||
4510 | |||
4511 | return 0; | ||
4512 | } | ||
4513 | |||
4514 | /** | ||
4428 | * t4_init_sge_params - initialize adap->params.sge | 4515 | * t4_init_sge_params - initialize adap->params.sge |
4429 | * @adapter: the adapter | 4516 | * @adapter: the adapter |
4430 | * | 4517 | * |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 231a725f6d5d..326674b19983 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
@@ -63,6 +63,8 @@ | |||
63 | #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | 63 | #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) |
64 | #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | 64 | #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) |
65 | 65 | ||
66 | #define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | ||
67 | |||
66 | #define SGE_PF_KDOORBELL_A 0x0 | 68 | #define SGE_PF_KDOORBELL_A 0x0 |
67 | 69 | ||
68 | #define QID_S 15 | 70 | #define QID_S 15 |
@@ -707,6 +709,7 @@ | |||
707 | #define PFNUM_V(x) ((x) << PFNUM_S) | 709 | #define PFNUM_V(x) ((x) << PFNUM_S) |
708 | 710 | ||
709 | #define PCIE_FW_A 0x30b8 | 711 | #define PCIE_FW_A 0x30b8 |
712 | #define PCIE_FW_PF_A 0x30bc | ||
710 | 713 | ||
711 | #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908 | 714 | #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908 |
712 | 715 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 9b353a88cbda..a4a19e0ec7f5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
@@ -101,7 +101,7 @@ enum fw_wr_opcodes { | |||
101 | FW_RI_BIND_MW_WR = 0x18, | 101 | FW_RI_BIND_MW_WR = 0x18, |
102 | FW_RI_FR_NSMR_WR = 0x19, | 102 | FW_RI_FR_NSMR_WR = 0x19, |
103 | FW_RI_INV_LSTAG_WR = 0x1a, | 103 | FW_RI_INV_LSTAG_WR = 0x1a, |
104 | FW_LASTC2E_WR = 0x40 | 104 | FW_LASTC2E_WR = 0x70 |
105 | }; | 105 | }; |
106 | 106 | ||
107 | struct fw_wr_hdr { | 107 | struct fw_wr_hdr { |
@@ -993,6 +993,7 @@ enum fw_memtype_cf { | |||
993 | FW_MEMTYPE_CF_EXTMEM = 0x2, | 993 | FW_MEMTYPE_CF_EXTMEM = 0x2, |
994 | FW_MEMTYPE_CF_FLASH = 0x4, | 994 | FW_MEMTYPE_CF_FLASH = 0x4, |
995 | FW_MEMTYPE_CF_INTERNAL = 0x5, | 995 | FW_MEMTYPE_CF_INTERNAL = 0x5, |
996 | FW_MEMTYPE_CF_EXTMEM1 = 0x6, | ||
996 | }; | 997 | }; |
997 | 998 | ||
998 | struct fw_caps_config_cmd { | 999 | struct fw_caps_config_cmd { |
@@ -1035,6 +1036,7 @@ enum fw_params_mnem { | |||
1035 | FW_PARAMS_MNEM_PFVF = 2, /* function params */ | 1036 | FW_PARAMS_MNEM_PFVF = 2, /* function params */ |
1036 | FW_PARAMS_MNEM_REG = 3, /* limited register access */ | 1037 | FW_PARAMS_MNEM_REG = 3, /* limited register access */ |
1037 | FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ | 1038 | FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ |
1039 | FW_PARAMS_MNEM_CHNET = 5, /* chnet params */ | ||
1038 | FW_PARAMS_MNEM_LAST | 1040 | FW_PARAMS_MNEM_LAST |
1039 | }; | 1041 | }; |
1040 | 1042 | ||
@@ -3102,7 +3104,8 @@ enum fw_devlog_facility { | |||
3102 | FW_DEVLOG_FACILITY_FCOE = 0x2E, | 3104 | FW_DEVLOG_FACILITY_FCOE = 0x2E, |
3103 | FW_DEVLOG_FACILITY_FOISCSI = 0x30, | 3105 | FW_DEVLOG_FACILITY_FOISCSI = 0x30, |
3104 | FW_DEVLOG_FACILITY_FOFCOE = 0x32, | 3106 | FW_DEVLOG_FACILITY_FOFCOE = 0x32, |
3105 | FW_DEVLOG_FACILITY_MAX = 0x32, | 3107 | FW_DEVLOG_FACILITY_CHNET = 0x34, |
3108 | FW_DEVLOG_FACILITY_MAX = 0x34, | ||
3106 | }; | 3109 | }; |
3107 | 3110 | ||
3108 | /* log message format */ | 3111 | /* log message format */ |
@@ -3139,4 +3142,36 @@ struct fw_devlog_cmd { | |||
3139 | (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \ | 3142 | (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \ |
3140 | FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M) | 3143 | FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M) |
3141 | 3144 | ||
3145 | /* P C I E F W P F 7 R E G I S T E R */ | ||
3146 | |||
3147 | /* PF7 stores the Firmware Device Log parameters which allow Host Drivers to | ||
3148 | * access the "devlog" without needing to contact firmware. The encoding is | ||
3149 | * mostly the same as that returned by the DEVLOG command except for the size | ||
3150 | * which is encoded here as the number of 128-entry multiples, minus 1, rather | ||
3151 | * than the memory size as is done in the DEVLOG command. Thus, 0 means 128 | ||
3152 | * and 15 means 2048. This of course in turn constrains the allowed values | ||
3153 | * for the devlog size ... | ||
3154 | */ | ||
3155 | #define PCIE_FW_PF_DEVLOG 7 | ||
3156 | |||
3157 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28 | ||
3158 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf | ||
3159 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \ | ||
3160 | ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S) | ||
3161 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \ | ||
3162 | (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \ | ||
3163 | PCIE_FW_PF_DEVLOG_NENTRIES128_M) | ||
3164 | |||
3165 | #define PCIE_FW_PF_DEVLOG_ADDR16_S 4 | ||
3166 | #define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff | ||
3167 | #define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S) | ||
3168 | #define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \ | ||
3169 | (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M) | ||
3170 | |||
3171 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0 | ||
3172 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf | ||
3173 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S) | ||
3174 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \ | ||
3175 | (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M) | ||
3176 | |||
3142 | #endif /* _T4FW_INTERFACE_H_ */ | 3177 | #endif /* _T4FW_INTERFACE_H_ */ |
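The PCIE_FW_PF_DEVLOG register added above packs the device-log location into three fields: memory type, a 16-byte-aligned start address, and the entry count in units of 128. The following is a minimal sketch of the decode, mirroring what t4_init_devlog_params() does with the new *_G() macros; example_decode_devlog() and its pr_info() reporting are illustrative only, and in the driver the value comes from reading PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG).

/* Sketch only: decode a PCIE_FW_PF_DEVLOG register value into the fields
 * stored in adapter->params.devlog. Assumes the t4fw_api.h definitions
 * above (field macros and struct fw_devlog_e) are in scope.
 */
static void example_decode_devlog(u32 pf_dparams)
{
	unsigned int memtype, nentries128, nentries;
	u32 start, size;

	memtype     = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);     /* bits 3:0 */
	start       = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4; /* 16B units */
	nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams); /* bits 31:28 */
	nentries    = (nentries128 + 1) * 128;  /* 0 -> 128 entries, 15 -> 2048 */
	size        = nentries * sizeof(struct fw_devlog_e);

	pr_info("devlog: memtype %u start 0x%x size %u\n", memtype, start, size);
}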
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index e2bd3f747858..b9d1cbac0eee 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | |||
@@ -36,13 +36,13 @@ | |||
36 | #define __T4FW_VERSION_H__ | 36 | #define __T4FW_VERSION_H__ |
37 | 37 | ||
38 | #define T4FW_VERSION_MAJOR 0x01 | 38 | #define T4FW_VERSION_MAJOR 0x01 |
39 | #define T4FW_VERSION_MINOR 0x0C | 39 | #define T4FW_VERSION_MINOR 0x0D |
40 | #define T4FW_VERSION_MICRO 0x19 | 40 | #define T4FW_VERSION_MICRO 0x20 |
41 | #define T4FW_VERSION_BUILD 0x00 | 41 | #define T4FW_VERSION_BUILD 0x00 |
42 | 42 | ||
43 | #define T5FW_VERSION_MAJOR 0x01 | 43 | #define T5FW_VERSION_MAJOR 0x01 |
44 | #define T5FW_VERSION_MINOR 0x0C | 44 | #define T5FW_VERSION_MINOR 0x0D |
45 | #define T5FW_VERSION_MICRO 0x19 | 45 | #define T5FW_VERSION_MICRO 0x20 |
46 | #define T5FW_VERSION_BUILD 0x00 | 46 | #define T5FW_VERSION_BUILD 0x00 |
47 | 47 | ||
48 | #endif | 48 | #endif |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 0545f0de1c52..e0d711071afb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
@@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, | |||
1004 | ? (tq->pidx - 1) | 1004 | ? (tq->pidx - 1) |
1005 | : (tq->size - 1)); | 1005 | : (tq->size - 1)); |
1006 | __be64 *src = (__be64 *)&tq->desc[index]; | 1006 | __be64 *src = (__be64 *)&tq->desc[index]; |
1007 | __be64 __iomem *dst = (__be64 *)(tq->bar2_addr + | 1007 | __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr + |
1008 | SGE_UDB_WCDOORBELL); | 1008 | SGE_UDB_WCDOORBELL); |
1009 | unsigned int count = EQ_UNIT / sizeof(__be64); | 1009 | unsigned int count = EQ_UNIT / sizeof(__be64); |
1010 | 1010 | ||
@@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, | |||
1018 | * DMA. | 1018 | * DMA. |
1019 | */ | 1019 | */ |
1020 | while (count) { | 1020 | while (count) { |
1021 | writeq(*src, dst); | 1021 | /* the (__force u64) is because the compiler |
1022 | * doesn't understand the endian swizzling | ||
1023 | * going on | ||
1024 | */ | ||
1025 | writeq((__force u64)*src, dst); | ||
1022 | src++; | 1026 | src++; |
1023 | dst++; | 1027 | dst++; |
1024 | count--; | 1028 | count--; |
@@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1252 | BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); | 1256 | BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); |
1253 | wr = (void *)&txq->q.desc[txq->q.pidx]; | 1257 | wr = (void *)&txq->q.desc[txq->q.pidx]; |
1254 | wr->equiq_to_len16 = cpu_to_be32(wr_mid); | 1258 | wr->equiq_to_len16 = cpu_to_be32(wr_mid); |
1255 | wr->r3[0] = cpu_to_be64(0); | 1259 | wr->r3[0] = cpu_to_be32(0); |
1256 | wr->r3[1] = cpu_to_be64(0); | 1260 | wr->r3[1] = cpu_to_be32(0); |
1257 | skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); | 1261 | skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); |
1258 | end = (u64 *)wr + flits; | 1262 | end = (u64 *)wr + flits; |
1259 | 1263 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 1b5506df35b1..280b4a215849 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | |||
@@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
210 | 210 | ||
211 | if (rpl) { | 211 | if (rpl) { |
212 | /* request bit in high-order BE word */ | 212 | /* request bit in high-order BE word */ |
213 | WARN_ON((be32_to_cpu(*(const u32 *)cmd) | 213 | WARN_ON((be32_to_cpu(*(const __be32 *)cmd) |
214 | & FW_CMD_REQUEST_F) == 0); | 214 | & FW_CMD_REQUEST_F) == 0); |
215 | get_mbox_rpl(adapter, rpl, size, mbox_data); | 215 | get_mbox_rpl(adapter, rpl, size, mbox_data); |
216 | WARN_ON((be32_to_cpu(*(u32 *)rpl) | 216 | WARN_ON((be32_to_cpu(*(__be32 *)rpl) |
217 | & FW_CMD_REQUEST_F) != 0); | 217 | & FW_CMD_REQUEST_F) != 0); |
218 | } | 218 | } |
219 | t4_write_reg(adapter, mbox_ctl, | 219 | t4_write_reg(adapter, mbox_ctl, |
@@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter, | |||
484 | * o The BAR2 Queue ID. | 484 | * o The BAR2 Queue ID. |
485 | * o The BAR2 Queue ID Offset into the BAR2 page. | 485 | * o The BAR2 Queue ID Offset into the BAR2 page. |
486 | */ | 486 | */ |
487 | bar2_page_offset = ((qid >> qpp_shift) << page_shift); | 487 | bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); |
488 | bar2_qid = qid & qpp_mask; | 488 | bar2_qid = qid & qpp_mask; |
489 | bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; | 489 | bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; |
490 | 490 | ||
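The (u64) cast added to bar2_page_offset above matters because qid, qpp_shift, and page_shift are 32-bit quantities, so without it the shift is performed in 32-bit arithmetic and the high bits are lost before the result is widened to the 64-bit offset. A standalone sketch (the values are made up purely to show the truncation):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t qid = 0x30000, qpp_shift = 0, page_shift = 16;
	uint64_t wrong, right;

	/* Shift happens in 32-bit arithmetic: the result wraps before the
	 * widening assignment.
	 */
	wrong = (qid >> qpp_shift) << page_shift;

	/* Widen first, as the patch does, so the shift is done in 64 bits. */
	right = (uint64_t)(qid >> qpp_shift) << page_shift;

	printf("wrong=0x%llx right=0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}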
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 9cbe038a388e..a5179bfcdc2c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) | |||
272 | } | 272 | } |
273 | 273 | ||
274 | if (ENIC_TEST_INTR(pba, notify_intr)) { | 274 | if (ENIC_TEST_INTR(pba, notify_intr)) { |
275 | vnic_intr_return_all_credits(&enic->intr[notify_intr]); | ||
276 | enic_notify_check(enic); | 275 | enic_notify_check(enic); |
276 | vnic_intr_return_all_credits(&enic->intr[notify_intr]); | ||
277 | } | 277 | } |
278 | 278 | ||
279 | if (ENIC_TEST_INTR(pba, err_intr)) { | 279 | if (ENIC_TEST_INTR(pba, err_intr)) { |
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data) | |||
346 | struct enic *enic = data; | 346 | struct enic *enic = data; |
347 | unsigned int intr = enic_msix_notify_intr(enic); | 347 | unsigned int intr = enic_msix_notify_intr(enic); |
348 | 348 | ||
349 | vnic_intr_return_all_credits(&enic->intr[intr]); | ||
350 | enic_notify_check(enic); | 349 | enic_notify_check(enic); |
350 | vnic_intr_return_all_credits(&enic->intr[intr]); | ||
351 | 351 | ||
352 | return IRQ_HANDLED; | 352 | return IRQ_HANDLED; |
353 | } | 353 | } |
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 3b42556f7f8d..ed41559bae77 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c | |||
@@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev) | |||
589 | (unsigned int)tp->rx_ring[i].buffer1, | 589 | (unsigned int)tp->rx_ring[i].buffer1, |
590 | (unsigned int)tp->rx_ring[i].buffer2, | 590 | (unsigned int)tp->rx_ring[i].buffer2, |
591 | buf[0], buf[1], buf[2]); | 591 | buf[0], buf[1], buf[2]); |
592 | for (j = 0; buf[j] != 0xee && j < 1600; j++) | 592 | for (j = 0; ((j < 1600) && buf[j] != 0xee); j++) |
593 | if (j < 100) | 593 | if (j < 100) |
594 | pr_cont(" %02x", buf[j]); | 594 | pr_cont(" %02x", buf[j]); |
595 | pr_cont(" j=%d\n", j); | 595 | pr_cont(" j=%d\n", j); |
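The tulip change above swaps the loop conditions so the index bound is tested before buf[j] is dereferenced; with the original ordering, buf[1600] would be read once on packets that never contain the 0xee marker. A standalone sketch of the corrected ordering (the buffer contents are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned char buf[1600] = { 0 };	/* illustrative buffer */
	int j;

	/* Test the bound before dereferencing buf[j]; short-circuit
	 * evaluation then guarantees no out-of-bounds read.
	 */
	for (j = 0; (j < 1600) && buf[j] != 0xee; j++)
		;

	printf("stopped at j=%d\n", j);
	return 0;
}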
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 27de37aa90af..27b9fe99a9bd 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
@@ -354,6 +354,7 @@ struct be_vf_cfg { | |||
354 | u16 vlan_tag; | 354 | u16 vlan_tag; |
355 | u32 tx_rate; | 355 | u32 tx_rate; |
356 | u32 plink_tracking; | 356 | u32 plink_tracking; |
357 | u32 privileges; | ||
357 | }; | 358 | }; |
358 | 359 | ||
359 | enum vf_state { | 360 | enum vf_state { |
@@ -423,6 +424,7 @@ struct be_adapter { | |||
423 | 424 | ||
424 | u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ | 425 | u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ |
425 | u8 __iomem *db; /* Door Bell */ | 426 | u8 __iomem *db; /* Door Bell */ |
427 | u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */ | ||
426 | 428 | ||
427 | struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ | 429 | struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ |
428 | struct be_dma_mem mbox_mem; | 430 | struct be_dma_mem mbox_mem; |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 36916cfa70f9..7f05f309e935 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, | |||
1902 | { | 1902 | { |
1903 | int num_eqs, i = 0; | 1903 | int num_eqs, i = 0; |
1904 | 1904 | ||
1905 | if (lancer_chip(adapter) && num > 8) { | 1905 | while (num) { |
1906 | while (num) { | 1906 | num_eqs = min(num, 8); |
1907 | num_eqs = min(num, 8); | 1907 | __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); |
1908 | __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); | 1908 | i += num_eqs; |
1909 | i += num_eqs; | 1909 | num -= num_eqs; |
1910 | num -= num_eqs; | ||
1911 | } | ||
1912 | } else { | ||
1913 | __be_cmd_modify_eqd(adapter, set_eqd, num); | ||
1914 | } | 1910 | } |
1915 | 1911 | ||
1916 | return 0; | 1912 | return 0; |
@@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, | |||
1918 | 1914 | ||
1919 | /* Uses synchronous mcc */ | 1915 | /* Uses synchronous mcc */ |
1920 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | 1916 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, |
1921 | u32 num) | 1917 | u32 num, u32 domain) |
1922 | { | 1918 | { |
1923 | struct be_mcc_wrb *wrb; | 1919 | struct be_mcc_wrb *wrb; |
1924 | struct be_cmd_req_vlan_config *req; | 1920 | struct be_cmd_req_vlan_config *req; |
@@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | |||
1936 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | 1932 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, |
1937 | OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), | 1933 | OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), |
1938 | wrb, NULL); | 1934 | wrb, NULL); |
1935 | req->hdr.domain = domain; | ||
1939 | 1936 | ||
1940 | req->interface_id = if_id; | 1937 | req->interface_id = if_id; |
1941 | req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; | 1938 | req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index db761e8e42a3..a7634a3f052a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
@@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, | |||
2256 | int be_cmd_get_fw_ver(struct be_adapter *adapter); | 2256 | int be_cmd_get_fw_ver(struct be_adapter *adapter); |
2257 | int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); | 2257 | int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); |
2258 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | 2258 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, |
2259 | u32 num); | 2259 | u32 num, u32 domain); |
2260 | int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); | 2260 | int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); |
2261 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); | 2261 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); |
2262 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); | 2262 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0a816859aca5..e6b790f0d9dc 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter) | |||
1171 | for_each_set_bit(i, adapter->vids, VLAN_N_VID) | 1171 | for_each_set_bit(i, adapter->vids, VLAN_N_VID) |
1172 | vids[num++] = cpu_to_le16(i); | 1172 | vids[num++] = cpu_to_le16(i); |
1173 | 1173 | ||
1174 | status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); | 1174 | status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0); |
1175 | if (status) { | 1175 | if (status) { |
1176 | dev_err(dev, "Setting HW VLAN filtering failed\n"); | 1176 | dev_err(dev, "Setting HW VLAN filtering failed\n"); |
1177 | /* Set to VLAN promisc mode as setting VLAN filter failed */ | 1177 | /* Set to VLAN promisc mode as setting VLAN filter failed */ |
@@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf, | |||
1380 | return 0; | 1380 | return 0; |
1381 | } | 1381 | } |
1382 | 1382 | ||
1383 | static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan) | ||
1384 | { | ||
1385 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | ||
1386 | u16 vids[BE_NUM_VLANS_SUPPORTED]; | ||
1387 | int vf_if_id = vf_cfg->if_handle; | ||
1388 | int status; | ||
1389 | |||
1390 | /* Enable Transparent VLAN Tagging */ | ||
1391 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0); | ||
1392 | if (status) | ||
1393 | return status; | ||
1394 | |||
1395 | /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */ | ||
1396 | vids[0] = 0; | ||
1397 | status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1); | ||
1398 | if (!status) | ||
1399 | dev_info(&adapter->pdev->dev, | ||
1400 | "Cleared guest VLANs on VF%d", vf); | ||
1401 | |||
1402 | /* After TVT is enabled, disallow VFs to program VLAN filters */ | ||
1403 | if (vf_cfg->privileges & BE_PRIV_FILTMGMT) { | ||
1404 | status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges & | ||
1405 | ~BE_PRIV_FILTMGMT, vf + 1); | ||
1406 | if (!status) | ||
1407 | vf_cfg->privileges &= ~BE_PRIV_FILTMGMT; | ||
1408 | } | ||
1409 | return 0; | ||
1410 | } | ||
1411 | |||
1412 | static int be_clear_vf_tvt(struct be_adapter *adapter, int vf) | ||
1413 | { | ||
1414 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | ||
1415 | struct device *dev = &adapter->pdev->dev; | ||
1416 | int status; | ||
1417 | |||
1418 | /* Reset Transparent VLAN Tagging. */ | ||
1419 | status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1, | ||
1420 | vf_cfg->if_handle, 0); | ||
1421 | if (status) | ||
1422 | return status; | ||
1423 | |||
1424 | /* Allow VFs to program VLAN filtering */ | ||
1425 | if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { | ||
1426 | status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges | | ||
1427 | BE_PRIV_FILTMGMT, vf + 1); | ||
1428 | if (!status) { | ||
1429 | vf_cfg->privileges |= BE_PRIV_FILTMGMT; | ||
1430 | dev_info(dev, "VF%d: FILTMGMT priv enabled", vf); | ||
1431 | } | ||
1432 | } | ||
1433 | |||
1434 | dev_info(dev, | ||
1435 | "Disable/re-enable i/f in VM to clear Transparent VLAN tag"); | ||
1436 | return 0; | ||
1437 | } | ||
1438 | |||
1383 | static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) | 1439 | static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) |
1384 | { | 1440 | { |
1385 | struct be_adapter *adapter = netdev_priv(netdev); | 1441 | struct be_adapter *adapter = netdev_priv(netdev); |
1386 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | 1442 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; |
1387 | int status = 0; | 1443 | int status; |
1388 | 1444 | ||
1389 | if (!sriov_enabled(adapter)) | 1445 | if (!sriov_enabled(adapter)) |
1390 | return -EPERM; | 1446 | return -EPERM; |
@@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) | |||
1394 | 1450 | ||
1395 | if (vlan || qos) { | 1451 | if (vlan || qos) { |
1396 | vlan |= qos << VLAN_PRIO_SHIFT; | 1452 | vlan |= qos << VLAN_PRIO_SHIFT; |
1397 | if (vf_cfg->vlan_tag != vlan) | 1453 | status = be_set_vf_tvt(adapter, vf, vlan); |
1398 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | ||
1399 | vf_cfg->if_handle, 0); | ||
1400 | } else { | 1454 | } else { |
1401 | /* Reset Transparent Vlan Tagging. */ | 1455 | status = be_clear_vf_tvt(adapter, vf); |
1402 | status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, | ||
1403 | vf + 1, vf_cfg->if_handle, 0); | ||
1404 | } | 1456 | } |
1405 | 1457 | ||
1406 | if (status) { | 1458 | if (status) { |
1407 | dev_err(&adapter->pdev->dev, | 1459 | dev_err(&adapter->pdev->dev, |
1408 | "VLAN %d config on VF %d failed : %#x\n", vlan, | 1460 | "VLAN %d config on VF %d failed : %#x\n", vlan, vf, |
1409 | vf, status); | 1461 | status); |
1410 | return be_cmd_status(status); | 1462 | return be_cmd_status(status); |
1411 | } | 1463 | } |
1412 | 1464 | ||
1413 | vf_cfg->vlan_tag = vlan; | 1465 | vf_cfg->vlan_tag = vlan; |
1414 | |||
1415 | return 0; | 1466 | return 0; |
1416 | } | 1467 | } |
1417 | 1468 | ||
@@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter) | |||
2772 | } | 2823 | } |
2773 | } | 2824 | } |
2774 | } else { | 2825 | } else { |
2775 | pci_read_config_dword(adapter->pdev, | 2826 | ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW); |
2776 | PCICFG_UE_STATUS_LOW, &ue_lo); | 2827 | ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH); |
2777 | pci_read_config_dword(adapter->pdev, | 2828 | ue_lo_mask = ioread32(adapter->pcicfg + |
2778 | PCICFG_UE_STATUS_HIGH, &ue_hi); | 2829 | PCICFG_UE_STATUS_LOW_MASK); |
2779 | pci_read_config_dword(adapter->pdev, | 2830 | ue_hi_mask = ioread32(adapter->pcicfg + |
2780 | PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); | 2831 | PCICFG_UE_STATUS_HI_MASK); |
2781 | pci_read_config_dword(adapter->pdev, | ||
2782 | PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); | ||
2783 | 2832 | ||
2784 | ue_lo = (ue_lo & ~ue_lo_mask); | 2833 | ue_lo = (ue_lo & ~ue_lo_mask); |
2785 | ue_hi = (ue_hi & ~ue_hi_mask); | 2834 | ue_hi = (ue_hi & ~ue_hi_mask); |
@@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, | |||
3339 | u32 cap_flags, u32 vf) | 3388 | u32 cap_flags, u32 vf) |
3340 | { | 3389 | { |
3341 | u32 en_flags; | 3390 | u32 en_flags; |
3342 | int status; | ||
3343 | 3391 | ||
3344 | en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | | 3392 | en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | |
3345 | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | | 3393 | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | |
@@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, | |||
3347 | 3395 | ||
3348 | en_flags &= cap_flags; | 3396 | en_flags &= cap_flags; |
3349 | 3397 | ||
3350 | status = be_cmd_if_create(adapter, cap_flags, en_flags, | 3398 | return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); |
3351 | if_handle, vf); | ||
3352 | |||
3353 | return status; | ||
3354 | } | 3399 | } |
3355 | 3400 | ||
3356 | static int be_vfs_if_create(struct be_adapter *adapter) | 3401 | static int be_vfs_if_create(struct be_adapter *adapter) |
@@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter) | |||
3368 | if (!BE3_chip(adapter)) { | 3413 | if (!BE3_chip(adapter)) { |
3369 | status = be_cmd_get_profile_config(adapter, &res, | 3414 | status = be_cmd_get_profile_config(adapter, &res, |
3370 | vf + 1); | 3415 | vf + 1); |
3371 | if (!status) | 3416 | if (!status) { |
3372 | cap_flags = res.if_cap_flags; | 3417 | cap_flags = res.if_cap_flags; |
3418 | /* Prevent VFs from enabling VLAN promiscuous | ||
3419 | * mode | ||
3420 | */ | ||
3421 | cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; | ||
3422 | } | ||
3373 | } | 3423 | } |
3374 | 3424 | ||
3375 | status = be_if_create(adapter, &vf_cfg->if_handle, | 3425 | status = be_if_create(adapter, &vf_cfg->if_handle, |
@@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter) | |||
3403 | struct device *dev = &adapter->pdev->dev; | 3453 | struct device *dev = &adapter->pdev->dev; |
3404 | struct be_vf_cfg *vf_cfg; | 3454 | struct be_vf_cfg *vf_cfg; |
3405 | int status, old_vfs, vf; | 3455 | int status, old_vfs, vf; |
3406 | u32 privileges; | ||
3407 | 3456 | ||
3408 | old_vfs = pci_num_vf(adapter->pdev); | 3457 | old_vfs = pci_num_vf(adapter->pdev); |
3409 | 3458 | ||
@@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter) | |||
3433 | 3482 | ||
3434 | for_all_vfs(adapter, vf_cfg, vf) { | 3483 | for_all_vfs(adapter, vf_cfg, vf) { |
3435 | /* Allow VFs to program MAC/VLAN filters */ | 3484 | /* Allow VFs to program MAC/VLAN filters */ |
3436 | status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); | 3485 | status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges, |
3437 | if (!status && !(privileges & BE_PRIV_FILTMGMT)) { | 3486 | vf + 1); |
3487 | if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { | ||
3438 | status = be_cmd_set_fn_privileges(adapter, | 3488 | status = be_cmd_set_fn_privileges(adapter, |
3439 | privileges | | 3489 | vf_cfg->privileges | |
3440 | BE_PRIV_FILTMGMT, | 3490 | BE_PRIV_FILTMGMT, |
3441 | vf + 1); | 3491 | vf + 1); |
3442 | if (!status) | 3492 | if (!status) { |
3493 | vf_cfg->privileges |= BE_PRIV_FILTMGMT; | ||
3443 | dev_info(dev, "VF%d has FILTMGMT privilege\n", | 3494 | dev_info(dev, "VF%d has FILTMGMT privilege\n", |
3444 | vf); | 3495 | vf); |
3496 | } | ||
3445 | } | 3497 | } |
3446 | 3498 | ||
3447 | /* Allow full available bandwidth */ | 3499 | /* Allow full available bandwidth */ |
@@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) | |||
4820 | 4872 | ||
4821 | static int be_map_pci_bars(struct be_adapter *adapter) | 4873 | static int be_map_pci_bars(struct be_adapter *adapter) |
4822 | { | 4874 | { |
4875 | struct pci_dev *pdev = adapter->pdev; | ||
4823 | u8 __iomem *addr; | 4876 | u8 __iomem *addr; |
4824 | 4877 | ||
4825 | if (BEx_chip(adapter) && be_physfn(adapter)) { | 4878 | if (BEx_chip(adapter) && be_physfn(adapter)) { |
4826 | adapter->csr = pci_iomap(adapter->pdev, 2, 0); | 4879 | adapter->csr = pci_iomap(pdev, 2, 0); |
4827 | if (!adapter->csr) | 4880 | if (!adapter->csr) |
4828 | return -ENOMEM; | 4881 | return -ENOMEM; |
4829 | } | 4882 | } |
4830 | 4883 | ||
4831 | addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); | 4884 | addr = pci_iomap(pdev, db_bar(adapter), 0); |
4832 | if (!addr) | 4885 | if (!addr) |
4833 | goto pci_map_err; | 4886 | goto pci_map_err; |
4834 | adapter->db = addr; | 4887 | adapter->db = addr; |
4835 | 4888 | ||
4889 | if (skyhawk_chip(adapter) || BEx_chip(adapter)) { | ||
4890 | if (be_physfn(adapter)) { | ||
4891 | /* PCICFG is the 2nd BAR in BE2 */ | ||
4892 | addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0); | ||
4893 | if (!addr) | ||
4894 | goto pci_map_err; | ||
4895 | adapter->pcicfg = addr; | ||
4896 | } else { | ||
4897 | adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; | ||
4898 | } | ||
4899 | } | ||
4900 | |||
4836 | be_roce_map_pci_bars(adapter); | 4901 | be_roce_map_pci_bars(adapter); |
4837 | return 0; | 4902 | return 0; |
4838 | 4903 | ||
4839 | pci_map_err: | 4904 | pci_map_err: |
4840 | dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); | 4905 | dev_err(&pdev->dev, "Error in mapping PCI BARs\n"); |
4841 | be_unmap_pci_bars(adapter); | 4906 | be_unmap_pci_bars(adapter); |
4842 | return -ENOMEM; | 4907 | return -ENOMEM; |
4843 | } | 4908 | } |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9bb6220663b2..f6a3a7abd468 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -1189,13 +1189,12 @@ static void | |||
1189 | fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) | 1189 | fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) |
1190 | { | 1190 | { |
1191 | struct fec_enet_private *fep; | 1191 | struct fec_enet_private *fep; |
1192 | struct bufdesc *bdp, *bdp_t; | 1192 | struct bufdesc *bdp; |
1193 | unsigned short status; | 1193 | unsigned short status; |
1194 | struct sk_buff *skb; | 1194 | struct sk_buff *skb; |
1195 | struct fec_enet_priv_tx_q *txq; | 1195 | struct fec_enet_priv_tx_q *txq; |
1196 | struct netdev_queue *nq; | 1196 | struct netdev_queue *nq; |
1197 | int index = 0; | 1197 | int index = 0; |
1198 | int i, bdnum; | ||
1199 | int entries_free; | 1198 | int entries_free; |
1200 | 1199 | ||
1201 | fep = netdev_priv(ndev); | 1200 | fep = netdev_priv(ndev); |
@@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) | |||
1216 | if (bdp == txq->cur_tx) | 1215 | if (bdp == txq->cur_tx) |
1217 | break; | 1216 | break; |
1218 | 1217 | ||
1219 | bdp_t = bdp; | 1218 | index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); |
1220 | bdnum = 1; | ||
1221 | index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); | ||
1222 | skb = txq->tx_skbuff[index]; | ||
1223 | while (!skb) { | ||
1224 | bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id); | ||
1225 | index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); | ||
1226 | skb = txq->tx_skbuff[index]; | ||
1227 | bdnum++; | ||
1228 | } | ||
1229 | if (skb_shinfo(skb)->nr_frags && | ||
1230 | (status = bdp_t->cbd_sc) & BD_ENET_TX_READY) | ||
1231 | break; | ||
1232 | 1219 | ||
1233 | for (i = 0; i < bdnum; i++) { | 1220 | skb = txq->tx_skbuff[index]; |
1234 | if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) | ||
1235 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, | ||
1236 | bdp->cbd_datlen, DMA_TO_DEVICE); | ||
1237 | bdp->cbd_bufaddr = 0; | ||
1238 | if (i < bdnum - 1) | ||
1239 | bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); | ||
1240 | } | ||
1241 | txq->tx_skbuff[index] = NULL; | 1221 | txq->tx_skbuff[index] = NULL; |
1222 | if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) | ||
1223 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, | ||
1224 | bdp->cbd_datlen, DMA_TO_DEVICE); | ||
1225 | bdp->cbd_bufaddr = 0; | ||
1226 | if (!skb) { | ||
1227 | bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); | ||
1228 | continue; | ||
1229 | } | ||
1242 | 1230 | ||
1243 | /* Check for errors. */ | 1231 | /* Check for errors. */ |
1244 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | | 1232 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
@@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) | |||
1479 | 1467 | ||
1480 | vlan_packet_rcvd = true; | 1468 | vlan_packet_rcvd = true; |
1481 | 1469 | ||
1482 | skb_copy_to_linear_data_offset(skb, VLAN_HLEN, | 1470 | memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); |
1483 | data, (2 * ETH_ALEN)); | ||
1484 | skb_pull(skb, VLAN_HLEN); | 1471 | skb_pull(skb, VLAN_HLEN); |
1485 | } | 1472 | } |
1486 | 1473 | ||
@@ -1597,7 +1584,7 @@ fec_enet_interrupt(int irq, void *dev_id) | |||
1597 | writel(int_events, fep->hwp + FEC_IEVENT); | 1584 | writel(int_events, fep->hwp + FEC_IEVENT); |
1598 | fec_enet_collect_events(fep, int_events); | 1585 | fec_enet_collect_events(fep, int_events); |
1599 | 1586 | ||
1600 | if (fep->work_tx || fep->work_rx) { | 1587 | if ((fep->work_tx || fep->work_rx) && fep->link) { |
1601 | ret = IRQ_HANDLED; | 1588 | ret = IRQ_HANDLED; |
1602 | 1589 | ||
1603 | if (napi_schedule_prep(&fep->napi)) { | 1590 | if (napi_schedule_prep(&fep->napi)) { |
@@ -1967,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
1967 | struct fec_enet_private *fep = netdev_priv(ndev); | 1954 | struct fec_enet_private *fep = netdev_priv(ndev); |
1968 | struct device_node *node; | 1955 | struct device_node *node; |
1969 | int err = -ENXIO, i; | 1956 | int err = -ENXIO, i; |
1957 | u32 mii_speed, holdtime; | ||
1970 | 1958 | ||
1971 | /* | 1959 | /* |
1972 | * The i.MX28 dual fec interfaces are not equal. | 1960 | * The i.MX28 dual fec interfaces are not equal. |
@@ -2004,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
2004 | * Reference Manual has an error on this, and gets fixed on i.MX6Q | 1992 | * Reference Manual has an error on this, and gets fixed on i.MX6Q |
2005 | * document. | 1993 | * document. |
2006 | */ | 1994 | */ |
2007 | fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); | 1995 | mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); |
2008 | if (fep->quirks & FEC_QUIRK_ENET_MAC) | 1996 | if (fep->quirks & FEC_QUIRK_ENET_MAC) |
2009 | fep->phy_speed--; | 1997 | mii_speed--; |
2010 | fep->phy_speed <<= 1; | 1998 | if (mii_speed > 63) { |
1999 | dev_err(&pdev->dev, | ||
2000 | "fec clock (%lu) too fast to get right mii speed\n", | ||
2001 | clk_get_rate(fep->clk_ipg)); | ||
2002 | err = -EINVAL; | ||
2003 | goto err_out; | ||
2004 | } | ||
2005 | |||
2006 | /* | ||
2007 | * The i.MX28 and i.MX6 types have another field in the MSCR (aka | ||
2008 | * MII_SPEED) register that defines the MDIO output hold time. Earlier | ||
2009 | * versions are RAZ there, so just ignore the difference and write the | ||
2010 | * register always. | ||
2011 | * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns. | ||
2012 | * HOLDTIME + 1 is the number of clk cycles the fec is holding the | ||
2013 | * output. | ||
2014 | * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). | ||
2015 | * Given that ceil(clkrate / 5000000) <= 64, the calculation for | ||
2016 | * holdtime cannot result in a value greater than 3. | ||
2017 | */ | ||
2018 | holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; | ||
2019 | |||
2020 | fep->phy_speed = mii_speed << 1 | holdtime << 8; | ||
2021 | |||
2011 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); | 2022 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
2012 | 2023 | ||
2013 | fep->mii_bus = mdiobus_alloc(); | 2024 | fep->mii_bus = mdiobus_alloc(); |
@@ -3383,7 +3394,6 @@ fec_drv_remove(struct platform_device *pdev) | |||
3383 | regulator_disable(fep->reg_phy); | 3394 | regulator_disable(fep->reg_phy); |
3384 | if (fep->ptp_clock) | 3395 | if (fep->ptp_clock) |
3385 | ptp_clock_unregister(fep->ptp_clock); | 3396 | ptp_clock_unregister(fep->ptp_clock); |
3386 | fec_enet_clk_enable(ndev, false); | ||
3387 | of_node_put(fep->phy_node); | 3397 | of_node_put(fep->phy_node); |
3388 | free_netdev(ndev); | 3398 | free_netdev(ndev); |
3389 | 3399 | ||
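The new MSCR setup above computes two fields, the MDC divider (mii_speed, rejected if it would exceed 63) and the MDIO output hold time (holdtime, at most 3 given that cap), and packs them as mii_speed << 1 | holdtime << 8. Below is a standalone sketch of the same arithmetic; the 66 MHz IPG clock is only an assumed example value, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clk_ipg_rate = 66000000;	/* assumed example IPG clock */
	uint32_t mii_speed, holdtime, phy_speed;

	/* Keep MDC at or below 2.5 MHz; ENET-MAC variants (FEC_QUIRK_ENET_MAC)
	 * need the divider reduced by one, as in the driver above.
	 */
	mii_speed = DIV_ROUND_UP(clk_ipg_rate, 5000000);
	mii_speed--;

	/* >= 10 ns output hold; HOLDTIME + 1 clock cycles of hold, which
	 * cannot exceed 3 given the divider check above.
	 */
	holdtime = DIV_ROUND_UP(clk_ipg_rate, 100000000) - 1;

	phy_speed = mii_speed << 1 | holdtime << 8;
	printf("mii_speed=%u holdtime=%u MSCR=0x%x\n",
	       mii_speed, holdtime, phy_speed);
	return 0;
}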
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 43df78882e48..7bf3682cdf47 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np, | |||
747 | return 0; | 747 | return 0; |
748 | } | 748 | } |
749 | 749 | ||
750 | static int gfar_of_group_count(struct device_node *np) | ||
751 | { | ||
752 | struct device_node *child; | ||
753 | int num = 0; | ||
754 | |||
755 | for_each_available_child_of_node(np, child) | ||
756 | if (!of_node_cmp(child->name, "queue-group")) | ||
757 | num++; | ||
758 | |||
759 | return num; | ||
760 | } | ||
761 | |||
750 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | 762 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) |
751 | { | 763 | { |
752 | const char *model; | 764 | const char *model; |
@@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
784 | num_rx_qs = 1; | 796 | num_rx_qs = 1; |
785 | } else { /* MQ_MG_MODE */ | 797 | } else { /* MQ_MG_MODE */ |
786 | /* get the actual number of supported groups */ | 798 | /* get the actual number of supported groups */ |
787 | unsigned int num_grps = of_get_available_child_count(np); | 799 | unsigned int num_grps = gfar_of_group_count(np); |
788 | 800 | ||
789 | if (num_grps == 0 || num_grps > MAXGROUPS) { | 801 | if (num_grps == 0 || num_grps > MAXGROUPS) { |
790 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", | 802 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", |
@@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
851 | 863 | ||
852 | /* Parse and initialize group specific information */ | 864 | /* Parse and initialize group specific information */ |
853 | if (priv->mode == MQ_MG_MODE) { | 865 | if (priv->mode == MQ_MG_MODE) { |
854 | for_each_child_of_node(np, child) { | 866 | for_each_available_child_of_node(np, child) { |
867 | if (of_node_cmp(child->name, "queue-group")) | ||
868 | continue; | ||
869 | |||
855 | err = gfar_parse_group(child, priv, model); | 870 | err = gfar_parse_group(child, priv, model); |
856 | if (err) | 871 | if (err) |
857 | goto err_grp_init; | 872 | goto err_grp_init; |
@@ -3162,8 +3177,8 @@ static void adjust_link(struct net_device *dev) | |||
3162 | struct phy_device *phydev = priv->phydev; | 3177 | struct phy_device *phydev = priv->phydev; |
3163 | 3178 | ||
3164 | if (unlikely(phydev->link != priv->oldlink || | 3179 | if (unlikely(phydev->link != priv->oldlink || |
3165 | phydev->duplex != priv->oldduplex || | 3180 | (phydev->link && (phydev->duplex != priv->oldduplex || |
3166 | phydev->speed != priv->oldspeed)) | 3181 | phydev->speed != priv->oldspeed)))) |
3167 | gfar_update_link_state(priv); | 3182 | gfar_update_link_state(priv); |
3168 | } | 3183 | } |
3169 | 3184 | ||
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 357e8b576905..56b774d3a13d 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c | |||
@@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev) | |||
3893 | ugeth->phy_interface = phy_interface; | 3893 | ugeth->phy_interface = phy_interface; |
3894 | ugeth->max_speed = max_speed; | 3894 | ugeth->max_speed = max_speed; |
3895 | 3895 | ||
3896 | /* Carrier starts down, phylib will bring it up */ | ||
3897 | netif_carrier_off(dev); | ||
3898 | |||
3896 | err = register_netdev(dev); | 3899 | err = register_netdev(dev); |
3897 | if (err) { | 3900 | if (err) { |
3898 | if (netif_msg_probe(ugeth)) | 3901 | if (netif_msg_probe(ugeth)) |
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index e8a1adb7a962..c05e50759621 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev) | |||
3262 | device_remove_file(&dev->dev, &dev_attr_remove_port); | 3262 | device_remove_file(&dev->dev, &dev_attr_remove_port); |
3263 | } | 3263 | } |
3264 | 3264 | ||
3265 | static int ehea_reboot_notifier(struct notifier_block *nb, | ||
3266 | unsigned long action, void *unused) | ||
3267 | { | ||
3268 | if (action == SYS_RESTART) { | ||
3269 | pr_info("Reboot: freeing all eHEA resources\n"); | ||
3270 | ibmebus_unregister_driver(&ehea_driver); | ||
3271 | } | ||
3272 | return NOTIFY_DONE; | ||
3273 | } | ||
3274 | |||
3275 | static struct notifier_block ehea_reboot_nb = { | ||
3276 | .notifier_call = ehea_reboot_notifier, | ||
3277 | }; | ||
3278 | |||
3279 | static int ehea_mem_notifier(struct notifier_block *nb, | ||
3280 | unsigned long action, void *data) | ||
3281 | { | ||
3282 | int ret = NOTIFY_BAD; | ||
3283 | struct memory_notify *arg = data; | ||
3284 | |||
3285 | mutex_lock(&dlpar_mem_lock); | ||
3286 | |||
3287 | switch (action) { | ||
3288 | case MEM_CANCEL_OFFLINE: | ||
3289 | pr_info("memory offlining canceled"); | ||
3290 | /* Fall through: re-add canceled memory block */ | ||
3291 | |||
3292 | case MEM_ONLINE: | ||
3293 | pr_info("memory is going online"); | ||
3294 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | ||
3295 | if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) | ||
3296 | goto out_unlock; | ||
3297 | ehea_rereg_mrs(); | ||
3298 | break; | ||
3299 | |||
3300 | case MEM_GOING_OFFLINE: | ||
3301 | pr_info("memory is going offline"); | ||
3302 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | ||
3303 | if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) | ||
3304 | goto out_unlock; | ||
3305 | ehea_rereg_mrs(); | ||
3306 | break; | ||
3307 | |||
3308 | default: | ||
3309 | break; | ||
3310 | } | ||
3311 | |||
3312 | ehea_update_firmware_handles(); | ||
3313 | ret = NOTIFY_OK; | ||
3314 | |||
3315 | out_unlock: | ||
3316 | mutex_unlock(&dlpar_mem_lock); | ||
3317 | return ret; | ||
3318 | } | ||
3319 | |||
3320 | static struct notifier_block ehea_mem_nb = { | ||
3321 | .notifier_call = ehea_mem_notifier, | ||
3322 | }; | ||
3323 | |||
3324 | static void ehea_crash_handler(void) | ||
3325 | { | ||
3326 | int i; | ||
3327 | |||
3328 | if (ehea_fw_handles.arr) | ||
3329 | for (i = 0; i < ehea_fw_handles.num_entries; i++) | ||
3330 | ehea_h_free_resource(ehea_fw_handles.arr[i].adh, | ||
3331 | ehea_fw_handles.arr[i].fwh, | ||
3332 | FORCE_FREE); | ||
3333 | |||
3334 | if (ehea_bcmc_regs.arr) | ||
3335 | for (i = 0; i < ehea_bcmc_regs.num_entries; i++) | ||
3336 | ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, | ||
3337 | ehea_bcmc_regs.arr[i].port_id, | ||
3338 | ehea_bcmc_regs.arr[i].reg_type, | ||
3339 | ehea_bcmc_regs.arr[i].macaddr, | ||
3340 | 0, H_DEREG_BCMC); | ||
3341 | } | ||
3342 | |||
3343 | static atomic_t ehea_memory_hooks_registered; | ||
3344 | |||
3345 | /* Register memory hooks on probe of first adapter */ | ||
3346 | static int ehea_register_memory_hooks(void) | ||
3347 | { | ||
3348 | int ret = 0; | ||
3349 | |||
3350 | if (atomic_inc_and_test(&ehea_memory_hooks_registered)) | ||
3351 | return 0; | ||
3352 | |||
3353 | ret = ehea_create_busmap(); | ||
3354 | if (ret) { | ||
3355 | pr_info("ehea_create_busmap failed\n"); | ||
3356 | goto out; | ||
3357 | } | ||
3358 | |||
3359 | ret = register_reboot_notifier(&ehea_reboot_nb); | ||
3360 | if (ret) { | ||
3361 | pr_info("register_reboot_notifier failed\n"); | ||
3362 | goto out; | ||
3363 | } | ||
3364 | |||
3365 | ret = register_memory_notifier(&ehea_mem_nb); | ||
3366 | if (ret) { | ||
3367 | pr_info("register_memory_notifier failed\n"); | ||
3368 | goto out2; | ||
3369 | } | ||
3370 | |||
3371 | ret = crash_shutdown_register(ehea_crash_handler); | ||
3372 | if (ret) { | ||
3373 | pr_info("crash_shutdown_register failed\n"); | ||
3374 | goto out3; | ||
3375 | } | ||
3376 | |||
3377 | return 0; | ||
3378 | |||
3379 | out3: | ||
3380 | unregister_memory_notifier(&ehea_mem_nb); | ||
3381 | out2: | ||
3382 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
3383 | out: | ||
3384 | return ret; | ||
3385 | } | ||
3386 | |||
3387 | static void ehea_unregister_memory_hooks(void) | ||
3388 | { | ||
3389 | if (atomic_read(&ehea_memory_hooks_registered)) | ||
3390 | return; | ||
3391 | |||
3392 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
3393 | if (crash_shutdown_unregister(ehea_crash_handler)) | ||
3394 | pr_info("failed unregistering crash handler\n"); | ||
3395 | unregister_memory_notifier(&ehea_mem_nb); | ||
3396 | } | ||
3397 | |||
3265 | static int ehea_probe_adapter(struct platform_device *dev) | 3398 | static int ehea_probe_adapter(struct platform_device *dev) |
3266 | { | 3399 | { |
3267 | struct ehea_adapter *adapter; | 3400 | struct ehea_adapter *adapter; |
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev) | |||
3269 | int ret; | 3402 | int ret; |
3270 | int i; | 3403 | int i; |
3271 | 3404 | ||
3405 | ret = ehea_register_memory_hooks(); | ||
3406 | if (ret) | ||
3407 | return ret; | ||
3408 | |||
3272 | if (!dev || !dev->dev.of_node) { | 3409 | if (!dev || !dev->dev.of_node) { |
3273 | pr_err("Invalid ibmebus device probed\n"); | 3410 | pr_err("Invalid ibmebus device probed\n"); |
3274 | return -EINVAL; | 3411 | return -EINVAL; |
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev) | |||
3392 | return 0; | 3529 | return 0; |
3393 | } | 3530 | } |
3394 | 3531 | ||
3395 | static void ehea_crash_handler(void) | ||
3396 | { | ||
3397 | int i; | ||
3398 | |||
3399 | if (ehea_fw_handles.arr) | ||
3400 | for (i = 0; i < ehea_fw_handles.num_entries; i++) | ||
3401 | ehea_h_free_resource(ehea_fw_handles.arr[i].adh, | ||
3402 | ehea_fw_handles.arr[i].fwh, | ||
3403 | FORCE_FREE); | ||
3404 | |||
3405 | if (ehea_bcmc_regs.arr) | ||
3406 | for (i = 0; i < ehea_bcmc_regs.num_entries; i++) | ||
3407 | ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, | ||
3408 | ehea_bcmc_regs.arr[i].port_id, | ||
3409 | ehea_bcmc_regs.arr[i].reg_type, | ||
3410 | ehea_bcmc_regs.arr[i].macaddr, | ||
3411 | 0, H_DEREG_BCMC); | ||
3412 | } | ||
3413 | |||
3414 | static int ehea_mem_notifier(struct notifier_block *nb, | ||
3415 | unsigned long action, void *data) | ||
3416 | { | ||
3417 | int ret = NOTIFY_BAD; | ||
3418 | struct memory_notify *arg = data; | ||
3419 | |||
3420 | mutex_lock(&dlpar_mem_lock); | ||
3421 | |||
3422 | switch (action) { | ||
3423 | case MEM_CANCEL_OFFLINE: | ||
3424 | pr_info("memory offlining canceled"); | ||
3425 | /* Readd canceled memory block */ | ||
3426 | case MEM_ONLINE: | ||
3427 | pr_info("memory is going online"); | ||
3428 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | ||
3429 | if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) | ||
3430 | goto out_unlock; | ||
3431 | ehea_rereg_mrs(); | ||
3432 | break; | ||
3433 | case MEM_GOING_OFFLINE: | ||
3434 | pr_info("memory is going offline"); | ||
3435 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | ||
3436 | if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) | ||
3437 | goto out_unlock; | ||
3438 | ehea_rereg_mrs(); | ||
3439 | break; | ||
3440 | default: | ||
3441 | break; | ||
3442 | } | ||
3443 | |||
3444 | ehea_update_firmware_handles(); | ||
3445 | ret = NOTIFY_OK; | ||
3446 | |||
3447 | out_unlock: | ||
3448 | mutex_unlock(&dlpar_mem_lock); | ||
3449 | return ret; | ||
3450 | } | ||
3451 | |||
3452 | static struct notifier_block ehea_mem_nb = { | ||
3453 | .notifier_call = ehea_mem_notifier, | ||
3454 | }; | ||
3455 | |||
3456 | static int ehea_reboot_notifier(struct notifier_block *nb, | ||
3457 | unsigned long action, void *unused) | ||
3458 | { | ||
3459 | if (action == SYS_RESTART) { | ||
3460 | pr_info("Reboot: freeing all eHEA resources\n"); | ||
3461 | ibmebus_unregister_driver(&ehea_driver); | ||
3462 | } | ||
3463 | return NOTIFY_DONE; | ||
3464 | } | ||
3465 | |||
3466 | static struct notifier_block ehea_reboot_nb = { | ||
3467 | .notifier_call = ehea_reboot_notifier, | ||
3468 | }; | ||
3469 | |||
3470 | static int check_module_parm(void) | 3532 | static int check_module_parm(void) |
3471 | { | 3533 | { |
3472 | int ret = 0; | 3534 | int ret = 0; |
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void) | |||
3520 | if (ret) | 3582 | if (ret) |
3521 | goto out; | 3583 | goto out; |
3522 | 3584 | ||
3523 | ret = ehea_create_busmap(); | ||
3524 | if (ret) | ||
3525 | goto out; | ||
3526 | |||
3527 | ret = register_reboot_notifier(&ehea_reboot_nb); | ||
3528 | if (ret) | ||
3529 | pr_info("failed registering reboot notifier\n"); | ||
3530 | |||
3531 | ret = register_memory_notifier(&ehea_mem_nb); | ||
3532 | if (ret) | ||
3533 | pr_info("failed registering memory remove notifier\n"); | ||
3534 | |||
3535 | ret = crash_shutdown_register(ehea_crash_handler); | ||
3536 | if (ret) | ||
3537 | pr_info("failed registering crash handler\n"); | ||
3538 | |||
3539 | ret = ibmebus_register_driver(&ehea_driver); | 3585 | ret = ibmebus_register_driver(&ehea_driver); |
3540 | if (ret) { | 3586 | if (ret) { |
3541 | pr_err("failed registering eHEA device driver on ebus\n"); | 3587 | pr_err("failed registering eHEA device driver on ebus\n"); |
3542 | goto out2; | 3588 | goto out; |
3543 | } | 3589 | } |
3544 | 3590 | ||
3545 | ret = driver_create_file(&ehea_driver.driver, | 3591 | ret = driver_create_file(&ehea_driver.driver, |
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void) | |||
3547 | if (ret) { | 3593 | if (ret) { |
3548 | pr_err("failed to register capabilities attribute, ret=%d\n", | 3594 | pr_err("failed to register capabilities attribute, ret=%d\n", |
3549 | ret); | 3595 | ret); |
3550 | goto out3; | 3596 | goto out2; |
3551 | } | 3597 | } |
3552 | 3598 | ||
3553 | return ret; | 3599 | return ret; |
3554 | 3600 | ||
3555 | out3: | ||
3556 | ibmebus_unregister_driver(&ehea_driver); | ||
3557 | out2: | 3601 | out2: |
3558 | unregister_memory_notifier(&ehea_mem_nb); | 3602 | ibmebus_unregister_driver(&ehea_driver); |
3559 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
3560 | crash_shutdown_unregister(ehea_crash_handler); | ||
3561 | out: | 3603 | out: |
3562 | return ret; | 3604 | return ret; |
3563 | } | 3605 | } |
3564 | 3606 | ||
3565 | static void __exit ehea_module_exit(void) | 3607 | static void __exit ehea_module_exit(void) |
3566 | { | 3608 | { |
3567 | int ret; | ||
3568 | |||
3569 | driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); | 3609 | driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); |
3570 | ibmebus_unregister_driver(&ehea_driver); | 3610 | ibmebus_unregister_driver(&ehea_driver); |
3571 | unregister_reboot_notifier(&ehea_reboot_nb); | 3611 | ehea_unregister_memory_hooks(); |
3572 | ret = crash_shutdown_unregister(ehea_crash_handler); | ||
3573 | if (ret) | ||
3574 | pr_info("failed unregistering crash handler\n"); | ||
3575 | unregister_memory_notifier(&ehea_mem_nb); | ||
3576 | kfree(ehea_fw_handles.arr); | 3612 | kfree(ehea_fw_handles.arr); |
3577 | kfree(ehea_bcmc_regs.arr); | 3613 | kfree(ehea_bcmc_regs.arr); |
3578 | ehea_destroy_busmap(); | 3614 | ehea_destroy_busmap(); |
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 21978cc019e7..cd7675ac5bf9 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
@@ -1136,6 +1136,8 @@ restart_poll: | |||
1136 | ibmveth_replenish_task(adapter); | 1136 | ibmveth_replenish_task(adapter); |
1137 | 1137 | ||
1138 | if (frames_processed < budget) { | 1138 | if (frames_processed < budget) { |
1139 | napi_complete(napi); | ||
1140 | |||
1139 | /* We think we are done - reenable interrupts, | 1141 | /* We think we are done - reenable interrupts, |
1140 | * then check once more to make sure we are done. | 1142 | * then check once more to make sure we are done. |
1141 | */ | 1143 | */ |
@@ -1144,8 +1146,6 @@ restart_poll: | |||
1144 | 1146 | ||
1145 | BUG_ON(lpar_rc != H_SUCCESS); | 1147 | BUG_ON(lpar_rc != H_SUCCESS); |
1146 | 1148 | ||
1147 | napi_complete(napi); | ||
1148 | |||
1149 | if (ibmveth_rxq_pending_buffer(adapter) && | 1149 | if (ibmveth_rxq_pending_buffer(adapter) && |
1150 | napi_reschedule(napi)) { | 1150 | napi_reschedule(napi)) { |
1151 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | 1151 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
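The ibmveth change above moves napi_complete() ahead of the h_vio_signal() call that re-enables the device interrupt, so an event arriving in that window can re-schedule NAPI instead of being lost while the instance is still marked scheduled. The sketch below shows the general completion pattern, not the ibmveth code itself; struct example_adapter and the example_* helpers are hypothetical stand-ins.

/* Generic NAPI poll completion pattern (sketch): mark the poll complete
 * before unmasking the device interrupt, then close the race by checking
 * for work that slipped in between.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *adapter = container_of(napi,
					struct example_adapter, napi);
	int done = example_rx_clean(adapter, budget);	/* hypothetical helper */

	if (done < budget) {
		napi_complete(napi);		/* 1: leave the polled state */
		example_enable_irq(adapter);	/* 2: then unmask the IRQ */

		/* If work appeared in the window, poll again. */
		if (example_rx_pending(adapter) && napi_reschedule(napi))
			example_disable_irq(adapter);
	}
	return done;
}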
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) | |||
1327 | return ret; | 1327 | return ret; |
1328 | } | 1328 | } |
1329 | 1329 | ||
1330 | static int ibmveth_set_mac_addr(struct net_device *dev, void *p) | ||
1331 | { | ||
1332 | struct ibmveth_adapter *adapter = netdev_priv(dev); | ||
1333 | struct sockaddr *addr = p; | ||
1334 | u64 mac_address; | ||
1335 | int rc; | ||
1336 | |||
1337 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1338 | return -EADDRNOTAVAIL; | ||
1339 | |||
1340 | mac_address = ibmveth_encode_mac_addr(addr->sa_data); | ||
1341 | rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); | ||
1342 | if (rc) { | ||
1343 | netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); | ||
1344 | return rc; | ||
1345 | } | ||
1346 | |||
1347 | ether_addr_copy(dev->dev_addr, addr->sa_data); | ||
1348 | |||
1349 | return 0; | ||
1350 | } | ||
1351 | |||
1330 | static const struct net_device_ops ibmveth_netdev_ops = { | 1352 | static const struct net_device_ops ibmveth_netdev_ops = { |
1331 | .ndo_open = ibmveth_open, | 1353 | .ndo_open = ibmveth_open, |
1332 | .ndo_stop = ibmveth_close, | 1354 | .ndo_stop = ibmveth_close, |
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { | |||
1337 | .ndo_fix_features = ibmveth_fix_features, | 1359 | .ndo_fix_features = ibmveth_fix_features, |
1338 | .ndo_set_features = ibmveth_set_features, | 1360 | .ndo_set_features = ibmveth_set_features, |
1339 | .ndo_validate_addr = eth_validate_addr, | 1361 | .ndo_validate_addr = eth_validate_addr, |
1340 | .ndo_set_mac_address = eth_mac_addr, | 1362 | .ndo_set_mac_address = ibmveth_set_mac_addr, |
1341 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1363 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1342 | .ndo_poll_controller = ibmveth_poll_controller, | 1364 | .ndo_poll_controller = ibmveth_poll_controller, |
1343 | #endif | 1365 | #endif |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 11a9ffebf8d8..6aea65dae5ed 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c | |||
@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) | |||
868 | * The grst delay value is in 100ms units, and we'll wait a | 868 | * The grst delay value is in 100ms units, and we'll wait a |
869 | * couple counts longer to be sure we don't just miss the end. | 869 | * couple counts longer to be sure we don't just miss the end. |
870 | */ | 870 | */ |
871 | grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK | 871 | grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & |
872 | >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; | 872 | I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> |
873 | I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; | ||
873 | for (cnt = 0; cnt < grst_del + 2; cnt++) { | 874 | for (cnt = 0; cnt < grst_del + 2; cnt++) { |
874 | reg = rd32(hw, I40E_GLGEN_RSTAT); | 875 | reg = rd32(hw, I40E_GLGEN_RSTAT); |
875 | if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) | 876 | if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) |
@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, | |||
2846 | 2847 | ||
2847 | status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); | 2848 | status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); |
2848 | 2849 | ||
2849 | if (!status) | 2850 | if (!status && filter_index) |
2850 | *filter_index = resp->index; | 2851 | *filter_index = resp->index; |
2851 | 2852 | ||
2852 | return status; | 2853 | return status; |
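
The added "&& filter_index" guard treats the output pointer as optional: a caller that does not need the returned index can pass NULL, and the function must not dereference it. A small sketch of that optional-out-parameter convention, not taken from the driver:

#include <stdio.h>
#include <stddef.h>

/* returns 0 on success; writes the allocated index only if the caller asked for it */
static int add_entry(int *index_out)
{
    int allocated = 7;                  /* pretend the hardware returned slot 7 */

    if (index_out)                      /* out-parameter is optional */
        *index_out = allocated;
    return 0;
}

int main(void)
{
    int idx;
    add_entry(NULL);                    /* caller ignores the index: no crash */
    add_entry(&idx);
    printf("idx=%d\n", idx);
    return 0;
}
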
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 183dcb63ce98..a11c70ca5a28 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | |||
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) | |||
40 | u32 val; | 40 | u32 val; |
41 | 41 | ||
42 | val = rd32(hw, I40E_PRTDCB_GENC); | 42 | val = rd32(hw, I40E_PRTDCB_GENC); |
43 | *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> | 43 | *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >> |
44 | I40E_PRTDCB_GENC_PFCLDA_SHIFT); | 44 | I40E_PRTDCB_GENC_PFCLDA_SHIFT); |
45 | } | 45 | } |
46 | 46 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 61236f983971..c17ee77100d3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c | |||
@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, | |||
989 | if (!cmd_buf) | 989 | if (!cmd_buf) |
990 | return count; | 990 | return count; |
991 | bytes_not_copied = copy_from_user(cmd_buf, buffer, count); | 991 | bytes_not_copied = copy_from_user(cmd_buf, buffer, count); |
992 | if (bytes_not_copied < 0) | 992 | if (bytes_not_copied < 0) { |
993 | kfree(cmd_buf); | ||
993 | return bytes_not_copied; | 994 | return bytes_not_copied; |
995 | } | ||
994 | if (bytes_not_copied > 0) | 996 | if (bytes_not_copied > 0) |
995 | count -= bytes_not_copied; | 997 | count -= bytes_not_copied; |
996 | cmd_buf[count] = '\0'; | 998 | cmd_buf[count] = '\0'; |
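
The debugfs change above plugs a leak: once cmd_buf has been allocated, every early return has to release it. A minimal userspace sketch of the allocate / fail / free-before-return shape, with plain malloc standing in for the kernel allocation and copy_in() standing in for copy_from_user():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int copy_in(char *dst, const char *src, size_t n)
{
    memcpy(dst, src, n);        /* stand-in for copy_from_user(); 0 on success */
    return 0;
}

static int handle_write(const char *user_buf, size_t count)
{
    char *cmd_buf = malloc(count + 1);

    if (!cmd_buf)
        return -1;
    if (copy_in(cmd_buf, user_buf, count) < 0) {
        free(cmd_buf);          /* the release the original error path was missing */
        return -1;
    }
    cmd_buf[count] = '\0';
    printf("cmd: %s\n", cmd_buf);
    free(cmd_buf);
    return 0;
}

int main(void)
{
    return handle_write("lldp get local", 14);
}
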
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index cbe281be1c9f..dadda3c5d658 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, | |||
1512 | vsi->tc_config.numtc = numtc; | 1512 | vsi->tc_config.numtc = numtc; |
1513 | vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; | 1513 | vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; |
1514 | /* Number of queues per enabled TC */ | 1514 | /* Number of queues per enabled TC */ |
1515 | num_tc_qps = vsi->alloc_queue_pairs/numtc; | 1515 | /* In MFP case we can have a much lower count of MSIx |
1516 | * vectors available and so we need to lower the used | ||
1517 | * q count. | ||
1518 | */ | ||
1519 | qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); | ||
1520 | num_tc_qps = qcount / numtc; | ||
1516 | num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); | 1521 | num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); |
1517 | 1522 | ||
1518 | /* Setup queue offset/count for all TCs for given VSI */ | 1523 | /* Setup queue offset/count for all TCs for given VSI */ |
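
The queue-map change caps the usable queue count at the number of LAN MSI-X vectors before dividing it among traffic classes. A one-line worked check with illustrative numbers: 64 allocated queue pairs, 16 vectors, 4 enabled TCs gives min(64, 16) / 4 = 4 queues per TC rather than 16.

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    int alloc_queue_pairs = 64, num_lan_msix = 16, numtc = 4;
    int qcount = MIN(alloc_queue_pairs, num_lan_msix);   /* MFP: bounded by vectors */

    printf("queues per TC: %d\n", qcount / numtc);       /* 4, not 16 */
    return 0;
}
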
@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) | |||
2684 | u16 qoffset, qcount; | 2689 | u16 qoffset, qcount; |
2685 | int i, n; | 2690 | int i, n; |
2686 | 2691 | ||
2687 | if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) | 2692 | if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { |
2688 | return; | 2693 | /* Reset the TC information */ |
2694 | for (i = 0; i < vsi->num_queue_pairs; i++) { | ||
2695 | rx_ring = vsi->rx_rings[i]; | ||
2696 | tx_ring = vsi->tx_rings[i]; | ||
2697 | rx_ring->dcb_tc = 0; | ||
2698 | tx_ring->dcb_tc = 0; | ||
2699 | } | ||
2700 | } | ||
2689 | 2701 | ||
2690 | for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { | 2702 | for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { |
2691 | if (!(vsi->tc_config.enabled_tc & (1 << n))) | 2703 | if (!(vsi->tc_config.enabled_tc & (1 << n))) |
@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) | |||
3830 | { | 3842 | { |
3831 | int i; | 3843 | int i; |
3832 | 3844 | ||
3845 | i40e_stop_misc_vector(pf); | ||
3846 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | ||
3847 | synchronize_irq(pf->msix_entries[0].vector); | ||
3848 | free_irq(pf->msix_entries[0].vector, pf); | ||
3849 | } | ||
3850 | |||
3833 | i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); | 3851 | i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); |
3834 | for (i = 0; i < pf->num_alloc_vsi; i++) | 3852 | for (i = 0; i < pf->num_alloc_vsi; i++) |
3835 | if (pf->vsi[i]) | 3853 | if (pf->vsi[i]) |
@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, | |||
5254 | 5272 | ||
5255 | /* Wait for the PF's Tx queues to be disabled */ | 5273 | /* Wait for the PF's Tx queues to be disabled */ |
5256 | ret = i40e_pf_wait_txq_disabled(pf); | 5274 | ret = i40e_pf_wait_txq_disabled(pf); |
5257 | if (!ret) | 5275 | if (ret) { |
5276 | /* Schedule PF reset to recover */ | ||
5277 | set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); | ||
5278 | i40e_service_event_schedule(pf); | ||
5279 | } else { | ||
5258 | i40e_pf_unquiesce_all_vsi(pf); | 5280 | i40e_pf_unquiesce_all_vsi(pf); |
5281 | } | ||
5282 | |||
5259 | exit: | 5283 | exit: |
5260 | return ret; | 5284 | return ret; |
5261 | } | 5285 | } |
@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) | |||
5587 | int i, v; | 5611 | int i, v; |
5588 | 5612 | ||
5589 | /* If we're down or resetting, just bail */ | 5613 | /* If we're down or resetting, just bail */ |
5590 | if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) | 5614 | if (test_bit(__I40E_DOWN, &pf->state) || |
5615 | test_bit(__I40E_CONFIG_BUSY, &pf->state)) | ||
5591 | return; | 5616 | return; |
5592 | 5617 | ||
5593 | /* for each VSI/netdev | 5618 | /* for each VSI/netdev |
@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev) | |||
9533 | set_bit(__I40E_DOWN, &pf->state); | 9558 | set_bit(__I40E_DOWN, &pf->state); |
9534 | del_timer_sync(&pf->service_timer); | 9559 | del_timer_sync(&pf->service_timer); |
9535 | cancel_work_sync(&pf->service_task); | 9560 | cancel_work_sync(&pf->service_task); |
9561 | i40e_fdir_teardown(pf); | ||
9536 | 9562 | ||
9537 | if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { | 9563 | if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { |
9538 | i40e_free_vfs(pf); | 9564 | i40e_free_vfs(pf); |
@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev) | |||
9559 | if (pf->vsi[pf->lan_vsi]) | 9585 | if (pf->vsi[pf->lan_vsi]) |
9560 | i40e_vsi_release(pf->vsi[pf->lan_vsi]); | 9586 | i40e_vsi_release(pf->vsi[pf->lan_vsi]); |
9561 | 9587 | ||
9562 | i40e_stop_misc_vector(pf); | ||
9563 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | ||
9564 | synchronize_irq(pf->msix_entries[0].vector); | ||
9565 | free_irq(pf->msix_entries[0].vector, pf); | ||
9566 | } | ||
9567 | |||
9568 | /* shutdown and destroy the HMC */ | 9588 | /* shutdown and destroy the HMC */ |
9569 | if (pf->hw.hmc.hmc_obj) { | 9589 | if (pf->hw.hmc.hmc_obj) { |
9570 | ret_code = i40e_shutdown_lan_hmc(&pf->hw); | 9590 | ret_code = i40e_shutdown_lan_hmc(&pf->hw); |
@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev) | |||
9718 | wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); | 9738 | wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); |
9719 | wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); | 9739 | wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); |
9720 | 9740 | ||
9741 | i40e_clear_interrupt_scheme(pf); | ||
9742 | |||
9721 | if (system_state == SYSTEM_POWER_OFF) { | 9743 | if (system_state == SYSTEM_POWER_OFF) { |
9722 | pci_wake_from_d3(pdev, pf->wol_en); | 9744 | pci_wake_from_d3(pdev, pf->wol_en); |
9723 | pci_set_power_state(pdev, PCI_D3hot); | 9745 | pci_set_power_state(pdev, PCI_D3hot); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 3e70f2e45a47..5defe0d63514 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c | |||
@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, | |||
679 | { | 679 | { |
680 | i40e_status status; | 680 | i40e_status status; |
681 | enum i40e_nvmupd_cmd upd_cmd; | 681 | enum i40e_nvmupd_cmd upd_cmd; |
682 | bool retry_attempt = false; | ||
682 | 683 | ||
683 | upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); | 684 | upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); |
684 | 685 | ||
686 | retry: | ||
685 | switch (upd_cmd) { | 687 | switch (upd_cmd) { |
686 | case I40E_NVMUPD_WRITE_CON: | 688 | case I40E_NVMUPD_WRITE_CON: |
687 | status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); | 689 | status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); |
@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, | |||
725 | *errno = -ESRCH; | 727 | *errno = -ESRCH; |
726 | break; | 728 | break; |
727 | } | 729 | } |
730 | |||
731 | /* In some circumstances, a multi-write transaction takes longer | ||
732 | * than the default 3 minute timeout on the write semaphore. If | ||
733 | * the write failed with an EBUSY status, this is likely the problem, | ||
734 | * so here we try to reacquire the semaphore then retry the write. | ||
735 | * We only do one retry, then give up. | ||
736 | */ | ||
737 | if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && | ||
738 | !retry_attempt) { | ||
739 | i40e_status old_status = status; | ||
740 | u32 old_asq_status = hw->aq.asq_last_status; | ||
741 | u32 gtime; | ||
742 | |||
743 | gtime = rd32(hw, I40E_GLVFGEN_TIMER); | ||
744 | if (gtime >= hw->nvm.hw_semaphore_timeout) { | ||
745 | i40e_debug(hw, I40E_DEBUG_ALL, | ||
746 | "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", | ||
747 | gtime, hw->nvm.hw_semaphore_timeout); | ||
748 | i40e_release_nvm(hw); | ||
749 | status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); | ||
750 | if (status) { | ||
751 | i40e_debug(hw, I40E_DEBUG_ALL, | ||
752 | "NVMUPD: write semaphore reacquire failed aq_err = %d\n", | ||
753 | hw->aq.asq_last_status); | ||
754 | status = old_status; | ||
755 | hw->aq.asq_last_status = old_asq_status; | ||
756 | } else { | ||
757 | retry_attempt = true; | ||
758 | goto retry; | ||
759 | } | ||
760 | } | ||
761 | } | ||
762 | |||
728 | return status; | 763 | return status; |
729 | } | 764 | } |
730 | 765 | ||
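
The comment in the NVM hunk explains the recovery path: when a long multi-write transaction outlives the write semaphore, the EBUSY failure is handled by releasing and reacquiring the semaphore and retrying the write exactly once. A standalone sketch of that one-shot retry shape follows; the acquire/release/do_write helpers are stubs for illustration, not the driver's functions.

#include <stdio.h>
#include <stdbool.h>

static int attempts;

static int do_write(void)     { return attempts++ ? 0 : -16; } /* first try reports "busy" */
static int release_lock(void) { return 0; }
static int acquire_lock(void) { return 0; }

static int write_with_retry(void)
{
    bool retried = false;
    int status;

retry:
    status = do_write();
    if (status && !retried) {          /* busy: recover the lock, then retry once */
        release_lock();
        if (acquire_lock() == 0) {
            retried = true;
            goto retry;
        }
    }
    return status;
}

int main(void)
{
    printf("status=%d after %d attempt(s)\n", write_with_retry(), attempts);
    return 0;
}
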
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2206d2d36f0f..bbf1b1247ac4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) | |||
586 | } | 586 | } |
587 | 587 | ||
588 | /** | 588 | /** |
589 | * i40e_get_head - Retrieve head from head writeback | ||
590 | * @tx_ring: tx ring to fetch head of | ||
591 | * | ||
592 | * Returns value of Tx ring head based on value stored | ||
593 | * in head write-back location | ||
594 | **/ | ||
595 | static inline u32 i40e_get_head(struct i40e_ring *tx_ring) | ||
596 | { | ||
597 | void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; | ||
598 | |||
599 | return le32_to_cpu(*(volatile __le32 *)head); | ||
600 | } | ||
601 | |||
602 | /** | ||
589 | * i40e_get_tx_pending - how many tx descriptors not processed | 603 | * i40e_get_tx_pending - how many tx descriptors not processed |
590 | * @tx_ring: the ring of descriptors | 604 | * @tx_ring: the ring of descriptors |
591 | * | 605 | * |
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) | |||
594 | **/ | 608 | **/ |
595 | static u32 i40e_get_tx_pending(struct i40e_ring *ring) | 609 | static u32 i40e_get_tx_pending(struct i40e_ring *ring) |
596 | { | 610 | { |
597 | u32 ntu = ((ring->next_to_clean <= ring->next_to_use) | 611 | u32 head, tail; |
598 | ? ring->next_to_use | 612 | |
599 | : ring->next_to_use + ring->count); | 613 | head = i40e_get_head(ring); |
600 | return ntu - ring->next_to_clean; | 614 | tail = readl(ring->tail); |
615 | |||
616 | if (head != tail) | ||
617 | return (head < tail) ? | ||
618 | tail - head : (tail + ring->count - head); | ||
619 | |||
620 | return 0; | ||
601 | } | 621 | } |
602 | 622 | ||
603 | /** | 623 | /** |
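
The rewritten i40e_get_tx_pending derives the number of unprocessed descriptors from the hardware head (read from the head write-back location) and the software tail, handling ring wrap-around. A small standalone check of the same arithmetic with illustrative numbers:

#include <stdio.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
                               unsigned int count)
{
    if (head == tail)
        return 0;                              /* ring is empty */
    return (head < tail) ? tail - head         /* no wrap */
                         : tail + count - head; /* tail wrapped past the end */
}

int main(void)
{
    /* 512-entry ring: head at 500, tail wrapped to 10 -> 22 pending */
    printf("%u\n", tx_pending(500, 10, 512));
    /* no wrap: head 10, tail 40 -> 30 pending */
    printf("%u\n", tx_pending(10, 40, 512));
    return 0;
}
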
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) | |||
606 | **/ | 626 | **/ |
607 | static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) | 627 | static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) |
608 | { | 628 | { |
629 | u32 tx_done = tx_ring->stats.packets; | ||
630 | u32 tx_done_old = tx_ring->tx_stats.tx_done_old; | ||
609 | u32 tx_pending = i40e_get_tx_pending(tx_ring); | 631 | u32 tx_pending = i40e_get_tx_pending(tx_ring); |
610 | struct i40e_pf *pf = tx_ring->vsi->back; | 632 | struct i40e_pf *pf = tx_ring->vsi->back; |
611 | bool ret = false; | 633 | bool ret = false; |
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) | |||
623 | * run the check_tx_hang logic with a transmit completion | 645 | * run the check_tx_hang logic with a transmit completion |
624 | * pending but without time to complete it yet. | 646 | * pending but without time to complete it yet. |
625 | */ | 647 | */ |
626 | if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && | 648 | if ((tx_done_old == tx_done) && tx_pending) { |
627 | (tx_pending >= I40E_MIN_DESC_PENDING)) { | ||
628 | /* make sure it is true for two checks in a row */ | 649 | /* make sure it is true for two checks in a row */ |
629 | ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, | 650 | ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, |
630 | &tx_ring->state); | 651 | &tx_ring->state); |
631 | } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && | 652 | } else if (tx_done_old == tx_done && |
632 | (tx_pending < I40E_MIN_DESC_PENDING) && | 653 | (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { |
633 | (tx_pending > 0)) { | ||
634 | if (I40E_DEBUG_FLOW & pf->hw.debug_mask) | 654 | if (I40E_DEBUG_FLOW & pf->hw.debug_mask) |
635 | dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", | 655 | dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", |
636 | tx_pending, tx_ring->queue_index); | 656 | tx_pending, tx_ring->queue_index); |
637 | pf->tx_sluggish_count++; | 657 | pf->tx_sluggish_count++; |
638 | } else { | 658 | } else { |
639 | /* update completed stats and disarm the hang check */ | 659 | /* update completed stats and disarm the hang check */ |
640 | tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; | 660 | tx_ring->tx_stats.tx_done_old = tx_done; |
641 | clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); | 661 | clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); |
642 | } | 662 | } |
643 | 663 | ||
644 | return ret; | 664 | return ret; |
645 | } | 665 | } |
646 | 666 | ||
647 | /** | ||
648 | * i40e_get_head - Retrieve head from head writeback | ||
649 | * @tx_ring: tx ring to fetch head of | ||
650 | * | ||
651 | * Returns value of Tx ring head based on value stored | ||
652 | * in head write-back location | ||
653 | **/ | ||
654 | static inline u32 i40e_get_head(struct i40e_ring *tx_ring) | ||
655 | { | ||
656 | void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; | ||
657 | |||
658 | return le32_to_cpu(*(volatile __le32 *)head); | ||
659 | } | ||
660 | |||
661 | #define WB_STRIDE 0x3 | 667 | #define WB_STRIDE 0x3 |
662 | 668 | ||
663 | /** | 669 | /** |
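
The reworked hang check only declares a Tx hang after the same condition (no completed packets while work is still pending) is seen on two consecutive runs; the first observation merely arms a flag, and any progress disarms it. A minimal sketch of that two-strikes pattern, with a plain static flag in place of the ring state bit:

#include <stdio.h>
#include <stdbool.h>

static bool armed;          /* stands in for the __I40E_HANG_CHECK_ARMED bit */

static bool check_hang(unsigned int done_old, unsigned int done_now,
                       unsigned int pending)
{
    if (done_old == done_now && pending) {
        bool was_armed = armed;   /* a hang needs two strikes in a row */
        armed = true;
        return was_armed;
    }
    armed = false;                /* progress was made: disarm */
    return false;
}

int main(void)
{
    printf("%d\n", check_hang(100, 100, 5));  /* 0: first strike, arm */
    printf("%d\n", check_hang(100, 100, 5));  /* 1: second strike, hang */
    printf("%d\n", check_hang(100, 120, 5));  /* 0: progress, disarm */
    return 0;
}
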
@@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) | |||
2140 | } | 2146 | } |
2141 | 2147 | ||
2142 | /** | 2148 | /** |
2149 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet | ||
2150 | * @skb: send buffer | ||
2151 | * @tx_flags: collected send information | ||
2152 | * @hdr_len: size of the packet header | ||
2153 | * | ||
2154 | * Note: Our HW can't scatter-gather more than 8 fragments to build | ||
2155 | * a packet on the wire and so we need to figure out the cases where we | ||
2156 | * need to linearize the skb. | ||
2157 | **/ | ||
2158 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | ||
2159 | const u8 hdr_len) | ||
2160 | { | ||
2161 | struct skb_frag_struct *frag; | ||
2162 | bool linearize = false; | ||
2163 | unsigned int size = 0; | ||
2164 | u16 num_frags; | ||
2165 | u16 gso_segs; | ||
2166 | |||
2167 | num_frags = skb_shinfo(skb)->nr_frags; | ||
2168 | gso_segs = skb_shinfo(skb)->gso_segs; | ||
2169 | |||
2170 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { | ||
2171 | u16 j = 1; | ||
2172 | |||
2173 | if (num_frags < (I40E_MAX_BUFFER_TXD)) | ||
2174 | goto linearize_chk_done; | ||
2175 | /* try the simple math, if we have too many frags per segment */ | ||
2176 | if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > | ||
2177 | I40E_MAX_BUFFER_TXD) { | ||
2178 | linearize = true; | ||
2179 | goto linearize_chk_done; | ||
2180 | } | ||
2181 | frag = &skb_shinfo(skb)->frags[0]; | ||
2182 | size = hdr_len; | ||
2183 | /* we might still have more fragments per segment */ | ||
2184 | do { | ||
2185 | size += skb_frag_size(frag); | ||
2186 | frag++; j++; | ||
2187 | if (j == I40E_MAX_BUFFER_TXD) { | ||
2188 | if (size < skb_shinfo(skb)->gso_size) { | ||
2189 | linearize = true; | ||
2190 | break; | ||
2191 | } | ||
2192 | j = 1; | ||
2193 | size -= skb_shinfo(skb)->gso_size; | ||
2194 | if (size) | ||
2195 | j++; | ||
2196 | size += hdr_len; | ||
2197 | } | ||
2198 | num_frags--; | ||
2199 | } while (num_frags); | ||
2200 | } else { | ||
2201 | if (num_frags >= I40E_MAX_BUFFER_TXD) | ||
2202 | linearize = true; | ||
2203 | } | ||
2204 | |||
2205 | linearize_chk_done: | ||
2206 | return linearize; | ||
2207 | } | ||
2208 | |||
2209 | /** | ||
2143 | * i40e_tx_map - Build the Tx descriptor | 2210 | * i40e_tx_map - Build the Tx descriptor |
2144 | * @tx_ring: ring to send buffer on | 2211 | * @tx_ring: ring to send buffer on |
2145 | * @skb: send buffer | 2212 | * @skb: send buffer |
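
i40e_chk_linearize exists because the hardware can consume at most 8 data descriptors per packet on the wire; for TSO the question is whether any single segment could need more than 8 fragments, and the quick first test simply spreads the fragments across the segments. A short worked check of that fast path with illustrative numbers (17 fragments, 2 segments):

#include <stdio.h>

#define MAX_BUFFER_TXD 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int num_frags = 17, gso_segs = 2;

    /* rough per-segment fragment estimate used as the quick test */
    unsigned int per_seg = DIV_ROUND_UP(num_frags + gso_segs, gso_segs);

    printf("per segment ~%u -> %s\n", per_seg,
           per_seg > MAX_BUFFER_TXD ? "linearize" : "ok");   /* ~10 -> linearize */
    return 0;
}
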
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
2396 | if (tsyn) | 2463 | if (tsyn) |
2397 | tx_flags |= I40E_TX_FLAGS_TSYN; | 2464 | tx_flags |= I40E_TX_FLAGS_TSYN; |
2398 | 2465 | ||
2466 | if (i40e_chk_linearize(skb, tx_flags, hdr_len)) | ||
2467 | if (skb_linearize(skb)) | ||
2468 | goto out_drop; | ||
2469 | |||
2399 | skb_tx_timestamp(skb); | 2470 | skb_tx_timestamp(skb); |
2400 | 2471 | ||
2401 | /* always enable CRC insertion offload */ | 2472 | /* always enable CRC insertion offload */ |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 18b00231d2f1..dff0baeb1ecc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h | |||
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t { | |||
112 | 112 | ||
113 | #define i40e_rx_desc i40e_32byte_rx_desc | 113 | #define i40e_rx_desc i40e_32byte_rx_desc |
114 | 114 | ||
115 | #define I40E_MAX_BUFFER_TXD 8 | ||
115 | #define I40E_MIN_TX_LEN 17 | 116 | #define I40E_MIN_TX_LEN 17 |
116 | #define I40E_MAX_DATA_PER_TXD 8192 | 117 | #define I40E_MAX_DATA_PER_TXD 8192 |
117 | 118 | ||
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 29004382f462..708891571dae 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c | |||
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) | |||
126 | } | 126 | } |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * i40e_get_head - Retrieve head from head writeback | ||
130 | * @tx_ring: tx ring to fetch head of | ||
131 | * | ||
132 | * Returns value of Tx ring head based on value stored | ||
133 | * in head write-back location | ||
134 | **/ | ||
135 | static inline u32 i40e_get_head(struct i40e_ring *tx_ring) | ||
136 | { | ||
137 | void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; | ||
138 | |||
139 | return le32_to_cpu(*(volatile __le32 *)head); | ||
140 | } | ||
141 | |||
142 | /** | ||
129 | * i40e_get_tx_pending - how many tx descriptors not processed | 143 | * i40e_get_tx_pending - how many tx descriptors not processed |
130 | * @tx_ring: the ring of descriptors | 144 | * @tx_ring: the ring of descriptors |
131 | * | 145 | * |
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) | |||
134 | **/ | 148 | **/ |
135 | static u32 i40e_get_tx_pending(struct i40e_ring *ring) | 149 | static u32 i40e_get_tx_pending(struct i40e_ring *ring) |
136 | { | 150 | { |
137 | u32 ntu = ((ring->next_to_clean <= ring->next_to_use) | 151 | u32 head, tail; |
138 | ? ring->next_to_use | 152 | |
139 | : ring->next_to_use + ring->count); | 153 | head = i40e_get_head(ring); |
140 | return ntu - ring->next_to_clean; | 154 | tail = readl(ring->tail); |
155 | |||
156 | if (head != tail) | ||
157 | return (head < tail) ? | ||
158 | tail - head : (tail + ring->count - head); | ||
159 | |||
160 | return 0; | ||
141 | } | 161 | } |
142 | 162 | ||
143 | /** | 163 | /** |
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) | |||
146 | **/ | 166 | **/ |
147 | static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) | 167 | static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) |
148 | { | 168 | { |
169 | u32 tx_done = tx_ring->stats.packets; | ||
170 | u32 tx_done_old = tx_ring->tx_stats.tx_done_old; | ||
149 | u32 tx_pending = i40e_get_tx_pending(tx_ring); | 171 | u32 tx_pending = i40e_get_tx_pending(tx_ring); |
150 | bool ret = false; | 172 | bool ret = false; |
151 | 173 | ||
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) | |||
162 | * run the check_tx_hang logic with a transmit completion | 184 | * run the check_tx_hang logic with a transmit completion |
163 | * pending but without time to complete it yet. | 185 | * pending but without time to complete it yet. |
164 | */ | 186 | */ |
165 | if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && | 187 | if ((tx_done_old == tx_done) && tx_pending) { |
166 | (tx_pending >= I40E_MIN_DESC_PENDING)) { | ||
167 | /* make sure it is true for two checks in a row */ | 188 | /* make sure it is true for two checks in a row */ |
168 | ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, | 189 | ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, |
169 | &tx_ring->state); | 190 | &tx_ring->state); |
170 | } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || | 191 | } else if (tx_done_old == tx_done && |
171 | !(tx_pending < I40E_MIN_DESC_PENDING) || | 192 | (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { |
172 | !(tx_pending > 0)) { | ||
173 | /* update completed stats and disarm the hang check */ | 193 | /* update completed stats and disarm the hang check */ |
174 | tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; | 194 | tx_ring->tx_stats.tx_done_old = tx_done; |
175 | clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); | 195 | clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); |
176 | } | 196 | } |
177 | 197 | ||
178 | return ret; | 198 | return ret; |
179 | } | 199 | } |
180 | 200 | ||
181 | /** | ||
182 | * i40e_get_head - Retrieve head from head writeback | ||
183 | * @tx_ring: tx ring to fetch head of | ||
184 | * | ||
185 | * Returns value of Tx ring head based on value stored | ||
186 | * in head write-back location | ||
187 | **/ | ||
188 | static inline u32 i40e_get_head(struct i40e_ring *tx_ring) | ||
189 | { | ||
190 | void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; | ||
191 | |||
192 | return le32_to_cpu(*(volatile __le32 *)head); | ||
193 | } | ||
194 | |||
195 | #define WB_STRIDE 0x3 | 201 | #define WB_STRIDE 0x3 |
196 | 202 | ||
197 | /** | 203 | /** |
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
1206 | if (err < 0) | 1212 | if (err < 0) |
1207 | return err; | 1213 | return err; |
1208 | 1214 | ||
1209 | if (protocol == htons(ETH_P_IP)) { | 1215 | iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); |
1210 | iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); | 1216 | ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); |
1217 | |||
1218 | if (iph->version == 4) { | ||
1211 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); | 1219 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); |
1212 | iph->tot_len = 0; | 1220 | iph->tot_len = 0; |
1213 | iph->check = 0; | 1221 | iph->check = 0; |
1214 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | 1222 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, |
1215 | 0, IPPROTO_TCP, 0); | 1223 | 0, IPPROTO_TCP, 0); |
1216 | } else if (skb_is_gso_v6(skb)) { | 1224 | } else if (ipv6h->version == 6) { |
1217 | |||
1218 | ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) | ||
1219 | : ipv6_hdr(skb); | ||
1220 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); | 1225 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); |
1221 | ipv6h->payload_len = 0; | 1226 | ipv6h->payload_len = 0; |
1222 | tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, | 1227 | tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, |
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, | |||
1274 | I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; | 1279 | I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; |
1275 | } | 1280 | } |
1276 | } else if (tx_flags & I40E_TX_FLAGS_IPV6) { | 1281 | } else if (tx_flags & I40E_TX_FLAGS_IPV6) { |
1277 | if (tx_flags & I40E_TX_FLAGS_TSO) { | 1282 | *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; |
1278 | *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; | 1283 | if (tx_flags & I40E_TX_FLAGS_TSO) |
1279 | ip_hdr(skb)->check = 0; | 1284 | ip_hdr(skb)->check = 0; |
1280 | } else { | ||
1281 | *cd_tunneling |= | ||
1282 | I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; | ||
1283 | } | ||
1284 | } | 1285 | } |
1285 | 1286 | ||
1286 | /* Now set the ctx descriptor fields */ | 1287 | /* Now set the ctx descriptor fields */ |
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, | |||
1290 | ((skb_inner_network_offset(skb) - | 1291 | ((skb_inner_network_offset(skb) - |
1291 | skb_transport_offset(skb)) >> 1) << | 1292 | skb_transport_offset(skb)) >> 1) << |
1292 | I40E_TXD_CTX_QW0_NATLEN_SHIFT; | 1293 | I40E_TXD_CTX_QW0_NATLEN_SHIFT; |
1294 | if (this_ip_hdr->version == 6) { | ||
1295 | tx_flags &= ~I40E_TX_FLAGS_IPV4; | ||
1296 | tx_flags |= I40E_TX_FLAGS_IPV6; | ||
1297 | } | ||
1298 | |||
1293 | 1299 | ||
1294 | } else { | 1300 | } else { |
1295 | network_hdr_len = skb_network_header_len(skb); | 1301 | network_hdr_len = skb_network_header_len(skb); |
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, | |||
1380 | context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); | 1386 | context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); |
1381 | } | 1387 | } |
1382 | 1388 | ||
1389 | /** | ||
1390 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet | ||
1391 | * @skb: send buffer | ||
1392 | * @tx_flags: collected send information | ||
1393 | * @hdr_len: size of the packet header | ||
1394 | * | ||
1395 | * Note: Our HW can't scatter-gather more than 8 fragments to build | ||
1396 | * a packet on the wire and so we need to figure out the cases where we | ||
1397 | * need to linearize the skb. | ||
1398 | **/ | ||
1399 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | ||
1400 | const u8 hdr_len) | ||
1401 | { | ||
1402 | struct skb_frag_struct *frag; | ||
1403 | bool linearize = false; | ||
1404 | unsigned int size = 0; | ||
1405 | u16 num_frags; | ||
1406 | u16 gso_segs; | ||
1407 | |||
1408 | num_frags = skb_shinfo(skb)->nr_frags; | ||
1409 | gso_segs = skb_shinfo(skb)->gso_segs; | ||
1410 | |||
1411 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { | ||
1412 | u16 j = 1; | ||
1413 | |||
1414 | if (num_frags < (I40E_MAX_BUFFER_TXD)) | ||
1415 | goto linearize_chk_done; | ||
1416 | /* try the simple math, if we have too many frags per segment */ | ||
1417 | if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > | ||
1418 | I40E_MAX_BUFFER_TXD) { | ||
1419 | linearize = true; | ||
1420 | goto linearize_chk_done; | ||
1421 | } | ||
1422 | frag = &skb_shinfo(skb)->frags[0]; | ||
1423 | size = hdr_len; | ||
1424 | /* we might still have more fragments per segment */ | ||
1425 | do { | ||
1426 | size += skb_frag_size(frag); | ||
1427 | frag++; j++; | ||
1428 | if (j == I40E_MAX_BUFFER_TXD) { | ||
1429 | if (size < skb_shinfo(skb)->gso_size) { | ||
1430 | linearize = true; | ||
1431 | break; | ||
1432 | } | ||
1433 | j = 1; | ||
1434 | size -= skb_shinfo(skb)->gso_size; | ||
1435 | if (size) | ||
1436 | j++; | ||
1437 | size += hdr_len; | ||
1438 | } | ||
1439 | num_frags--; | ||
1440 | } while (num_frags); | ||
1441 | } else { | ||
1442 | if (num_frags >= I40E_MAX_BUFFER_TXD) | ||
1443 | linearize = true; | ||
1444 | } | ||
1445 | |||
1446 | linearize_chk_done: | ||
1447 | return linearize; | ||
1448 | } | ||
1449 | |||
1383 | /** | 1450 | /** |
1384 | * i40e_tx_map - Build the Tx descriptor | 1451 | * i40e_tx_map - Build the Tx descriptor |
1385 | * @tx_ring: ring to send buffer on | 1452 | * @tx_ring: ring to send buffer on |
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
1654 | else if (tso) | 1721 | else if (tso) |
1655 | tx_flags |= I40E_TX_FLAGS_TSO; | 1722 | tx_flags |= I40E_TX_FLAGS_TSO; |
1656 | 1723 | ||
1724 | if (i40e_chk_linearize(skb, tx_flags, hdr_len)) | ||
1725 | if (skb_linearize(skb)) | ||
1726 | goto out_drop; | ||
1727 | |||
1657 | skb_tx_timestamp(skb); | 1728 | skb_tx_timestamp(skb); |
1658 | 1729 | ||
1659 | /* always enable CRC insertion offload */ | 1730 | /* always enable CRC insertion offload */ |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 4e15903b2b6d..c950a038237c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h | |||
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t { | |||
112 | 112 | ||
113 | #define i40e_rx_desc i40e_32byte_rx_desc | 113 | #define i40e_rx_desc i40e_32byte_rx_desc |
114 | 114 | ||
115 | #define I40E_MAX_BUFFER_TXD 8 | ||
115 | #define I40E_MIN_TX_LEN 17 | 116 | #define I40E_MIN_TX_LEN 17 |
116 | #define I40E_MAX_DATA_PER_TXD 8192 | 117 | #define I40E_MAX_DATA_PER_TXD 8192 |
117 | 118 | ||
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 96208f17bb53..2db653225a0e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -2658,16 +2658,11 @@ static int mvneta_stop(struct net_device *dev) | |||
2658 | static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 2658 | static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
2659 | { | 2659 | { |
2660 | struct mvneta_port *pp = netdev_priv(dev); | 2660 | struct mvneta_port *pp = netdev_priv(dev); |
2661 | int ret; | ||
2662 | 2661 | ||
2663 | if (!pp->phy_dev) | 2662 | if (!pp->phy_dev) |
2664 | return -ENOTSUPP; | 2663 | return -ENOTSUPP; |
2665 | 2664 | ||
2666 | ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd); | 2665 | return phy_mii_ioctl(pp->phy_dev, ifr, cmd); |
2667 | if (!ret) | ||
2668 | mvneta_adjust_link(dev); | ||
2669 | |||
2670 | return ret; | ||
2671 | } | 2666 | } |
2672 | 2667 | ||
2673 | /* Ethtool methods */ | 2668 | /* Ethtool methods */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index a681d7c0bb9f..546ca4226916 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
@@ -724,7 +724,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |||
724 | * on the host, we deprecate the error message for this | 724 | * on the host, we deprecate the error message for this |
725 | * specific command/input_mod/opcode_mod/fw-status to be debug. | 725 | * specific command/input_mod/opcode_mod/fw-status to be debug. |
726 | */ | 726 | */ |
727 | if (op == MLX4_CMD_SET_PORT && in_modifier == 1 && | 727 | if (op == MLX4_CMD_SET_PORT && |
728 | (in_modifier == 1 || in_modifier == 2) && | ||
728 | op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE) | 729 | op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE) |
729 | mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", | 730 | mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", |
730 | op, context->fw_status); | 731 | op, context->fw_status); |
@@ -1993,7 +1994,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, | |||
1993 | goto reset_slave; | 1994 | goto reset_slave; |
1994 | slave_state[slave].vhcr_dma = ((u64) param) << 48; | 1995 | slave_state[slave].vhcr_dma = ((u64) param) << 48; |
1995 | priv->mfunc.master.slave_state[slave].cookie = 0; | 1996 | priv->mfunc.master.slave_state[slave].cookie = 0; |
1996 | mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]); | ||
1997 | break; | 1997 | break; |
1998 | case MLX4_COMM_CMD_VHCR1: | 1998 | case MLX4_COMM_CMD_VHCR1: |
1999 | if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) | 1999 | if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) |
@@ -2225,6 +2225,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) | |||
2225 | for (i = 0; i < dev->num_slaves; ++i) { | 2225 | for (i = 0; i < dev->num_slaves; ++i) { |
2226 | s_state = &priv->mfunc.master.slave_state[i]; | 2226 | s_state = &priv->mfunc.master.slave_state[i]; |
2227 | s_state->last_cmd = MLX4_COMM_CMD_RESET; | 2227 | s_state->last_cmd = MLX4_COMM_CMD_RESET; |
2228 | mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]); | ||
2228 | for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) | 2229 | for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) |
2229 | s_state->event_eq[j].eqn = -1; | 2230 | s_state->event_eq[j].eqn = -1; |
2230 | __raw_writel((__force u32) 0, | 2231 | __raw_writel((__force u32) 0, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 2a210c4efb89..3485acf03014 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev) | |||
1698 | /* Schedule multicast task to populate multicast list */ | 1698 | /* Schedule multicast task to populate multicast list */ |
1699 | queue_work(mdev->workqueue, &priv->rx_mode_task); | 1699 | queue_work(mdev->workqueue, &priv->rx_mode_task); |
1700 | 1700 | ||
1701 | mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); | ||
1702 | |||
1703 | #ifdef CONFIG_MLX4_EN_VXLAN | 1701 | #ifdef CONFIG_MLX4_EN_VXLAN |
1704 | if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) | 1702 | if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) |
1705 | vxlan_get_rx_port(dev); | 1703 | vxlan_get_rx_port(dev); |
@@ -2807,13 +2805,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2807 | netif_carrier_off(dev); | 2805 | netif_carrier_off(dev); |
2808 | mlx4_en_set_default_moderation(priv); | 2806 | mlx4_en_set_default_moderation(priv); |
2809 | 2807 | ||
2810 | err = register_netdev(dev); | ||
2811 | if (err) { | ||
2812 | en_err(priv, "Netdev registration failed for port %d\n", port); | ||
2813 | goto out; | ||
2814 | } | ||
2815 | priv->registered = 1; | ||
2816 | |||
2817 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); | 2808 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); |
2818 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); | 2809 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); |
2819 | 2810 | ||
@@ -2853,6 +2844,16 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2853 | queue_delayed_work(mdev->workqueue, &priv->service_task, | 2844 | queue_delayed_work(mdev->workqueue, &priv->service_task, |
2854 | SERVICE_TASK_DELAY); | 2845 | SERVICE_TASK_DELAY); |
2855 | 2846 | ||
2847 | mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); | ||
2848 | |||
2849 | err = register_netdev(dev); | ||
2850 | if (err) { | ||
2851 | en_err(priv, "Netdev registration failed for port %d\n", port); | ||
2852 | goto out; | ||
2853 | } | ||
2854 | |||
2855 | priv->registered = 1; | ||
2856 | |||
2856 | return 0; | 2857 | return 0; |
2857 | 2858 | ||
2858 | out: | 2859 | out: |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 2d8ee66138e8..a61009f4b2df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c | |||
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) | |||
81 | { | 81 | { |
82 | u32 loopback_ok = 0; | 82 | u32 loopback_ok = 0; |
83 | int i; | 83 | int i; |
84 | 84 | bool gro_enabled; | |
85 | 85 | ||
86 | priv->loopback_ok = 0; | 86 | priv->loopback_ok = 0; |
87 | priv->validate_loopback = 1; | 87 | priv->validate_loopback = 1; |
88 | gro_enabled = priv->dev->features & NETIF_F_GRO; | ||
88 | 89 | ||
89 | mlx4_en_update_loopback_state(priv->dev, priv->dev->features); | 90 | mlx4_en_update_loopback_state(priv->dev, priv->dev->features); |
91 | priv->dev->features &= ~NETIF_F_GRO; | ||
90 | 92 | ||
91 | /* xmit */ | 93 | /* xmit */ |
92 | if (mlx4_en_test_loopback_xmit(priv)) { | 94 | if (mlx4_en_test_loopback_xmit(priv)) { |
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) | |||
108 | mlx4_en_test_loopback_exit: | 110 | mlx4_en_test_loopback_exit: |
109 | 111 | ||
110 | priv->validate_loopback = 0; | 112 | priv->validate_loopback = 0; |
113 | |||
114 | if (gro_enabled) | ||
115 | priv->dev->features |= NETIF_F_GRO; | ||
116 | |||
111 | mlx4_en_update_loopback_state(priv->dev, priv->dev->features); | 117 | mlx4_en_update_loopback_state(priv->dev, priv->dev->features); |
112 | return !loopback_ok; | 118 | return !loopback_ok; |
113 | } | 119 | } |
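
The loopback selftest now clears NETIF_F_GRO for the duration of the test so the looped frame is not coalesced before the receive check, and restores the bit afterwards only if it was set beforehand. The save/clear/restore shape in isolation, using plain bit flags rather than the kernel feature API:

#include <stdio.h>
#include <stdbool.h>

#define F_GRO (1u << 3)

int main(void)
{
    unsigned int features = F_GRO | 0x5;        /* GRO happens to be on */
    bool gro_enabled = features & F_GRO;        /* remember the old state */

    features &= ~F_GRO;                         /* run the test without GRO */
    printf("during test: 0x%x\n", features);

    if (gro_enabled)                            /* put it back only if it was on */
        features |= F_GRO;
    printf("after test:  0x%x\n", features);
    return 0;
}
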
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 264bc15c1ff2..6e70ffee8e87 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
@@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work) | |||
153 | 153 | ||
154 | /* All active slaves need to receive the event */ | 154 | /* All active slaves need to receive the event */ |
155 | if (slave == ALL_SLAVES) { | 155 | if (slave == ALL_SLAVES) { |
156 | for (i = 0; i < dev->num_slaves; i++) { | 156 | for (i = 0; i <= dev->persist->num_vfs; i++) { |
157 | if (i != dev->caps.function && | 157 | if (mlx4_GEN_EQE(dev, i, eqe)) |
158 | master->slave_state[i].active) | 158 | mlx4_warn(dev, "Failed to generate event for slave %d\n", |
159 | if (mlx4_GEN_EQE(dev, i, eqe)) | 159 | i); |
160 | mlx4_warn(dev, "Failed to generate event for slave %d\n", | ||
161 | i); | ||
162 | } | 160 | } |
163 | } else { | 161 | } else { |
164 | if (mlx4_GEN_EQE(dev, slave, eqe)) | 162 | if (mlx4_GEN_EQE(dev, slave, eqe)) |
@@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave, | |||
203 | struct mlx4_eqe *eqe) | 201 | struct mlx4_eqe *eqe) |
204 | { | 202 | { |
205 | struct mlx4_priv *priv = mlx4_priv(dev); | 203 | struct mlx4_priv *priv = mlx4_priv(dev); |
206 | struct mlx4_slave_state *s_slave = | ||
207 | &priv->mfunc.master.slave_state[slave]; | ||
208 | 204 | ||
209 | if (!s_slave->active) { | 205 | if (slave < 0 || slave > dev->persist->num_vfs || |
210 | /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/ | 206 | slave == dev->caps.function || |
207 | !priv->mfunc.master.slave_state[slave].active) | ||
211 | return; | 208 | return; |
212 | } | ||
213 | 209 | ||
214 | slave_event(dev, slave, eqe); | 210 | slave_event(dev, slave, eqe); |
215 | } | 211 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2a8268e6be15..ebbe244e80dd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats { | |||
453 | unsigned long rx_chksum_none; | 453 | unsigned long rx_chksum_none; |
454 | unsigned long rx_chksum_complete; | 454 | unsigned long rx_chksum_complete; |
455 | unsigned long tx_chksum_offload; | 455 | unsigned long tx_chksum_offload; |
456 | #define NUM_PORT_STATS 9 | 456 | #define NUM_PORT_STATS 10 |
457 | }; | 457 | }; |
458 | 458 | ||
459 | struct mlx4_en_perf_stats { | 459 | struct mlx4_en_perf_stats { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 2bb8553bd905..eda29dbbfcd2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
@@ -412,7 +412,6 @@ err_icm: | |||
412 | 412 | ||
413 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); | 413 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); |
414 | 414 | ||
415 | #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC | ||
416 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, | 415 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, |
417 | enum mlx4_update_qp_attr attr, | 416 | enum mlx4_update_qp_attr attr, |
418 | struct mlx4_update_qp_params *params) | 417 | struct mlx4_update_qp_params *params) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 486e3d26cd4a..6e413ac4e940 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
713 | struct mlx4_vport_oper_state *vp_oper; | 713 | struct mlx4_vport_oper_state *vp_oper; |
714 | struct mlx4_priv *priv; | 714 | struct mlx4_priv *priv; |
715 | u32 qp_type; | 715 | u32 qp_type; |
716 | int port; | 716 | int port, err = 0; |
717 | 717 | ||
718 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; | 718 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; |
719 | priv = mlx4_priv(dev); | 719 | priv = mlx4_priv(dev); |
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
738 | } else { | 738 | } else { |
739 | struct mlx4_update_qp_params params = {.flags = 0}; | 739 | struct mlx4_update_qp_params params = {.flags = 0}; |
740 | 740 | ||
741 | mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); | 741 | err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); |
742 | if (err) | ||
743 | goto out; | ||
742 | } | 744 | } |
743 | } | 745 | } |
744 | 746 | ||
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
773 | qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; | 775 | qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; |
774 | qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; | 776 | qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; |
775 | } | 777 | } |
776 | return 0; | 778 | out: |
779 | return err; | ||
777 | } | 780 | } |
778 | 781 | ||
779 | static int mpt_mask(struct mlx4_dev *dev) | 782 | static int mpt_mask(struct mlx4_dev *dev) |
@@ -3092,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) | |||
3092 | if (!priv->mfunc.master.slave_state) | 3095 | if (!priv->mfunc.master.slave_state) |
3093 | return -EINVAL; | 3096 | return -EINVAL; |
3094 | 3097 | ||
3098 | /* check for slave valid, slave not PF, and slave active */ | ||
3099 | if (slave < 0 || slave > dev->persist->num_vfs || | ||
3100 | slave == dev->caps.function || | ||
3101 | !priv->mfunc.master.slave_state[slave].active) | ||
3102 | return 0; | ||
3103 | |||
3095 | event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; | 3104 | event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; |
3096 | 3105 | ||
3097 | /* Create the event only if the slave is registered */ | 3106 | /* Create the event only if the slave is registered */ |
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 44e8d7d25547..57a6e6cd74fc 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c | |||
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev) | |||
1239 | if (mac->phydev) | 1239 | if (mac->phydev) |
1240 | phy_start(mac->phydev); | 1240 | phy_start(mac->phydev); |
1241 | 1241 | ||
1242 | init_timer(&mac->tx->clean_timer); | 1242 | setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer, |
1243 | mac->tx->clean_timer.function = pasemi_mac_tx_timer; | 1243 | (unsigned long)mac->tx); |
1244 | mac->tx->clean_timer.data = (unsigned long)mac->tx; | 1244 | mod_timer(&mac->tx->clean_timer, jiffies + HZ); |
1245 | mac->tx->clean_timer.expires = jiffies+HZ; | ||
1246 | add_timer(&mac->tx->clean_timer); | ||
1247 | 1245 | ||
1248 | return 0; | 1246 | return 0; |
1249 | 1247 | ||
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 6e426ae94692..0a5e204a0179 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h | |||
@@ -354,7 +354,7 @@ struct cmd_desc_type0 { | |||
354 | 354 | ||
355 | } __attribute__ ((aligned(64))); | 355 | } __attribute__ ((aligned(64))); |
356 | 356 | ||
357 | /* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ | 357 | /* Note: sizeof(rcv_desc) should always be a multiple of 2 */ |
358 | struct rcv_desc { | 358 | struct rcv_desc { |
359 | __le16 reference_handle; | 359 | __le16 reference_handle; |
360 | __le16 reserved; | 360 | __le16 reserved; |
@@ -499,7 +499,7 @@ struct uni_data_desc{ | |||
499 | #define NETXEN_IMAGE_START 0x43000 /* compressed image */ | 499 | #define NETXEN_IMAGE_START 0x43000 /* compressed image */ |
500 | #define NETXEN_SECONDARY_START 0x200000 /* backup images */ | 500 | #define NETXEN_SECONDARY_START 0x200000 /* backup images */ |
501 | #define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ | 501 | #define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ |
502 | #define NETXEN_USER_START 0x3E8000 /* Firmare info */ | 502 | #define NETXEN_USER_START 0x3E8000 /* Firmware info */ |
503 | #define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ | 503 | #define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ |
504 | #define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ | 504 | #define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ |
505 | 505 | ||
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fa4317611fd6..f221126a5c4e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -314,7 +314,7 @@ struct qlcnic_fdt { | |||
314 | #define QLCNIC_BRDCFG_START 0x4000 /* board config */ | 314 | #define QLCNIC_BRDCFG_START 0x4000 /* board config */ |
315 | #define QLCNIC_BOOTLD_START 0x10000 /* bootld */ | 315 | #define QLCNIC_BOOTLD_START 0x10000 /* bootld */ |
316 | #define QLCNIC_IMAGE_START 0x43000 /* compressed image */ | 316 | #define QLCNIC_IMAGE_START 0x43000 /* compressed image */ |
317 | #define QLCNIC_USER_START 0x3E8000 /* Firmare info */ | 317 | #define QLCNIC_USER_START 0x3E8000 /* Firmware info */ |
318 | 318 | ||
319 | #define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) | 319 | #define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) |
320 | #define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) | 320 | #define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ad0020af2193..c70ab40d8698 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) | |||
2561 | int rc = -EINVAL; | 2561 | int rc = -EINVAL; |
2562 | 2562 | ||
2563 | if (!rtl_fw_format_ok(tp, rtl_fw)) { | 2563 | if (!rtl_fw_format_ok(tp, rtl_fw)) { |
2564 | netif_err(tp, ifup, dev, "invalid firwmare\n"); | 2564 | netif_err(tp, ifup, dev, "invalid firmware\n"); |
2565 | goto out; | 2565 | goto out; |
2566 | } | 2566 | } |
2567 | 2567 | ||
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp) | |||
5067 | RTL_W8(ChipCmd, CmdReset); | 5067 | RTL_W8(ChipCmd, CmdReset); |
5068 | 5068 | ||
5069 | rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); | 5069 | rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); |
5070 | |||
5071 | netdev_reset_queue(tp->dev); | ||
5072 | } | 5070 | } |
5073 | 5071 | ||
5074 | static void rtl_request_uncached_firmware(struct rtl8169_private *tp) | 5072 | static void rtl_request_uncached_firmware(struct rtl8169_private *tp) |
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
7049 | u32 status, len; | 7047 | u32 status, len; |
7050 | u32 opts[2]; | 7048 | u32 opts[2]; |
7051 | int frags; | 7049 | int frags; |
7052 | bool stop_queue; | ||
7053 | 7050 | ||
7054 | if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { | 7051 | if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { |
7055 | netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); | 7052 | netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); |
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
7090 | 7087 | ||
7091 | txd->opts2 = cpu_to_le32(opts[1]); | 7088 | txd->opts2 = cpu_to_le32(opts[1]); |
7092 | 7089 | ||
7093 | netdev_sent_queue(dev, skb->len); | ||
7094 | |||
7095 | skb_tx_timestamp(skb); | 7090 | skb_tx_timestamp(skb); |
7096 | 7091 | ||
7097 | /* Force memory writes to complete before releasing descriptor */ | 7092 | /* Force memory writes to complete before releasing descriptor */ |
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
7106 | 7101 | ||
7107 | tp->cur_tx += frags + 1; | 7102 | tp->cur_tx += frags + 1; |
7108 | 7103 | ||
7109 | stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS); | 7104 | RTL_W8(TxPoll, NPQ); |
7110 | 7105 | ||
7111 | if (!skb->xmit_more || stop_queue || | 7106 | mmiowb(); |
7112 | netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) { | ||
7113 | RTL_W8(TxPoll, NPQ); | ||
7114 | |||
7115 | mmiowb(); | ||
7116 | } | ||
7117 | 7107 | ||
7118 | if (stop_queue) { | 7108 | if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { |
7119 | /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must | 7109 | /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must |
7120 | * not miss a ring update when it notices a stopped queue. | 7110 | * not miss a ring update when it notices a stopped queue. |
7121 | */ | 7111 | */ |
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) | |||
7198 | static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) | 7188 | static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) |
7199 | { | 7189 | { |
7200 | unsigned int dirty_tx, tx_left; | 7190 | unsigned int dirty_tx, tx_left; |
7201 | unsigned int bytes_compl = 0, pkts_compl = 0; | ||
7202 | 7191 | ||
7203 | dirty_tx = tp->dirty_tx; | 7192 | dirty_tx = tp->dirty_tx; |
7204 | smp_rmb(); | 7193 | smp_rmb(); |
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) | |||
7222 | rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, | 7211 | rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, |
7223 | tp->TxDescArray + entry); | 7212 | tp->TxDescArray + entry); |
7224 | if (status & LastFrag) { | 7213 | if (status & LastFrag) { |
7225 | pkts_compl++; | 7214 | u64_stats_update_begin(&tp->tx_stats.syncp); |
7226 | bytes_compl += tx_skb->skb->len; | 7215 | tp->tx_stats.packets++; |
7216 | tp->tx_stats.bytes += tx_skb->skb->len; | ||
7217 | u64_stats_update_end(&tp->tx_stats.syncp); | ||
7227 | dev_kfree_skb_any(tx_skb->skb); | 7218 | dev_kfree_skb_any(tx_skb->skb); |
7228 | tx_skb->skb = NULL; | 7219 | tx_skb->skb = NULL; |
7229 | } | 7220 | } |
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) | |||
7232 | } | 7223 | } |
7233 | 7224 | ||
7234 | if (tp->dirty_tx != dirty_tx) { | 7225 | if (tp->dirty_tx != dirty_tx) { |
7235 | netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); | ||
7236 | |||
7237 | u64_stats_update_begin(&tp->tx_stats.syncp); | ||
7238 | tp->tx_stats.packets += pkts_compl; | ||
7239 | tp->tx_stats.bytes += bytes_compl; | ||
7240 | u64_stats_update_end(&tp->tx_stats.syncp); | ||
7241 | |||
7242 | tp->dirty_tx = dirty_tx; | 7226 | tp->dirty_tx = dirty_tx; |
7243 | /* Sync with rtl8169_start_xmit: | 7227 | /* Sync with rtl8169_start_xmit: |
7244 | * - publish dirty_tx ring index (write barrier) | 7228 | * - publish dirty_tx ring index (write barrier) |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 4da8bd263997..736d5d1624a1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = { | |||
508 | .tpauser = 1, | 508 | .tpauser = 1, |
509 | .hw_swap = 1, | 509 | .hw_swap = 1, |
510 | .rmiimode = 1, | 510 | .rmiimode = 1, |
511 | .shift_rd0 = 1, | ||
512 | }; | 511 | }; |
513 | 512 | ||
514 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) | 513 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) |
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev) | |||
1392 | msleep(2); /* max frame time at 10 Mbps < 1250 us */ | 1391 | msleep(2); /* max frame time at 10 Mbps < 1250 us */ |
1393 | sh_eth_get_stats(ndev); | 1392 | sh_eth_get_stats(ndev); |
1394 | sh_eth_reset(ndev); | 1393 | sh_eth_reset(ndev); |
1394 | |||
1395 | /* Set MAC address again */ | ||
1396 | update_mac_address(ndev); | ||
1395 | } | 1397 | } |
1396 | 1398 | ||
1397 | /* free Tx skb function */ | 1399 | /* free Tx skb function */ |
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev) | |||
1407 | txdesc = &mdp->tx_ring[entry]; | 1409 | txdesc = &mdp->tx_ring[entry]; |
1408 | if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) | 1410 | if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) |
1409 | break; | 1411 | break; |
1412 | /* TACT bit must be checked before all the following reads */ | ||
1413 | rmb(); | ||
1410 | /* Free the original skb. */ | 1414 | /* Free the original skb. */ |
1411 | if (mdp->tx_skbuff[entry]) { | 1415 | if (mdp->tx_skbuff[entry]) { |
1412 | dma_unmap_single(&ndev->dev, txdesc->addr, | 1416 | dma_unmap_single(&ndev->dev, txdesc->addr, |
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1444 | limit = boguscnt; | 1448 | limit = boguscnt; |
1445 | rxdesc = &mdp->rx_ring[entry]; | 1449 | rxdesc = &mdp->rx_ring[entry]; |
1446 | while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { | 1450 | while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { |
1451 | /* RACT bit must be checked before all the following reads */ | ||
1452 | rmb(); | ||
1447 | desc_status = edmac_to_cpu(mdp, rxdesc->status); | 1453 | desc_status = edmac_to_cpu(mdp, rxdesc->status); |
1448 | pkt_len = rxdesc->frame_length; | 1454 | pkt_len = rxdesc->frame_length; |
1449 | 1455 | ||
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1455 | 1461 | ||
1456 | /* In case of almost all GETHER/ETHERs, the Receive Frame State | 1462 | /* In case of almost all GETHER/ETHERs, the Receive Frame State |
1457 | * (RFS) bits in the Receive Descriptor 0 are from bit 9 to | 1463 | * (RFS) bits in the Receive Descriptor 0 are from bit 9 to |
1458 | * bit 0. However, in case of the R8A7740, R8A779x, and | 1464 | * bit 0. However, in case of the R8A7740 and R7S72100 |
1459 | * R7S72100 the RFS bits are from bit 25 to bit 16. So, the | 1465 | * the RFS bits are from bit 25 to bit 16. So, the |
1460 | * driver needs right shifting by 16. | 1466 | * driver needs right shifting by 16. |
1461 | */ | 1467 | */ |
1462 | if (mdp->cd->shift_rd0) | 1468 | if (mdp->cd->shift_rd0) |
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1523 | skb_checksum_none_assert(skb); | 1529 | skb_checksum_none_assert(skb); |
1524 | rxdesc->addr = dma_addr; | 1530 | rxdesc->addr = dma_addr; |
1525 | } | 1531 | } |
1532 | wmb(); /* RACT bit must be set after all the above writes */ | ||
1526 | if (entry >= mdp->num_rx_ring - 1) | 1533 | if (entry >= mdp->num_rx_ring - 1) |
1527 | rxdesc->status |= | 1534 | rxdesc->status |= |
1528 | cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); | 1535 | cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); |
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1535 | /* If we don't need to check status, don't. -KDU */ | 1542 | /* If we don't need to check status, don't. -KDU */ |
1536 | if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { | 1543 | if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { |
1537 | /* fix the values for the next receiving if RDE is set */ | 1544 | /* fix the values for the next receiving if RDE is set */ |
1538 | if (intr_status & EESR_RDE) { | 1545 | if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) { |
1539 | u32 count = (sh_eth_read(ndev, RDFAR) - | 1546 | u32 count = (sh_eth_read(ndev, RDFAR) - |
1540 | sh_eth_read(ndev, RDLAR)) >> 4; | 1547 | sh_eth_read(ndev, RDLAR)) >> 4; |
1541 | 1548 | ||
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2174 | } | 2181 | } |
2175 | spin_unlock_irqrestore(&mdp->lock, flags); | 2182 | spin_unlock_irqrestore(&mdp->lock, flags); |
2176 | 2183 | ||
2177 | if (skb_padto(skb, ETH_ZLEN)) | 2184 | if (skb_put_padto(skb, ETH_ZLEN)) |
2178 | return NETDEV_TX_OK; | 2185 | return NETDEV_TX_OK; |
2179 | 2186 | ||
2180 | entry = mdp->cur_tx % mdp->num_tx_ring; | 2187 | entry = mdp->cur_tx % mdp->num_tx_ring; |
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2192 | } | 2199 | } |
2193 | txdesc->buffer_length = skb->len; | 2200 | txdesc->buffer_length = skb->len; |
2194 | 2201 | ||
2202 | wmb(); /* TACT bit must be set after all the above writes */ | ||
2195 | if (entry >= mdp->num_tx_ring - 1) | 2203 | if (entry >= mdp->num_tx_ring - 1) |
2196 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); | 2204 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); |
2197 | else | 2205 | else |
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 34389b6aa67c..5cecec282aba 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c | |||
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) | |||
1257 | u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); | 1257 | u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); |
1258 | 1258 | ||
1259 | if (enable) | 1259 | if (enable) |
1260 | val |= 1 << rocker_port->lport; | 1260 | val |= 1ULL << rocker_port->lport; |
1261 | else | 1261 | else |
1262 | val &= ~(1 << rocker_port->lport); | 1262 | val &= ~(1ULL << rocker_port->lport); |
1263 | rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); | 1263 | rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); |
1264 | } | 1264 | } |
1265 | 1265 | ||
@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker) | |||
4201 | 4201 | ||
4202 | alloc_size = sizeof(struct rocker_port *) * rocker->port_count; | 4202 | alloc_size = sizeof(struct rocker_port *) * rocker->port_count; |
4203 | rocker->ports = kmalloc(alloc_size, GFP_KERNEL); | 4203 | rocker->ports = kmalloc(alloc_size, GFP_KERNEL); |
4204 | if (!rocker->ports) | ||
4205 | return -ENOMEM; | ||
4204 | for (i = 0; i < rocker->port_count; i++) { | 4206 | for (i = 0; i < rocker->port_count; i++) { |
4205 | err = rocker_probe_port(rocker, i); | 4207 | err = rocker_probe_port(rocker, i); |
4206 | if (err) | 4208 | if (err) |
@@ -4466,10 +4468,16 @@ static int rocker_port_master_changed(struct net_device *dev) | |||
4466 | struct net_device *master = netdev_master_upper_dev_get(dev); | 4468 | struct net_device *master = netdev_master_upper_dev_get(dev); |
4467 | int err = 0; | 4469 | int err = 0; |
4468 | 4470 | ||
4471 | /* There are currently three cases handled here: | ||
4472 | * 1. Joining a bridge | ||
4473 | * 2. Leaving a previously joined bridge | ||
4474 | * 3. Other, e.g. being added to or removed from a bond or openvswitch, | ||
4475 | * in which case nothing is done | ||
4476 | */ | ||
4469 | if (master && master->rtnl_link_ops && | 4477 | if (master && master->rtnl_link_ops && |
4470 | !strcmp(master->rtnl_link_ops->kind, "bridge")) | 4478 | !strcmp(master->rtnl_link_ops->kind, "bridge")) |
4471 | err = rocker_port_bridge_join(rocker_port, master); | 4479 | err = rocker_port_bridge_join(rocker_port, master); |
4472 | else | 4480 | else if (rocker_port_is_bridged(rocker_port)) |
4473 | err = rocker_port_bridge_leave(rocker_port); | 4481 | err = rocker_port_bridge_leave(rocker_port); |
4474 | 4482 | ||
4475 | return err; | 4483 | return err; |
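The rocker change above replaces 1 << lport with 1ULL << lport because the port-enable register is a 64-bit value: a bare 1 is an int, so the shift is performed in 32-bit space and truncates (or is undefined) for port numbers of 31 and above. A small sketch of the distinction, with hypothetical names:

#include <linux/types.h>

static u64 my_set_port_bit(u64 val, unsigned int port, bool enable)
{
	/* 1ULL keeps the shift in 64-bit space; a plain 1 would be shifted
	 * as a 32-bit int and lose bits for port >= 31.
	 */
	if (enable)
		val |= 1ULL << port;
	else
		val &= ~(1ULL << port);
	return val;
}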
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 6b33127ab352..3449893aea8d 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c | |||
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev) | |||
1070 | smc->packets_waiting = 0; | 1070 | smc->packets_waiting = 0; |
1071 | 1071 | ||
1072 | smc_reset(dev); | 1072 | smc_reset(dev); |
1073 | init_timer(&smc->media); | 1073 | setup_timer(&smc->media, media_check, (u_long)dev); |
1074 | smc->media.function = media_check; | 1074 | mod_timer(&smc->media, jiffies + HZ); |
1075 | smc->media.data = (u_long) dev; | ||
1076 | smc->media.expires = jiffies + HZ; | ||
1077 | add_timer(&smc->media); | ||
1078 | 1075 | ||
1079 | return 0; | 1076 | return 0; |
1080 | } /* smc_open */ | 1077 | } /* smc_open */ |
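The smc91c92 hunk collapses the open-coded init_timer()/add_timer() sequence into setup_timer() plus mod_timer(), the idiomatic form in kernels of this vintage. A minimal sketch under that assumption; my_poll, my_start and my_timer are hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

static struct timer_list my_timer;

static void my_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;

	/* ... poll the hardware via dev ... */
	mod_timer(&my_timer, jiffies + HZ);	/* re-arm one second out */
}

static void my_start(struct net_device *dev)
{
	/* Bind callback and argument in one call, then arm the timer. */
	setup_timer(&my_timer, my_poll, (unsigned long)dev);
	mod_timer(&my_timer, jiffies + HZ);
}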
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 88a55f95fe09..8678e39aba08 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c | |||
@@ -91,6 +91,11 @@ static const char version[] = | |||
91 | 91 | ||
92 | #include "smc91x.h" | 92 | #include "smc91x.h" |
93 | 93 | ||
94 | #if defined(CONFIG_ASSABET_NEPONSET) | ||
95 | #include <mach/assabet.h> | ||
96 | #include <mach/neponset.h> | ||
97 | #endif | ||
98 | |||
94 | #ifndef SMC_NOWAIT | 99 | #ifndef SMC_NOWAIT |
95 | # define SMC_NOWAIT 0 | 100 | # define SMC_NOWAIT 0 |
96 | #endif | 101 | #endif |
@@ -2243,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2243 | const struct of_device_id *match = NULL; | 2248 | const struct of_device_id *match = NULL; |
2244 | struct smc_local *lp; | 2249 | struct smc_local *lp; |
2245 | struct net_device *ndev; | 2250 | struct net_device *ndev; |
2246 | struct resource *res; | 2251 | struct resource *res, *ires; |
2247 | unsigned int __iomem *addr; | 2252 | unsigned int __iomem *addr; |
2248 | unsigned long irq_flags = SMC_IRQ_FLAGS; | 2253 | unsigned long irq_flags = SMC_IRQ_FLAGS; |
2249 | unsigned long irq_resflags; | ||
2250 | int ret; | 2254 | int ret; |
2251 | 2255 | ||
2252 | ndev = alloc_etherdev(sizeof(struct smc_local)); | 2256 | ndev = alloc_etherdev(sizeof(struct smc_local)); |
@@ -2338,25 +2342,23 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2338 | goto out_free_netdev; | 2342 | goto out_free_netdev; |
2339 | } | 2343 | } |
2340 | 2344 | ||
2341 | ndev->irq = platform_get_irq(pdev, 0); | 2345 | ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
2342 | if (ndev->irq <= 0) { | 2346 | if (!ires) { |
2343 | ret = -ENODEV; | 2347 | ret = -ENODEV; |
2344 | goto out_release_io; | 2348 | goto out_release_io; |
2345 | } | 2349 | } |
2346 | /* | 2350 | |
2347 | * If this platform does not specify any special irqflags, or if | 2351 | ndev->irq = ires->start; |
2348 | * the resource supplies a trigger, override the irqflags with | 2352 | |
2349 | * the trigger flags from the resource. | 2353 | if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) |
2350 | */ | 2354 | irq_flags = ires->flags & IRQF_TRIGGER_MASK; |
2351 | irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); | ||
2352 | if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) | ||
2353 | irq_flags = irq_resflags & IRQF_TRIGGER_MASK; | ||
2354 | 2355 | ||
2355 | ret = smc_request_attrib(pdev, ndev); | 2356 | ret = smc_request_attrib(pdev, ndev); |
2356 | if (ret) | 2357 | if (ret) |
2357 | goto out_release_io; | 2358 | goto out_release_io; |
2358 | #if defined(CONFIG_SA1100_ASSABET) | 2359 | #if defined(CONFIG_ASSABET_NEPONSET) |
2359 | neponset_ncr_set(NCR_ENET_OSC_EN); | 2360 | if (machine_is_assabet() && machine_has_neponset()) |
2361 | neponset_ncr_set(NCR_ENET_OSC_EN); | ||
2360 | #endif | 2362 | #endif |
2361 | platform_set_drvdata(pdev, ndev); | 2363 | platform_set_drvdata(pdev, ndev); |
2362 | ret = smc_enable_device(pdev); | 2364 | ret = smc_enable_device(pdev); |
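The smc91x.c probe now fetches the interrupt through platform_get_resource(IORESOURCE_IRQ, ...) so that a trigger type encoded in the resource flags can override the driver's default SMC_IRQ_FLAGS. A condensed sketch of that pattern (my_get_irq is hypothetical):

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

static int my_get_irq(struct platform_device *pdev, struct net_device *ndev,
		      unsigned long *irq_flags)
{
	struct resource *ires;

	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!ires)
		return -ENODEV;

	ndev->irq = ires->start;

	/* Let a trigger type supplied by the resource override the default */
	if (*irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
		*irq_flags = ires->flags & IRQF_TRIGGER_MASK;

	return 0;
}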
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index be67baf5f677..3a18501d1068 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h | |||
@@ -39,14 +39,7 @@ | |||
39 | * Define your architecture specific bus configuration parameters here. | 39 | * Define your architecture specific bus configuration parameters here. |
40 | */ | 40 | */ |
41 | 41 | ||
42 | #if defined(CONFIG_ARCH_LUBBOCK) ||\ | 42 | #if defined(CONFIG_ARM) |
43 | defined(CONFIG_MACH_MAINSTONE) ||\ | ||
44 | defined(CONFIG_MACH_ZYLONITE) ||\ | ||
45 | defined(CONFIG_MACH_LITTLETON) ||\ | ||
46 | defined(CONFIG_MACH_ZYLONITE2) ||\ | ||
47 | defined(CONFIG_ARCH_VIPER) ||\ | ||
48 | defined(CONFIG_MACH_STARGATE2) ||\ | ||
49 | defined(CONFIG_ARCH_VERSATILE) | ||
50 | 43 | ||
51 | #include <asm/mach-types.h> | 44 | #include <asm/mach-types.h> |
52 | 45 | ||
@@ -74,95 +67,8 @@ | |||
74 | /* We actually can't write halfwords properly if not word aligned */ | 67 | /* We actually can't write halfwords properly if not word aligned */ |
75 | static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) | 68 | static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) |
76 | { | 69 | { |
77 | if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { | 70 | if ((machine_is_mainstone() || machine_is_stargate2() || |
78 | unsigned int v = val << 16; | 71 | machine_is_pxa_idp()) && reg & 2) { |
79 | v |= readl(ioaddr + (reg & ~2)) & 0xffff; | ||
80 | writel(v, ioaddr + (reg & ~2)); | ||
81 | } else { | ||
82 | writew(val, ioaddr + reg); | ||
83 | } | ||
84 | } | ||
85 | |||
86 | #elif defined(CONFIG_SA1100_PLEB) | ||
87 | /* We can only do 16-bit reads and writes in the static memory space. */ | ||
88 | #define SMC_CAN_USE_8BIT 1 | ||
89 | #define SMC_CAN_USE_16BIT 1 | ||
90 | #define SMC_CAN_USE_32BIT 0 | ||
91 | #define SMC_IO_SHIFT 0 | ||
92 | #define SMC_NOWAIT 1 | ||
93 | |||
94 | #define SMC_inb(a, r) readb((a) + (r)) | ||
95 | #define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) | ||
96 | #define SMC_inw(a, r) readw((a) + (r)) | ||
97 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
98 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
99 | #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) | ||
100 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
101 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
102 | |||
103 | #define SMC_IRQ_FLAGS (-1) | ||
104 | |||
105 | #elif defined(CONFIG_SA1100_ASSABET) | ||
106 | |||
107 | #include <mach/neponset.h> | ||
108 | |||
109 | /* We can only do 8-bit reads and writes in the static memory space. */ | ||
110 | #define SMC_CAN_USE_8BIT 1 | ||
111 | #define SMC_CAN_USE_16BIT 0 | ||
112 | #define SMC_CAN_USE_32BIT 0 | ||
113 | #define SMC_NOWAIT 1 | ||
114 | |||
115 | /* The first two address lines aren't connected... */ | ||
116 | #define SMC_IO_SHIFT 2 | ||
117 | |||
118 | #define SMC_inb(a, r) readb((a) + (r)) | ||
119 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
120 | #define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) | ||
121 | #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) | ||
122 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | ||
123 | |||
124 | #elif defined(CONFIG_MACH_LOGICPD_PXA270) || \ | ||
125 | defined(CONFIG_MACH_NOMADIK_8815NHK) | ||
126 | |||
127 | #define SMC_CAN_USE_8BIT 0 | ||
128 | #define SMC_CAN_USE_16BIT 1 | ||
129 | #define SMC_CAN_USE_32BIT 0 | ||
130 | #define SMC_IO_SHIFT 0 | ||
131 | #define SMC_NOWAIT 1 | ||
132 | |||
133 | #define SMC_inw(a, r) readw((a) + (r)) | ||
134 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
135 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
136 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
137 | |||
138 | #elif defined(CONFIG_ARCH_INNOKOM) || \ | ||
139 | defined(CONFIG_ARCH_PXA_IDP) || \ | ||
140 | defined(CONFIG_ARCH_RAMSES) || \ | ||
141 | defined(CONFIG_ARCH_PCM027) | ||
142 | |||
143 | #define SMC_CAN_USE_8BIT 1 | ||
144 | #define SMC_CAN_USE_16BIT 1 | ||
145 | #define SMC_CAN_USE_32BIT 1 | ||
146 | #define SMC_IO_SHIFT 0 | ||
147 | #define SMC_NOWAIT 1 | ||
148 | #define SMC_USE_PXA_DMA 1 | ||
149 | |||
150 | #define SMC_inb(a, r) readb((a) + (r)) | ||
151 | #define SMC_inw(a, r) readw((a) + (r)) | ||
152 | #define SMC_inl(a, r) readl((a) + (r)) | ||
153 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
154 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
155 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | ||
156 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | ||
157 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
158 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
159 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | ||
160 | |||
161 | /* We actually can't write halfwords properly if not word aligned */ | ||
162 | static inline void | ||
163 | SMC_outw(u16 val, void __iomem *ioaddr, int reg) | ||
164 | { | ||
165 | if (reg & 2) { | ||
166 | unsigned int v = val << 16; | 72 | unsigned int v = val << 16; |
167 | v |= readl(ioaddr + (reg & ~2)) & 0xffff; | 73 | v |= readl(ioaddr + (reg & ~2)) & 0xffff; |
168 | writel(v, ioaddr + (reg & ~2)); | 74 | writel(v, ioaddr + (reg & ~2)); |
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) | |||
237 | #define RPC_LSA_DEFAULT RPC_LED_100_10 | 143 | #define RPC_LSA_DEFAULT RPC_LED_100_10 |
238 | #define RPC_LSB_DEFAULT RPC_LED_TX_RX | 144 | #define RPC_LSB_DEFAULT RPC_LED_TX_RX |
239 | 145 | ||
240 | #elif defined(CONFIG_ARCH_MSM) | ||
241 | |||
242 | #define SMC_CAN_USE_8BIT 0 | ||
243 | #define SMC_CAN_USE_16BIT 1 | ||
244 | #define SMC_CAN_USE_32BIT 0 | ||
245 | #define SMC_NOWAIT 1 | ||
246 | |||
247 | #define SMC_inw(a, r) readw((a) + (r)) | ||
248 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
249 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
250 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
251 | |||
252 | #define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH | ||
253 | |||
254 | #elif defined(CONFIG_COLDFIRE) | 146 | #elif defined(CONFIG_COLDFIRE) |
255 | 147 | ||
256 | #define SMC_CAN_USE_8BIT 0 | 148 | #define SMC_CAN_USE_8BIT 0 |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 55e89b3838f1..a0ea84fe6519 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
310 | spin_lock_irqsave(&priv->lock, flags); | 310 | spin_lock_irqsave(&priv->lock, flags); |
311 | if (!priv->eee_active) { | 311 | if (!priv->eee_active) { |
312 | priv->eee_active = 1; | 312 | priv->eee_active = 1; |
313 | init_timer(&priv->eee_ctrl_timer); | 313 | setup_timer(&priv->eee_ctrl_timer, |
314 | priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; | 314 | stmmac_eee_ctrl_timer, |
315 | priv->eee_ctrl_timer.data = (unsigned long)priv; | 315 | (unsigned long)priv); |
316 | priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); | 316 | mod_timer(&priv->eee_ctrl_timer, |
317 | add_timer(&priv->eee_ctrl_timer); | 317 | STMMAC_LPI_T(eee_timer)); |
318 | 318 | ||
319 | priv->hw->mac->set_eee_timer(priv->hw, | 319 | priv->hw->mac->set_eee_timer(priv->hw, |
320 | STMMAC_DEFAULT_LIT_LS, | 320 | STMMAC_DEFAULT_LIT_LS, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index fb846ebba1d9..f9b42f11950f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
272 | struct stmmac_priv *priv = NULL; | 272 | struct stmmac_priv *priv = NULL; |
273 | struct plat_stmmacenet_data *plat_dat = NULL; | 273 | struct plat_stmmacenet_data *plat_dat = NULL; |
274 | const char *mac = NULL; | 274 | const char *mac = NULL; |
275 | int irq, wol_irq, lpi_irq; | ||
276 | |||
277 | /* Get IRQ information early to have an ability to ask for deferred | ||
278 | * probe if needed before we went too far with resource allocation. | ||
279 | */ | ||
280 | irq = platform_get_irq_byname(pdev, "macirq"); | ||
281 | if (irq < 0) { | ||
282 | if (irq != -EPROBE_DEFER) { | ||
283 | dev_err(dev, | ||
284 | "MAC IRQ configuration information not found\n"); | ||
285 | } | ||
286 | return irq; | ||
287 | } | ||
288 | |||
289 | /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq | ||
290 | * The external wake up irq can be passed through the platform code | ||
291 | * named as "eth_wake_irq" | ||
292 | * | ||
293 | * In case the wake up interrupt is not passed from the platform | ||
294 | * so the driver will continue to use the mac irq (ndev->irq) | ||
295 | */ | ||
296 | wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); | ||
297 | if (wol_irq < 0) { | ||
298 | if (wol_irq == -EPROBE_DEFER) | ||
299 | return -EPROBE_DEFER; | ||
300 | wol_irq = irq; | ||
301 | } | ||
302 | |||
303 | lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); | ||
304 | if (lpi_irq == -EPROBE_DEFER) | ||
305 | return -EPROBE_DEFER; | ||
275 | 306 | ||
276 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 307 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
277 | addr = devm_ioremap_resource(dev, res); | 308 | addr = devm_ioremap_resource(dev, res); |
@@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
323 | return PTR_ERR(priv); | 354 | return PTR_ERR(priv); |
324 | } | 355 | } |
325 | 356 | ||
357 | /* Copy IRQ values to priv structure which is now available */ | ||
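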
358 | priv->dev->irq = irq; | ||
359 | priv->wol_irq = wol_irq; | ||
360 | priv->lpi_irq = lpi_irq; | ||
361 | |||
326 | /* Get MAC address if available (DT) */ | 362 | /* Get MAC address if available (DT) */ |
327 | if (mac) | 363 | if (mac) |
328 | memcpy(priv->dev->dev_addr, mac, ETH_ALEN); | 364 | memcpy(priv->dev->dev_addr, mac, ETH_ALEN); |
329 | 365 | ||
330 | /* Get the MAC information */ | ||
331 | priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); | ||
332 | if (priv->dev->irq < 0) { | ||
333 | if (priv->dev->irq != -EPROBE_DEFER) { | ||
334 | netdev_err(priv->dev, | ||
335 | "MAC IRQ configuration information not found\n"); | ||
336 | } | ||
337 | return priv->dev->irq; | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * On some platforms e.g. SPEAr the wake up irq differs from the mac irq | ||
342 | * The external wake up irq can be passed through the platform code | ||
343 | * named as "eth_wake_irq" | ||
344 | * | ||
345 | * In case the wake up interrupt is not passed from the platform | ||
346 | * so the driver will continue to use the mac irq (ndev->irq) | ||
347 | */ | ||
348 | priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); | ||
349 | if (priv->wol_irq < 0) { | ||
350 | if (priv->wol_irq == -EPROBE_DEFER) | ||
351 | return -EPROBE_DEFER; | ||
352 | priv->wol_irq = priv->dev->irq; | ||
353 | } | ||
354 | |||
355 | priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); | ||
356 | if (priv->lpi_irq == -EPROBE_DEFER) | ||
357 | return -EPROBE_DEFER; | ||
358 | |||
359 | platform_set_drvdata(pdev, priv->dev); | 366 | platform_set_drvdata(pdev, priv->dev); |
360 | 367 | ||
361 | pr_debug("STMMAC platform driver registration completed"); | 368 | pr_debug("STMMAC platform driver registration completed"); |
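The stmmac_platform rework queries all three interrupts before any resource allocation, so -EPROBE_DEFER can be returned early and cheaply instead of after registers are mapped and the netdev is set up. A condensed sketch of that pattern (my_probe is a hypothetical probe function):

#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	int irq, wol_irq;

	/* Ask for IRQs first: if the provider is not ready yet we get
	 * -EPROBE_DEFER back before any memory or clocks are claimed.
	 */
	irq = platform_get_irq_byname(pdev, "macirq");
	if (irq < 0) {
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "MAC IRQ not found\n");
		return irq;
	}

	/* Optional wake-up IRQ falls back to the MAC IRQ */
	wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (wol_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (wol_irq < 0)
		wol_irq = irq;

	/* ... only now allocate the netdev and map the registers ... */
	return 0;
}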
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 4b51f903fb73..0c5842aeb807 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c | |||
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type) | |||
6989 | *flow_type = IP_USER_FLOW; | 6989 | *flow_type = IP_USER_FLOW; |
6990 | break; | 6990 | break; |
6991 | default: | 6991 | default: |
6992 | return 0; | 6992 | return -EINVAL; |
6993 | } | 6993 | } |
6994 | 6994 | ||
6995 | return 1; | 6995 | return 0; |
6996 | } | 6996 | } |
6997 | 6997 | ||
6998 | static int niu_ethflow_to_class(int flow_type, u64 *class) | 6998 | static int niu_ethflow_to_class(int flow_type, u64 *class) |
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np, | |||
7198 | class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> | 7198 | class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> |
7199 | TCAM_V4KEY0_CLASS_CODE_SHIFT; | 7199 | TCAM_V4KEY0_CLASS_CODE_SHIFT; |
7200 | ret = niu_class_to_ethflow(class, &fsp->flow_type); | 7200 | ret = niu_class_to_ethflow(class, &fsp->flow_type); |
7201 | |||
7202 | if (ret < 0) { | 7201 | if (ret < 0) { |
7203 | netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", | 7202 | netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", |
7204 | parent->index); | 7203 | parent->index); |
7205 | ret = -EINVAL; | ||
7206 | goto out; | 7204 | goto out; |
7207 | } | 7205 | } |
7208 | 7206 | ||
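The niu change moves niu_class_to_ethflow() from a 0/1 "boolean" return to the usual 0-on-success / negative-errno convention, which lets the caller propagate the code directly instead of translating it. A small illustration with hypothetical names and mappings:

#include <linux/types.h>
#include <linux/errno.h>

/* 0 on success, -EINVAL if the class has no ethtool flow-type mapping */
static int my_class_to_flow(u64 class, int *flow_type)
{
	switch (class) {
	case 1:
		*flow_type = 1;	/* hypothetical mapping */
		return 0;
	default:
		return -EINVAL;
	}
}

static int my_caller(u64 class, int *flow_type)
{
	int ret = my_class_to_flow(class, flow_type);

	if (ret < 0)
		return ret;	/* propagate -EINVAL as-is */
	/* ... */
	return 0;
}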
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7d8dd0d2182e..a1bbaf6352ba 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries( | |||
1103 | cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, | 1103 | cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, |
1104 | port_mask, ALE_VLAN, slave->port_vlan, 0); | 1104 | port_mask, ALE_VLAN, slave->port_vlan, 0); |
1105 | cpsw_ale_add_ucast(priv->ale, priv->mac_addr, | 1105 | cpsw_ale_add_ucast(priv->ale, priv->mac_addr, |
1106 | priv->host_port, ALE_VLAN, slave->port_vlan); | 1106 | priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan); |
1107 | } | 1107 | } |
1108 | 1108 | ||
1109 | static void soft_reset_slave(struct cpsw_slave *slave) | 1109 | static void soft_reset_slave(struct cpsw_slave *slave) |
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev) | |||
2466 | return 0; | 2466 | return 0; |
2467 | } | 2467 | } |
2468 | 2468 | ||
2469 | #ifdef CONFIG_PM_SLEEP | ||
2469 | static int cpsw_suspend(struct device *dev) | 2470 | static int cpsw_suspend(struct device *dev) |
2470 | { | 2471 | { |
2471 | struct platform_device *pdev = to_platform_device(dev); | 2472 | struct platform_device *pdev = to_platform_device(dev); |
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev) | |||
2518 | } | 2519 | } |
2519 | return 0; | 2520 | return 0; |
2520 | } | 2521 | } |
2522 | #endif | ||
2521 | 2523 | ||
2522 | static const struct dev_pm_ops cpsw_pm_ops = { | 2524 | static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); |
2523 | .suspend = cpsw_suspend, | ||
2524 | .resume = cpsw_resume, | ||
2525 | }; | ||
2526 | 2525 | ||
2527 | static const struct of_device_id cpsw_of_mtable[] = { | 2526 | static const struct of_device_id cpsw_of_mtable[] = { |
2528 | { .compatible = "ti,cpsw", }, | 2527 | { .compatible = "ti,cpsw", }, |
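The cpsw change wraps the suspend/resume callbacks in CONFIG_PM_SLEEP and generates the dev_pm_ops with SIMPLE_DEV_PM_OPS, so the handlers compile away cleanly when sleep support is configured out instead of triggering "defined but not used" warnings. A minimal sketch of the pattern (the my_ names and "my-device" string are hypothetical):

#include <linux/pm.h>
#include <linux/platform_device.h>

#ifdef CONFIG_PM_SLEEP
static int my_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int my_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}
#endif

/* Expands to an empty ops table when CONFIG_PM_SLEEP is not set */
static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct platform_driver my_driver = {
	.driver = {
		.name	= "my-device",
		.pm	= &my_pm_ops,
	},
};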
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 98655b44b97e..c00084d689f3 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c | |||
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev) | |||
423 | return 0; | 423 | return 0; |
424 | } | 424 | } |
425 | 425 | ||
426 | #ifdef CONFIG_PM_SLEEP | ||
426 | static int davinci_mdio_suspend(struct device *dev) | 427 | static int davinci_mdio_suspend(struct device *dev) |
427 | { | 428 | { |
428 | struct davinci_mdio_data *data = dev_get_drvdata(dev); | 429 | struct davinci_mdio_data *data = dev_get_drvdata(dev); |
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev) | |||
464 | 465 | ||
465 | return 0; | 466 | return 0; |
466 | } | 467 | } |
468 | #endif | ||
467 | 469 | ||
468 | static const struct dev_pm_ops davinci_mdio_pm_ops = { | 470 | static const struct dev_pm_ops davinci_mdio_pm_ops = { |
469 | .suspend_late = davinci_mdio_suspend, | 471 | SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume) |
470 | .resume_early = davinci_mdio_resume, | ||
471 | }; | 472 | }; |
472 | 473 | ||
473 | #if IS_ENABLED(CONFIG_OF) | 474 | #if IS_ENABLED(CONFIG_OF) |
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index a495931a66a1..0e0fbb5842b3 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c | |||
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) | |||
498 | } | 498 | } |
499 | 499 | ||
500 | if (rx_count < budget) { | 500 | if (rx_count < budget) { |
501 | napi_complete(napi); | ||
501 | w5100_write(priv, W5100_IMR, IR_S0); | 502 | w5100_write(priv, W5100_IMR, IR_S0); |
502 | mmiowb(); | 503 | mmiowb(); |
503 | napi_complete(napi); | ||
504 | } | 504 | } |
505 | 505 | ||
506 | return rx_count; | 506 | return rx_count; |
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 09322d9db578..4b310002258d 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c | |||
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget) | |||
418 | } | 418 | } |
419 | 419 | ||
420 | if (rx_count < budget) { | 420 | if (rx_count < budget) { |
421 | napi_complete(napi); | ||
421 | w5300_write(priv, W5300_IMR, IR_S0); | 422 | w5300_write(priv, W5300_IMR, IR_S0); |
422 | mmiowb(); | 423 | mmiowb(); |
423 | napi_complete(napi); | ||
424 | } | 424 | } |
425 | 425 | ||
426 | return rx_count; | 426 | return rx_count; |
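Both wiznet hunks move napi_complete() ahead of the interrupt re-enable write. If the interrupt were unmasked first, an IRQ arriving in that window would find NAPI still scheduled, skip napi_schedule(), and the event could be lost once completion clears the state. A sketch of the poll-routine shape, with all my_ names and the register offset hypothetical:

#include <linux/netdevice.h>
#include <linux/io.h>

struct my_priv {
	struct napi_struct napi;
	void __iomem *regs;
};

static void my_unmask_rx_irq(struct my_priv *priv)
{
	/* hypothetical register write re-enabling the Rx interrupt */
	writel(0x1, priv->regs + 0x10);
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = 0;

	/* ... receive up to budget packets, incrementing done ... */

	if (done < budget) {
		/* Leave polling mode first ... */
		napi_complete(napi);
		/* ... then unmask the device interrupt. */
		my_unmask_rx_irq(priv);
	}
	return done;
}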
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index f7e0f0f7c2e2..9e16a2819d48 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c | |||
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev) | |||
938 | int i; | 938 | int i; |
939 | static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; | 939 | static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; |
940 | 940 | ||
941 | if (dev->flags & IFF_ALLMULTI) { | 941 | if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) { |
942 | for (i = 0; i < ETH_ALEN; i++) { | 942 | for (i = 0; i < ETH_ALEN; i++) { |
943 | __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); | 943 | __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); |
944 | __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); | 944 | __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); |
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 924ea98bd531..54549a6223dd 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h | |||
@@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr); | |||
114 | rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb); | 114 | rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb); |
115 | int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev); | 115 | int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev); |
116 | void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr); | 116 | void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr); |
117 | bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6); | 117 | struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, |
118 | const void *iaddr, bool is_v6); | ||
119 | bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); | ||
118 | struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, | 120 | struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, |
119 | const void *iaddr, bool is_v6); | 121 | const void *iaddr, bool is_v6); |
120 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); | 122 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); |
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 2a175006028b..b7877a194cfe 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c | |||
@@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) | |||
81 | hash = (addr->atype == IPVL_IPV6) ? | 81 | hash = (addr->atype == IPVL_IPV6) ? |
82 | ipvlan_get_v6_hash(&addr->ip6addr) : | 82 | ipvlan_get_v6_hash(&addr->ip6addr) : |
83 | ipvlan_get_v4_hash(&addr->ip4addr); | 83 | ipvlan_get_v4_hash(&addr->ip4addr); |
84 | hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); | 84 | if (hlist_unhashed(&addr->hlnode)) |
85 | hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); | ||
85 | } | 86 | } |
86 | 87 | ||
87 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) | 88 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) |
88 | { | 89 | { |
89 | hlist_del_rcu(&addr->hlnode); | 90 | hlist_del_init_rcu(&addr->hlnode); |
90 | if (sync) | 91 | if (sync) |
91 | synchronize_rcu(); | 92 | synchronize_rcu(); |
92 | } | 93 | } |
93 | 94 | ||
94 | bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) | 95 | struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, |
96 | const void *iaddr, bool is_v6) | ||
95 | { | 97 | { |
96 | struct ipvl_port *port = ipvlan->port; | ||
97 | struct ipvl_addr *addr; | 98 | struct ipvl_addr *addr; |
98 | 99 | ||
99 | list_for_each_entry(addr, &ipvlan->addrs, anode) { | 100 | list_for_each_entry(addr, &ipvlan->addrs, anode) { |
@@ -101,12 +102,21 @@ bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) | |||
101 | ipv6_addr_equal(&addr->ip6addr, iaddr)) || | 102 | ipv6_addr_equal(&addr->ip6addr, iaddr)) || |
102 | (!is_v6 && addr->atype == IPVL_IPV4 && | 103 | (!is_v6 && addr->atype == IPVL_IPV4 && |
103 | addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) | 104 | addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) |
104 | return true; | 105 | return addr; |
105 | } | 106 | } |
107 | return NULL; | ||
108 | } | ||
109 | |||
110 | bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6) | ||
111 | { | ||
112 | struct ipvl_dev *ipvlan; | ||
106 | 113 | ||
107 | if (ipvlan_ht_addr_lookup(port, iaddr, is_v6)) | 114 | ASSERT_RTNL(); |
108 | return true; | ||
109 | 115 | ||
116 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { | ||
117 | if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) | ||
118 | return true; | ||
119 | } | ||
110 | return false; | 120 | return false; |
111 | } | 121 | } |
112 | 122 | ||
@@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb, | |||
192 | if (skb->protocol == htons(ETH_P_PAUSE)) | 202 | if (skb->protocol == htons(ETH_P_PAUSE)) |
193 | return; | 203 | return; |
194 | 204 | ||
195 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { | 205 | rcu_read_lock(); |
206 | list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) { | ||
196 | if (local && (ipvlan == in_dev)) | 207 | if (local && (ipvlan == in_dev)) |
197 | continue; | 208 | continue; |
198 | 209 | ||
@@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb, | |||
219 | mcast_acct: | 230 | mcast_acct: |
220 | ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); | 231 | ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); |
221 | } | 232 | } |
233 | rcu_read_unlock(); | ||
222 | 234 | ||
223 | /* Locally generated? ...Forward a copy to the main-device as | 235 | /* Locally generated? ...Forward a copy to the main-device as |
224 | * well. On the RX side we'll ignore it (wont give it to any | 236 | * well. On the RX side we'll ignore it (wont give it to any |
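The ipvlan_core hunks make hash membership idempotent: an address is only inserted when it is not already hashed, and hlist_del_init_rcu() leaves the node back in the "unhashed" state so a later add or delete remains safe. A stripped-down sketch under those assumptions (my_addr/my_table are hypothetical, not ipvlan structures):

#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct my_addr {
	struct hlist_node hlnode;
	u32 key;
};

static struct hlist_head my_table[16];

static void my_addr_hash_add(struct my_addr *addr)
{
	/* Only insert if the node is not already on a hash chain */
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &my_table[addr->key & 15]);
}

static void my_addr_hash_del(struct my_addr *addr)
{
	/* The _init_ variant re-initialises the node, so hlist_unhashed()
	 * becomes true again and a repeat add/del is harmless.
	 */
	hlist_del_init_rcu(&addr->hlnode);
	synchronize_rcu();
}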
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 4f4099d5603d..4fa14208d799 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -505,7 +505,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) | |||
505 | if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { | 505 | if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { |
506 | list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { | 506 | list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { |
507 | ipvlan_ht_addr_del(addr, !dev->dismantle); | 507 | ipvlan_ht_addr_del(addr, !dev->dismantle); |
508 | list_del_rcu(&addr->anode); | 508 | list_del(&addr->anode); |
509 | } | 509 | } |
510 | } | 510 | } |
511 | list_del_rcu(&ipvlan->pnode); | 511 | list_del_rcu(&ipvlan->pnode); |
@@ -607,7 +607,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
607 | { | 607 | { |
608 | struct ipvl_addr *addr; | 608 | struct ipvl_addr *addr; |
609 | 609 | ||
610 | if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) { | 610 | if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) { |
611 | netif_err(ipvlan, ifup, ipvlan->dev, | 611 | netif_err(ipvlan, ifup, ipvlan->dev, |
612 | "Failed to add IPv6=%pI6c addr for %s intf\n", | 612 | "Failed to add IPv6=%pI6c addr for %s intf\n", |
613 | ip6_addr, ipvlan->dev->name); | 613 | ip6_addr, ipvlan->dev->name); |
@@ -620,9 +620,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
620 | addr->master = ipvlan; | 620 | addr->master = ipvlan; |
621 | memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); | 621 | memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); |
622 | addr->atype = IPVL_IPV6; | 622 | addr->atype = IPVL_IPV6; |
623 | list_add_tail_rcu(&addr->anode, &ipvlan->addrs); | 623 | list_add_tail(&addr->anode, &ipvlan->addrs); |
624 | ipvlan->ipv6cnt++; | 624 | ipvlan->ipv6cnt++; |
625 | ipvlan_ht_addr_add(ipvlan, addr); | 625 | /* If the interface is not up, the address will be added to the hash |
626 | * list by ipvlan_open. | ||
627 | */ | ||
628 | if (netif_running(ipvlan->dev)) | ||
629 | ipvlan_ht_addr_add(ipvlan, addr); | ||
626 | 630 | ||
627 | return 0; | 631 | return 0; |
628 | } | 632 | } |
@@ -631,12 +635,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
631 | { | 635 | { |
632 | struct ipvl_addr *addr; | 636 | struct ipvl_addr *addr; |
633 | 637 | ||
634 | addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true); | 638 | addr = ipvlan_find_addr(ipvlan, ip6_addr, true); |
635 | if (!addr) | 639 | if (!addr) |
636 | return; | 640 | return; |
637 | 641 | ||
638 | ipvlan_ht_addr_del(addr, true); | 642 | ipvlan_ht_addr_del(addr, true); |
639 | list_del_rcu(&addr->anode); | 643 | list_del(&addr->anode); |
640 | ipvlan->ipv6cnt--; | 644 | ipvlan->ipv6cnt--; |
641 | WARN_ON(ipvlan->ipv6cnt < 0); | 645 | WARN_ON(ipvlan->ipv6cnt < 0); |
642 | kfree_rcu(addr, rcu); | 646 | kfree_rcu(addr, rcu); |
@@ -675,7 +679,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
675 | { | 679 | { |
676 | struct ipvl_addr *addr; | 680 | struct ipvl_addr *addr; |
677 | 681 | ||
678 | if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) { | 682 | if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) { |
679 | netif_err(ipvlan, ifup, ipvlan->dev, | 683 | netif_err(ipvlan, ifup, ipvlan->dev, |
680 | "Failed to add IPv4=%pI4 on %s intf.\n", | 684 | "Failed to add IPv4=%pI4 on %s intf.\n", |
681 | ip4_addr, ipvlan->dev->name); | 685 | ip4_addr, ipvlan->dev->name); |
@@ -688,9 +692,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
688 | addr->master = ipvlan; | 692 | addr->master = ipvlan; |
689 | memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); | 693 | memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); |
690 | addr->atype = IPVL_IPV4; | 694 | addr->atype = IPVL_IPV4; |
691 | list_add_tail_rcu(&addr->anode, &ipvlan->addrs); | 695 | list_add_tail(&addr->anode, &ipvlan->addrs); |
692 | ipvlan->ipv4cnt++; | 696 | ipvlan->ipv4cnt++; |
693 | ipvlan_ht_addr_add(ipvlan, addr); | 697 | /* If the interface is not up, the address will be added to the hash |
698 | * list by ipvlan_open. | ||
699 | */ | ||
700 | if (netif_running(ipvlan->dev)) | ||
701 | ipvlan_ht_addr_add(ipvlan, addr); | ||
694 | ipvlan_set_broadcast_mac_filter(ipvlan, true); | 702 | ipvlan_set_broadcast_mac_filter(ipvlan, true); |
695 | 703 | ||
696 | return 0; | 704 | return 0; |
@@ -700,12 +708,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
700 | { | 708 | { |
701 | struct ipvl_addr *addr; | 709 | struct ipvl_addr *addr; |
702 | 710 | ||
703 | addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false); | 711 | addr = ipvlan_find_addr(ipvlan, ip4_addr, false); |
704 | if (!addr) | 712 | if (!addr) |
705 | return; | 713 | return; |
706 | 714 | ||
707 | ipvlan_ht_addr_del(addr, true); | 715 | ipvlan_ht_addr_del(addr, true); |
708 | list_del_rcu(&addr->anode); | 716 | list_del(&addr->anode); |
709 | ipvlan->ipv4cnt--; | 717 | ipvlan->ipv4cnt--; |
710 | WARN_ON(ipvlan->ipv4cnt < 0); | 718 | WARN_ON(ipvlan->ipv4cnt < 0); |
711 | if (!ipvlan->ipv4cnt) | 719 | if (!ipvlan->ipv4cnt) |
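The ipvlan_main hunks drop the _rcu list primitives for the per-device address list and instead rely on the RTNL lock, asserting it in the new ipvlan_addr_busy() walk over all slaves. A brief sketch of a plain list walk guarded by RTNL (my_ names hypothetical):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/rtnetlink.h>

struct my_slave {
	struct list_head node;
	u32 addr;		/* hypothetical per-slave address */
};

static LIST_HEAD(my_slaves);

/* Caller must hold RTNL; the list is only modified under RTNL, so the
 * walk needs no RCU protection or extra locking.
 */
static bool my_addr_busy(u32 addr)
{
	struct my_slave *slave;

	ASSERT_RTNL();

	list_for_each_entry(slave, &my_slaves, node)
		if (slave->addr == addr)
			return true;
	return false;
}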
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index e40fdfccc9c1..27ecc5c4fa26 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, | |||
654 | } /* else everything is zero */ | 654 | } /* else everything is zero */ |
655 | } | 655 | } |
656 | 656 | ||
657 | /* Neighbour code has some assumptions on HH_DATA_MOD alignment */ | ||
658 | #define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) | ||
659 | |||
657 | /* Get packet from user space buffer */ | 660 | /* Get packet from user space buffer */ |
658 | static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | 661 | static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, |
659 | struct iov_iter *from, int noblock) | 662 | struct iov_iter *from, int noblock) |
660 | { | 663 | { |
661 | int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); | 664 | int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE); |
662 | struct sk_buff *skb; | 665 | struct sk_buff *skb; |
663 | struct macvlan_dev *vlan; | 666 | struct macvlan_dev *vlan; |
664 | unsigned long total_len = iov_iter_count(from); | 667 | unsigned long total_len = iov_iter_count(from); |
@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
722 | linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); | 725 | linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); |
723 | } | 726 | } |
724 | 727 | ||
725 | skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, | 728 | skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, |
726 | linear, noblock, &err); | 729 | linear, noblock, &err); |
727 | if (!skb) | 730 | if (!skb) |
728 | goto err; | 731 | goto err; |
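The macvtap change reserves HH_DATA_OFF(ETH_HLEN) bytes of headroom instead of NET_IP_ALIGN so the hard-header area satisfies the HH_DATA_MOD alignment the neighbour code assumes. A small sketch of applying such a reserve to a freshly allocated skb (my_alloc and MY_RESERVE are hypothetical):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Headroom that keeps the Ethernet header aligned to HH_DATA_MOD */
#define MY_RESERVE	HH_DATA_OFF(ETH_HLEN)

static struct sk_buff *my_alloc(unsigned int len)
{
	struct sk_buff *skb = alloc_skb(MY_RESERVE + len, GFP_KERNEL);

	if (!skb)
		return NULL;
	skb_reserve(skb, MY_RESERVE);	/* payload starts on an aligned boundary */
	return skb;
}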
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index 9e3af54c9010..32efbd48f326 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c | |||
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
92 | #define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" | 92 | #define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" |
93 | #define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" | 93 | #define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" |
94 | #define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp" | 94 | #define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp" |
95 | #define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config" | ||
96 | #define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable" | ||
95 | 97 | ||
96 | #define XGBE_PHY_SPEEDS 3 | 98 | #define XGBE_PHY_SPEEDS 3 |
97 | #define XGBE_PHY_SPEED_1000 0 | 99 | #define XGBE_PHY_SPEED_1000 0 |
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
177 | #define SPEED_10000_BLWC 0 | 179 | #define SPEED_10000_BLWC 0 |
178 | #define SPEED_10000_CDR 0x7 | 180 | #define SPEED_10000_CDR 0x7 |
179 | #define SPEED_10000_PLL 0x1 | 181 | #define SPEED_10000_PLL 0x1 |
180 | #define SPEED_10000_PQ 0x1e | 182 | #define SPEED_10000_PQ 0x12 |
181 | #define SPEED_10000_RATE 0x0 | 183 | #define SPEED_10000_RATE 0x0 |
182 | #define SPEED_10000_TXAMP 0xa | 184 | #define SPEED_10000_TXAMP 0xa |
183 | #define SPEED_10000_WORD 0x7 | 185 | #define SPEED_10000_WORD 0x7 |
186 | #define SPEED_10000_DFE_TAP_CONFIG 0x1 | ||
187 | #define SPEED_10000_DFE_TAP_ENABLE 0x7f | ||
184 | 188 | ||
185 | #define SPEED_2500_BLWC 1 | 189 | #define SPEED_2500_BLWC 1 |
186 | #define SPEED_2500_CDR 0x2 | 190 | #define SPEED_2500_CDR 0x2 |
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
189 | #define SPEED_2500_RATE 0x1 | 193 | #define SPEED_2500_RATE 0x1 |
190 | #define SPEED_2500_TXAMP 0xf | 194 | #define SPEED_2500_TXAMP 0xf |
191 | #define SPEED_2500_WORD 0x1 | 195 | #define SPEED_2500_WORD 0x1 |
196 | #define SPEED_2500_DFE_TAP_CONFIG 0x3 | ||
197 | #define SPEED_2500_DFE_TAP_ENABLE 0x0 | ||
192 | 198 | ||
193 | #define SPEED_1000_BLWC 1 | 199 | #define SPEED_1000_BLWC 1 |
194 | #define SPEED_1000_CDR 0x2 | 200 | #define SPEED_1000_CDR 0x2 |
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
197 | #define SPEED_1000_RATE 0x3 | 203 | #define SPEED_1000_RATE 0x3 |
198 | #define SPEED_1000_TXAMP 0xf | 204 | #define SPEED_1000_TXAMP 0xf |
199 | #define SPEED_1000_WORD 0x1 | 205 | #define SPEED_1000_WORD 0x1 |
206 | #define SPEED_1000_DFE_TAP_CONFIG 0x3 | ||
207 | #define SPEED_1000_DFE_TAP_ENABLE 0x0 | ||
200 | 208 | ||
201 | /* SerDes RxTx register offsets */ | 209 | /* SerDes RxTx register offsets */ |
210 | #define RXTX_REG6 0x0018 | ||
202 | #define RXTX_REG20 0x0050 | 211 | #define RXTX_REG20 0x0050 |
212 | #define RXTX_REG22 0x0058 | ||
203 | #define RXTX_REG114 0x01c8 | 213 | #define RXTX_REG114 0x01c8 |
214 | #define RXTX_REG129 0x0204 | ||
204 | 215 | ||
205 | /* SerDes RxTx register entry bit positions and sizes */ | 216 | /* SerDes RxTx register entry bit positions and sizes */ |
217 | #define RXTX_REG6_RESETB_RXD_INDEX 8 | ||
218 | #define RXTX_REG6_RESETB_RXD_WIDTH 1 | ||
206 | #define RXTX_REG20_BLWC_ENA_INDEX 2 | 219 | #define RXTX_REG20_BLWC_ENA_INDEX 2 |
207 | #define RXTX_REG20_BLWC_ENA_WIDTH 1 | 220 | #define RXTX_REG20_BLWC_ENA_WIDTH 1 |
208 | #define RXTX_REG114_PQ_REG_INDEX 9 | 221 | #define RXTX_REG114_PQ_REG_INDEX 9 |
209 | #define RXTX_REG114_PQ_REG_WIDTH 7 | 222 | #define RXTX_REG114_PQ_REG_WIDTH 7 |
223 | #define RXTX_REG129_RXDFE_CONFIG_INDEX 14 | ||
224 | #define RXTX_REG129_RXDFE_CONFIG_WIDTH 2 | ||
210 | 225 | ||
211 | /* Bit setting and getting macros | 226 | /* Bit setting and getting macros |
212 | * The get macro will extract the current bit field value from within | 227 | * The get macro will extract the current bit field value from within |
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = { | |||
333 | SPEED_10000_TXAMP, | 348 | SPEED_10000_TXAMP, |
334 | }; | 349 | }; |
335 | 350 | ||
351 | static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = { | ||
352 | SPEED_1000_DFE_TAP_CONFIG, | ||
353 | SPEED_2500_DFE_TAP_CONFIG, | ||
354 | SPEED_10000_DFE_TAP_CONFIG, | ||
355 | }; | ||
356 | |||
357 | static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = { | ||
358 | SPEED_1000_DFE_TAP_ENABLE, | ||
359 | SPEED_2500_DFE_TAP_ENABLE, | ||
360 | SPEED_10000_DFE_TAP_ENABLE, | ||
361 | }; | ||
362 | |||
336 | enum amd_xgbe_phy_an { | 363 | enum amd_xgbe_phy_an { |
337 | AMD_XGBE_AN_READY = 0, | 364 | AMD_XGBE_AN_READY = 0, |
338 | AMD_XGBE_AN_PAGE_RECEIVED, | 365 | AMD_XGBE_AN_PAGE_RECEIVED, |
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv { | |||
393 | u32 serdes_cdr_rate[XGBE_PHY_SPEEDS]; | 420 | u32 serdes_cdr_rate[XGBE_PHY_SPEEDS]; |
394 | u32 serdes_pq_skew[XGBE_PHY_SPEEDS]; | 421 | u32 serdes_pq_skew[XGBE_PHY_SPEEDS]; |
395 | u32 serdes_tx_amp[XGBE_PHY_SPEEDS]; | 422 | u32 serdes_tx_amp[XGBE_PHY_SPEEDS]; |
423 | u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS]; | ||
424 | u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS]; | ||
396 | 425 | ||
397 | /* Auto-negotiation state machine support */ | 426 | /* Auto-negotiation state machine support */ |
398 | struct mutex an_mutex; | 427 | struct mutex an_mutex; |
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev) | |||
481 | status = XSIR0_IOREAD(priv, SIR0_STATUS); | 510 | status = XSIR0_IOREAD(priv, SIR0_STATUS); |
482 | if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && | 511 | if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && |
483 | XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) | 512 | XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) |
484 | return; | 513 | goto rx_reset; |
485 | } | 514 | } |
486 | 515 | ||
487 | netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", | 516 | netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", |
488 | status); | 517 | status); |
518 | |||
519 | rx_reset: | ||
520 | /* Perform Rx reset for the DFE changes */ | ||
521 | XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0); | ||
522 | XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1); | ||
489 | } | 523 | } |
490 | 524 | ||
491 | static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) | 525 | static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) |
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) | |||
534 | priv->serdes_blwc[XGBE_PHY_SPEED_10000]); | 568 | priv->serdes_blwc[XGBE_PHY_SPEED_10000]); |
535 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, | 569 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, |
536 | priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]); | 570 | priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]); |
571 | XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, | ||
572 | priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]); | ||
573 | XRXTX_IOWRITE(priv, RXTX_REG22, | ||
574 | priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]); | ||
537 | 575 | ||
538 | amd_xgbe_phy_serdes_complete_ratechange(phydev); | 576 | amd_xgbe_phy_serdes_complete_ratechange(phydev); |
539 | 577 | ||
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev) | |||
586 | priv->serdes_blwc[XGBE_PHY_SPEED_2500]); | 624 | priv->serdes_blwc[XGBE_PHY_SPEED_2500]); |
587 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, | 625 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, |
588 | priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]); | 626 | priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]); |
627 | XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, | ||
628 | priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]); | ||
629 | XRXTX_IOWRITE(priv, RXTX_REG22, | ||
630 | priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]); | ||
589 | 631 | ||
590 | amd_xgbe_phy_serdes_complete_ratechange(phydev); | 632 | amd_xgbe_phy_serdes_complete_ratechange(phydev); |
591 | 633 | ||
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev) | |||
638 | priv->serdes_blwc[XGBE_PHY_SPEED_1000]); | 680 | priv->serdes_blwc[XGBE_PHY_SPEED_1000]); |
639 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, | 681 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, |
640 | priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]); | 682 | priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]); |
683 | XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, | ||
684 | priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]); | ||
685 | XRXTX_IOWRITE(priv, RXTX_REG22, | ||
686 | priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]); | ||
641 | 687 | ||
642 | amd_xgbe_phy_serdes_complete_ratechange(phydev); | 688 | amd_xgbe_phy_serdes_complete_ratechange(phydev); |
643 | 689 | ||
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev) | |||
1668 | sizeof(priv->serdes_tx_amp)); | 1714 | sizeof(priv->serdes_tx_amp)); |
1669 | } | 1715 | } |
1670 | 1716 | ||
1717 | if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) { | ||
1718 | ret = device_property_read_u32_array(phy_dev, | ||
1719 | XGBE_PHY_DFE_CFG_PROPERTY, | ||
1720 | priv->serdes_dfe_tap_cfg, | ||
1721 | XGBE_PHY_SPEEDS); | ||
1722 | if (ret) { | ||
1723 | dev_err(dev, "invalid %s property\n", | ||
1724 | XGBE_PHY_DFE_CFG_PROPERTY); | ||
1725 | goto err_sir1; | ||
1726 | } | ||
1727 | } else { | ||
1728 | memcpy(priv->serdes_dfe_tap_cfg, | ||
1729 | amd_xgbe_phy_serdes_dfe_tap_cfg, | ||
1730 | sizeof(priv->serdes_dfe_tap_cfg)); | ||
1731 | } | ||
1732 | |||
1733 | if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) { | ||
1734 | ret = device_property_read_u32_array(phy_dev, | ||
1735 | XGBE_PHY_DFE_ENA_PROPERTY, | ||
1736 | priv->serdes_dfe_tap_ena, | ||
1737 | XGBE_PHY_SPEEDS); | ||
1738 | if (ret) { | ||
1739 | dev_err(dev, "invalid %s property\n", | ||
1740 | XGBE_PHY_DFE_ENA_PROPERTY); | ||
1741 | goto err_sir1; | ||
1742 | } | ||
1743 | } else { | ||
1744 | memcpy(priv->serdes_dfe_tap_ena, | ||
1745 | amd_xgbe_phy_serdes_dfe_tap_ena, | ||
1746 | sizeof(priv->serdes_dfe_tap_ena)); | ||
1747 | } | ||
1748 | |||
1671 | phydev->priv = priv; | 1749 | phydev->priv = priv; |
1672 | 1750 | ||
1673 | if (!priv->adev || acpi_disabled) | 1751 | if (!priv->adev || acpi_disabled) |
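The amd-xgbe-phy probe reads the new per-speed DFE tap arrays from device properties when present and otherwise falls back to compiled-in defaults. A condensed sketch of that read-or-default pattern; the property string, array values and my_ names are hypothetical, not the driver's actual bindings:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/property.h>

#define MY_SPEEDS	3
#define MY_DFE_PROP	"vendor,serdes-dfe-tap-config"	/* hypothetical */

static const u32 my_dfe_defaults[MY_SPEEDS] = { 0x3, 0x3, 0x1 };

static int my_read_dfe_cfg(struct device *dev, u32 *cfg)
{
	int ret;

	if (!device_property_present(dev, MY_DFE_PROP)) {
		/* No property supplied: keep the driver defaults */
		memcpy(cfg, my_dfe_defaults, sizeof(my_dfe_defaults));
		return 0;
	}

	ret = device_property_read_u32_array(dev, MY_DFE_PROP, cfg, MY_SPEEDS);
	if (ret)
		dev_err(dev, "invalid %s property\n", MY_DFE_PROP);
	return ret;
}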
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index cdcac6aa4260..52cd8db2c57d 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features) | |||
236 | } | 236 | } |
237 | 237 | ||
238 | /** | 238 | /** |
239 | * phy_check_valid - check if there is a valid PHY setting which matches | ||
240 | * speed, duplex, and feature mask | ||
241 | * @speed: speed to match | ||
242 | * @duplex: duplex to match | ||
243 | * @features: A mask of the valid settings | ||
244 | * | ||
245 | * Description: Returns true if there is a valid setting, false otherwise. | ||
246 | */ | ||
247 | static inline bool phy_check_valid(int speed, int duplex, u32 features) | ||
248 | { | ||
249 | unsigned int idx; | ||
250 | |||
251 | idx = phy_find_valid(phy_find_setting(speed, duplex), features); | ||
252 | |||
253 | return settings[idx].speed == speed && settings[idx].duplex == duplex && | ||
254 | (settings[idx].setting & features); | ||
255 | } | ||
256 | |||
257 | /** | ||
239 | * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex | 258 | * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex |
240 | * @phydev: the target phy_device struct | 259 | * @phydev: the target phy_device struct |
241 | * | 260 | * |
@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
1045 | int eee_lp, eee_cap, eee_adv; | 1064 | int eee_lp, eee_cap, eee_adv; |
1046 | u32 lp, cap, adv; | 1065 | u32 lp, cap, adv; |
1047 | int status; | 1066 | int status; |
1048 | unsigned int idx; | ||
1049 | 1067 | ||
1050 | /* Read phy status to properly get the right settings */ | 1068 | /* Read phy status to properly get the right settings */ |
1051 | status = phy_read_status(phydev); | 1069 | status = phy_read_status(phydev); |
@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
1077 | 1095 | ||
1078 | adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); | 1096 | adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); |
1079 | lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); | 1097 | lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); |
1080 | idx = phy_find_setting(phydev->speed, phydev->duplex); | 1098 | if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv)) |
1081 | if (!(lp & adv & settings[idx].setting)) | ||
1082 | goto eee_exit_err; | 1099 | goto eee_exit_err; |
1083 | 1100 | ||
1084 | if (clk_stop_enable) { | 1101 | if (clk_stop_enable) { |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 0e62274e884a..7d394846afc2 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -43,9 +43,7 @@ | |||
43 | 43 | ||
44 | static struct team_port *team_port_get_rcu(const struct net_device *dev) | 44 | static struct team_port *team_port_get_rcu(const struct net_device *dev) |
45 | { | 45 | { |
46 | struct team_port *port = rcu_dereference(dev->rx_handler_data); | 46 | return rcu_dereference(dev->rx_handler_data); |
47 | |||
48 | return team_port_exists(dev) ? port : NULL; | ||
49 | } | 47 | } |
50 | 48 | ||
51 | static struct team_port *team_port_get_rtnl(const struct net_device *dev) | 49 | static struct team_port *team_port_get_rtnl(const struct net_device *dev) |
@@ -1732,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p) | |||
1732 | if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) | 1730 | if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) |
1733 | return -EADDRNOTAVAIL; | 1731 | return -EADDRNOTAVAIL; |
1734 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 1732 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
1735 | rcu_read_lock(); | 1733 | mutex_lock(&team->lock); |
1736 | list_for_each_entry_rcu(port, &team->port_list, list) | 1734 | list_for_each_entry(port, &team->port_list, list) |
1737 | if (team->ops.port_change_dev_addr) | 1735 | if (team->ops.port_change_dev_addr) |
1738 | team->ops.port_change_dev_addr(team, port); | 1736 | team->ops.port_change_dev_addr(team, port); |
1739 | rcu_read_unlock(); | 1737 | mutex_unlock(&team->lock); |
1740 | return 0; | 1738 | return 0; |
1741 | } | 1739 | } |
1742 | 1740 | ||
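The team change swaps rcu_read_lock() for the team mutex around the port walk because port_change_dev_addr() may sleep; sleeping is not allowed inside an RCU read-side critical section, while the mutex both permits it and excludes concurrent list modification. A minimal sketch of the locking choice (my_ names hypothetical):

#include <linux/mutex.h>
#include <linux/list.h>

struct my_port {
	struct list_head list;
};

struct my_team {
	struct mutex lock;		/* protects port_list */
	struct list_head port_list;
};

static void my_port_update(struct my_port *port)
{
	/* may sleep, e.g. takes other mutexes or talks to firmware */
}

static void my_update_all_ports(struct my_team *team)
{
	struct my_port *port;

	/* A mutex, not rcu_read_lock(): the per-port callback may sleep. */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		my_port_update(port);
	mutex_unlock(&team->lock);
}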
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 3bd9678315ad..7ba8d0885f12 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -161,6 +161,7 @@ config USB_NET_AX8817X | |||
161 | * Linksys USB200M | 161 | * Linksys USB200M |
162 | * Netgear FA120 | 162 | * Netgear FA120 |
163 | * Sitecom LN-029 | 163 | * Sitecom LN-029 |
164 | * Sitecom LN-028 | ||
164 | * Intellinet USB 2.0 Ethernet | 165 | * Intellinet USB 2.0 Ethernet |
165 | * ST Lab USB 2.0 Ethernet | 166 | * ST Lab USB 2.0 Ethernet |
166 | * TrendNet TU2-ET100 | 167 | * TrendNet TU2-ET100 |
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 5c55f11572ba..75d6f26729a3 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c | |||
@@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
188 | memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); | 188 | memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); |
189 | skb_put(skb, sizeof(padbytes)); | 189 | skb_put(skb, sizeof(padbytes)); |
190 | } | 190 | } |
191 | |||
192 | usbnet_set_skb_tx_stats(skb, 1, 0); | ||
191 | return skb; | 193 | return skb; |
192 | } | 194 | } |
193 | 195 | ||
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index bf49792062a2..1173a24feda3 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c | |||
@@ -979,6 +979,10 @@ static const struct usb_device_id products [] = { | |||
979 | USB_DEVICE (0x0df6, 0x0056), | 979 | USB_DEVICE (0x0df6, 0x0056), |
980 | .driver_info = (unsigned long) &ax88178_info, | 980 | .driver_info = (unsigned long) &ax88178_info, |
981 | }, { | 981 | }, { |
982 | // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter" | ||
983 | USB_DEVICE (0x0df6, 0x061c), | ||
984 | .driver_info = (unsigned long) &ax88178_info, | ||
985 | }, { | ||
982 | // corega FEther USB2-TX | 986 | // corega FEther USB2-TX |
983 | USB_DEVICE (0x07aa, 0x0017), | 987 | USB_DEVICE (0x07aa, 0x0017), |
984 | .driver_info = (unsigned long) &ax8817x_info, | 988 | .driver_info = (unsigned long) &ax8817x_info, |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 9311a08565be..4545e78840b0 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -522,6 +522,7 @@ static const struct driver_info wwan_info = { | |||
522 | #define DELL_VENDOR_ID 0x413C | 522 | #define DELL_VENDOR_ID 0x413C |
523 | #define REALTEK_VENDOR_ID 0x0bda | 523 | #define REALTEK_VENDOR_ID 0x0bda |
524 | #define SAMSUNG_VENDOR_ID 0x04e8 | 524 | #define SAMSUNG_VENDOR_ID 0x04e8 |
525 | #define LENOVO_VENDOR_ID 0x17ef | ||
525 | 526 | ||
526 | static const struct usb_device_id products[] = { | 527 | static const struct usb_device_id products[] = { |
527 | /* BLACKLIST !! | 528 | /* BLACKLIST !! |
@@ -702,6 +703,13 @@ static const struct usb_device_id products[] = { | |||
702 | .driver_info = 0, | 703 | .driver_info = 0, |
703 | }, | 704 | }, |
704 | 705 | ||
706 | /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ | ||
707 | { | ||
708 | USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM, | ||
709 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
710 | .driver_info = 0, | ||
711 | }, | ||
712 | |||
705 | /* WHITELIST!!! | 713 | /* WHITELIST!!! |
706 | * | 714 | * |
707 | * CDC Ether uses two interfaces, not necessarily consecutive. | 715 | * CDC Ether uses two interfaces, not necessarily consecutive. |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 80a844e0ae03..c3e4da9e79ca 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) | |||
1172 | 1172 | ||
1173 | /* return skb */ | 1173 | /* return skb */ |
1174 | ctx->tx_curr_skb = NULL; | 1174 | ctx->tx_curr_skb = NULL; |
1175 | dev->net->stats.tx_packets += ctx->tx_curr_frame_num; | ||
1176 | 1175 | ||
1177 | /* keep private stats: framing overhead and number of NTBs */ | 1176 | /* keep private stats: framing overhead and number of NTBs */ |
1178 | ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; | 1177 | ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; |
1179 | ctx->tx_ntbs++; | 1178 | ctx->tx_ntbs++; |
1180 | 1179 | ||
1181 | /* usbnet has already counted all the framing overhead. | 1180 | /* usbnet will count all the framing overhead by default. |
1182 | * Adjust the stats so that the tx_bytes counter show real | 1181 | * Adjust the stats so that the tx_bytes counter show real |
1183 | * payload data instead. | 1182 | * payload data instead. |
1184 | */ | 1183 | */ |
1185 | dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload; | 1184 | usbnet_set_skb_tx_stats(skb_out, n, |
1185 | ctx->tx_curr_frame_payload - skb_out->len); | ||
1186 | 1186 | ||
1187 | return skb_out; | 1187 | return skb_out; |
1188 | 1188 | ||
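With the cdc_ncm change above, the driver stops bumping tx_packets itself and instead hands usbnet the datagram count (n in the hunk) plus a negative byte delta, so the core credits payload bytes rather than the padded NTB length when the URB completes. A standalone C toy with invented numbers shows the bookkeeping; only the arithmetic (payload = URB length + delta) mirrors the patch:

#include <stdio.h>

int main(void)
{
	unsigned int payload = 4200;  /* ctx->tx_curr_frame_payload (toy value) */
	unsigned int ntb_len = 4608;  /* skb_out->len: payload + NTH/NDP headers + padding (toy) */
	unsigned int packets = 3;     /* datagrams aggregated into this NTB (toy) */
	int delta = (int)payload - (int)ntb_len;  /* negative framing overhead */

	/* On completion usbnet does, in effect:
	 *   tx_packets += packets;
	 *   tx_bytes   += ntb_len + delta;   which equals the payload
	 */
	printf("tx_packets += %u, tx_bytes += %d\n", packets, (int)ntb_len + delta);
	return 0;
}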
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 3eed708a6182..1762ad3910b2 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c | |||
@@ -46,8 +46,7 @@ enum cx82310_status { | |||
46 | }; | 46 | }; |
47 | 47 | ||
48 | #define CMD_PACKET_SIZE 64 | 48 | #define CMD_PACKET_SIZE 64 |
49 | /* first command after power on can take around 8 seconds */ | 49 | #define CMD_TIMEOUT 100 |
50 | #define CMD_TIMEOUT 15000 | ||
51 | #define CMD_REPLY_RETRY 5 | 50 | #define CMD_REPLY_RETRY 5 |
52 | 51 | ||
53 | #define CX82310_MTU 1514 | 52 | #define CX82310_MTU 1514 |
@@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, | |||
78 | ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, | 77 | ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, |
79 | CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); | 78 | CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); |
80 | if (ret < 0) { | 79 | if (ret < 0) { |
81 | dev_err(&dev->udev->dev, "send command %#x: error %d\n", | 80 | if (cmd != CMD_GET_LINK_STATUS) |
82 | cmd, ret); | 81 | dev_err(&dev->udev->dev, "send command %#x: error %d\n", |
82 | cmd, ret); | ||
83 | goto end; | 83 | goto end; |
84 | } | 84 | } |
85 | 85 | ||
@@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, | |||
90 | buf, CMD_PACKET_SIZE, &actual_len, | 90 | buf, CMD_PACKET_SIZE, &actual_len, |
91 | CMD_TIMEOUT); | 91 | CMD_TIMEOUT); |
92 | if (ret < 0) { | 92 | if (ret < 0) { |
93 | dev_err(&dev->udev->dev, | 93 | if (cmd != CMD_GET_LINK_STATUS) |
94 | "reply receive error %d\n", ret); | 94 | dev_err(&dev->udev->dev, |
95 | "reply receive error %d\n", | ||
96 | ret); | ||
95 | goto end; | 97 | goto end; |
96 | } | 98 | } |
97 | if (actual_len > 0) | 99 | if (actual_len > 0) |
@@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) | |||
134 | int ret; | 136 | int ret; |
135 | char buf[15]; | 137 | char buf[15]; |
136 | struct usb_device *udev = dev->udev; | 138 | struct usb_device *udev = dev->udev; |
139 | u8 link[3]; | ||
140 | int timeout = 50; | ||
137 | 141 | ||
138 | /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ | 142 | /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ |
139 | if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 | 143 | if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 |
@@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) | |||
160 | if (!dev->partial_data) | 164 | if (!dev->partial_data) |
161 | return -ENOMEM; | 165 | return -ENOMEM; |
162 | 166 | ||
167 | /* wait for firmware to become ready (indicated by the link being up) */ | ||
168 | while (--timeout) { | ||
169 | ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0, | ||
170 | link, sizeof(link)); | ||
171 | /* the command can time out during boot - it's not an error */ | ||
172 | if (!ret && link[0] == 1 && link[2] == 1) | ||
173 | break; | ||
174 | msleep(500); | ||
175 | }; | ||
176 | if (!timeout) { | ||
177 | dev_err(&udev->dev, "firmware not ready in time\n"); | ||
178 | return -ETIMEDOUT; | ||
179 | } | ||
180 | |||
163 | /* enable ethernet mode (?) */ | 181 | /* enable ethernet mode (?) */ |
164 | ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); | 182 | ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); |
165 | if (ret) { | 183 | if (ret) { |
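The cx82310_bind() hunk replaces the old 15-second command timeout, which existed because the first command after power-on could take around 8 seconds, with a short 100 ms timeout plus an explicit poll of CMD_GET_LINK_STATUS every 500 ms for up to roughly 25 seconds; timeouts of that probe during boot are deliberately not logged as errors. The shape of the wait loop, as a self-contained userspace sketch with a stand-in readiness check:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for cx82310_cmd(CMD_GET_LINK_STATUS, ...): pretend the firmware
 * only answers with link[0] == 1 && link[2] == 1 after a few attempts. */
static bool link_status_ok(int attempt)
{
	return attempt >= 6;
}

int main(void)
{
	int timeout = 50;   /* ~50 polls x 500 ms worst case */
	int attempt = 0;

	while (--timeout) {
		if (link_status_ok(attempt++))
			break;
		usleep(500 * 1000);
	}
	if (!timeout) {
		fprintf(stderr, "firmware not ready in time\n");
		return 1;
	}
	printf("firmware ready after %d polls\n", attempt);
	return 0;
}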
@@ -300,9 +318,18 @@ static const struct driver_info cx82310_info = { | |||
300 | .tx_fixup = cx82310_tx_fixup, | 318 | .tx_fixup = cx82310_tx_fixup, |
301 | }; | 319 | }; |
302 | 320 | ||
321 | #define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ | ||
322 | .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ | ||
323 | USB_DEVICE_ID_MATCH_DEV_INFO, \ | ||
324 | .idVendor = (vend), \ | ||
325 | .idProduct = (prod), \ | ||
326 | .bDeviceClass = (cl), \ | ||
327 | .bDeviceSubClass = (sc), \ | ||
328 | .bDeviceProtocol = (pr) | ||
329 | |||
303 | static const struct usb_device_id products[] = { | 330 | static const struct usb_device_id products[] = { |
304 | { | 331 | { |
305 | USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), | 332 | USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), |
306 | .driver_info = (unsigned long) &cx82310_info | 333 | .driver_info = (unsigned long) &cx82310_info |
307 | }, | 334 | }, |
308 | { }, | 335 | { }, |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 9cdfb3fe9c15..778e91531fac 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg) | |||
1594 | } | 1594 | } |
1595 | cprev = cnow; | 1595 | cprev = cnow; |
1596 | } | 1596 | } |
1597 | current->state = TASK_RUNNING; | 1597 | __set_current_state(TASK_RUNNING); |
1598 | remove_wait_queue(&tiocmget->waitq, &wait); | 1598 | remove_wait_queue(&tiocmget->waitq, &wait); |
1599 | 1599 | ||
1600 | return ret; | 1600 | return ret; |
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c index 3d18bb0eee85..1bfe0fcaccf5 100644 --- a/drivers/net/usb/plusb.c +++ b/drivers/net/usb/plusb.c | |||
@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = { | |||
134 | }, { | 134 | }, { |
135 | USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ | 135 | USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ |
136 | .driver_info = (unsigned long) &prolific_info, | 136 | .driver_info = (unsigned long) &prolific_info, |
137 | }, { | ||
138 | USB_DEVICE(0x3923, 0x7825), /* National Instruments USB | ||
139 | * Host-to-Host Cable | ||
140 | */ | ||
141 | .driver_info = (unsigned long) &prolific_info, | ||
137 | }, | 142 | }, |
138 | 143 | ||
139 | { }, // END | 144 | { }, // END |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 438fc6bcaef1..9f7c0ab3b349 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -492,6 +492,7 @@ enum rtl8152_flags { | |||
492 | /* Define these values to match your device */ | 492 | /* Define these values to match your device */ |
493 | #define VENDOR_ID_REALTEK 0x0bda | 493 | #define VENDOR_ID_REALTEK 0x0bda |
494 | #define VENDOR_ID_SAMSUNG 0x04e8 | 494 | #define VENDOR_ID_SAMSUNG 0x04e8 |
495 | #define VENDOR_ID_LENOVO 0x17ef | ||
495 | 496 | ||
496 | #define MCU_TYPE_PLA 0x0100 | 497 | #define MCU_TYPE_PLA 0x0100 |
497 | #define MCU_TYPE_USB 0x0000 | 498 | #define MCU_TYPE_USB 0x0000 |
@@ -4037,6 +4038,7 @@ static struct usb_device_id rtl8152_table[] = { | |||
4037 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, | 4038 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, |
4038 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, | 4039 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, |
4039 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, | 4040 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, |
4041 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, | ||
4040 | {} | 4042 | {} |
4041 | }; | 4043 | }; |
4042 | 4044 | ||
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index b94a0fbb8b3b..953de13267df 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c | |||
@@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
144 | skb_put(skb, sizeof(padbytes)); | 144 | skb_put(skb, sizeof(padbytes)); |
145 | } | 145 | } |
146 | 146 | ||
147 | usbnet_set_skb_tx_stats(skb, 1, 0); | ||
147 | return skb; | 148 | return skb; |
148 | } | 149 | } |
149 | 150 | ||
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 449835f4331e..777757ae1973 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb) | |||
1188 | struct usbnet *dev = entry->dev; | 1188 | struct usbnet *dev = entry->dev; |
1189 | 1189 | ||
1190 | if (urb->status == 0) { | 1190 | if (urb->status == 0) { |
1191 | if (!(dev->driver_info->flags & FLAG_MULTI_PACKET)) | 1191 | dev->net->stats.tx_packets += entry->packets; |
1192 | dev->net->stats.tx_packets++; | ||
1193 | dev->net->stats.tx_bytes += entry->length; | 1192 | dev->net->stats.tx_bytes += entry->length; |
1194 | } else { | 1193 | } else { |
1195 | dev->net->stats.tx_errors++; | 1194 | dev->net->stats.tx_errors++; |
@@ -1347,7 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
1347 | } else | 1346 | } else |
1348 | urb->transfer_flags |= URB_ZERO_PACKET; | 1347 | urb->transfer_flags |= URB_ZERO_PACKET; |
1349 | } | 1348 | } |
1350 | entry->length = urb->transfer_buffer_length = length; | 1349 | urb->transfer_buffer_length = length; |
1350 | |||
1351 | if (info->flags & FLAG_MULTI_PACKET) { | ||
1352 | /* Driver has set number of packets and a length delta. | ||
1353 | * Calculate the complete length and ensure that it's | ||
1354 | * positive. | ||
1355 | */ | ||
1356 | entry->length += length; | ||
1357 | if (WARN_ON_ONCE(entry->length <= 0)) | ||
1358 | entry->length = length; | ||
1359 | } else { | ||
1360 | usbnet_set_skb_tx_stats(skb, 1, length); | ||
1361 | } | ||
1351 | 1362 | ||
1352 | spin_lock_irqsave(&dev->txq.lock, flags); | 1363 | spin_lock_irqsave(&dev->txq.lock, flags); |
1353 | retval = usb_autopm_get_interface_async(dev->intf); | 1364 | retval = usb_autopm_get_interface_async(dev->intf); |
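The usbnet core change is the centerpiece of the tx-stats series touched in asix_common.c, sr9800.c and cdc_ncm.c above: tx_complete() now adds entry->packets instead of counting one packet per URB for non-multi-packet drivers and nothing for the rest, and usbnet_start_xmit() either records one packet of `length` bytes itself or, for FLAG_MULTI_PACKET drivers, adds the final URB length to the delta the driver pre-loaded in its tx_fixup, warning if the sum goes non-positive. The helper those drivers call is assumed to look roughly like this (a sketch of the presumed inline added to usbnet.h in this series, not verified code):

	static inline void usbnet_set_skb_tx_stats(struct sk_buff *skb,
						   unsigned long packets,
						   long bytes_delta)
	{
		struct skb_data *entry = (struct skb_data *)skb->cb;

		/* Stash per-URB accounting in the skb control block so
		 * tx_complete() can later do:
		 *   dev->net->stats.tx_packets += entry->packets;
		 *   dev->net->stats.tx_bytes   += entry->length;
		 */
		entry->packets = packets;
		entry->length = bytes_delta;
	}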
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f1ff3666f090..59b0e9754ae3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi) | |||
1448 | { | 1448 | { |
1449 | int i; | 1449 | int i; |
1450 | 1450 | ||
1451 | for (i = 0; i < vi->max_queue_pairs; i++) | 1451 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1452 | napi_hash_del(&vi->rq[i].napi); | ||
1452 | netif_napi_del(&vi->rq[i].napi); | 1453 | netif_napi_del(&vi->rq[i].napi); |
1454 | } | ||
1453 | 1455 | ||
1454 | kfree(vi->rq); | 1456 | kfree(vi->rq); |
1455 | kfree(vi->sq); | 1457 | kfree(vi->sq); |
@@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev) | |||
1948 | cancel_delayed_work_sync(&vi->refill); | 1950 | cancel_delayed_work_sync(&vi->refill); |
1949 | 1951 | ||
1950 | if (netif_running(vi->dev)) { | 1952 | if (netif_running(vi->dev)) { |
1951 | for (i = 0; i < vi->max_queue_pairs; i++) { | 1953 | for (i = 0; i < vi->max_queue_pairs; i++) |
1952 | napi_disable(&vi->rq[i].napi); | 1954 | napi_disable(&vi->rq[i].napi); |
1953 | napi_hash_del(&vi->rq[i].napi); | ||
1954 | netif_napi_del(&vi->rq[i].napi); | ||
1955 | } | ||
1956 | } | 1955 | } |
1957 | 1956 | ||
1958 | remove_vq_common(vi); | 1957 | remove_vq_common(vi); |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1e0a775ea882..f8528a4cf54f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
1218 | goto drop; | 1218 | goto drop; |
1219 | 1219 | ||
1220 | flags &= ~VXLAN_HF_RCO; | 1220 | flags &= ~VXLAN_HF_RCO; |
1221 | vni &= VXLAN_VID_MASK; | 1221 | vni &= VXLAN_VNI_MASK; |
1222 | } | 1222 | } |
1223 | 1223 | ||
1224 | /* For backwards compatibility, only allow reserved fields to be | 1224 | /* For backwards compatibility, only allow reserved fields to be |
@@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
1239 | flags &= ~VXLAN_GBP_USED_BITS; | 1239 | flags &= ~VXLAN_GBP_USED_BITS; |
1240 | } | 1240 | } |
1241 | 1241 | ||
1242 | if (flags || (vni & ~VXLAN_VID_MASK)) { | 1242 | if (flags || vni & ~VXLAN_VNI_MASK) { |
1243 | /* If there are any unprocessed flags remaining treat | 1243 | /* If there are any unprocessed flags remaining treat |
1244 | * this as a malformed packet. This behavior diverges from | 1244 | * this as a malformed packet. This behavior diverges from |
1245 | * VXLAN RFC (RFC7348) which stipulates that bits in reserved | 1245 | * VXLAN RFC (RFC7348) which stipulates that bits in reserved |
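The vxlan fix swaps VXLAN_VID_MASK for a new VXLAN_VNI_MASK when sanity-checking the received header: the 24-bit VNI occupies the upper three bytes of the 32-bit field as it is handled here, so masking with the low-24-bit VID mask keeps the reserved byte and drops part of the VNI. A toy host-order illustration (mask values and layout assumed for the sketch):

#include <stdint.h>
#include <stdio.h>

#define TOY_VID_MASK  0x00ffffffu   /* meant for plain 24-bit VNI values */
#define TOY_VNI_MASK  0xffffff00u   /* VNI in its in-header position */

int main(void)
{
	/* VNI 42 with junk in the reserved low byte, as extensions such as
	 * remote checksum offload and GBP reuse those reserved bits. */
	uint32_t field = (42u << 8) | 0x5a;

	printf("VID mask keeps 0x%06x (reserved byte leaks in)\n",
	       field & TOY_VID_MASK);
	printf("VNI mask keeps VNI %u\n", (field & TOY_VNI_MASK) >> 8);
	return 0;
}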
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 83c39e2858bf..88d121d43c08 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c | |||
@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file, | |||
806 | spin_lock_irqsave(&cosa->lock, flags); | 806 | spin_lock_irqsave(&cosa->lock, flags); |
807 | add_wait_queue(&chan->rxwaitq, &wait); | 807 | add_wait_queue(&chan->rxwaitq, &wait); |
808 | while (!chan->rx_status) { | 808 | while (!chan->rx_status) { |
809 | current->state = TASK_INTERRUPTIBLE; | 809 | set_current_state(TASK_INTERRUPTIBLE); |
810 | spin_unlock_irqrestore(&cosa->lock, flags); | 810 | spin_unlock_irqrestore(&cosa->lock, flags); |
811 | schedule(); | 811 | schedule(); |
812 | spin_lock_irqsave(&cosa->lock, flags); | 812 | spin_lock_irqsave(&cosa->lock, flags); |
813 | if (signal_pending(current) && chan->rx_status == 0) { | 813 | if (signal_pending(current) && chan->rx_status == 0) { |
814 | chan->rx_status = 1; | 814 | chan->rx_status = 1; |
815 | remove_wait_queue(&chan->rxwaitq, &wait); | 815 | remove_wait_queue(&chan->rxwaitq, &wait); |
816 | current->state = TASK_RUNNING; | 816 | __set_current_state(TASK_RUNNING); |
817 | spin_unlock_irqrestore(&cosa->lock, flags); | 817 | spin_unlock_irqrestore(&cosa->lock, flags); |
818 | mutex_unlock(&chan->rlock); | 818 | mutex_unlock(&chan->rlock); |
819 | return -ERESTARTSYS; | 819 | return -ERESTARTSYS; |
820 | } | 820 | } |
821 | } | 821 | } |
822 | remove_wait_queue(&chan->rxwaitq, &wait); | 822 | remove_wait_queue(&chan->rxwaitq, &wait); |
823 | current->state = TASK_RUNNING; | 823 | __set_current_state(TASK_RUNNING); |
824 | kbuf = chan->rxdata; | 824 | kbuf = chan->rxdata; |
825 | count = chan->rxsize; | 825 | count = chan->rxsize; |
826 | spin_unlock_irqrestore(&cosa->lock, flags); | 826 | spin_unlock_irqrestore(&cosa->lock, flags); |
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file, | |||
890 | spin_lock_irqsave(&cosa->lock, flags); | 890 | spin_lock_irqsave(&cosa->lock, flags); |
891 | add_wait_queue(&chan->txwaitq, &wait); | 891 | add_wait_queue(&chan->txwaitq, &wait); |
892 | while (!chan->tx_status) { | 892 | while (!chan->tx_status) { |
893 | current->state = TASK_INTERRUPTIBLE; | 893 | set_current_state(TASK_INTERRUPTIBLE); |
894 | spin_unlock_irqrestore(&cosa->lock, flags); | 894 | spin_unlock_irqrestore(&cosa->lock, flags); |
895 | schedule(); | 895 | schedule(); |
896 | spin_lock_irqsave(&cosa->lock, flags); | 896 | spin_lock_irqsave(&cosa->lock, flags); |
897 | if (signal_pending(current) && chan->tx_status == 0) { | 897 | if (signal_pending(current) && chan->tx_status == 0) { |
898 | chan->tx_status = 1; | 898 | chan->tx_status = 1; |
899 | remove_wait_queue(&chan->txwaitq, &wait); | 899 | remove_wait_queue(&chan->txwaitq, &wait); |
900 | current->state = TASK_RUNNING; | 900 | __set_current_state(TASK_RUNNING); |
901 | chan->tx_status = 1; | 901 | chan->tx_status = 1; |
902 | spin_unlock_irqrestore(&cosa->lock, flags); | 902 | spin_unlock_irqrestore(&cosa->lock, flags); |
903 | up(&chan->wsem); | 903 | up(&chan->wsem); |
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file, | |||
905 | } | 905 | } |
906 | } | 906 | } |
907 | remove_wait_queue(&chan->txwaitq, &wait); | 907 | remove_wait_queue(&chan->txwaitq, &wait); |
908 | current->state = TASK_RUNNING; | 908 | __set_current_state(TASK_RUNNING); |
909 | up(&chan->wsem); | 909 | up(&chan->wsem); |
910 | spin_unlock_irqrestore(&cosa->lock, flags); | 910 | spin_unlock_irqrestore(&cosa->lock, flags); |
911 | kfree(kbuf); | 911 | kfree(kbuf); |
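The cosa.c and hso.c hunks are part of a wider cleanup replacing open-coded current->state assignments with the helpers: set_current_state() pairs the store with a memory barrier so it is ordered against the following condition check and a concurrent wake_up() cannot be missed, while __set_current_state() is the cheap unordered form for places where that ordering is irrelevant, such as restoring TASK_RUNNING. The canonical pattern, sketched as a kernel-style fragment with an assumed wait queue and condition:

	add_wait_queue(&wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);   /* store + barrier */
		if (condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);               /* plain store is enough */
	remove_wait_queue(&wq, &wait);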
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index cb366adc820b..f50a6bc5d06e 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c | |||
@@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif) | |||
219 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 219 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
220 | struct ath_vif *avp = (void *)vif->drv_priv; | 220 | struct ath_vif *avp = (void *)vif->drv_priv; |
221 | struct ath_buf *bf = avp->av_bcbuf; | 221 | struct ath_buf *bf = avp->av_bcbuf; |
222 | struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon; | ||
222 | 223 | ||
223 | ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n", | 224 | ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n", |
224 | avp->av_bslot); | 225 | avp->av_bslot); |
225 | 226 | ||
226 | tasklet_disable(&sc->bcon_tasklet); | 227 | tasklet_disable(&sc->bcon_tasklet); |
227 | 228 | ||
229 | cur_conf->enable_beacon &= ~BIT(avp->av_bslot); | ||
230 | |||
228 | if (bf && bf->bf_mpdu) { | 231 | if (bf && bf->bf_mpdu) { |
229 | struct sk_buff *skb = bf->bf_mpdu; | 232 | struct sk_buff *skb = bf->bf_mpdu; |
230 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | 233 | dma_unmap_single(sc->dev, bf->bf_buf_addr, |
@@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, | |||
521 | } | 524 | } |
522 | 525 | ||
523 | if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { | 526 | if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { |
524 | if ((vif->type != NL80211_IFTYPE_AP) || | 527 | if (vif->type != NL80211_IFTYPE_AP) { |
525 | (sc->nbcnvifs > 1)) { | ||
526 | ath_dbg(common, CONFIG, | 528 | ath_dbg(common, CONFIG, |
527 | "An AP interface is already present !\n"); | 529 | "An AP interface is already present !\n"); |
528 | return false; | 530 | return false; |
@@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif, | |||
616 | * enabling/disabling SWBA. | 618 | * enabling/disabling SWBA. |
617 | */ | 619 | */ |
618 | if (changed & BSS_CHANGED_BEACON_ENABLED) { | 620 | if (changed & BSS_CHANGED_BEACON_ENABLED) { |
619 | if (!bss_conf->enable_beacon && | 621 | bool enabled = cur_conf->enable_beacon; |
620 | (sc->nbcnvifs <= 1)) { | 622 | |
621 | cur_conf->enable_beacon = false; | 623 | if (!bss_conf->enable_beacon) { |
622 | } else if (bss_conf->enable_beacon) { | 624 | cur_conf->enable_beacon &= ~BIT(avp->av_bslot); |
623 | cur_conf->enable_beacon = true; | 625 | } else { |
624 | ath9k_cache_beacon_config(sc, ctx, bss_conf); | 626 | cur_conf->enable_beacon |= BIT(avp->av_bslot); |
627 | if (!enabled) | ||
628 | ath9k_cache_beacon_config(sc, ctx, bss_conf); | ||
625 | } | 629 | } |
626 | } | 630 | } |
627 | 631 | ||
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index 2b79a568e803..d23737342f4f 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h | |||
@@ -54,7 +54,7 @@ struct ath_beacon_config { | |||
54 | u16 dtim_period; | 54 | u16 dtim_period; |
55 | u16 bmiss_timeout; | 55 | u16 bmiss_timeout; |
56 | u8 dtim_count; | 56 | u8 dtim_count; |
57 | bool enable_beacon; | 57 | u8 enable_beacon; |
58 | bool ibss_creator; | 58 | bool ibss_creator; |
59 | u32 nexttbtt; | 59 | u32 nexttbtt; |
60 | u32 intval; | 60 | u32 intval; |
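In ath9k, enable_beacon changes from a bool to a u8 bitmask with one bit per beacon slot: removing an interface clears only its own bit, the beacon configuration is re-cached only when the mask goes from empty to non-empty, and beaconing is considered active while any bit remains set. A self-contained toy of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t enable_beacon = 0;        /* one bit per beacon slot */

	enable_beacon |= 1u << 0;         /* AP vif in slot 0 starts beaconing */
	enable_beacon |= 1u << 2;         /* second vif in slot 2 */
	enable_beacon &= ~(1u << 0);      /* slot 0 removed: only its bit drops */

	printf("still beaconing: %s (mask 0x%02x)\n",
	       enable_beacon ? "yes" : "no", enable_beacon);
	return 0;
}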
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 60aa8d71e753..8529014e1a5e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -424,7 +424,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah) | |||
424 | ah->power_mode = ATH9K_PM_UNDEFINED; | 424 | ah->power_mode = ATH9K_PM_UNDEFINED; |
425 | ah->htc_reset_init = true; | 425 | ah->htc_reset_init = true; |
426 | 426 | ||
427 | ah->tpc_enabled = true; | 427 | ah->tpc_enabled = false; |
428 | 428 | ||
429 | ah->ani_function = ATH9K_ANI_ALL; | 429 | ah->ani_function = ATH9K_ANI_ALL; |
430 | if (!AR_SREV_9300_20_OR_LATER(ah)) | 430 | if (!AR_SREV_9300_20_OR_LATER(ah)) |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index ccbdb05b28cd..75345c1e8c34 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy, | |||
5370 | case 0x432a: /* BCM4321 */ | 5370 | case 0x432a: /* BCM4321 */ |
5371 | case 0x432d: /* BCM4322 */ | 5371 | case 0x432d: /* BCM4322 */ |
5372 | case 0x4352: /* BCM43222 */ | 5372 | case 0x4352: /* BCM43222 */ |
5373 | case 0x435a: /* BCM43228 */ | ||
5373 | case 0x4333: /* BCM4331 */ | 5374 | case 0x4333: /* BCM4331 */ |
5374 | case 0x43a2: /* BCM4360 */ | 5375 | case 0x43a2: /* BCM4360 */ |
5375 | case 0x43b3: /* BCM4352 */ | 5376 | case 0x43b3: /* BCM4352 */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c index defb7a44e0bc..7748a1ccf14f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c | |||
@@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) | |||
126 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); | 126 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); |
127 | if (drvr->bus_if->wowl_supported) | 127 | if (drvr->bus_if->wowl_supported) |
128 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); | 128 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); |
129 | brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); | 129 | if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID) |
130 | brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); | ||
130 | 131 | ||
131 | /* set chip related quirks */ | 132 | /* set chip related quirks */ |
132 | switch (drvr->bus_if->chip) { | 133 | switch (drvr->bus_if->chip) { |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c index 50cdf7090198..8eff2753abad 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c | |||
@@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, | |||
39 | void *dcmd_buf = NULL, *wr_pointer; | 39 | void *dcmd_buf = NULL, *wr_pointer; |
40 | u16 msglen, maxmsglen = PAGE_SIZE - 0x100; | 40 | u16 msglen, maxmsglen = PAGE_SIZE - 0x100; |
41 | 41 | ||
42 | brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set, | 42 | if (len < sizeof(*cmdhdr)) { |
43 | cmdhdr->len); | 43 | brcmf_err("vendor command too short: %d\n", len); |
44 | return -EINVAL; | ||
45 | } | ||
44 | 46 | ||
45 | vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); | 47 | vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); |
46 | ifp = vif->ifp; | 48 | ifp = vif->ifp; |
47 | 49 | ||
48 | len -= sizeof(struct brcmf_vndr_dcmd_hdr); | 50 | brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd); |
51 | |||
52 | if (cmdhdr->offset > len) { | ||
53 | brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len); | ||
54 | return -EINVAL; | ||
55 | } | ||
56 | |||
57 | len -= cmdhdr->offset; | ||
49 | ret_len = cmdhdr->len; | 58 | ret_len = cmdhdr->len; |
50 | if (ret_len > 0 || len > 0) { | 59 | if (ret_len > 0 || len > 0) { |
51 | if (len > BRCMF_DCMD_MAXLEN) { | 60 | if (len > BRCMF_DCMD_MAXLEN) { |
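The brcmfmac vendor-command hunk adds input validation before the header fields are trusted: the buffer must be at least one header long, and the userspace-supplied payload offset must not point past the end of the buffer, otherwise the handler returns -EINVAL. A compilable sketch of the same checks with a stand-in header struct (field names here are invented; only the bounds logic mirrors the patch):

#include <stdio.h>
#include <string.h>

struct vndr_hdr { unsigned int cmd, len, offset; };

static int handle(const void *buf, int len)
{
	struct vndr_hdr hdr;

	if (len < (int)sizeof(hdr))
		return -22;                 /* -EINVAL: too short for a header */
	memcpy(&hdr, buf, sizeof(hdr));
	if ((int)hdr.offset > len)
		return -22;                 /* payload offset points past the buffer */
	return len - (int)hdr.offset;       /* remaining payload bytes */
}

int main(void)
{
	struct vndr_hdr h = { .cmd = 1, .len = 8, .offset = 64 };

	printf("bogus offset -> %d\n", handle(&h, (int)sizeof(h)));
	return 0;
}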
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index a6f22c32a279..3811878ab9cd 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h | |||
@@ -708,7 +708,6 @@ struct iwl_priv { | |||
708 | unsigned long reload_jiffies; | 708 | unsigned long reload_jiffies; |
709 | int reload_count; | 709 | int reload_count; |
710 | bool ucode_loaded; | 710 | bool ucode_loaded; |
711 | bool init_ucode_run; /* Don't run init uCode again */ | ||
712 | 711 | ||
713 | u8 plcp_delta_threshold; | 712 | u8 plcp_delta_threshold; |
714 | 713 | ||
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 47e64e8b9517..cceb026e0793 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c | |||
@@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
1114 | scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | | 1114 | scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | |
1115 | BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); | 1115 | BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); |
1116 | 1116 | ||
1117 | if (vif) | 1117 | if (drop) { |
1118 | scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); | 1118 | IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", |
1119 | 1119 | scd_queues); | |
1120 | IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues); | 1120 | if (iwlagn_txfifo_flush(priv, scd_queues)) { |
1121 | if (iwlagn_txfifo_flush(priv, scd_queues)) { | 1121 | IWL_ERR(priv, "flush request fail\n"); |
1122 | IWL_ERR(priv, "flush request fail\n"); | 1122 | goto done; |
1123 | goto done; | 1123 | } |
1124 | } | 1124 | } |
1125 | |||
1125 | IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); | 1126 | IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); |
1126 | iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); | 1127 | iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues); |
1127 | done: | 1128 | done: |
1128 | mutex_unlock(&priv->mutex); | 1129 | mutex_unlock(&priv->mutex); |
1129 | IWL_DEBUG_MAC80211(priv, "leave\n"); | 1130 | IWL_DEBUG_MAC80211(priv, "leave\n"); |
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 4dbef7e58c2e..5244e43bfafb 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c | |||
@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv) | |||
418 | if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) | 418 | if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) |
419 | return 0; | 419 | return 0; |
420 | 420 | ||
421 | if (priv->init_ucode_run) | ||
422 | return 0; | ||
423 | |||
424 | iwl_init_notification_wait(&priv->notif_wait, &calib_wait, | 421 | iwl_init_notification_wait(&priv->notif_wait, &calib_wait, |
425 | calib_complete, ARRAY_SIZE(calib_complete), | 422 | calib_complete, ARRAY_SIZE(calib_complete), |
426 | iwlagn_wait_calib, priv); | 423 | iwlagn_wait_calib, priv); |
@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv) | |||
440 | */ | 437 | */ |
441 | ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, | 438 | ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, |
442 | UCODE_CALIB_TIMEOUT); | 439 | UCODE_CALIB_TIMEOUT); |
443 | if (!ret) | ||
444 | priv->init_ucode_run = true; | ||
445 | 440 | ||
446 | goto out; | 441 | goto out; |
447 | 442 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index c3817fae16c0..06f6cc08f451 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c | |||
@@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { | |||
95 | .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ | 95 | .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ |
96 | .base_params = &iwl1000_base_params, \ | 96 | .base_params = &iwl1000_base_params, \ |
97 | .eeprom_params = &iwl1000_eeprom_params, \ | 97 | .eeprom_params = &iwl1000_eeprom_params, \ |
98 | .led_mode = IWL_LED_BLINK | 98 | .led_mode = IWL_LED_BLINK, \ |
99 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
99 | 100 | ||
100 | const struct iwl_cfg iwl1000_bgn_cfg = { | 101 | const struct iwl_cfg iwl1000_bgn_cfg = { |
101 | .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", | 102 | .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", |
@@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = { | |||
121 | .base_params = &iwl1000_base_params, \ | 122 | .base_params = &iwl1000_base_params, \ |
122 | .eeprom_params = &iwl1000_eeprom_params, \ | 123 | .eeprom_params = &iwl1000_eeprom_params, \ |
123 | .led_mode = IWL_LED_RF_STATE, \ | 124 | .led_mode = IWL_LED_RF_STATE, \ |
124 | .rx_with_siso_diversity = true | 125 | .rx_with_siso_diversity = true, \ |
126 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
125 | 127 | ||
126 | const struct iwl_cfg iwl100_bgn_cfg = { | 128 | const struct iwl_cfg iwl100_bgn_cfg = { |
127 | .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", | 129 | .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", |
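The iwl-1000.c change, repeated for the 2000/5000/6000 families below, appends .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K to the shared device-config macros; because these macros are multi-line initializer fragments, the line that used to end each macro also gains a trailing comma and backslash continuation. A minimal compilable illustration of that macro pattern (struct and values are toys, not the real iwl_cfg):

#include <stdio.h>

struct cfg {
	int led_mode;
	int max_ht_ampdu_exponent;
};

/* Every non-final macro line needs the trailing backslash, so appending a
 * field means the former last line gets both a comma and a continuation. */
#define IWL_DEVICE_EXAMPLE		\
	.led_mode = 1,			\
	.max_ht_ampdu_exponent = 3	/* toy value standing in for the 64K exponent */

static const struct cfg example_cfg = {
	IWL_DEVICE_EXAMPLE,
	/* per-device fields such as .name would follow here */
};

int main(void)
{
	printf("max_ht_ampdu_exponent = %d\n", example_cfg.max_ht_ampdu_exponent);
	return 0;
}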
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index 21e5d0843a62..890b95f497d6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c | |||
@@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { | |||
123 | .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ | 123 | .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ |
124 | .base_params = &iwl2000_base_params, \ | 124 | .base_params = &iwl2000_base_params, \ |
125 | .eeprom_params = &iwl20x0_eeprom_params, \ | 125 | .eeprom_params = &iwl20x0_eeprom_params, \ |
126 | .led_mode = IWL_LED_RF_STATE | 126 | .led_mode = IWL_LED_RF_STATE, \ |
127 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
128 | |||
127 | 129 | ||
128 | const struct iwl_cfg iwl2000_2bgn_cfg = { | 130 | const struct iwl_cfg iwl2000_2bgn_cfg = { |
129 | .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", | 131 | .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", |
@@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { | |||
149 | .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ | 151 | .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ |
150 | .base_params = &iwl2030_base_params, \ | 152 | .base_params = &iwl2030_base_params, \ |
151 | .eeprom_params = &iwl20x0_eeprom_params, \ | 153 | .eeprom_params = &iwl20x0_eeprom_params, \ |
152 | .led_mode = IWL_LED_RF_STATE | 154 | .led_mode = IWL_LED_RF_STATE, \ |
155 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
153 | 156 | ||
154 | const struct iwl_cfg iwl2030_2bgn_cfg = { | 157 | const struct iwl_cfg iwl2030_2bgn_cfg = { |
155 | .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", | 158 | .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", |
@@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { | |||
170 | .base_params = &iwl2000_base_params, \ | 173 | .base_params = &iwl2000_base_params, \ |
171 | .eeprom_params = &iwl20x0_eeprom_params, \ | 174 | .eeprom_params = &iwl20x0_eeprom_params, \ |
172 | .led_mode = IWL_LED_RF_STATE, \ | 175 | .led_mode = IWL_LED_RF_STATE, \ |
173 | .rx_with_siso_diversity = true | 176 | .rx_with_siso_diversity = true, \ |
177 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
174 | 178 | ||
175 | const struct iwl_cfg iwl105_bgn_cfg = { | 179 | const struct iwl_cfg iwl105_bgn_cfg = { |
176 | .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", | 180 | .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", |
@@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { | |||
197 | .base_params = &iwl2030_base_params, \ | 201 | .base_params = &iwl2030_base_params, \ |
198 | .eeprom_params = &iwl20x0_eeprom_params, \ | 202 | .eeprom_params = &iwl20x0_eeprom_params, \ |
199 | .led_mode = IWL_LED_RF_STATE, \ | 203 | .led_mode = IWL_LED_RF_STATE, \ |
200 | .rx_with_siso_diversity = true | 204 | .rx_with_siso_diversity = true, \ |
205 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
201 | 206 | ||
202 | const struct iwl_cfg iwl135_bgn_cfg = { | 207 | const struct iwl_cfg iwl135_bgn_cfg = { |
203 | .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", | 208 | .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 332bbede39e5..724194e23414 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { | |||
93 | .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ | 93 | .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ |
94 | .base_params = &iwl5000_base_params, \ | 94 | .base_params = &iwl5000_base_params, \ |
95 | .eeprom_params = &iwl5000_eeprom_params, \ | 95 | .eeprom_params = &iwl5000_eeprom_params, \ |
96 | .led_mode = IWL_LED_BLINK | 96 | .led_mode = IWL_LED_BLINK, \ |
97 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
97 | 98 | ||
98 | const struct iwl_cfg iwl5300_agn_cfg = { | 99 | const struct iwl_cfg iwl5300_agn_cfg = { |
99 | .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", | 100 | .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", |
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = { | |||
158 | .base_params = &iwl5000_base_params, \ | 159 | .base_params = &iwl5000_base_params, \ |
159 | .eeprom_params = &iwl5000_eeprom_params, \ | 160 | .eeprom_params = &iwl5000_eeprom_params, \ |
160 | .led_mode = IWL_LED_BLINK, \ | 161 | .led_mode = IWL_LED_BLINK, \ |
161 | .internal_wimax_coex = true | 162 | .internal_wimax_coex = true, \ |
163 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
162 | 164 | ||
163 | const struct iwl_cfg iwl5150_agn_cfg = { | 165 | const struct iwl_cfg iwl5150_agn_cfg = { |
164 | .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", | 166 | .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", |
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 8f2c3c8c6b84..21b2630763dc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
@@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { | |||
145 | .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ | 145 | .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ |
146 | .base_params = &iwl6000_g2_base_params, \ | 146 | .base_params = &iwl6000_g2_base_params, \ |
147 | .eeprom_params = &iwl6000_eeprom_params, \ | 147 | .eeprom_params = &iwl6000_eeprom_params, \ |
148 | .led_mode = IWL_LED_RF_STATE | 148 | .led_mode = IWL_LED_RF_STATE, \ |
149 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
149 | 150 | ||
150 | const struct iwl_cfg iwl6005_2agn_cfg = { | 151 | const struct iwl_cfg iwl6005_2agn_cfg = { |
151 | .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", | 152 | .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", |
@@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { | |||
199 | .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ | 200 | .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ |
200 | .base_params = &iwl6000_g2_base_params, \ | 201 | .base_params = &iwl6000_g2_base_params, \ |
201 | .eeprom_params = &iwl6000_eeprom_params, \ | 202 | .eeprom_params = &iwl6000_eeprom_params, \ |
202 | .led_mode = IWL_LED_RF_STATE | 203 | .led_mode = IWL_LED_RF_STATE, \ |
204 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
203 | 205 | ||
204 | const struct iwl_cfg iwl6030_2agn_cfg = { | 206 | const struct iwl_cfg iwl6030_2agn_cfg = { |
205 | .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", | 207 | .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", |
@@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = { | |||
235 | .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ | 237 | .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ |
236 | .base_params = &iwl6000_g2_base_params, \ | 238 | .base_params = &iwl6000_g2_base_params, \ |
237 | .eeprom_params = &iwl6000_eeprom_params, \ | 239 | .eeprom_params = &iwl6000_eeprom_params, \ |
238 | .led_mode = IWL_LED_RF_STATE | 240 | .led_mode = IWL_LED_RF_STATE, \ |
241 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
239 | 242 | ||
240 | const struct iwl_cfg iwl6035_2agn_cfg = { | 243 | const struct iwl_cfg iwl6035_2agn_cfg = { |
241 | .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", | 244 | .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", |
@@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = { | |||
290 | .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ | 293 | .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ |
291 | .base_params = &iwl6000_base_params, \ | 294 | .base_params = &iwl6000_base_params, \ |
292 | .eeprom_params = &iwl6000_eeprom_params, \ | 295 | .eeprom_params = &iwl6000_eeprom_params, \ |
293 | .led_mode = IWL_LED_BLINK | 296 | .led_mode = IWL_LED_BLINK, \ |
297 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
294 | 298 | ||
295 | const struct iwl_cfg iwl6000i_2agn_cfg = { | 299 | const struct iwl_cfg iwl6000i_2agn_cfg = { |
296 | .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", | 300 | .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", |
@@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = { | |||
322 | .base_params = &iwl6050_base_params, \ | 326 | .base_params = &iwl6050_base_params, \ |
323 | .eeprom_params = &iwl6000_eeprom_params, \ | 327 | .eeprom_params = &iwl6000_eeprom_params, \ |
324 | .led_mode = IWL_LED_BLINK, \ | 328 | .led_mode = IWL_LED_BLINK, \ |
325 | .internal_wimax_coex = true | 329 | .internal_wimax_coex = true, \ |
330 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
326 | 331 | ||
327 | const struct iwl_cfg iwl6050_2agn_cfg = { | 332 | const struct iwl_cfg iwl6050_2agn_cfg = { |
328 | .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", | 333 | .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", |
@@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = { | |||
347 | .base_params = &iwl6050_base_params, \ | 352 | .base_params = &iwl6050_base_params, \ |
348 | .eeprom_params = &iwl6000_eeprom_params, \ | 353 | .eeprom_params = &iwl6000_eeprom_params, \ |
349 | .led_mode = IWL_LED_BLINK, \ | 354 | .led_mode = IWL_LED_BLINK, \ |
350 | .internal_wimax_coex = true | 355 | .internal_wimax_coex = true, \ |
356 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
351 | 357 | ||
352 | const struct iwl_cfg iwl6150_bgn_cfg = { | 358 | const struct iwl_cfg iwl6150_bgn_cfg = { |
353 | .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", | 359 | .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", |
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 996e7f16adf9..c7154ac42c8c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c | |||
@@ -1257,6 +1257,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) | |||
1257 | op->name, err); | 1257 | op->name, err); |
1258 | #endif | 1258 | #endif |
1259 | } | 1259 | } |
1260 | kfree(pieces); | ||
1260 | return; | 1261 | return; |
1261 | 1262 | ||
1262 | try_again: | 1263 | try_again: |
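The iwl-drv.c one-liner looks like a straightforward leak fix: iwl_req_fw_callback() allocates a temporary `pieces` parse structure and the success path returned without freeing it, so every successful firmware load leaked it; the error and try_again paths are assumed to release it separately. A userspace sketch of the shape of the bug and the fix:

#include <stdio.h>
#include <stdlib.h>

struct fw_pieces { int dummy; };

static int load_firmware(int parse_ok)
{
	struct fw_pieces *pieces = calloc(1, sizeof(*pieces));

	if (!pieces)
		return -1;
	if (!parse_ok) {
		free(pieces);   /* error path: released here even before the fix */
		return -1;
	}
	/* ... hand the parsed sections to the driver, start the op mode ... */
	free(pieces);           /* the added kfree(pieces) on the success path */
	return 0;
}

int main(void)
{
	printf("load: %d\n", load_firmware(1));
	return 0;
}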
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 1ec4d55155f7..7810c41cf9a7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c | |||
@@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
793 | if (!vif->bss_conf.assoc) | 793 | if (!vif->bss_conf.assoc) |
794 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 794 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
795 | 795 | ||
796 | if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, | 796 | if (mvmvif->phy_ctxt && |
797 | IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, | ||
797 | mvmvif->phy_ctxt->id)) | 798 | mvmvif->phy_ctxt->id)) |
798 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 799 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
799 | 800 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index d530ef3da107..542ee74f290a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | |||
@@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
832 | if (!vif->bss_conf.assoc) | 832 | if (!vif->bss_conf.assoc) |
833 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 833 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
834 | 834 | ||
835 | if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) | 835 | if (mvmvif->phy_ctxt && |
836 | data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) | ||
836 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 837 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
837 | 838 | ||
838 | IWL_DEBUG_COEX(data->mvm, | 839 | IWL_DEBUG_COEX(data->mvm, |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 1ff7ec08532d..09654e73a533 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) | |||
405 | hw->wiphy->bands[IEEE80211_BAND_5GHZ] = | 405 | hw->wiphy->bands[IEEE80211_BAND_5GHZ] = |
406 | &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; | 406 | &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; |
407 | 407 | ||
408 | if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) | 408 | if ((mvm->fw->ucode_capa.capa[0] & |
409 | IWL_UCODE_TLV_CAPA_BEAMFORMER) && | ||
410 | (mvm->fw->ucode_capa.api[0] & | ||
411 | IWL_UCODE_TLV_API_LQ_SS_PARAMS)) | ||
409 | hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= | 412 | hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= |
410 | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; | 413 | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; |
411 | } | 414 | } |
@@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, | |||
2215 | 2218 | ||
2216 | mutex_lock(&mvm->mutex); | 2219 | mutex_lock(&mvm->mutex); |
2217 | 2220 | ||
2218 | iwl_mvm_cancel_scan(mvm); | 2221 | /* Due to a race condition, it's possible that mac80211 asks |
2222 | * us to stop a hw_scan when it's already stopped. This can | ||
2223 | * happen, for instance, if we stopped the scan ourselves, | ||
2224 | * called ieee80211_scan_completed() and the userspace called | ||
2225 | * cancel scan scan before ieee80211_scan_work() could run. | ||
2226 | * To handle that, simply return if the scan is not running. | ||
2227 | */ | ||
2228 | /* FIXME: for now, we ignore this race for UMAC scans, since | ||
2229 | * they don't set the scan_status. | ||
2230 | */ | ||
2231 | if ((mvm->scan_status == IWL_MVM_SCAN_OS) || | ||
2232 | (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) | ||
2233 | iwl_mvm_cancel_scan(mvm); | ||
2219 | 2234 | ||
2220 | mutex_unlock(&mvm->mutex); | 2235 | mutex_unlock(&mvm->mutex); |
2221 | } | 2236 | } |
@@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, | |||
2559 | int ret; | 2574 | int ret; |
2560 | 2575 | ||
2561 | mutex_lock(&mvm->mutex); | 2576 | mutex_lock(&mvm->mutex); |
2577 | |||
2578 | /* Due to a race condition, it's possible that mac80211 asks | ||
2579 | * us to stop a sched_scan when it's already stopped. This | ||
2580 | * can happen, for instance, if we stopped the scan ourselves, | ||
2581 | * called ieee80211_sched_scan_stopped() and the userspace called | ||
2582 | * stop sched scan scan before ieee80211_sched_scan_stopped_work() | ||
2583 | * could run. To handle this, simply return if the scan is | ||
2584 | * not running. | ||
2585 | */ | ||
2586 | /* FIXME: for now, we ignore this race for UMAC scans, since | ||
2587 | * they don't set the scan_status. | ||
2588 | */ | ||
2589 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && | ||
2590 | !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { | ||
2591 | mutex_unlock(&mvm->mutex); | ||
2592 | return 0; | ||
2593 | } | ||
2594 | |||
2562 | ret = iwl_mvm_scan_offload_stop(mvm, false); | 2595 | ret = iwl_mvm_scan_offload_stop(mvm, false); |
2563 | mutex_unlock(&mvm->mutex); | 2596 | mutex_unlock(&mvm->mutex); |
2564 | iwl_mvm_wait_for_async_handlers(mvm); | 2597 | iwl_mvm_wait_for_async_handlers(mvm); |
2565 | 2598 | ||
2566 | return ret; | 2599 | return ret; |
2567 | |||
2568 | } | 2600 | } |
2569 | 2601 | ||
2570 | static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | 2602 | static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, |
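Both scan hunks in mvm/mac80211.c implement the same idea their comments spell out: mac80211 can legitimately ask to cancel or stop a scan that has already completed, so the handlers now return early when scan_status says nothing is running, with UMAC scans exempt for now because they do not update scan_status. A tiny self-contained model of an idempotent stop:

#include <stdio.h>

enum scan_status { SCAN_NONE, SCAN_OS, SCAN_SCHED };

/* A second stop request arriving after completion must be a harmless no-op. */
static int sched_scan_stop(enum scan_status *status)
{
	if (*status != SCAN_SCHED)
		return 0;          /* nothing running: report success, do nothing */
	/* ... send the stop command, wait for the completion notification ... */
	*status = SCAN_NONE;
	return 0;
}

int main(void)
{
	enum scan_status s = SCAN_SCHED;
	int ret;

	ret = sched_scan_stop(&s);  /* first stop actually tears the scan down */
	ret = sched_scan_stop(&s);  /* late duplicate: returns without touching state */
	printf("second stop returned %d, status %d\n", ret, s);
	return 0;
}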
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 194bd1f939ca..078f24cf4af3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c | |||
@@ -134,9 +134,12 @@ enum rs_column_mode { | |||
134 | #define MAX_NEXT_COLUMNS 7 | 134 | #define MAX_NEXT_COLUMNS 7 |
135 | #define MAX_COLUMN_CHECKS 3 | 135 | #define MAX_COLUMN_CHECKS 3 |
136 | 136 | ||
137 | struct rs_tx_column; | ||
138 | |||
137 | typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, | 139 | typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, |
138 | struct ieee80211_sta *sta, | 140 | struct ieee80211_sta *sta, |
139 | struct iwl_scale_tbl_info *tbl); | 141 | struct iwl_scale_tbl_info *tbl, |
142 | const struct rs_tx_column *next_col); | ||
140 | 143 | ||
141 | struct rs_tx_column { | 144 | struct rs_tx_column { |
142 | enum rs_column_mode mode; | 145 | enum rs_column_mode mode; |
@@ -147,13 +150,15 @@ struct rs_tx_column { | |||
147 | }; | 150 | }; |
148 | 151 | ||
149 | static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 152 | static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
150 | struct iwl_scale_tbl_info *tbl) | 153 | struct iwl_scale_tbl_info *tbl, |
154 | const struct rs_tx_column *next_col) | ||
151 | { | 155 | { |
152 | return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant); | 156 | return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant); |
153 | } | 157 | } |
154 | 158 | ||
155 | static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 159 | static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
156 | struct iwl_scale_tbl_info *tbl) | 160 | struct iwl_scale_tbl_info *tbl, |
161 | const struct rs_tx_column *next_col) | ||
157 | { | 162 | { |
158 | if (!sta->ht_cap.ht_supported) | 163 | if (!sta->ht_cap.ht_supported) |
159 | return false; | 164 | return false; |
@@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
171 | } | 176 | } |
172 | 177 | ||
173 | static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 178 | static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
174 | struct iwl_scale_tbl_info *tbl) | 179 | struct iwl_scale_tbl_info *tbl, |
180 | const struct rs_tx_column *next_col) | ||
175 | { | 181 | { |
176 | if (!sta->ht_cap.ht_supported) | 182 | if (!sta->ht_cap.ht_supported) |
177 | return false; | 183 | return false; |
@@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
180 | } | 186 | } |
181 | 187 | ||
182 | static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 188 | static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
183 | struct iwl_scale_tbl_info *tbl) | 189 | struct iwl_scale_tbl_info *tbl, |
190 | const struct rs_tx_column *next_col) | ||
184 | { | 191 | { |
185 | struct rs_rate *rate = &tbl->rate; | 192 | struct rs_rate *rate = &tbl->rate; |
186 | struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; | 193 | struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; |
@@ -1271,6 +1278,9 @@ static void rs_mac80211_tx_status(void *mvm_r, | |||
1271 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 1278 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
1272 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1279 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1273 | 1280 | ||
1281 | if (!iwl_mvm_sta_from_mac80211(sta)->vif) | ||
1282 | return; | ||
1283 | |||
1274 | if (!ieee80211_is_data(hdr->frame_control) || | 1284 | if (!ieee80211_is_data(hdr->frame_control) || |
1275 | info->flags & IEEE80211_TX_CTL_NO_ACK) | 1285 | info->flags & IEEE80211_TX_CTL_NO_ACK) |
1276 | return; | 1286 | return; |
@@ -1590,7 +1600,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, | |||
1590 | 1600 | ||
1591 | for (j = 0; j < MAX_COLUMN_CHECKS; j++) { | 1601 | for (j = 0; j < MAX_COLUMN_CHECKS; j++) { |
1592 | allow_func = next_col->checks[j]; | 1602 | allow_func = next_col->checks[j]; |
1593 | if (allow_func && !allow_func(mvm, sta, tbl)) | 1603 | if (allow_func && !allow_func(mvm, sta, tbl, next_col)) |
1594 | break; | 1604 | break; |
1595 | } | 1605 | } |
1596 | 1606 | ||
@@ -2504,6 +2514,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, | |||
2504 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 2514 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
2505 | struct iwl_lq_sta *lq_sta = mvm_sta; | 2515 | struct iwl_lq_sta *lq_sta = mvm_sta; |
2506 | 2516 | ||
2517 | if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) { | ||
2518 | /* if vif isn't initialized mvm doesn't know about | ||
2519 | * this station, so don't do anything with the it | ||
2520 | */ | ||
2521 | sta = NULL; | ||
2522 | mvm_sta = NULL; | ||
2523 | } | ||
2524 | |||
2507 | /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ | 2525 | /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ |
2508 | 2526 | ||
2509 | /* Treat uninitialized rate scaling data same as non-existing. */ | 2527 | /* Treat uninitialized rate scaling data same as non-existing. */ |
@@ -2820,6 +2838,9 @@ static void rs_rate_update(void *mvm_r, | |||
2820 | (struct iwl_op_mode *)mvm_r; | 2838 | (struct iwl_op_mode *)mvm_r; |
2821 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 2839 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
2822 | 2840 | ||
2841 | if (!iwl_mvm_sta_from_mac80211(sta)->vif) | ||
2842 | return; | ||
2843 | |||
2823 | /* Stop any ongoing aggregations as rs starts off assuming no agg */ | 2844 | /* Stop any ongoing aggregations as rs starts off assuming no agg */ |
2824 | for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) | 2845 | for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) |
2825 | ieee80211_stop_tx_ba_session(sta, tid); | 2846 | ieee80211_stop_tx_ba_session(sta, tid); |
@@ -3580,9 +3601,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf, | |||
3580 | 3601 | ||
3581 | MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); | 3602 | MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); |
3582 | 3603 | ||
3583 | static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) | 3604 | static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir) |
3584 | { | 3605 | { |
3585 | struct iwl_lq_sta *lq_sta = mvm_sta; | 3606 | struct iwl_lq_sta *lq_sta = priv_sta; |
3607 | struct iwl_mvm_sta *mvmsta; | ||
3608 | |||
3609 | mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta); | ||
3610 | |||
3611 | if (!mvmsta->vif) | ||
3612 | return; | ||
3586 | 3613 | ||
3587 | debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, | 3614 | debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, |
3588 | lq_sta, &rs_sta_dbgfs_scale_table_ops); | 3615 | lq_sta, &rs_sta_dbgfs_scale_table_ops); |
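The rs.c refactor threads the candidate column through the allow-column callbacks: rs_ant_allow() previously tested the antenna of the currently active rate (tbl->rate.ant) instead of the antenna of the column being evaluated, which is what next_col now provides. A compilable toy of the callback-table pattern with the extra parameter (names and the availability test are stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct column { int ant; };

typedef bool (*allow_fn)(const struct column *cur, const struct column *next);

/* The check must consult the column being considered, not the one in use;
 * the comparison below stands in for iwl_mvm_bt_coex_is_ant_avail(). */
static bool ant_allowed(const struct column *cur, const struct column *next)
{
	(void)cur;
	return next->ant != 0;
}

int main(void)
{
	struct column cur = { .ant = 1 }, next = { .ant = 2 };
	allow_fn checks[] = { ant_allowed };

	printf("switch allowed: %d\n", (int)checks[0](&cur, &next));
	return 0;
}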
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 7e9aa3cb3254..c47c8051da77 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c | |||
@@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
1128 | if (mvm->scan_status == IWL_MVM_SCAN_NONE) | 1128 | if (mvm->scan_status == IWL_MVM_SCAN_NONE) |
1129 | return 0; | 1129 | return 0; |
1130 | 1130 | ||
1131 | if (iwl_mvm_is_radio_killed(mvm)) | 1131 | if (iwl_mvm_is_radio_killed(mvm)) { |
1132 | ret = 0; | ||
1132 | goto out; | 1133 | goto out; |
1134 | } | ||
1133 | 1135 | ||
1134 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && | 1136 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && |
1135 | (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || | 1137 | (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || |
@@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
1148 | IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", | 1150 | IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", |
1149 | sched ? "offloaded " : "", ret); | 1151 | sched ? "offloaded " : "", ret); |
1150 | iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); | 1152 | iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); |
1151 | return ret; | 1153 | goto out; |
1152 | } | 1154 | } |
1153 | 1155 | ||
1154 | IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", | 1156 | IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", |
1155 | sched ? "offloaded " : ""); | 1157 | sched ? "offloaded " : ""); |
1156 | 1158 | ||
1157 | ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); | 1159 | ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); |
1158 | if (ret) | 1160 | out: |
1159 | return ret; | ||
1160 | |||
1161 | /* | 1161 | /* |
1162 | * Clear the scan status so the next scan requests will succeed. This | 1162 | * Clear the scan status so the next scan requests will succeed. This |
1163 | * also ensures the Rx handler doesn't do anything, as the scan was | 1163 | * also ensures the Rx handler doesn't do anything, as the scan was |
@@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
1167 | if (mvm->scan_status == IWL_MVM_SCAN_OS) | 1167 | if (mvm->scan_status == IWL_MVM_SCAN_OS) |
1168 | iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); | 1168 | iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); |
1169 | 1169 | ||
1170 | out: | ||
1171 | mvm->scan_status = IWL_MVM_SCAN_NONE; | 1170 | mvm->scan_status = IWL_MVM_SCAN_NONE; |
1172 | 1171 | ||
1173 | if (notify) { | 1172 | if (notify) { |
@@ -1177,7 +1176,7 @@ out: | |||
1177 | ieee80211_scan_completed(mvm->hw, true); | 1176 | ieee80211_scan_completed(mvm->hw, true); |
1178 | } | 1177 | } |
1179 | 1178 | ||
1180 | return 0; | 1179 | return ret; |
1181 | } | 1180 | } |
1182 | 1181 | ||
1183 | static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm, | 1182 | static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm, |
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 54fafbf9a711..4b81c0bf63b0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c | |||
@@ -197,6 +197,8 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, | |||
197 | struct iwl_time_event_notif *notif) | 197 | struct iwl_time_event_notif *notif) |
198 | { | 198 | { |
199 | if (!le32_to_cpu(notif->status)) { | 199 | if (!le32_to_cpu(notif->status)) { |
200 | if (te_data->vif->type == NL80211_IFTYPE_STATION) | ||
201 | ieee80211_connection_loss(te_data->vif); | ||
200 | IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); | 202 | IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); |
201 | iwl_mvm_te_clear_data(mvm, te_data); | 203 | iwl_mvm_te_clear_data(mvm, te_data); |
202 | return; | 204 | return; |
@@ -750,8 +752,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) | |||
750 | * request | 752 | * request |
751 | */ | 753 | */ |
752 | list_for_each_entry(te_data, &mvm->time_event_list, list) { | 754 | list_for_each_entry(te_data, &mvm->time_event_list, list) { |
753 | if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE && | 755 | if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { |
754 | te_data->running) { | ||
755 | mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); | 756 | mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); |
756 | is_p2p = true; | 757 | is_p2p = true; |
757 | goto remove_te; | 758 | goto remove_te; |
@@ -766,10 +767,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) | |||
766 | * request | 767 | * request |
767 | */ | 768 | */ |
768 | list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { | 769 | list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { |
769 | if (te_data->running) { | 770 | mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); |
770 | mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); | 771 | goto remove_te; |
771 | goto remove_te; | ||
772 | } | ||
773 | } | 772 | } |
774 | 773 | ||
775 | remove_te: | 774 | remove_te: |
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 07304e1fd64a..96a05406babf 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -949,8 +949,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
949 | mvmsta = iwl_mvm_sta_from_mac80211(sta); | 949 | mvmsta = iwl_mvm_sta_from_mac80211(sta); |
950 | tid_data = &mvmsta->tid_data[tid]; | 950 | tid_data = &mvmsta->tid_data[tid]; |
951 | 951 | ||
952 | if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d", | 952 | if (tid_data->txq_id != scd_flow) { |
953 | tid_data->txq_id, tid, scd_flow)) { | 953 | IWL_ERR(mvm, |
954 | "invalid BA notification: Q %d, tid %d, flow %d\n", | ||
955 | tid_data->txq_id, tid, scd_flow); | ||
954 | rcu_read_unlock(); | 956 | rcu_read_unlock(); |
955 | return 0; | 957 | return 0; |
956 | } | 958 | } |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index dbd6bcf52205..686dd301cd53 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
@@ -368,10 +368,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
368 | /* 3165 Series */ | 368 | /* 3165 Series */ |
369 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, | 369 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, |
370 | {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, | 370 | {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, |
371 | {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, | ||
372 | {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)}, | ||
373 | {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, | 371 | {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, |
374 | {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, | 372 | {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, |
373 | {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, | ||
374 | {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, | ||
375 | {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, | ||
376 | {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, | ||
375 | 377 | ||
376 | /* 7265 Series */ | 378 | /* 7265 Series */ |
377 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 379 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 4a4c6586a8d2..8908be6dbc48 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, | |||
946 | goto nla_put_failure; | 946 | goto nla_put_failure; |
947 | 947 | ||
948 | genlmsg_end(skb, msg_head); | 948 | genlmsg_end(skb, msg_head); |
949 | genlmsg_unicast(&init_net, skb, dst_portid); | 949 | if (genlmsg_unicast(&init_net, skb, dst_portid)) |
950 | goto err_free_txskb; | ||
950 | 951 | ||
951 | /* Enqueue the packet */ | 952 | /* Enqueue the packet */ |
952 | skb_queue_tail(&data->pending, my_skb); | 953 | skb_queue_tail(&data->pending, my_skb); |
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, | |||
955 | return; | 956 | return; |
956 | 957 | ||
957 | nla_put_failure: | 958 | nla_put_failure: |
959 | nlmsg_free(skb); | ||
960 | err_free_txskb: | ||
958 | printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); | 961 | printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); |
959 | ieee80211_free_txskb(hw, my_skb); | 962 | ieee80211_free_txskb(hw, my_skb); |
960 | data->tx_failed++; | 963 | data->tx_failed++; |
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 1d4677460711..074f716020aa 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c | |||
@@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) | |||
1386 | } | 1386 | } |
1387 | 1387 | ||
1388 | return true; | 1388 | return true; |
1389 | } else if (0x86DD == ether_type) { | 1389 | } else if (ETH_P_IPV6 == ether_type) { |
1390 | return true; | 1390 | /* TODO: Handle any IPv6 cases that need special handling. |
1391 | * For now, always return false | ||
1392 | */ | ||
1393 | goto end; | ||
1391 | } | 1394 | } |
1392 | 1395 | ||
1393 | end: | 1396 | end: |
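The rtlwifi hunk replaces the bare 0x86DD literal with the ETH_P_IPV6 constant and routes IPv6 frames to the common end: path instead of unconditionally flagging them as special. A small userspace sketch of classifying by EtherType with the named constants from linux/if_ether.h (assumes Linux UAPI headers are installed; the classifier itself is illustrative):

#include <stdio.h>
#include <linux/if_ether.h>   /* ETH_P_IP, ETH_P_ARP, ETH_P_IPV6, ... */

/* Classify a frame by EtherType using the named constants rather
 * than bare hex literals, so the intent is obvious at the call site. */
static const char *ethertype_name(unsigned short ether_type)
{
    switch (ether_type) {
    case ETH_P_IP:   return "IPv4";
    case ETH_P_ARP:  return "ARP";
    case ETH_P_IPV6: return "IPv6";   /* 0x86DD */
    default:         return "other";
    }
}

int main(void)
{
    printf("%s\n", ethertype_name(0x86DD));   /* IPv6 */
    printf("%s\n", ethertype_name(0x0806));   /* ARP */
    return 0;
}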
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index a62170ea0481..8c45cf44ce24 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c | |||
@@ -1124,12 +1124,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) | |||
1124 | /*This is for new trx flow*/ | 1124 | /*This is for new trx flow*/ |
1125 | struct rtl_tx_buffer_desc *pbuffer_desc = NULL; | 1125 | struct rtl_tx_buffer_desc *pbuffer_desc = NULL; |
1126 | u8 temp_one = 1; | 1126 | u8 temp_one = 1; |
1127 | u8 *entry; | ||
1127 | 1128 | ||
1128 | memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); | 1129 | memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); |
1129 | ring = &rtlpci->tx_ring[BEACON_QUEUE]; | 1130 | ring = &rtlpci->tx_ring[BEACON_QUEUE]; |
1130 | pskb = __skb_dequeue(&ring->queue); | 1131 | pskb = __skb_dequeue(&ring->queue); |
1131 | if (pskb) | 1132 | if (rtlpriv->use_new_trx_flow) |
1133 | entry = (u8 *)(&ring->buffer_desc[ring->idx]); | ||
1134 | else | ||
1135 | entry = (u8 *)(&ring->desc[ring->idx]); | ||
1136 | if (pskb) { | ||
1137 | pci_unmap_single(rtlpci->pdev, | ||
1138 | rtlpriv->cfg->ops->get_desc( | ||
1139 | (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), | ||
1140 | pskb->len, PCI_DMA_TODEVICE); | ||
1132 | kfree_skb(pskb); | 1141 | kfree_skb(pskb); |
1142 | } | ||
1133 | 1143 | ||
1134 | /*NB: the beacon data buffer must be 32-bit aligned. */ | 1144 | /*NB: the beacon data buffer must be 32-bit aligned. */ |
1135 | pskb = ieee80211_beacon_get(hw, mac->vif); | 1145 | pskb = ieee80211_beacon_get(hw, mac->vif); |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f38227afe099..3aa8648080c8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev, | |||
340 | unsigned int num_queues = vif->num_queues; | 340 | unsigned int num_queues = vif->num_queues; |
341 | int i; | 341 | int i; |
342 | unsigned int queue_index; | 342 | unsigned int queue_index; |
343 | struct xenvif_stats *vif_stats; | ||
344 | 343 | ||
345 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { | 344 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { |
346 | unsigned long accum = 0; | 345 | unsigned long accum = 0; |
347 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | 346 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
348 | vif_stats = &vif->queues[queue_index].stats; | 347 | void *vif_stats = &vif->queues[queue_index].stats; |
349 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); | 348 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); |
350 | } | 349 | } |
351 | data[i] = accum; | 350 | data[i] = accum; |
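In the xen-netback hunk above, the ethtool stats loop adds a byte offset to a stats pointer; with the old struct xenvif_stats * type the addition was scaled by the structure size, so the reads landed on the wrong members. Declaring the base as void * (byte-granular arithmetic as a GCC extension) fixes that. A standalone sketch of the same offset-table pattern, using char * and offsetof() for portability (the structure and stat names are made up for illustration):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical per-queue counters, loosely modelled on an ethtool
 * stats table; the names are illustrative only. */
struct queue_stats {
    unsigned long rx_packets;
    unsigned long tx_packets;
    unsigned long tx_drops;
};

static const struct {
    const char *name;
    size_t offset;
} stat_table[] = {
    { "rx_packets", offsetof(struct queue_stats, rx_packets) },
    { "tx_packets", offsetof(struct queue_stats, tx_packets) },
    { "tx_drops",   offsetof(struct queue_stats, tx_drops)   },
};

int main(void)
{
    struct queue_stats queues[2] = {
        { 10, 20, 1 },
        {  5,  7, 0 },
    };

    for (size_t i = 0; i < sizeof(stat_table) / sizeof(stat_table[0]); i++) {
        unsigned long accum = 0;
        for (size_t q = 0; q < 2; q++) {
            /* The offset is in bytes, so the base must be a char/void
             * pointer; adding it to a struct pointer would scale by
             * sizeof(struct queue_stats) and read the wrong fields. */
            const char *base = (const char *)&queues[q];
            accum += *(const unsigned long *)(base + stat_table[i].offset);
        }
        printf("%s = %lu\n", stat_table[i].name, accum);
    }
    return 0;
}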
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index f7a31d2cb3f1..997cf0901ac2 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, | |||
96 | static void make_tx_response(struct xenvif_queue *queue, | 96 | static void make_tx_response(struct xenvif_queue *queue, |
97 | struct xen_netif_tx_request *txp, | 97 | struct xen_netif_tx_request *txp, |
98 | s8 st); | 98 | s8 st); |
99 | static void push_tx_responses(struct xenvif_queue *queue); | ||
99 | 100 | ||
100 | static inline int tx_work_todo(struct xenvif_queue *queue); | 101 | static inline int tx_work_todo(struct xenvif_queue *queue); |
101 | 102 | ||
@@ -657,6 +658,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue, | |||
657 | do { | 658 | do { |
658 | spin_lock_irqsave(&queue->response_lock, flags); | 659 | spin_lock_irqsave(&queue->response_lock, flags); |
659 | make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); | 660 | make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); |
661 | push_tx_responses(queue); | ||
660 | spin_unlock_irqrestore(&queue->response_lock, flags); | 662 | spin_unlock_irqrestore(&queue->response_lock, flags); |
661 | if (cons == end) | 663 | if (cons == end) |
662 | break; | 664 | break; |
@@ -1343,7 +1345,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
1343 | { | 1345 | { |
1344 | unsigned int offset = skb_headlen(skb); | 1346 | unsigned int offset = skb_headlen(skb); |
1345 | skb_frag_t frags[MAX_SKB_FRAGS]; | 1347 | skb_frag_t frags[MAX_SKB_FRAGS]; |
1346 | int i; | 1348 | int i, f; |
1347 | struct ubuf_info *uarg; | 1349 | struct ubuf_info *uarg; |
1348 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; | 1350 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; |
1349 | 1351 | ||
@@ -1383,23 +1385,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
1383 | frags[i].page_offset = 0; | 1385 | frags[i].page_offset = 0; |
1384 | skb_frag_size_set(&frags[i], len); | 1386 | skb_frag_size_set(&frags[i], len); |
1385 | } | 1387 | } |
1386 | /* swap out with old one */ | ||
1387 | memcpy(skb_shinfo(skb)->frags, | ||
1388 | frags, | ||
1389 | i * sizeof(skb_frag_t)); | ||
1390 | skb_shinfo(skb)->nr_frags = i; | ||
1391 | skb->truesize += i * PAGE_SIZE; | ||
1392 | 1388 | ||
1393 | /* remove traces of mapped pages and frag_list */ | 1389 | /* Copied all the bits from the frag list -- free it. */ |
1394 | skb_frag_list_init(skb); | 1390 | skb_frag_list_init(skb); |
1391 | xenvif_skb_zerocopy_prepare(queue, nskb); | ||
1392 | kfree_skb(nskb); | ||
1393 | |||
1394 | /* Release all the original (foreign) frags. */ | ||
1395 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | ||
1396 | skb_frag_unref(skb, f); | ||
1395 | uarg = skb_shinfo(skb)->destructor_arg; | 1397 | uarg = skb_shinfo(skb)->destructor_arg; |
1396 | /* increase inflight counter to offset decrement in callback */ | 1398 | /* increase inflight counter to offset decrement in callback */ |
1397 | atomic_inc(&queue->inflight_packets); | 1399 | atomic_inc(&queue->inflight_packets); |
1398 | uarg->callback(uarg, true); | 1400 | uarg->callback(uarg, true); |
1399 | skb_shinfo(skb)->destructor_arg = NULL; | 1401 | skb_shinfo(skb)->destructor_arg = NULL; |
1400 | 1402 | ||
1401 | xenvif_skb_zerocopy_prepare(queue, nskb); | 1403 | /* Fill the skb with the new (local) frags. */ |
1402 | kfree_skb(nskb); | 1404 | memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); |
1405 | skb_shinfo(skb)->nr_frags = i; | ||
1406 | skb->truesize += i * PAGE_SIZE; | ||
1403 | 1407 | ||
1404 | return 0; | 1408 | return 0; |
1405 | } | 1409 | } |
@@ -1652,13 +1656,20 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, | |||
1652 | unsigned long flags; | 1656 | unsigned long flags; |
1653 | 1657 | ||
1654 | pending_tx_info = &queue->pending_tx_info[pending_idx]; | 1658 | pending_tx_info = &queue->pending_tx_info[pending_idx]; |
1659 | |||
1655 | spin_lock_irqsave(&queue->response_lock, flags); | 1660 | spin_lock_irqsave(&queue->response_lock, flags); |
1661 | |||
1656 | make_tx_response(queue, &pending_tx_info->req, status); | 1662 | make_tx_response(queue, &pending_tx_info->req, status); |
1657 | index = pending_index(queue->pending_prod); | 1663 | |
1664 | /* Release the pending index before pusing the Tx response so | ||
1665 | * its available before a new Tx request is pushed by the | ||
1666 | * frontend. | ||
1667 | */ | ||
1668 | index = pending_index(queue->pending_prod++); | ||
1658 | queue->pending_ring[index] = pending_idx; | 1669 | queue->pending_ring[index] = pending_idx; |
1659 | /* TX shouldn't use the index before we give it back here */ | 1670 | |
1660 | mb(); | 1671 | push_tx_responses(queue); |
1661 | queue->pending_prod++; | 1672 | |
1662 | spin_unlock_irqrestore(&queue->response_lock, flags); | 1673 | spin_unlock_irqrestore(&queue->response_lock, flags); |
1663 | } | 1674 | } |
1664 | 1675 | ||
@@ -1669,7 +1680,6 @@ static void make_tx_response(struct xenvif_queue *queue, | |||
1669 | { | 1680 | { |
1670 | RING_IDX i = queue->tx.rsp_prod_pvt; | 1681 | RING_IDX i = queue->tx.rsp_prod_pvt; |
1671 | struct xen_netif_tx_response *resp; | 1682 | struct xen_netif_tx_response *resp; |
1672 | int notify; | ||
1673 | 1683 | ||
1674 | resp = RING_GET_RESPONSE(&queue->tx, i); | 1684 | resp = RING_GET_RESPONSE(&queue->tx, i); |
1675 | resp->id = txp->id; | 1685 | resp->id = txp->id; |
@@ -1679,6 +1689,12 @@ static void make_tx_response(struct xenvif_queue *queue, | |||
1679 | RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; | 1689 | RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; |
1680 | 1690 | ||
1681 | queue->tx.rsp_prod_pvt = ++i; | 1691 | queue->tx.rsp_prod_pvt = ++i; |
1692 | } | ||
1693 | |||
1694 | static void push_tx_responses(struct xenvif_queue *queue) | ||
1695 | { | ||
1696 | int notify; | ||
1697 | |||
1682 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); | 1698 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); |
1683 | if (notify) | 1699 | if (notify) |
1684 | notify_remote_via_irq(queue->tx_irq); | 1700 | notify_remote_via_irq(queue->tx_irq); |
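The netback change splits response generation from publication: make_tx_response() now only fills ring slots and advances the private producer, while the new push_tx_responses() publishes them and decides whether to notify the frontend, and it is called once per batch under the response lock. A toy sketch of that split (it leaves out the memory barriers and event-threshold logic that RING_PUSH_RESPONSES_AND_CHECK_NOTIFY performs in the real ring macros):

#include <stdio.h>

#define RING_SIZE 8

/* A toy response ring: make_response() fills slots and advances the
 * private producer; push_responses() publishes them and notifies. */
struct ring {
    int slots[RING_SIZE];
    unsigned int rsp_prod_pvt;   /* producer index not yet visible to peer */
    unsigned int rsp_prod;       /* producer index visible to peer */
};

static void make_response(struct ring *r, int status)
{
    r->slots[r->rsp_prod_pvt % RING_SIZE] = status;
    r->rsp_prod_pvt++;           /* not published yet */
}

static void push_responses(struct ring *r)
{
    int had_new = r->rsp_prod_pvt != r->rsp_prod;

    r->rsp_prod = r->rsp_prod_pvt;   /* publish all queued responses */
    if (had_new)
        printf("notify peer: responses up to %u visible\n", r->rsp_prod);
}

int main(void)
{
    struct ring r = { { 0 } };

    make_response(&r, 0);    /* OKAY-style status */
    make_response(&r, -1);   /* ERROR-style status */
    push_responses(&r);      /* one publish + one notification for both */
    return 0;
}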
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index e9b960f0ff32..720aaf6313d2 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -1008,8 +1008,7 @@ err: | |||
1008 | 1008 | ||
1009 | static int xennet_change_mtu(struct net_device *dev, int mtu) | 1009 | static int xennet_change_mtu(struct net_device *dev, int mtu) |
1010 | { | 1010 | { |
1011 | int max = xennet_can_sg(dev) ? | 1011 | int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; |
1012 | XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; | ||
1013 | 1012 | ||
1014 | if (mtu > max) | 1013 | if (mtu > max) |
1015 | return -EINVAL; | 1014 | return -EINVAL; |
@@ -1279,8 +1278,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) | |||
1279 | netdev->ethtool_ops = &xennet_ethtool_ops; | 1278 | netdev->ethtool_ops = &xennet_ethtool_ops; |
1280 | SET_NETDEV_DEV(netdev, &dev->dev); | 1279 | SET_NETDEV_DEV(netdev, &dev->dev); |
1281 | 1280 | ||
1282 | netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); | ||
1283 | |||
1284 | np->netdev = netdev; | 1281 | np->netdev = netdev; |
1285 | 1282 | ||
1286 | netif_carrier_off(netdev); | 1283 | netif_carrier_off(netdev); |
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 38d1c51f58b1..7bcaeec876c0 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig | |||
@@ -84,8 +84,7 @@ config OF_RESOLVE | |||
84 | bool | 84 | bool |
85 | 85 | ||
86 | config OF_OVERLAY | 86 | config OF_OVERLAY |
87 | bool | 87 | bool "Device Tree overlays" |
88 | depends on OF | ||
89 | select OF_DYNAMIC | 88 | select OF_DYNAMIC |
90 | select OF_RESOLVE | 89 | select OF_RESOLVE |
91 | 90 | ||
diff --git a/drivers/of/address.c b/drivers/of/address.c index ad2906919d45..78a7dcbec7d8 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c | |||
@@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np) | |||
450 | return NULL; | 450 | return NULL; |
451 | } | 451 | } |
452 | 452 | ||
453 | static int of_empty_ranges_quirk(void) | 453 | static int of_empty_ranges_quirk(struct device_node *np) |
454 | { | 454 | { |
455 | if (IS_ENABLED(CONFIG_PPC)) { | 455 | if (IS_ENABLED(CONFIG_PPC)) { |
456 | /* To save cycles, we cache the result */ | 456 | /* To save cycles, we cache the result for global "Mac" setting */ |
457 | static int quirk_state = -1; | 457 | static int quirk_state = -1; |
458 | 458 | ||
459 | /* PA-SEMI sdc DT bug */ | ||
460 | if (of_device_is_compatible(np, "1682m-sdc")) | ||
461 | return true; | ||
462 | |||
463 | /* Make quirk cached */ | ||
459 | if (quirk_state < 0) | 464 | if (quirk_state < 0) |
460 | quirk_state = | 465 | quirk_state = |
461 | of_machine_is_compatible("Power Macintosh") || | 466 | of_machine_is_compatible("Power Macintosh") || |
@@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, | |||
490 | * This code is only enabled on powerpc. --gcl | 495 | * This code is only enabled on powerpc. --gcl |
491 | */ | 496 | */ |
492 | ranges = of_get_property(parent, rprop, &rlen); | 497 | ranges = of_get_property(parent, rprop, &rlen); |
493 | if (ranges == NULL && !of_empty_ranges_quirk()) { | 498 | if (ranges == NULL && !of_empty_ranges_quirk(parent)) { |
494 | pr_debug("OF: no ranges; cannot translate\n"); | 499 | pr_debug("OF: no ranges; cannot translate\n"); |
495 | return 1; | 500 | return 1; |
496 | } | 501 | } |
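The of/address.c hunk keeps the cached machine-wide quirk but now also takes the node, so a per-device compatible ("1682m-sdc") can trigger the quirk without touching the cache. A small sketch of that shape: test the cheap per-call condition first, then memoise the expensive global answer in a function-local static (the compatible string is the one from the patch; everything else is illustrative):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Stand-in for an expensive machine-wide check, like
 * of_machine_is_compatible() in the driver. */
static bool machine_is_affected(void)
{
    printf("  (expensive machine check ran)\n");
    return true;
}

static bool empty_ranges_quirk(const char *compatible)
{
    static int cached = -1;          /* -1 = not yet computed */

    /* Per-device quirk: checked every call, never cached. */
    if (compatible && !strcmp(compatible, "1682m-sdc"))
        return true;

    if (cached < 0)
        cached = machine_is_affected();
    return cached;
}

int main(void)
{
    printf("%d\n", empty_ranges_quirk("1682m-sdc"));
    printf("%d\n", empty_ranges_quirk(NULL));   /* runs the check once */
    printf("%d\n", empty_ranges_quirk(NULL));   /* cached this time */
    return 0;
}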
diff --git a/drivers/of/base.c b/drivers/of/base.c index 0a8aeb8523fe..8f165b112e03 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -714,16 +714,12 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent, | |||
714 | const char *path) | 714 | const char *path) |
715 | { | 715 | { |
716 | struct device_node *child; | 716 | struct device_node *child; |
717 | int len = strchrnul(path, '/') - path; | 717 | int len; |
718 | int term; | ||
719 | 718 | ||
719 | len = strcspn(path, "/:"); | ||
720 | if (!len) | 720 | if (!len) |
721 | return NULL; | 721 | return NULL; |
722 | 722 | ||
723 | term = strchrnul(path, ':') - path; | ||
724 | if (term < len) | ||
725 | len = term; | ||
726 | |||
727 | __for_each_child_of_node(parent, child) { | 723 | __for_each_child_of_node(parent, child) { |
728 | const char *name = strrchr(child->full_name, '/'); | 724 | const char *name = strrchr(child->full_name, '/'); |
729 | if (WARN(!name, "malformed device_node %s\n", child->full_name)) | 725 | if (WARN(!name, "malformed device_node %s\n", child->full_name)) |
@@ -768,8 +764,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt | |||
768 | 764 | ||
769 | /* The path could begin with an alias */ | 765 | /* The path could begin with an alias */ |
770 | if (*path != '/') { | 766 | if (*path != '/') { |
771 | char *p = strchrnul(path, '/'); | 767 | int len; |
772 | int len = separator ? separator - path : p - path; | 768 | const char *p = separator; |
769 | |||
770 | if (!p) | ||
771 | p = strchrnul(path, '/'); | ||
772 | len = p - path; | ||
773 | 773 | ||
774 | /* of_aliases must not be NULL */ | 774 | /* of_aliases must not be NULL */ |
775 | if (!of_aliases) | 775 | if (!of_aliases) |
@@ -794,6 +794,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt | |||
794 | path++; /* Increment past '/' delimiter */ | 794 | path++; /* Increment past '/' delimiter */ |
795 | np = __of_find_node_by_path(np, path); | 795 | np = __of_find_node_by_path(np, path); |
796 | path = strchrnul(path, '/'); | 796 | path = strchrnul(path, '/'); |
797 | if (separator && separator < path) | ||
798 | break; | ||
797 | } | 799 | } |
798 | raw_spin_unlock_irqrestore(&devtree_lock, flags); | 800 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
799 | return np; | 801 | return np; |
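The __of_find_node_by_path() change computes the component length with a single strcspn(path, "/:") call instead of two strchrnul() lookups and a comparison; strcspn() is also standard C, whereas strchrnul() is a GNU extension. A small userspace sketch of the same split, using option paths in the style of the selftests:

#include <stdio.h>
#include <string.h>

/* Split one component of a "path:options" string: the component ends
 * at whichever of '/' or ':' comes first, found in a single call. */
static void first_component(const char *path)
{
    size_t len = strcspn(path, "/:");   /* length up to '/', ':' or NUL */

    if (!len) {
        printf("'%s' -> empty component\n", path);
        return;
    }
    printf("'%s' -> component '%.*s'\n", path, (int)len, path);
}

int main(void)
{
    first_component("testcase-data/testcase-device1:test/option");
    first_component("testcase-alias:testaliasoption");
    first_component("node-a/node-b");
    first_component(":only-options");
    return 0;
}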
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 0d7765807f49..1a7980692f25 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar | |||
290 | struct device_node *p; | 290 | struct device_node *p; |
291 | const __be32 *intspec, *tmp, *addr; | 291 | const __be32 *intspec, *tmp, *addr; |
292 | u32 intsize, intlen; | 292 | u32 intsize, intlen; |
293 | int i, res = -EINVAL; | 293 | int i, res; |
294 | 294 | ||
295 | pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); | 295 | pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); |
296 | 296 | ||
@@ -323,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar | |||
323 | 323 | ||
324 | /* Get size of interrupt specifier */ | 324 | /* Get size of interrupt specifier */ |
325 | tmp = of_get_property(p, "#interrupt-cells", NULL); | 325 | tmp = of_get_property(p, "#interrupt-cells", NULL); |
326 | if (tmp == NULL) | 326 | if (tmp == NULL) { |
327 | res = -EINVAL; | ||
327 | goto out; | 328 | goto out; |
329 | } | ||
328 | intsize = be32_to_cpu(*tmp); | 330 | intsize = be32_to_cpu(*tmp); |
329 | 331 | ||
330 | pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); | 332 | pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); |
331 | 333 | ||
332 | /* Check index */ | 334 | /* Check index */ |
333 | if ((index + 1) * intsize > intlen) | 335 | if ((index + 1) * intsize > intlen) { |
336 | res = -EINVAL; | ||
334 | goto out; | 337 | goto out; |
338 | } | ||
335 | 339 | ||
336 | /* Copy intspec into irq structure */ | 340 | /* Copy intspec into irq structure */ |
337 | intspec += index * intsize; | 341 | intspec += index * intsize; |
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 352b4f28f82c..dee9270ba547 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/idr.h> | ||
22 | 23 | ||
23 | #include "of_private.h" | 24 | #include "of_private.h" |
24 | 25 | ||
@@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, | |||
85 | struct device_node *target, struct device_node *child) | 86 | struct device_node *target, struct device_node *child) |
86 | { | 87 | { |
87 | const char *cname; | 88 | const char *cname; |
88 | struct device_node *tchild, *grandchild; | 89 | struct device_node *tchild; |
89 | int ret = 0; | 90 | int ret = 0; |
90 | 91 | ||
91 | cname = kbasename(child->full_name); | 92 | cname = kbasename(child->full_name); |
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 0cf9a236d438..52c45c7df07f 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c | |||
@@ -92,6 +92,16 @@ static void __init of_selftest_find_node_by_name(void) | |||
92 | "option path test failed\n"); | 92 | "option path test failed\n"); |
93 | of_node_put(np); | 93 | of_node_put(np); |
94 | 94 | ||
95 | np = of_find_node_opts_by_path("/testcase-data:test/option", &options); | ||
96 | selftest(np && !strcmp("test/option", options), | ||
97 | "option path test, subcase #1 failed\n"); | ||
98 | of_node_put(np); | ||
99 | |||
100 | np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options); | ||
101 | selftest(np && !strcmp("test/option", options), | ||
102 | "option path test, subcase #2 failed\n"); | ||
103 | of_node_put(np); | ||
104 | |||
95 | np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); | 105 | np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); |
96 | selftest(np, "NULL option path test failed\n"); | 106 | selftest(np, "NULL option path test failed\n"); |
97 | of_node_put(np); | 107 | of_node_put(np); |
@@ -102,6 +112,12 @@ static void __init of_selftest_find_node_by_name(void) | |||
102 | "option alias path test failed\n"); | 112 | "option alias path test failed\n"); |
103 | of_node_put(np); | 113 | of_node_put(np); |
104 | 114 | ||
115 | np = of_find_node_opts_by_path("testcase-alias:test/alias/option", | ||
116 | &options); | ||
117 | selftest(np && !strcmp("test/alias/option", options), | ||
118 | "option alias path test, subcase #1 failed\n"); | ||
119 | of_node_put(np); | ||
120 | |||
105 | np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); | 121 | np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); |
106 | selftest(np, "NULL option alias path test failed\n"); | 122 | selftest(np, "NULL option alias path test failed\n"); |
107 | of_node_put(np); | 123 | of_node_put(np); |
@@ -378,9 +394,9 @@ static void __init of_selftest_property_string(void) | |||
378 | rc = of_property_match_string(np, "phandle-list-names", "first"); | 394 | rc = of_property_match_string(np, "phandle-list-names", "first"); |
379 | selftest(rc == 0, "first expected:0 got:%i\n", rc); | 395 | selftest(rc == 0, "first expected:0 got:%i\n", rc); |
380 | rc = of_property_match_string(np, "phandle-list-names", "second"); | 396 | rc = of_property_match_string(np, "phandle-list-names", "second"); |
381 | selftest(rc == 1, "second expected:0 got:%i\n", rc); | 397 | selftest(rc == 1, "second expected:1 got:%i\n", rc); |
382 | rc = of_property_match_string(np, "phandle-list-names", "third"); | 398 | rc = of_property_match_string(np, "phandle-list-names", "third"); |
383 | selftest(rc == 2, "third expected:0 got:%i\n", rc); | 399 | selftest(rc == 2, "third expected:2 got:%i\n", rc); |
384 | rc = of_property_match_string(np, "phandle-list-names", "fourth"); | 400 | rc = of_property_match_string(np, "phandle-list-names", "fourth"); |
385 | selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); | 401 | selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); |
386 | rc = of_property_match_string(np, "missing-property", "blah"); | 402 | rc = of_property_match_string(np, "missing-property", "blah"); |
@@ -478,7 +494,6 @@ static void __init of_selftest_changeset(void) | |||
478 | struct device_node *n1, *n2, *n21, *nremove, *parent, *np; | 494 | struct device_node *n1, *n2, *n21, *nremove, *parent, *np; |
479 | struct of_changeset chgset; | 495 | struct of_changeset chgset; |
480 | 496 | ||
481 | of_changeset_init(&chgset); | ||
482 | n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); | 497 | n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); |
483 | selftest(n1, "testcase setup failure\n"); | 498 | selftest(n1, "testcase setup failure\n"); |
484 | n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); | 499 | n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); |
@@ -979,7 +994,7 @@ static int of_path_platform_device_exists(const char *path) | |||
979 | return pdev != NULL; | 994 | return pdev != NULL; |
980 | } | 995 | } |
981 | 996 | ||
982 | #if IS_ENABLED(CONFIG_I2C) | 997 | #if IS_BUILTIN(CONFIG_I2C) |
983 | 998 | ||
984 | /* get the i2c client device instantiated at the path */ | 999 | /* get the i2c client device instantiated at the path */ |
985 | static struct i2c_client *of_path_to_i2c_client(const char *path) | 1000 | static struct i2c_client *of_path_to_i2c_client(const char *path) |
@@ -1445,7 +1460,7 @@ static void of_selftest_overlay_11(void) | |||
1445 | return; | 1460 | return; |
1446 | } | 1461 | } |
1447 | 1462 | ||
1448 | #if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) | 1463 | #if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) |
1449 | 1464 | ||
1450 | struct selftest_i2c_bus_data { | 1465 | struct selftest_i2c_bus_data { |
1451 | struct platform_device *pdev; | 1466 | struct platform_device *pdev; |
@@ -1584,7 +1599,7 @@ static struct i2c_driver selftest_i2c_dev_driver = { | |||
1584 | .id_table = selftest_i2c_dev_id, | 1599 | .id_table = selftest_i2c_dev_id, |
1585 | }; | 1600 | }; |
1586 | 1601 | ||
1587 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1602 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
1588 | 1603 | ||
1589 | struct selftest_i2c_mux_data { | 1604 | struct selftest_i2c_mux_data { |
1590 | int nchans; | 1605 | int nchans; |
@@ -1695,7 +1710,7 @@ static int of_selftest_overlay_i2c_init(void) | |||
1695 | "could not register selftest i2c bus driver\n")) | 1710 | "could not register selftest i2c bus driver\n")) |
1696 | return ret; | 1711 | return ret; |
1697 | 1712 | ||
1698 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1713 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
1699 | ret = i2c_add_driver(&selftest_i2c_mux_driver); | 1714 | ret = i2c_add_driver(&selftest_i2c_mux_driver); |
1700 | if (selftest(ret == 0, | 1715 | if (selftest(ret == 0, |
1701 | "could not register selftest i2c mux driver\n")) | 1716 | "could not register selftest i2c mux driver\n")) |
@@ -1707,7 +1722,7 @@ static int of_selftest_overlay_i2c_init(void) | |||
1707 | 1722 | ||
1708 | static void of_selftest_overlay_i2c_cleanup(void) | 1723 | static void of_selftest_overlay_i2c_cleanup(void) |
1709 | { | 1724 | { |
1710 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1725 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
1711 | i2c_del_driver(&selftest_i2c_mux_driver); | 1726 | i2c_del_driver(&selftest_i2c_mux_driver); |
1712 | #endif | 1727 | #endif |
1713 | platform_driver_unregister(&selftest_i2c_bus_driver); | 1728 | platform_driver_unregister(&selftest_i2c_bus_driver); |
@@ -1814,7 +1829,7 @@ static void __init of_selftest_overlay(void) | |||
1814 | of_selftest_overlay_10(); | 1829 | of_selftest_overlay_10(); |
1815 | of_selftest_overlay_11(); | 1830 | of_selftest_overlay_11(); |
1816 | 1831 | ||
1817 | #if IS_ENABLED(CONFIG_I2C) | 1832 | #if IS_BUILTIN(CONFIG_I2C) |
1818 | if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) | 1833 | if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) |
1819 | goto out; | 1834 | goto out; |
1820 | 1835 | ||
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index 1ec694a52379..464bf492ee2a 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c | |||
@@ -80,7 +80,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, | |||
80 | if (err) | 80 | if (err) |
81 | return err; | 81 | return err; |
82 | 82 | ||
83 | resource_list_for_each_entry(win, res, list) { | 83 | resource_list_for_each_entry(win, res) { |
84 | struct resource *parent, *res = win->res; | 84 | struct resource *parent, *res = win->res; |
85 | 85 | ||
86 | switch (resource_type(res)) { | 86 | switch (resource_type(res)) { |
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index aab55474dd0d..ee082c0366ec 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c | |||
@@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) | |||
127 | return false; | 127 | return false; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | 130 | static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, |
131 | int offset) | 131 | int offset) |
132 | { | 132 | { |
133 | struct xgene_pcie_port *port = bus->sysdata; | 133 | struct xgene_pcie_port *port = bus->sysdata; |
@@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | |||
137 | return NULL; | 137 | return NULL; |
138 | 138 | ||
139 | xgene_pcie_set_rtdid_reg(bus, devfn); | 139 | xgene_pcie_set_rtdid_reg(bus, devfn); |
140 | return xgene_pcie_get_cfg_base(bus); | 140 | return xgene_pcie_get_cfg_base(bus) + offset; |
141 | } | 141 | } |
142 | 142 | ||
143 | static struct pci_ops xgene_pcie_ops = { | 143 | static struct pci_ops xgene_pcie_ops = { |
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 1f4ea6f2d910..2e9f84fdd9ce 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c | |||
@@ -342,7 +342,7 @@ static const struct irq_domain_ops msi_domain_ops = { | |||
342 | .map = dw_pcie_msi_map, | 342 | .map = dw_pcie_msi_map, |
343 | }; | 343 | }; |
344 | 344 | ||
345 | int __init dw_pcie_host_init(struct pcie_port *pp) | 345 | int dw_pcie_host_init(struct pcie_port *pp) |
346 | { | 346 | { |
347 | struct device_node *np = pp->dev->of_node; | 347 | struct device_node *np = pp->dev->of_node; |
348 | struct platform_device *pdev = to_platform_device(pp->dev); | 348 | struct platform_device *pdev = to_platform_device(pp->dev); |
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c index 866465fd3dbf..020d78890719 100644 --- a/drivers/pci/host/pcie-spear13xx.c +++ b/drivers/pci/host/pcie-spear13xx.c | |||
@@ -269,7 +269,7 @@ static struct pcie_host_ops spear13xx_pcie_host_ops = { | |||
269 | .host_init = spear13xx_pcie_host_init, | 269 | .host_init = spear13xx_pcie_host_init, |
270 | }; | 270 | }; |
271 | 271 | ||
272 | static int __init spear13xx_add_pcie_port(struct pcie_port *pp, | 272 | static int spear13xx_add_pcie_port(struct pcie_port *pp, |
273 | struct platform_device *pdev) | 273 | struct platform_device *pdev) |
274 | { | 274 | { |
275 | struct device *dev = &pdev->dev; | 275 | struct device *dev = &pdev->dev; |
@@ -299,7 +299,7 @@ static int __init spear13xx_add_pcie_port(struct pcie_port *pp, | |||
299 | return 0; | 299 | return 0; |
300 | } | 300 | } |
301 | 301 | ||
302 | static int __init spear13xx_pcie_probe(struct platform_device *pdev) | 302 | static int spear13xx_pcie_probe(struct platform_device *pdev) |
303 | { | 303 | { |
304 | struct spear13xx_pcie *spear13xx_pcie; | 304 | struct spear13xx_pcie *spear13xx_pcie; |
305 | struct pcie_port *pp; | 305 | struct pcie_port *pp; |
@@ -370,7 +370,7 @@ static const struct of_device_id spear13xx_pcie_of_match[] = { | |||
370 | }; | 370 | }; |
371 | MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match); | 371 | MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match); |
372 | 372 | ||
373 | static struct platform_driver spear13xx_pcie_driver __initdata = { | 373 | static struct platform_driver spear13xx_pcie_driver = { |
374 | .probe = spear13xx_pcie_probe, | 374 | .probe = spear13xx_pcie_probe, |
375 | .driver = { | 375 | .driver = { |
376 | .name = "spear-pcie", | 376 | .name = "spear-pcie", |
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 7d48ecae6695..788db48dbbad 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c | |||
@@ -286,11 +286,12 @@ int cpci_configure_slot(struct slot *slot) | |||
286 | } | 286 | } |
287 | parent = slot->dev->bus; | 287 | parent = slot->dev->bus; |
288 | 288 | ||
289 | list_for_each_entry(dev, &parent->devices, bus_list) | 289 | list_for_each_entry(dev, &parent->devices, bus_list) { |
290 | if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn)) | 290 | if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn)) |
291 | continue; | 291 | continue; |
292 | if (pci_is_bridge(dev)) | 292 | if (pci_is_bridge(dev)) |
293 | pci_hp_add_bridge(dev); | 293 | pci_hp_add_bridge(dev); |
294 | } | ||
294 | 295 | ||
295 | 296 | ||
296 | pci_assign_unassigned_bridge_resources(parent->self); | 297 | pci_assign_unassigned_bridge_resources(parent->self); |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 489063987325..c93fbe76d281 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -248,6 +248,9 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) | |||
248 | acpi_handle handle, phandle; | 248 | acpi_handle handle, phandle; |
249 | struct pci_bus *pbus; | 249 | struct pci_bus *pbus; |
250 | 250 | ||
251 | if (acpi_pci_disabled) | ||
252 | return -ENODEV; | ||
253 | |||
251 | handle = NULL; | 254 | handle = NULL; |
252 | for (pbus = dev->bus; pbus; pbus = pbus->parent) { | 255 | for (pbus = dev->bus; pbus; pbus = pbus->parent) { |
253 | handle = acpi_pci_get_bridge_handle(pbus); | 256 | handle = acpi_pci_get_bridge_handle(pbus); |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index aa012fb3834b..312f23a8429c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev, | |||
521 | struct pci_dev *pdev = to_pci_dev(dev); | 521 | struct pci_dev *pdev = to_pci_dev(dev); |
522 | char *driver_override, *old = pdev->driver_override, *cp; | 522 | char *driver_override, *old = pdev->driver_override, *cp; |
523 | 523 | ||
524 | if (count > PATH_MAX) | 524 | /* We need to keep extra room for a newline */ |
525 | if (count >= (PAGE_SIZE - 1)) | ||
525 | return -EINVAL; | 526 | return -EINVAL; |
526 | 527 | ||
527 | driver_override = kstrndup(buf, count, GFP_KERNEL); | 528 | driver_override = kstrndup(buf, count, GFP_KERNEL); |
@@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev, | |||
549 | { | 550 | { |
550 | struct pci_dev *pdev = to_pci_dev(dev); | 551 | struct pci_dev *pdev = to_pci_dev(dev); |
551 | 552 | ||
552 | return sprintf(buf, "%s\n", pdev->driver_override); | 553 | return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); |
553 | } | 554 | } |
554 | static DEVICE_ATTR_RW(driver_override); | 555 | static DEVICE_ATTR_RW(driver_override); |
555 | 556 | ||
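The driver_override changes bound both directions against the sysfs page: the store now rejects anything that could not be echoed back with a trailing newline from one page (count >= PAGE_SIZE - 1), and the show uses snprintf() so an oversized value cannot overrun the buffer. A userspace sketch of that store/show discipline (BUF_SIZE stands in for PAGE_SIZE; the error code and names are illustrative):

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 4096   /* stand-in for PAGE_SIZE */

static char override[BUF_SIZE];

/* store: reject input that could not be echoed back (value + '\n' + NUL)
 * from a single BUF_SIZE page, mirroring the count >= PAGE_SIZE - 1 check. */
static int override_store(const char *buf, size_t count)
{
    if (count >= BUF_SIZE - 1)
        return -22;                  /* -EINVAL */

    memcpy(override, buf, count);
    override[count] = '\0';

    /* sysfs writes often carry a trailing newline; strip it. */
    if (count && override[count - 1] == '\n')
        override[count - 1] = '\0';
    return 0;
}

/* show: bounded formatting, so the output can never overrun the page. */
static int override_show(char *out)
{
    return snprintf(out, BUF_SIZE, "%s\n", override);
}

int main(void)
{
    char page[BUF_SIZE];

    override_store("vfio-pci\n", 9);
    override_show(page);
    fputs(page, stdout);             /* prints "vfio-pci" and a newline */
    return 0;
}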
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index c6849d9e86ce..167fe411ce2e 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c | |||
@@ -132,16 +132,8 @@ static const char *aer_agent_string[] = { | |||
132 | static void __print_tlp_header(struct pci_dev *dev, | 132 | static void __print_tlp_header(struct pci_dev *dev, |
133 | struct aer_header_log_regs *t) | 133 | struct aer_header_log_regs *t) |
134 | { | 134 | { |
135 | unsigned char *tlp = (unsigned char *)&t; | 135 | dev_err(&dev->dev, " TLP Header: %08x %08x %08x %08x\n", |
136 | 136 | t->dw0, t->dw1, t->dw2, t->dw3); | |
137 | dev_err(&dev->dev, " TLP Header:" | ||
138 | " %02x%02x%02x%02x %02x%02x%02x%02x" | ||
139 | " %02x%02x%02x%02x %02x%02x%02x%02x\n", | ||
140 | *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, | ||
141 | *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), | ||
142 | *(tlp + 11), *(tlp + 10), *(tlp + 9), | ||
143 | *(tlp + 8), *(tlp + 15), *(tlp + 14), | ||
144 | *(tlp + 13), *(tlp + 12)); | ||
145 | } | 137 | } |
146 | 138 | ||
147 | static void __aer_print_error(struct pci_dev *dev, | 139 | static void __aer_print_error(struct pci_dev *dev, |
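The __print_tlp_header() fix is worth a second look: the old code took the address of the t parameter itself ((unsigned char *)&t), so it dumped the bytes of the local pointer variable rather than the TLP header it points to, and the kernel version then kept reading 16 bytes that ran past the pointer into stack contents. Printing the four dwords directly is both correct and shorter. A standalone illustration of the difference (the header values are made up):

#include <stdio.h>

struct header_log {
    unsigned int dw0, dw1, dw2, dw3;
};

/* Buggy: takes the address of the pointer variable itself, so it dumps
 * the pointer's own bytes, not the log it points to. */
static void print_buggy(struct header_log *t)
{
    unsigned char *p = (unsigned char *)&t;     /* &t, not t */

    printf("buggy: %02x%02x%02x%02x ...\n", p[3], p[2], p[1], p[0]);
}

/* Fixed: print the four dwords of the structure directly. */
static void print_fixed(struct header_log *t)
{
    printf("fixed: %08x %08x %08x %08x\n", t->dw0, t->dw1, t->dw2, t->dw3);
}

int main(void)
{
    struct header_log log = { 0x4a000001, 0x0100000f, 0x01010000, 0xdeadbeef };

    print_buggy(&log);
    print_fixed(&log);
    return 0;
}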
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index 3bb49252a098..45f67c63d385 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig | |||
@@ -69,8 +69,7 @@ config YENTA | |||
69 | tristate "CardBus yenta-compatible bridge support" | 69 | tristate "CardBus yenta-compatible bridge support" |
70 | depends on PCI | 70 | depends on PCI |
71 | select CARDBUS if !EXPERT | 71 | select CARDBUS if !EXPERT |
72 | select PCCARD_NONSTATIC if PCMCIA != n && ISA | 72 | select PCCARD_NONSTATIC if PCMCIA != n |
73 | select PCCARD_PCI if PCMCIA !=n && !ISA | ||
74 | ---help--- | 73 | ---help--- |
75 | This option enables support for CardBus host bridges. Virtually | 74 | This option enables support for CardBus host bridges. Virtually |
76 | all modern PCMCIA bridges are CardBus compatible. A "bridge" is | 75 | all modern PCMCIA bridges are CardBus compatible. A "bridge" is |
@@ -110,8 +109,7 @@ config YENTA_TOSHIBA | |||
110 | config PD6729 | 109 | config PD6729 |
111 | tristate "Cirrus PD6729 compatible bridge support" | 110 | tristate "Cirrus PD6729 compatible bridge support" |
112 | depends on PCMCIA && PCI | 111 | depends on PCMCIA && PCI |
113 | select PCCARD_NONSTATIC if PCMCIA != n && ISA | 112 | select PCCARD_NONSTATIC |
114 | select PCCARD_PCI if PCMCIA !=n && !ISA | ||
115 | help | 113 | help |
116 | This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge | 114 | This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge |
117 | device, found in some older laptops and PCMCIA card readers. | 115 | device, found in some older laptops and PCMCIA card readers. |
@@ -119,8 +117,7 @@ config PD6729 | |||
119 | config I82092 | 117 | config I82092 |
120 | tristate "i82092 compatible bridge support" | 118 | tristate "i82092 compatible bridge support" |
121 | depends on PCMCIA && PCI | 119 | depends on PCMCIA && PCI |
122 | select PCCARD_NONSTATIC if PCMCIA != n && ISA | 120 | select PCCARD_NONSTATIC |
123 | select PCCARD_PCI if PCMCIA !=n && !ISA | ||
124 | help | 121 | help |
125 | This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, | 122 | This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, |
126 | found in some older laptops and more commonly in evaluation boards for the | 123 | found in some older laptops and more commonly in evaluation boards for the |
@@ -291,9 +288,6 @@ config ELECTRA_CF | |||
291 | Say Y here to support the CompactFlash controller on the | 288 | Say Y here to support the CompactFlash controller on the |
292 | PA Semi Electra eval board. | 289 | PA Semi Electra eval board. |
293 | 290 | ||
294 | config PCCARD_PCI | ||
295 | bool | ||
296 | |||
297 | config PCCARD_NONSTATIC | 291 | config PCCARD_NONSTATIC |
298 | bool | 292 | bool |
299 | 293 | ||
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index f1a7ca04d89e..27e94b30cf96 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile | |||
@@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o | |||
12 | pcmcia_rsrc-y += rsrc_mgr.o | 12 | pcmcia_rsrc-y += rsrc_mgr.o |
13 | pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o | 13 | pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o |
14 | pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o | 14 | pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o |
15 | pcmcia_rsrc-$(CONFIG_PCCARD_PCI) += rsrc_pci.o | ||
16 | obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o | 15 | obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o |
17 | 16 | ||
18 | 17 | ||
diff --git a/drivers/pcmcia/rsrc_pci.c b/drivers/pcmcia/rsrc_pci.c deleted file mode 100644 index 1f67b3ba70fb..000000000000 --- a/drivers/pcmcia/rsrc_pci.c +++ /dev/null | |||
@@ -1,173 +0,0 @@ | |||
1 | #include <linux/slab.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/pci.h> | ||
5 | |||
6 | #include <pcmcia/ss.h> | ||
7 | #include <pcmcia/cistpl.h> | ||
8 | #include "cs_internal.h" | ||
9 | |||
10 | |||
11 | struct pcmcia_align_data { | ||
12 | unsigned long mask; | ||
13 | unsigned long offset; | ||
14 | }; | ||
15 | |||
16 | static resource_size_t pcmcia_align(void *align_data, | ||
17 | const struct resource *res, | ||
18 | resource_size_t size, resource_size_t align) | ||
19 | { | ||
20 | struct pcmcia_align_data *data = align_data; | ||
21 | resource_size_t start; | ||
22 | |||
23 | start = (res->start & ~data->mask) + data->offset; | ||
24 | if (start < res->start) | ||
25 | start += data->mask + 1; | ||
26 | return start; | ||
27 | } | ||
28 | |||
29 | static struct resource *find_io_region(struct pcmcia_socket *s, | ||
30 | unsigned long base, int num, | ||
31 | unsigned long align) | ||
32 | { | ||
33 | struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO, | ||
34 | dev_name(&s->dev)); | ||
35 | struct pcmcia_align_data data; | ||
36 | int ret; | ||
37 | |||
38 | data.mask = align - 1; | ||
39 | data.offset = base & data.mask; | ||
40 | |||
41 | ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, | ||
42 | base, 0, pcmcia_align, &data); | ||
43 | if (ret != 0) { | ||
44 | kfree(res); | ||
45 | res = NULL; | ||
46 | } | ||
47 | return res; | ||
48 | } | ||
49 | |||
50 | static int res_pci_find_io(struct pcmcia_socket *s, unsigned int attr, | ||
51 | unsigned int *base, unsigned int num, | ||
52 | unsigned int align, struct resource **parent) | ||
53 | { | ||
54 | int i, ret = 0; | ||
55 | |||
56 | /* Check for an already-allocated window that must conflict with | ||
57 | * what was asked for. It is a hack because it does not catch all | ||
58 | * potential conflicts, just the most obvious ones. | ||
59 | */ | ||
60 | for (i = 0; i < MAX_IO_WIN; i++) { | ||
61 | if (!s->io[i].res) | ||
62 | continue; | ||
63 | |||
64 | if (!*base) | ||
65 | continue; | ||
66 | |||
67 | if ((s->io[i].res->start & (align-1)) == *base) | ||
68 | return -EBUSY; | ||
69 | } | ||
70 | |||
71 | for (i = 0; i < MAX_IO_WIN; i++) { | ||
72 | struct resource *res = s->io[i].res; | ||
73 | unsigned int try; | ||
74 | |||
75 | if (res && (res->flags & IORESOURCE_BITS) != | ||
76 | (attr & IORESOURCE_BITS)) | ||
77 | continue; | ||
78 | |||
79 | if (!res) { | ||
80 | if (align == 0) | ||
81 | align = 0x10000; | ||
82 | |||
83 | res = s->io[i].res = find_io_region(s, *base, num, | ||
84 | align); | ||
85 | if (!res) | ||
86 | return -EINVAL; | ||
87 | |||
88 | *base = res->start; | ||
89 | s->io[i].res->flags = | ||
90 | ((res->flags & ~IORESOURCE_BITS) | | ||
91 | (attr & IORESOURCE_BITS)); | ||
92 | s->io[i].InUse = num; | ||
93 | *parent = res; | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | /* Try to extend top of window */ | ||
98 | try = res->end + 1; | ||
99 | if ((*base == 0) || (*base == try)) { | ||
100 | ret = adjust_resource(s->io[i].res, res->start, | ||
101 | resource_size(res) + num); | ||
102 | if (ret) | ||
103 | continue; | ||
104 | *base = try; | ||
105 | s->io[i].InUse += num; | ||
106 | *parent = res; | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | /* Try to extend bottom of window */ | ||
111 | try = res->start - num; | ||
112 | if ((*base == 0) || (*base == try)) { | ||
113 | ret = adjust_resource(s->io[i].res, | ||
114 | res->start - num, | ||
115 | resource_size(res) + num); | ||
116 | if (ret) | ||
117 | continue; | ||
118 | *base = try; | ||
119 | s->io[i].InUse += num; | ||
120 | *parent = res; | ||
121 | return 0; | ||
122 | } | ||
123 | } | ||
124 | return -EINVAL; | ||
125 | } | ||
126 | |||
127 | static struct resource *res_pci_find_mem(u_long base, u_long num, | ||
128 | u_long align, int low, struct pcmcia_socket *s) | ||
129 | { | ||
130 | struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM, | ||
131 | dev_name(&s->dev)); | ||
132 | struct pcmcia_align_data data; | ||
133 | unsigned long min; | ||
134 | int ret; | ||
135 | |||
136 | if (align < 0x20000) | ||
137 | align = 0x20000; | ||
138 | data.mask = align - 1; | ||
139 | data.offset = base & data.mask; | ||
140 | |||
141 | min = 0; | ||
142 | if (!low) | ||
143 | min = 0x100000UL; | ||
144 | |||
145 | ret = pci_bus_alloc_resource(s->cb_dev->bus, | ||
146 | res, num, 1, min, 0, | ||
147 | pcmcia_align, &data); | ||
148 | |||
149 | if (ret != 0) { | ||
150 | kfree(res); | ||
151 | res = NULL; | ||
152 | } | ||
153 | return res; | ||
154 | } | ||
155 | |||
156 | |||
157 | static int res_pci_init(struct pcmcia_socket *s) | ||
158 | { | ||
159 | if (!s->cb_dev || !(s->features & SS_CAP_PAGE_REGS)) { | ||
160 | dev_err(&s->dev, "not supported by res_pci\n"); | ||
161 | return -EOPNOTSUPP; | ||
162 | } | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | struct pccard_resource_ops pccard_nonstatic_ops = { | ||
167 | .validate_mem = NULL, | ||
168 | .find_io = res_pci_find_io, | ||
169 | .find_mem = res_pci_find_mem, | ||
170 | .init = res_pci_init, | ||
171 | .exit = NULL, | ||
172 | }; | ||
173 | EXPORT_SYMBOL(pccard_nonstatic_ops); | ||
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c index 7c99ca256f05..8ccc3952c13d 100644 --- a/drivers/phy/phy-armada375-usb2.c +++ b/drivers/phy/phy-armada375-usb2.c | |||
@@ -37,7 +37,7 @@ static int armada375_usb_phy_init(struct phy *phy) | |||
37 | struct armada375_cluster_phy *cluster_phy; | 37 | struct armada375_cluster_phy *cluster_phy; |
38 | u32 reg; | 38 | u32 reg; |
39 | 39 | ||
40 | cluster_phy = dev_get_drvdata(phy->dev.parent); | 40 | cluster_phy = phy_get_drvdata(phy); |
41 | if (!cluster_phy) | 41 | if (!cluster_phy) |
42 | return -ENODEV; | 42 | return -ENODEV; |
43 | 43 | ||
@@ -131,6 +131,7 @@ static int armada375_usb_phy_probe(struct platform_device *pdev) | |||
131 | cluster_phy->reg = usb_cluster_base; | 131 | cluster_phy->reg = usb_cluster_base; |
132 | 132 | ||
133 | dev_set_drvdata(dev, cluster_phy); | 133 | dev_set_drvdata(dev, cluster_phy); |
134 | phy_set_drvdata(phy, cluster_phy); | ||
134 | 135 | ||
135 | phy_provider = devm_of_phy_provider_register(&pdev->dev, | 136 | phy_provider = devm_of_phy_provider_register(&pdev->dev, |
136 | armada375_usb_phy_xlate); | 137 | armada375_usb_phy_xlate); |
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index a12d35338313..3791838f4bd4 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c | |||
@@ -52,7 +52,9 @@ static void devm_phy_consume(struct device *dev, void *res) | |||
52 | 52 | ||
53 | static int devm_phy_match(struct device *dev, void *res, void *match_data) | 53 | static int devm_phy_match(struct device *dev, void *res, void *match_data) |
54 | { | 54 | { |
55 | return res == match_data; | 55 | struct phy **phy = res; |
56 | |||
57 | return *phy == match_data; | ||
56 | } | 58 | } |
57 | 59 | ||
58 | /** | 60 | /** |
@@ -223,6 +225,7 @@ int phy_init(struct phy *phy) | |||
223 | ret = phy_pm_runtime_get_sync(phy); | 225 | ret = phy_pm_runtime_get_sync(phy); |
224 | if (ret < 0 && ret != -ENOTSUPP) | 226 | if (ret < 0 && ret != -ENOTSUPP) |
225 | return ret; | 227 | return ret; |
228 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
226 | 229 | ||
227 | mutex_lock(&phy->mutex); | 230 | mutex_lock(&phy->mutex); |
228 | if (phy->init_count == 0 && phy->ops->init) { | 231 | if (phy->init_count == 0 && phy->ops->init) { |
@@ -231,8 +234,6 @@ int phy_init(struct phy *phy) | |||
231 | dev_err(&phy->dev, "phy init failed --> %d\n", ret); | 234 | dev_err(&phy->dev, "phy init failed --> %d\n", ret); |
232 | goto out; | 235 | goto out; |
233 | } | 236 | } |
234 | } else { | ||
235 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
236 | } | 237 | } |
237 | ++phy->init_count; | 238 | ++phy->init_count; |
238 | 239 | ||
@@ -253,6 +254,7 @@ int phy_exit(struct phy *phy) | |||
253 | ret = phy_pm_runtime_get_sync(phy); | 254 | ret = phy_pm_runtime_get_sync(phy); |
254 | if (ret < 0 && ret != -ENOTSUPP) | 255 | if (ret < 0 && ret != -ENOTSUPP) |
255 | return ret; | 256 | return ret; |
257 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
256 | 258 | ||
257 | mutex_lock(&phy->mutex); | 259 | mutex_lock(&phy->mutex); |
258 | if (phy->init_count == 1 && phy->ops->exit) { | 260 | if (phy->init_count == 1 && phy->ops->exit) { |
@@ -287,6 +289,7 @@ int phy_power_on(struct phy *phy) | |||
287 | ret = phy_pm_runtime_get_sync(phy); | 289 | ret = phy_pm_runtime_get_sync(phy); |
288 | if (ret < 0 && ret != -ENOTSUPP) | 290 | if (ret < 0 && ret != -ENOTSUPP) |
289 | return ret; | 291 | return ret; |
292 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
290 | 293 | ||
291 | mutex_lock(&phy->mutex); | 294 | mutex_lock(&phy->mutex); |
292 | if (phy->power_count == 0 && phy->ops->power_on) { | 295 | if (phy->power_count == 0 && phy->ops->power_on) { |
@@ -295,8 +298,6 @@ int phy_power_on(struct phy *phy) | |||
295 | dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); | 298 | dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); |
296 | goto out; | 299 | goto out; |
297 | } | 300 | } |
298 | } else { | ||
299 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
300 | } | 301 | } |
301 | ++phy->power_count; | 302 | ++phy->power_count; |
302 | mutex_unlock(&phy->mutex); | 303 | mutex_unlock(&phy->mutex); |
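The devm_phy_match() fix addresses how devres match callbacks work: res points at the devres-allocated payload, and devm_phy_get() stores a struct phy * in that payload, so the callback has to dereference it before comparing against the phy being released; comparing res itself never matched. A toy model of that double indirection (the registry structure here is invented for illustration, not the devres API):

#include <stdio.h>

struct phy { const char *name; };

/* Toy "managed resource": the registry keeps a small allocation whose
 * payload is a pointer to the phy, as devm_phy_get() does. */
struct devres {
    void *data;                        /* points at the stored payload */
};

/* Correct match: res points at the payload (a struct phy *), so it must
 * be dereferenced before comparing with the phy we are looking for. */
static int phy_match(void *res, void *match_data)
{
    struct phy **phy = res;

    return *phy == match_data;
}

int main(void)
{
    struct phy my_phy = { "usb-phy" };
    struct phy *payload = &my_phy;           /* what devres stores */
    struct devres dr = { &payload };

    /* Comparing res itself with the phy (the old bug) never matches. */
    printf("buggy compare: %d\n", dr.data == (void *)&my_phy);
    printf("fixed compare: %d\n", phy_match(dr.data, &my_phy));
    return 0;
}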
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c index f86cbe68ddaf..179cbf9451aa 100644 --- a/drivers/phy/phy-exynos-dp-video.c +++ b/drivers/phy/phy-exynos-dp-video.c | |||
@@ -30,28 +30,13 @@ struct exynos_dp_video_phy { | |||
30 | const struct exynos_dp_video_phy_drvdata *drvdata; | 30 | const struct exynos_dp_video_phy_drvdata *drvdata; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static void exynos_dp_video_phy_pwr_isol(struct exynos_dp_video_phy *state, | ||
34 | unsigned int on) | ||
35 | { | ||
36 | unsigned int val; | ||
37 | |||
38 | if (IS_ERR(state->regs)) | ||
39 | return; | ||
40 | |||
41 | val = on ? 0 : EXYNOS5_PHY_ENABLE; | ||
42 | |||
43 | regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, | ||
44 | EXYNOS5_PHY_ENABLE, val); | ||
45 | } | ||
46 | |||
47 | static int exynos_dp_video_phy_power_on(struct phy *phy) | 33 | static int exynos_dp_video_phy_power_on(struct phy *phy) |
48 | { | 34 | { |
49 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); | 35 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); |
50 | 36 | ||
51 | /* Disable power isolation on DP-PHY */ | 37 | /* Disable power isolation on DP-PHY */ |
52 | exynos_dp_video_phy_pwr_isol(state, 0); | 38 | return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, |
53 | 39 | EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE); | |
54 | return 0; | ||
55 | } | 40 | } |
56 | 41 | ||
57 | static int exynos_dp_video_phy_power_off(struct phy *phy) | 42 | static int exynos_dp_video_phy_power_off(struct phy *phy) |
@@ -59,9 +44,8 @@ static int exynos_dp_video_phy_power_off(struct phy *phy) | |||
59 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); | 44 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); |
60 | 45 | ||
61 | /* Enable power isolation on DP-PHY */ | 46 | /* Enable power isolation on DP-PHY */ |
62 | exynos_dp_video_phy_pwr_isol(state, 1); | 47 | return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, |
63 | 48 | EXYNOS5_PHY_ENABLE, 0); | |
64 | return 0; | ||
65 | } | 49 | } |
66 | 50 | ||
67 | static struct phy_ops exynos_dp_video_phy_ops = { | 51 | static struct phy_ops exynos_dp_video_phy_ops = { |
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c index f017b2f2a54e..df7519a39ba0 100644 --- a/drivers/phy/phy-exynos-mipi-video.c +++ b/drivers/phy/phy-exynos-mipi-video.c | |||
@@ -43,7 +43,6 @@ struct exynos_mipi_video_phy { | |||
43 | } phys[EXYNOS_MIPI_PHYS_NUM]; | 43 | } phys[EXYNOS_MIPI_PHYS_NUM]; |
44 | spinlock_t slock; | 44 | spinlock_t slock; |
45 | void __iomem *regs; | 45 | void __iomem *regs; |
46 | struct mutex mutex; | ||
47 | struct regmap *regmap; | 46 | struct regmap *regmap; |
48 | }; | 47 | }; |
49 | 48 | ||
@@ -59,8 +58,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, | |||
59 | else | 58 | else |
60 | reset = EXYNOS4_MIPI_PHY_SRESETN; | 59 | reset = EXYNOS4_MIPI_PHY_SRESETN; |
61 | 60 | ||
62 | if (state->regmap) { | 61 | spin_lock(&state->slock); |
63 | mutex_lock(&state->mutex); | 62 | |
63 | if (!IS_ERR(state->regmap)) { | ||
64 | regmap_read(state->regmap, offset, &val); | 64 | regmap_read(state->regmap, offset, &val); |
65 | if (on) | 65 | if (on) |
66 | val |= reset; | 66 | val |= reset; |
@@ -72,11 +72,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, | |||
72 | else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) | 72 | else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) |
73 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; | 73 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; |
74 | regmap_write(state->regmap, offset, val); | 74 | regmap_write(state->regmap, offset, val); |
75 | mutex_unlock(&state->mutex); | ||
76 | } else { | 75 | } else { |
77 | addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); | 76 | addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); |
78 | 77 | ||
79 | spin_lock(&state->slock); | ||
80 | val = readl(addr); | 78 | val = readl(addr); |
81 | if (on) | 79 | if (on) |
82 | val |= reset; | 80 | val |= reset; |
@@ -90,9 +88,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, | |||
90 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; | 88 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; |
91 | 89 | ||
92 | writel(val, addr); | 90 | writel(val, addr); |
93 | spin_unlock(&state->slock); | ||
94 | } | 91 | } |
95 | 92 | ||
93 | spin_unlock(&state->slock); | ||
96 | return 0; | 94 | return 0; |
97 | } | 95 | } |
98 | 96 | ||
@@ -158,7 +156,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev) | |||
158 | 156 | ||
159 | dev_set_drvdata(dev, state); | 157 | dev_set_drvdata(dev, state); |
160 | spin_lock_init(&state->slock); | 158 | spin_lock_init(&state->slock); |
161 | mutex_init(&state->mutex); | ||
162 | 159 | ||
163 | for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { | 160 | for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { |
164 | struct phy *phy = devm_phy_create(dev, NULL, | 161 | struct phy *phy = devm_phy_create(dev, NULL, |
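Note: the mipi-video hunks drop the per-device mutex and let the existing spinlock cover both the regmap and raw MMIO paths, and they test the regmap pointer with IS_ERR() rather than NULL. A sketch of the resulting single-lock read-modify-write, with example_* names standing in for the driver's own and assuming an MMIO-backed (non-sleeping) regmap:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>

struct example_mipi_phy {
	spinlock_t slock;
	void __iomem *regs;
	struct regmap *regmap;
};

static int example_set_phy_bit(struct example_mipi_phy *state,
			       unsigned int offset, u32 bit, bool on)
{
	u32 val;

	spin_lock(&state->slock);

	if (!IS_ERR(state->regmap)) {
		/* MMIO-backed regmap assumed, so these calls do not sleep. */
		regmap_read(state->regmap, offset, &val);
		val = on ? (val | bit) : (val & ~bit);
		regmap_write(state->regmap, offset, val);
	} else {
		val = readl(state->regs + offset);
		val = on ? (val | bit) : (val & ~bit);
		writel(val, state->regs + offset);
	}

	spin_unlock(&state->slock);
	return 0;
}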
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c index 236a52ad94eb..f30bbb0fb3b2 100644 --- a/drivers/phy/phy-exynos4210-usb2.c +++ b/drivers/phy/phy-exynos4210-usb2.c | |||
@@ -250,7 +250,6 @@ static const struct samsung_usb2_common_phy exynos4210_phys[] = { | |||
250 | .power_on = exynos4210_power_on, | 250 | .power_on = exynos4210_power_on, |
251 | .power_off = exynos4210_power_off, | 251 | .power_off = exynos4210_power_off, |
252 | }, | 252 | }, |
253 | {}, | ||
254 | }; | 253 | }; |
255 | 254 | ||
256 | const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { | 255 | const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { |
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c index 0b9de88579b1..765da90a536f 100644 --- a/drivers/phy/phy-exynos4x12-usb2.c +++ b/drivers/phy/phy-exynos4x12-usb2.c | |||
@@ -361,7 +361,6 @@ static const struct samsung_usb2_common_phy exynos4x12_phys[] = { | |||
361 | .power_on = exynos4x12_power_on, | 361 | .power_on = exynos4x12_power_on, |
362 | .power_off = exynos4x12_power_off, | 362 | .power_off = exynos4x12_power_off, |
363 | }, | 363 | }, |
364 | {}, | ||
365 | }; | 364 | }; |
366 | 365 | ||
367 | const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { | 366 | const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { |
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c index 04374018425f..e2a0be750ad9 100644 --- a/drivers/phy/phy-exynos5-usbdrd.c +++ b/drivers/phy/phy-exynos5-usbdrd.c | |||
@@ -531,7 +531,7 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev, | |||
531 | { | 531 | { |
532 | struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); | 532 | struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); |
533 | 533 | ||
534 | if (WARN_ON(args->args[0] > EXYNOS5_DRDPHYS_NUM)) | 534 | if (WARN_ON(args->args[0] >= EXYNOS5_DRDPHYS_NUM)) |
535 | return ERR_PTR(-ENODEV); | 535 | return ERR_PTR(-ENODEV); |
536 | 536 | ||
537 | return phy_drd->phys[args->args[0]].phy; | 537 | return phy_drd->phys[args->args[0]].phy; |
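Note: the usbdrd change is a one-character bounds fix. With EXYNOS5_DRDPHYS_NUM array entries the valid indices are 0..N-1, so the xlate guard must reject an index equal to N as well. A sketch of the corrected guard, using assumed example_* names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/phy/phy.h>

#define EXAMPLE_NUM_PHYS	2	/* assumed channel count */

struct example_drd {
	struct { struct phy *phy; } phys[EXAMPLE_NUM_PHYS];
};

static struct phy *example_xlate(struct device *dev,
				 struct of_phandle_args *args)
{
	struct example_drd *drd = dev_get_drvdata(dev);

	/* Valid cells are 0..N-1; '>' would let index N read one past the end. */
	if (WARN_ON(args->args[0] >= EXAMPLE_NUM_PHYS))
		return ERR_PTR(-ENODEV);

	return drd->phys[args->args[0]].phy;
}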
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c index 1c139aa0d074..2ed1735a076a 100644 --- a/drivers/phy/phy-exynos5250-usb2.c +++ b/drivers/phy/phy-exynos5250-usb2.c | |||
@@ -391,7 +391,6 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = { | |||
391 | .power_on = exynos5250_power_on, | 391 | .power_on = exynos5250_power_on, |
392 | .power_off = exynos5250_power_off, | 392 | .power_off = exynos5250_power_off, |
393 | }, | 393 | }, |
394 | {}, | ||
395 | }; | 394 | }; |
396 | 395 | ||
397 | const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { | 396 | const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { |
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c index 34915b4202f1..d6b22659cac1 100644 --- a/drivers/phy/phy-hix5hd2-sata.c +++ b/drivers/phy/phy-hix5hd2-sata.c | |||
@@ -147,6 +147,9 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev) | |||
147 | return -ENOMEM; | 147 | return -ENOMEM; |
148 | 148 | ||
149 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 149 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
150 | if (!res) | ||
151 | return -EINVAL; | ||
152 | |||
150 | priv->base = devm_ioremap(dev, res->start, resource_size(res)); | 153 | priv->base = devm_ioremap(dev, res->start, resource_size(res)); |
151 | if (!priv->base) | 154 | if (!priv->base) |
152 | return -ENOMEM; | 155 | return -ENOMEM; |
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c index 9b2848e6115d..933435214acc 100644 --- a/drivers/phy/phy-miphy28lp.c +++ b/drivers/phy/phy-miphy28lp.c | |||
@@ -228,6 +228,7 @@ struct miphy28lp_dev { | |||
228 | struct regmap *regmap; | 228 | struct regmap *regmap; |
229 | struct mutex miphy_mutex; | 229 | struct mutex miphy_mutex; |
230 | struct miphy28lp_phy **phys; | 230 | struct miphy28lp_phy **phys; |
231 | int nphys; | ||
231 | }; | 232 | }; |
232 | 233 | ||
233 | struct miphy_initval { | 234 | struct miphy_initval { |
@@ -1116,7 +1117,7 @@ static struct phy *miphy28lp_xlate(struct device *dev, | |||
1116 | return ERR_PTR(-EINVAL); | 1117 | return ERR_PTR(-EINVAL); |
1117 | } | 1118 | } |
1118 | 1119 | ||
1119 | for (index = 0; index < of_get_child_count(dev->of_node); index++) | 1120 | for (index = 0; index < miphy_dev->nphys; index++) |
1120 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { | 1121 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { |
1121 | miphy_phy = miphy_dev->phys[index]; | 1122 | miphy_phy = miphy_dev->phys[index]; |
1122 | break; | 1123 | break; |
@@ -1138,6 +1139,7 @@ static struct phy *miphy28lp_xlate(struct device *dev, | |||
1138 | 1139 | ||
1139 | static struct phy_ops miphy28lp_ops = { | 1140 | static struct phy_ops miphy28lp_ops = { |
1140 | .init = miphy28lp_init, | 1141 | .init = miphy28lp_init, |
1142 | .owner = THIS_MODULE, | ||
1141 | }; | 1143 | }; |
1142 | 1144 | ||
1143 | static int miphy28lp_probe_resets(struct device_node *node, | 1145 | static int miphy28lp_probe_resets(struct device_node *node, |
@@ -1200,16 +1202,15 @@ static int miphy28lp_probe(struct platform_device *pdev) | |||
1200 | struct miphy28lp_dev *miphy_dev; | 1202 | struct miphy28lp_dev *miphy_dev; |
1201 | struct phy_provider *provider; | 1203 | struct phy_provider *provider; |
1202 | struct phy *phy; | 1204 | struct phy *phy; |
1203 | int chancount, port = 0; | 1205 | int ret, port = 0; |
1204 | int ret; | ||
1205 | 1206 | ||
1206 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); | 1207 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); |
1207 | if (!miphy_dev) | 1208 | if (!miphy_dev) |
1208 | return -ENOMEM; | 1209 | return -ENOMEM; |
1209 | 1210 | ||
1210 | chancount = of_get_child_count(np); | 1211 | miphy_dev->nphys = of_get_child_count(np); |
1211 | miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, | 1212 | miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys, |
1212 | GFP_KERNEL); | 1213 | sizeof(*miphy_dev->phys), GFP_KERNEL); |
1213 | if (!miphy_dev->phys) | 1214 | if (!miphy_dev->phys) |
1214 | return -ENOMEM; | 1215 | return -ENOMEM; |
1215 | 1216 | ||
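Note: both MiPHY hunks switch the channel array to devm_kcalloc() sized from the element type and cache the child count in nphys, so xlate() no longer re-walks the device tree on every lookup. The old sizeof(phy) happened to be a pointer size too, but sizeof(*array) is self-documenting and kcalloc adds the multiplication overflow check. Minimal sketch of the idiom (example_* names are assumptions):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct example_dev {
	struct example_phy **phys;
	int nphys;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_dev *d;

	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->nphys = of_get_child_count(pdev->dev.of_node);
	/* Element count and element size kept separate; overflow is checked. */
	d->phys = devm_kcalloc(&pdev->dev, d->nphys, sizeof(*d->phys),
			       GFP_KERNEL);
	if (!d->phys)
		return -ENOMEM;

	return 0;
}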
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c index 6c80154e8bff..51b459db9137 100644 --- a/drivers/phy/phy-miphy365x.c +++ b/drivers/phy/phy-miphy365x.c | |||
@@ -150,6 +150,7 @@ struct miphy365x_dev { | |||
150 | struct regmap *regmap; | 150 | struct regmap *regmap; |
151 | struct mutex miphy_mutex; | 151 | struct mutex miphy_mutex; |
152 | struct miphy365x_phy **phys; | 152 | struct miphy365x_phy **phys; |
153 | int nphys; | ||
153 | }; | 154 | }; |
154 | 155 | ||
155 | /* | 156 | /* |
@@ -485,7 +486,7 @@ static struct phy *miphy365x_xlate(struct device *dev, | |||
485 | return ERR_PTR(-EINVAL); | 486 | return ERR_PTR(-EINVAL); |
486 | } | 487 | } |
487 | 488 | ||
488 | for (index = 0; index < of_get_child_count(dev->of_node); index++) | 489 | for (index = 0; index < miphy_dev->nphys; index++) |
489 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { | 490 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { |
490 | miphy_phy = miphy_dev->phys[index]; | 491 | miphy_phy = miphy_dev->phys[index]; |
491 | break; | 492 | break; |
@@ -541,16 +542,15 @@ static int miphy365x_probe(struct platform_device *pdev) | |||
541 | struct miphy365x_dev *miphy_dev; | 542 | struct miphy365x_dev *miphy_dev; |
542 | struct phy_provider *provider; | 543 | struct phy_provider *provider; |
543 | struct phy *phy; | 544 | struct phy *phy; |
544 | int chancount, port = 0; | 545 | int ret, port = 0; |
545 | int ret; | ||
546 | 546 | ||
547 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); | 547 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); |
548 | if (!miphy_dev) | 548 | if (!miphy_dev) |
549 | return -ENOMEM; | 549 | return -ENOMEM; |
550 | 550 | ||
551 | chancount = of_get_child_count(np); | 551 | miphy_dev->nphys = of_get_child_count(np); |
552 | miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, | 552 | miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys, |
553 | GFP_KERNEL); | 553 | sizeof(*miphy_dev->phys), GFP_KERNEL); |
554 | if (!miphy_dev->phys) | 554 | if (!miphy_dev->phys) |
555 | return -ENOMEM; | 555 | return -ENOMEM; |
556 | 556 | ||
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c index efe724f97e02..93252e053a31 100644 --- a/drivers/phy/phy-omap-control.c +++ b/drivers/phy/phy-omap-control.c | |||
@@ -360,7 +360,7 @@ static void __exit omap_control_phy_exit(void) | |||
360 | } | 360 | } |
361 | module_exit(omap_control_phy_exit); | 361 | module_exit(omap_control_phy_exit); |
362 | 362 | ||
363 | MODULE_ALIAS("platform: omap_control_phy"); | 363 | MODULE_ALIAS("platform:omap_control_phy"); |
364 | MODULE_AUTHOR("Texas Instruments Inc."); | 364 | MODULE_AUTHOR("Texas Instruments Inc."); |
365 | MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); | 365 | MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); |
366 | MODULE_LICENSE("GPL v2"); | 366 | MODULE_LICENSE("GPL v2"); |
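Note: the alias fix matters because platform-bus uevents report "MODALIAS=platform:<drvname>" and module aliases are matched against that string literally, so the stray space after the colon kept the module from autoloading. Illustration:

/* Never matched: the uevent string has no space after the colon. */
/* MODULE_ALIAS("platform: omap_control_phy"); */

/* Matches MODALIAS=platform:omap_control_phy emitted by the platform bus. */
MODULE_ALIAS("platform:omap_control_phy");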
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c index 6f4aef3db248..4757e765696a 100644 --- a/drivers/phy/phy-omap-usb2.c +++ b/drivers/phy/phy-omap-usb2.c | |||
@@ -296,10 +296,11 @@ static int omap_usb2_probe(struct platform_device *pdev) | |||
296 | dev_warn(&pdev->dev, | 296 | dev_warn(&pdev->dev, |
297 | "found usb_otg_ss_refclk960m, please fix DTS\n"); | 297 | "found usb_otg_ss_refclk960m, please fix DTS\n"); |
298 | } | 298 | } |
299 | } else { | ||
300 | clk_prepare(phy->optclk); | ||
301 | } | 299 | } |
302 | 300 | ||
301 | if (!IS_ERR(phy->optclk)) | ||
302 | clk_prepare(phy->optclk); | ||
303 | |||
303 | usb_add_phy_dev(&phy->phy); | 304 | usb_add_phy_dev(&phy->phy); |
304 | 305 | ||
305 | return 0; | 306 | return 0; |
@@ -383,7 +384,7 @@ static struct platform_driver omap_usb2_driver = { | |||
383 | 384 | ||
384 | module_platform_driver(omap_usb2_driver); | 385 | module_platform_driver(omap_usb2_driver); |
385 | 386 | ||
386 | MODULE_ALIAS("platform: omap_usb2"); | 387 | MODULE_ALIAS("platform:omap_usb2"); |
387 | MODULE_AUTHOR("Texas Instruments Inc."); | 388 | MODULE_AUTHOR("Texas Instruments Inc."); |
388 | MODULE_DESCRIPTION("OMAP USB2 phy driver"); | 389 | MODULE_DESCRIPTION("OMAP USB2 phy driver"); |
389 | MODULE_LICENSE("GPL v2"); | 390 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c index 22011c3b6a4b..7d4c33643768 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c | |||
@@ -61,8 +61,6 @@ static int rockchip_usb_phy_power_off(struct phy *_phy) | |||
61 | return ret; | 61 | return ret; |
62 | 62 | ||
63 | clk_disable_unprepare(phy->clk); | 63 | clk_disable_unprepare(phy->clk); |
64 | if (ret) | ||
65 | return ret; | ||
66 | 64 | ||
67 | return 0; | 65 | return 0; |
68 | } | 66 | } |
@@ -78,8 +76,10 @@ static int rockchip_usb_phy_power_on(struct phy *_phy) | |||
78 | 76 | ||
79 | /* Power up usb phy analog blocks by set siddq 0 */ | 77 | /* Power up usb phy analog blocks by set siddq 0 */ |
80 | ret = rockchip_usb_phy_power(phy, 0); | 78 | ret = rockchip_usb_phy_power(phy, 0); |
81 | if (ret) | 79 | if (ret) { |
80 | clk_disable_unprepare(phy->clk); | ||
82 | return ret; | 81 | return ret; |
82 | } | ||
83 | 83 | ||
84 | return 0; | 84 | return 0; |
85 | } | 85 | } |
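Note: the rockchip hunks remove a dead error check after the void clk_disable_unprepare() and add the missing unwind when powering up the analog blocks fails. A sketch of the unwind pattern, with example_* names and the SIDDQ helper assumed:

#include <linux/clk.h>

struct example_usb_phy {
	struct clk *clk;
};

/* Assumed helper: clearing SIDDQ powers up the PHY analog blocks. */
static int example_phy_power(struct example_usb_phy *phy, bool siddq);

static int example_power_on(struct example_usb_phy *phy)
{
	int ret;

	ret = clk_prepare_enable(phy->clk);
	if (ret)
		return ret;

	ret = example_phy_power(phy, false);
	if (ret) {
		clk_disable_unprepare(phy->clk);	/* undo the step above */
		return ret;
	}

	return 0;
}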
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 95c88f929f27..2ba610b72ca2 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c | |||
@@ -165,15 +165,11 @@ static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy) | |||
165 | cpu_relax(); | 165 | cpu_relax(); |
166 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); | 166 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); |
167 | if (val & PLL_LOCK) | 167 | if (val & PLL_LOCK) |
168 | break; | 168 | return 0; |
169 | } while (!time_after(jiffies, timeout)); | 169 | } while (!time_after(jiffies, timeout)); |
170 | 170 | ||
171 | if (!(val & PLL_LOCK)) { | 171 | dev_err(phy->dev, "DPLL failed to lock\n"); |
172 | dev_err(phy->dev, "DPLL failed to lock\n"); | 172 | return -EBUSY; |
173 | return -EBUSY; | ||
174 | } | ||
175 | |||
176 | return 0; | ||
177 | } | 173 | } |
178 | 174 | ||
179 | static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) | 175 | static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) |
@@ -608,7 +604,7 @@ static struct platform_driver ti_pipe3_driver = { | |||
608 | 604 | ||
609 | module_platform_driver(ti_pipe3_driver); | 605 | module_platform_driver(ti_pipe3_driver); |
610 | 606 | ||
611 | MODULE_ALIAS("platform: ti_pipe3"); | 607 | MODULE_ALIAS("platform:ti_pipe3"); |
612 | MODULE_AUTHOR("Texas Instruments Inc."); | 608 | MODULE_AUTHOR("Texas Instruments Inc."); |
613 | MODULE_DESCRIPTION("TI PIPE3 phy driver"); | 609 | MODULE_DESCRIPTION("TI PIPE3 phy driver"); |
614 | MODULE_LICENSE("GPL v2"); | 610 | MODULE_LICENSE("GPL v2"); |
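Note: the ti-pipe3 change returns success from inside the poll loop and reports the timeout once at the end, the usual shape for a jiffies-based wait. Sketch of the idiom; the register helper, status bits, and 20 ms budget are assumptions:

#include <linux/jiffies.h>

static int example_wait_for_lock(struct example_pipe3 *phy)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(20);
	u32 val;

	do {
		cpu_relax();
		val = example_readl(phy->pll_ctrl_base, EXAMPLE_PLL_STATUS);
		if (val & EXAMPLE_PLL_LOCK)
			return 0;	/* success reported from inside the loop */
	} while (!time_after(jiffies, timeout));

	dev_err(phy->dev, "DPLL failed to lock\n");
	return -EBUSY;
}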
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 8e87f54671f3..bc42d6a8939f 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c | |||
@@ -666,7 +666,6 @@ static int twl4030_usb_probe(struct platform_device *pdev) | |||
666 | twl->dev = &pdev->dev; | 666 | twl->dev = &pdev->dev; |
667 | twl->irq = platform_get_irq(pdev, 0); | 667 | twl->irq = platform_get_irq(pdev, 0); |
668 | twl->vbus_supplied = false; | 668 | twl->vbus_supplied = false; |
669 | twl->linkstat = -EINVAL; | ||
670 | twl->linkstat = OMAP_MUSB_UNKNOWN; | 669 | twl->linkstat = OMAP_MUSB_UNKNOWN; |
671 | 670 | ||
672 | twl->phy.dev = twl->dev; | 671 | twl->phy.dev = twl->dev; |
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c index 29214a36ea28..2263cd010032 100644 --- a/drivers/phy/phy-xgene.c +++ b/drivers/phy/phy-xgene.c | |||
@@ -1704,7 +1704,6 @@ static int xgene_phy_probe(struct platform_device *pdev) | |||
1704 | for (i = 0; i < MAX_LANE; i++) | 1704 | for (i = 0; i < MAX_LANE; i++) |
1705 | ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ | 1705 | ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ |
1706 | 1706 | ||
1707 | ctx->dev = &pdev->dev; | ||
1708 | platform_set_drvdata(pdev, ctx); | 1707 | platform_set_drvdata(pdev, ctx); |
1709 | 1708 | ||
1710 | ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); | 1709 | ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); |
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 5afe03e28b91..2062c224e32f 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c | |||
@@ -66,6 +66,10 @@ | |||
66 | #define BYT_DIR_MASK (BIT(1) | BIT(2)) | 66 | #define BYT_DIR_MASK (BIT(1) | BIT(2)) |
67 | #define BYT_TRIG_MASK (BIT(26) | BIT(25) | BIT(24)) | 67 | #define BYT_TRIG_MASK (BIT(26) | BIT(25) | BIT(24)) |
68 | 68 | ||
69 | #define BYT_CONF0_RESTORE_MASK (BYT_DIRECT_IRQ_EN | BYT_TRIG_MASK | \ | ||
70 | BYT_PIN_MUX) | ||
71 | #define BYT_VAL_RESTORE_MASK (BYT_DIR_MASK | BYT_LEVEL) | ||
72 | |||
69 | #define BYT_NGPIO_SCORE 102 | 73 | #define BYT_NGPIO_SCORE 102 |
70 | #define BYT_NGPIO_NCORE 28 | 74 | #define BYT_NGPIO_NCORE 28 |
71 | #define BYT_NGPIO_SUS 44 | 75 | #define BYT_NGPIO_SUS 44 |
@@ -134,12 +138,18 @@ static struct pinctrl_gpio_range byt_ranges[] = { | |||
134 | }, | 138 | }, |
135 | }; | 139 | }; |
136 | 140 | ||
141 | struct byt_gpio_pin_context { | ||
142 | u32 conf0; | ||
143 | u32 val; | ||
144 | }; | ||
145 | |||
137 | struct byt_gpio { | 146 | struct byt_gpio { |
138 | struct gpio_chip chip; | 147 | struct gpio_chip chip; |
139 | struct platform_device *pdev; | 148 | struct platform_device *pdev; |
140 | spinlock_t lock; | 149 | spinlock_t lock; |
141 | void __iomem *reg_base; | 150 | void __iomem *reg_base; |
142 | struct pinctrl_gpio_range *range; | 151 | struct pinctrl_gpio_range *range; |
152 | struct byt_gpio_pin_context *saved_context; | ||
143 | }; | 153 | }; |
144 | 154 | ||
145 | #define to_byt_gpio(c) container_of(c, struct byt_gpio, chip) | 155 | #define to_byt_gpio(c) container_of(c, struct byt_gpio, chip) |
@@ -158,40 +168,62 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset, | |||
158 | return vg->reg_base + reg_offset + reg; | 168 | return vg->reg_base + reg_offset + reg; |
159 | } | 169 | } |
160 | 170 | ||
161 | static bool is_special_pin(struct byt_gpio *vg, unsigned offset) | 171 | static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset) |
172 | { | ||
173 | void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); | ||
174 | unsigned long flags; | ||
175 | u32 value; | ||
176 | |||
177 | spin_lock_irqsave(&vg->lock, flags); | ||
178 | value = readl(reg); | ||
179 | value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); | ||
180 | writel(value, reg); | ||
181 | spin_unlock_irqrestore(&vg->lock, flags); | ||
182 | } | ||
183 | |||
184 | static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset) | ||
162 | { | 185 | { |
163 | /* SCORE pin 92-93 */ | 186 | /* SCORE pin 92-93 */ |
164 | if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) && | 187 | if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) && |
165 | offset >= 92 && offset <= 93) | 188 | offset >= 92 && offset <= 93) |
166 | return true; | 189 | return 1; |
167 | 190 | ||
168 | /* SUS pin 11-21 */ | 191 | /* SUS pin 11-21 */ |
169 | if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) && | 192 | if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) && |
170 | offset >= 11 && offset <= 21) | 193 | offset >= 11 && offset <= 21) |
171 | return true; | 194 | return 1; |
172 | 195 | ||
173 | return false; | 196 | return 0; |
174 | } | 197 | } |
175 | 198 | ||
176 | static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) | 199 | static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) |
177 | { | 200 | { |
178 | struct byt_gpio *vg = to_byt_gpio(chip); | 201 | struct byt_gpio *vg = to_byt_gpio(chip); |
179 | void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG); | 202 | void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG); |
180 | u32 value; | 203 | u32 value, gpio_mux; |
181 | bool special; | ||
182 | 204 | ||
183 | /* | 205 | /* |
184 | * In most cases, func pin mux 000 means GPIO function. | 206 | * In most cases, func pin mux 000 means GPIO function. |
185 | * But, some pins may have func pin mux 001 represents | 207 | * But, some pins may have func pin mux 001 represents |
186 | * GPIO function. Only allow user to export pin with | 208 | * GPIO function. |
187 | * func pin mux preset as GPIO function by BIOS/FW. | 209 | * |
210 | * Because there are devices out there where some pins were not | ||
211 | * configured correctly we allow changing the mux value from | ||
212 | * request (but print out warning about that). | ||
188 | */ | 213 | */ |
189 | value = readl(reg) & BYT_PIN_MUX; | 214 | value = readl(reg) & BYT_PIN_MUX; |
190 | special = is_special_pin(vg, offset); | 215 | gpio_mux = byt_get_gpio_mux(vg, offset); |
191 | if ((special && value != 1) || (!special && value)) { | 216 | if (WARN_ON(gpio_mux != value)) { |
192 | dev_err(&vg->pdev->dev, | 217 | unsigned long flags; |
193 | "pin %u cannot be used as GPIO.\n", offset); | 218 | |
194 | return -EINVAL; | 219 | spin_lock_irqsave(&vg->lock, flags); |
220 | value = readl(reg) & ~BYT_PIN_MUX; | ||
221 | value |= gpio_mux; | ||
222 | writel(value, reg); | ||
223 | spin_unlock_irqrestore(&vg->lock, flags); | ||
224 | |||
225 | dev_warn(&vg->pdev->dev, | ||
226 | "pin %u forcibly re-configured as GPIO\n", offset); | ||
195 | } | 227 | } |
196 | 228 | ||
197 | pm_runtime_get(&vg->pdev->dev); | 229 | pm_runtime_get(&vg->pdev->dev); |
@@ -202,14 +234,8 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) | |||
202 | static void byt_gpio_free(struct gpio_chip *chip, unsigned offset) | 234 | static void byt_gpio_free(struct gpio_chip *chip, unsigned offset) |
203 | { | 235 | { |
204 | struct byt_gpio *vg = to_byt_gpio(chip); | 236 | struct byt_gpio *vg = to_byt_gpio(chip); |
205 | void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); | ||
206 | u32 value; | ||
207 | |||
208 | /* clear interrupt triggering */ | ||
209 | value = readl(reg); | ||
210 | value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); | ||
211 | writel(value, reg); | ||
212 | 237 | ||
238 | byt_gpio_clear_triggering(vg, offset); | ||
213 | pm_runtime_put(&vg->pdev->dev); | 239 | pm_runtime_put(&vg->pdev->dev); |
214 | } | 240 | } |
215 | 241 | ||
@@ -236,23 +262,13 @@ static int byt_irq_type(struct irq_data *d, unsigned type) | |||
236 | value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | | 262 | value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | |
237 | BYT_TRIG_LVL); | 263 | BYT_TRIG_LVL); |
238 | 264 | ||
239 | switch (type) { | ||
240 | case IRQ_TYPE_LEVEL_HIGH: | ||
241 | value |= BYT_TRIG_LVL; | ||
242 | case IRQ_TYPE_EDGE_RISING: | ||
243 | value |= BYT_TRIG_POS; | ||
244 | break; | ||
245 | case IRQ_TYPE_LEVEL_LOW: | ||
246 | value |= BYT_TRIG_LVL; | ||
247 | case IRQ_TYPE_EDGE_FALLING: | ||
248 | value |= BYT_TRIG_NEG; | ||
249 | break; | ||
250 | case IRQ_TYPE_EDGE_BOTH: | ||
251 | value |= (BYT_TRIG_NEG | BYT_TRIG_POS); | ||
252 | break; | ||
253 | } | ||
254 | writel(value, reg); | 265 | writel(value, reg); |
255 | 266 | ||
267 | if (type & IRQ_TYPE_EDGE_BOTH) | ||
268 | __irq_set_handler_locked(d->irq, handle_edge_irq); | ||
269 | else if (type & IRQ_TYPE_LEVEL_MASK) | ||
270 | __irq_set_handler_locked(d->irq, handle_level_irq); | ||
271 | |||
256 | spin_unlock_irqrestore(&vg->lock, flags); | 272 | spin_unlock_irqrestore(&vg->lock, flags); |
257 | 273 | ||
258 | return 0; | 274 | return 0; |
@@ -410,58 +426,80 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
410 | struct irq_data *data = irq_desc_get_irq_data(desc); | 426 | struct irq_data *data = irq_desc_get_irq_data(desc); |
411 | struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); | 427 | struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); |
412 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 428 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
413 | u32 base, pin, mask; | 429 | u32 base, pin; |
414 | void __iomem *reg; | 430 | void __iomem *reg; |
415 | u32 pending; | 431 | unsigned long pending; |
416 | unsigned virq; | 432 | unsigned virq; |
417 | int looplimit = 0; | ||
418 | 433 | ||
419 | /* check from GPIO controller which pin triggered the interrupt */ | 434 | /* check from GPIO controller which pin triggered the interrupt */ |
420 | for (base = 0; base < vg->chip.ngpio; base += 32) { | 435 | for (base = 0; base < vg->chip.ngpio; base += 32) { |
421 | |||
422 | reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG); | 436 | reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG); |
423 | 437 | pending = readl(reg); | |
424 | while ((pending = readl(reg))) { | 438 | for_each_set_bit(pin, &pending, 32) { |
425 | pin = __ffs(pending); | ||
426 | mask = BIT(pin); | ||
427 | /* Clear before handling so we can't lose an edge */ | ||
428 | writel(mask, reg); | ||
429 | |||
430 | virq = irq_find_mapping(vg->chip.irqdomain, base + pin); | 439 | virq = irq_find_mapping(vg->chip.irqdomain, base + pin); |
431 | generic_handle_irq(virq); | 440 | generic_handle_irq(virq); |
432 | |||
433 | /* In case bios or user sets triggering incorretly a pin | ||
434 | * might remain in "interrupt triggered" state. | ||
435 | */ | ||
436 | if (looplimit++ > 32) { | ||
437 | dev_err(&vg->pdev->dev, | ||
438 | "Gpio %d interrupt flood, disabling\n", | ||
439 | base + pin); | ||
440 | |||
441 | reg = byt_gpio_reg(&vg->chip, base + pin, | ||
442 | BYT_CONF0_REG); | ||
443 | mask = readl(reg); | ||
444 | mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS | | ||
445 | BYT_TRIG_LVL); | ||
446 | writel(mask, reg); | ||
447 | mask = readl(reg); /* flush */ | ||
448 | break; | ||
449 | } | ||
450 | } | 441 | } |
451 | } | 442 | } |
452 | chip->irq_eoi(data); | 443 | chip->irq_eoi(data); |
453 | } | 444 | } |
454 | 445 | ||
446 | static void byt_irq_ack(struct irq_data *d) | ||
447 | { | ||
448 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
449 | struct byt_gpio *vg = to_byt_gpio(gc); | ||
450 | unsigned offset = irqd_to_hwirq(d); | ||
451 | void __iomem *reg; | ||
452 | |||
453 | reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG); | ||
454 | writel(BIT(offset % 32), reg); | ||
455 | } | ||
456 | |||
455 | static void byt_irq_unmask(struct irq_data *d) | 457 | static void byt_irq_unmask(struct irq_data *d) |
456 | { | 458 | { |
459 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
460 | struct byt_gpio *vg = to_byt_gpio(gc); | ||
461 | unsigned offset = irqd_to_hwirq(d); | ||
462 | unsigned long flags; | ||
463 | void __iomem *reg; | ||
464 | u32 value; | ||
465 | |||
466 | spin_lock_irqsave(&vg->lock, flags); | ||
467 | |||
468 | reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); | ||
469 | value = readl(reg); | ||
470 | |||
471 | switch (irqd_get_trigger_type(d)) { | ||
472 | case IRQ_TYPE_LEVEL_HIGH: | ||
473 | value |= BYT_TRIG_LVL; | ||
474 | case IRQ_TYPE_EDGE_RISING: | ||
475 | value |= BYT_TRIG_POS; | ||
476 | break; | ||
477 | case IRQ_TYPE_LEVEL_LOW: | ||
478 | value |= BYT_TRIG_LVL; | ||
479 | case IRQ_TYPE_EDGE_FALLING: | ||
480 | value |= BYT_TRIG_NEG; | ||
481 | break; | ||
482 | case IRQ_TYPE_EDGE_BOTH: | ||
483 | value |= (BYT_TRIG_NEG | BYT_TRIG_POS); | ||
484 | break; | ||
485 | } | ||
486 | |||
487 | writel(value, reg); | ||
488 | |||
489 | spin_unlock_irqrestore(&vg->lock, flags); | ||
457 | } | 490 | } |
458 | 491 | ||
459 | static void byt_irq_mask(struct irq_data *d) | 492 | static void byt_irq_mask(struct irq_data *d) |
460 | { | 493 | { |
494 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
495 | struct byt_gpio *vg = to_byt_gpio(gc); | ||
496 | |||
497 | byt_gpio_clear_triggering(vg, irqd_to_hwirq(d)); | ||
461 | } | 498 | } |
462 | 499 | ||
463 | static struct irq_chip byt_irqchip = { | 500 | static struct irq_chip byt_irqchip = { |
464 | .name = "BYT-GPIO", | 501 | .name = "BYT-GPIO", |
502 | .irq_ack = byt_irq_ack, | ||
465 | .irq_mask = byt_irq_mask, | 503 | .irq_mask = byt_irq_mask, |
466 | .irq_unmask = byt_irq_unmask, | 504 | .irq_unmask = byt_irq_unmask, |
467 | .irq_set_type = byt_irq_type, | 505 | .irq_set_type = byt_irq_type, |
@@ -472,6 +510,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) | |||
472 | { | 510 | { |
473 | void __iomem *reg; | 511 | void __iomem *reg; |
474 | u32 base, value; | 512 | u32 base, value; |
513 | int i; | ||
514 | |||
515 | /* | ||
516 | * Clear interrupt triggers for all pins that are GPIOs and | ||
517 | * do not use direct IRQ mode. This will prevent spurious | ||
518 | * interrupts from misconfigured pins. | ||
519 | */ | ||
520 | for (i = 0; i < vg->chip.ngpio; i++) { | ||
521 | value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG)); | ||
522 | if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) && | ||
523 | !(value & BYT_DIRECT_IRQ_EN)) { | ||
524 | byt_gpio_clear_triggering(vg, i); | ||
525 | dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i); | ||
526 | } | ||
527 | } | ||
475 | 528 | ||
476 | /* clear interrupt status trigger registers */ | 529 | /* clear interrupt status trigger registers */ |
477 | for (base = 0; base < vg->chip.ngpio; base += 32) { | 530 | for (base = 0; base < vg->chip.ngpio; base += 32) { |
@@ -541,6 +594,11 @@ static int byt_gpio_probe(struct platform_device *pdev) | |||
541 | gc->can_sleep = false; | 594 | gc->can_sleep = false; |
542 | gc->dev = dev; | 595 | gc->dev = dev; |
543 | 596 | ||
597 | #ifdef CONFIG_PM_SLEEP | ||
598 | vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio, | ||
599 | sizeof(*vg->saved_context), GFP_KERNEL); | ||
600 | #endif | ||
601 | |||
544 | ret = gpiochip_add(gc); | 602 | ret = gpiochip_add(gc); |
545 | if (ret) { | 603 | if (ret) { |
546 | dev_err(&pdev->dev, "failed adding byt-gpio chip\n"); | 604 | dev_err(&pdev->dev, "failed adding byt-gpio chip\n"); |
@@ -569,6 +627,69 @@ static int byt_gpio_probe(struct platform_device *pdev) | |||
569 | return 0; | 627 | return 0; |
570 | } | 628 | } |
571 | 629 | ||
630 | #ifdef CONFIG_PM_SLEEP | ||
631 | static int byt_gpio_suspend(struct device *dev) | ||
632 | { | ||
633 | struct platform_device *pdev = to_platform_device(dev); | ||
634 | struct byt_gpio *vg = platform_get_drvdata(pdev); | ||
635 | int i; | ||
636 | |||
637 | for (i = 0; i < vg->chip.ngpio; i++) { | ||
638 | void __iomem *reg; | ||
639 | u32 value; | ||
640 | |||
641 | reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG); | ||
642 | value = readl(reg) & BYT_CONF0_RESTORE_MASK; | ||
643 | vg->saved_context[i].conf0 = value; | ||
644 | |||
645 | reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG); | ||
646 | value = readl(reg) & BYT_VAL_RESTORE_MASK; | ||
647 | vg->saved_context[i].val = value; | ||
648 | } | ||
649 | |||
650 | return 0; | ||
651 | } | ||
652 | |||
653 | static int byt_gpio_resume(struct device *dev) | ||
654 | { | ||
655 | struct platform_device *pdev = to_platform_device(dev); | ||
656 | struct byt_gpio *vg = platform_get_drvdata(pdev); | ||
657 | int i; | ||
658 | |||
659 | for (i = 0; i < vg->chip.ngpio; i++) { | ||
660 | void __iomem *reg; | ||
661 | u32 value; | ||
662 | |||
663 | reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG); | ||
664 | value = readl(reg); | ||
665 | if ((value & BYT_CONF0_RESTORE_MASK) != | ||
666 | vg->saved_context[i].conf0) { | ||
667 | value &= ~BYT_CONF0_RESTORE_MASK; | ||
668 | value |= vg->saved_context[i].conf0; | ||
669 | writel(value, reg); | ||
670 | dev_info(dev, "restored pin %d conf0 %#08x", i, value); | ||
671 | } | ||
672 | |||
673 | reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG); | ||
674 | value = readl(reg); | ||
675 | if ((value & BYT_VAL_RESTORE_MASK) != | ||
676 | vg->saved_context[i].val) { | ||
677 | u32 v; | ||
678 | |||
679 | v = value & ~BYT_VAL_RESTORE_MASK; | ||
680 | v |= vg->saved_context[i].val; | ||
681 | if (v != value) { | ||
682 | writel(v, reg); | ||
683 | dev_dbg(dev, "restored pin %d val %#08x\n", | ||
684 | i, v); | ||
685 | } | ||
686 | } | ||
687 | } | ||
688 | |||
689 | return 0; | ||
690 | } | ||
691 | #endif | ||
692 | |||
572 | static int byt_gpio_runtime_suspend(struct device *dev) | 693 | static int byt_gpio_runtime_suspend(struct device *dev) |
573 | { | 694 | { |
574 | return 0; | 695 | return 0; |
@@ -580,8 +701,9 @@ static int byt_gpio_runtime_resume(struct device *dev) | |||
580 | } | 701 | } |
581 | 702 | ||
582 | static const struct dev_pm_ops byt_gpio_pm_ops = { | 703 | static const struct dev_pm_ops byt_gpio_pm_ops = { |
583 | .runtime_suspend = byt_gpio_runtime_suspend, | 704 | SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume) |
584 | .runtime_resume = byt_gpio_runtime_resume, | 705 | SET_RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume, |
706 | NULL) | ||
585 | }; | 707 | }; |
586 | 708 | ||
587 | static const struct acpi_device_id byt_gpio_acpi_match[] = { | 709 | static const struct acpi_device_id byt_gpio_acpi_match[] = { |
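Note: the new baytrail suspend/resume handlers snapshot only the bits named in BYT_CONF0_RESTORE_MASK and BYT_VAL_RESTORE_MASK and write them back if firmware changed them across system sleep; everything else in the registers is left alone. A reduced sketch of that masked save/restore, with example_* names assumed:

#include <linux/io.h>
#include <linux/types.h>

struct example_pin_context {
	u32 conf0;
	u32 val;
};

static void example_pin_suspend(void __iomem *conf0_reg, void __iomem *val_reg,
				struct example_pin_context *ctx,
				u32 conf0_mask, u32 val_mask)
{
	/* Capture only the bits the driver is willing to restore. */
	ctx->conf0 = readl(conf0_reg) & conf0_mask;
	ctx->val = readl(val_reg) & val_mask;
}

static void example_pin_resume(void __iomem *conf0_reg,
			       const struct example_pin_context *ctx,
			       u32 conf0_mask)
{
	u32 v = readl(conf0_reg);

	/* Rewrite only when firmware changed one of the tracked bits. */
	if ((v & conf0_mask) != ctx->conf0)
		writel((v & ~conf0_mask) | ctx->conf0, conf0_reg);
}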
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 3034fd03bced..82f691eeeec4 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
@@ -1226,6 +1226,7 @@ static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
1226 | static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | 1226 | static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset, |
1227 | int value) | 1227 | int value) |
1228 | { | 1228 | { |
1229 | chv_gpio_set(chip, offset, value); | ||
1229 | return pinctrl_gpio_direction_output(chip->base + offset); | 1230 | return pinctrl_gpio_direction_output(chip->base + offset); |
1230 | } | 1231 | } |
1231 | 1232 | ||
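Note: the cherryview one-liner programs the output level before switching the pin to output, so the pad never briefly drives whatever stale value was latched. Sketch of the ordering with a generic gpio_chip callback and an assumed example_gpio_set helper:

#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>

static int example_direction_output(struct gpio_chip *chip, unsigned offset,
				    int value)
{
	example_gpio_set(chip, offset, value);		/* level first... */
	return pinctrl_gpio_direction_output(chip->base + offset); /* ...then direction */
}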
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index f4cd0b9b2438..a4814066ea08 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c | |||
@@ -1477,28 +1477,25 @@ static void gpio_irq_ack(struct irq_data *d) | |||
1477 | /* the interrupt is already cleared before by reading ISR */ | 1477 | /* the interrupt is already cleared before by reading ISR */ |
1478 | } | 1478 | } |
1479 | 1479 | ||
1480 | static unsigned int gpio_irq_startup(struct irq_data *d) | 1480 | static int gpio_irq_request_res(struct irq_data *d) |
1481 | { | 1481 | { |
1482 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); | 1482 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); |
1483 | unsigned pin = d->hwirq; | 1483 | unsigned pin = d->hwirq; |
1484 | int ret; | 1484 | int ret; |
1485 | 1485 | ||
1486 | ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin); | 1486 | ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin); |
1487 | if (ret) { | 1487 | if (ret) |
1488 | dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n", | 1488 | dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n", |
1489 | d->hwirq); | 1489 | d->hwirq); |
1490 | return ret; | 1490 | |
1491 | } | 1491 | return ret; |
1492 | gpio_irq_unmask(d); | ||
1493 | return 0; | ||
1494 | } | 1492 | } |
1495 | 1493 | ||
1496 | static void gpio_irq_shutdown(struct irq_data *d) | 1494 | static void gpio_irq_release_res(struct irq_data *d) |
1497 | { | 1495 | { |
1498 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); | 1496 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); |
1499 | unsigned pin = d->hwirq; | 1497 | unsigned pin = d->hwirq; |
1500 | 1498 | ||
1501 | gpio_irq_mask(d); | ||
1502 | gpiochip_unlock_as_irq(&at91_gpio->chip, pin); | 1499 | gpiochip_unlock_as_irq(&at91_gpio->chip, pin); |
1503 | } | 1500 | } |
1504 | 1501 | ||
@@ -1577,8 +1574,8 @@ void at91_pinctrl_gpio_resume(void) | |||
1577 | static struct irq_chip gpio_irqchip = { | 1574 | static struct irq_chip gpio_irqchip = { |
1578 | .name = "GPIO", | 1575 | .name = "GPIO", |
1579 | .irq_ack = gpio_irq_ack, | 1576 | .irq_ack = gpio_irq_ack, |
1580 | .irq_startup = gpio_irq_startup, | 1577 | .irq_request_resources = gpio_irq_request_res, |
1581 | .irq_shutdown = gpio_irq_shutdown, | 1578 | .irq_release_resources = gpio_irq_release_res, |
1582 | .irq_disable = gpio_irq_mask, | 1579 | .irq_disable = gpio_irq_mask, |
1583 | .irq_mask = gpio_irq_mask, | 1580 | .irq_mask = gpio_irq_mask, |
1584 | .irq_unmask = gpio_irq_unmask, | 1581 | .irq_unmask = gpio_irq_unmask, |
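Note: the at91 hunks move gpiochip_lock_as_irq() from irq_startup/irq_shutdown to the irq_request_resources/irq_release_resources hooks, so locking the GPIO for IRQ use happens when the descriptor is requested and the default startup/shutdown mask handling is kept. Sketch of the hook pair (example_* names assumed):

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static int example_irq_request_res(struct irq_data *d)
{
	struct example_gpio *g = irq_data_get_irq_chip_data(d);

	return gpiochip_lock_as_irq(&g->chip, d->hwirq);
}

static void example_irq_release_res(struct irq_data *d)
{
	struct example_gpio *g = irq_data_get_irq_chip_data(d);

	gpiochip_unlock_as_irq(&g->chip, d->hwirq);
}

static struct irq_chip example_irqchip = {
	.name			= "example-gpio",
	.irq_request_resources	= example_irq_request_res,
	.irq_release_resources	= example_irq_release_res,
};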
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c index 24c5d88f943f..3c68a8e5e0dd 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c | |||
@@ -1011,6 +1011,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = { | |||
1011 | .pins = sun4i_a10_pins, | 1011 | .pins = sun4i_a10_pins, |
1012 | .npins = ARRAY_SIZE(sun4i_a10_pins), | 1012 | .npins = ARRAY_SIZE(sun4i_a10_pins), |
1013 | .irq_banks = 1, | 1013 | .irq_banks = 1, |
1014 | .irq_read_needs_mux = true, | ||
1014 | }; | 1015 | }; |
1015 | 1016 | ||
1016 | static int sun4i_a10_pinctrl_probe(struct platform_device *pdev) | 1017 | static int sun4i_a10_pinctrl_probe(struct platform_device *pdev) |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 3d0744337736..f8e171b76693 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | 30 | ||
31 | #include "../core.h" | 31 | #include "../core.h" |
32 | #include "../../gpio/gpiolib.h" | ||
32 | #include "pinctrl-sunxi.h" | 33 | #include "pinctrl-sunxi.h" |
33 | 34 | ||
34 | static struct irq_chip sunxi_pinctrl_edge_irq_chip; | 35 | static struct irq_chip sunxi_pinctrl_edge_irq_chip; |
@@ -464,10 +465,19 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip, | |||
464 | static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset) | 465 | static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset) |
465 | { | 466 | { |
466 | struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); | 467 | struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); |
467 | |||
468 | u32 reg = sunxi_data_reg(offset); | 468 | u32 reg = sunxi_data_reg(offset); |
469 | u8 index = sunxi_data_offset(offset); | 469 | u8 index = sunxi_data_offset(offset); |
470 | u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK; | 470 | u32 set_mux = pctl->desc->irq_read_needs_mux && |
471 | test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); | ||
472 | u32 val; | ||
473 | |||
474 | if (set_mux) | ||
475 | sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT); | ||
476 | |||
477 | val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK; | ||
478 | |||
479 | if (set_mux) | ||
480 | sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ); | ||
471 | 481 | ||
472 | return val; | 482 | return val; |
473 | } | 483 | } |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index 5a51523a3459..e248e81a0f9e 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h | |||
@@ -77,6 +77,9 @@ | |||
77 | #define IRQ_LEVEL_LOW 0x03 | 77 | #define IRQ_LEVEL_LOW 0x03 |
78 | #define IRQ_EDGE_BOTH 0x04 | 78 | #define IRQ_EDGE_BOTH 0x04 |
79 | 79 | ||
80 | #define SUN4I_FUNC_INPUT 0 | ||
81 | #define SUN4I_FUNC_IRQ 6 | ||
82 | |||
80 | struct sunxi_desc_function { | 83 | struct sunxi_desc_function { |
81 | const char *name; | 84 | const char *name; |
82 | u8 muxval; | 85 | u8 muxval; |
@@ -94,6 +97,7 @@ struct sunxi_pinctrl_desc { | |||
94 | int npins; | 97 | int npins; |
95 | unsigned pin_base; | 98 | unsigned pin_base; |
96 | unsigned irq_banks; | 99 | unsigned irq_banks; |
100 | bool irq_read_needs_mux; | ||
97 | }; | 101 | }; |
98 | 102 | ||
99 | struct sunxi_pinctrl_function { | 103 | struct sunxi_pinctrl_function { |
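Note: irq_read_needs_mux covers SoCs (sun4i A10 here) where a pin muxed to its interrupt function does not report its level through the data register, so gpio_get() briefly switches the pin to the input function, samples it, and restores the IRQ function. Rough sketch of that mux-around-read, using entirely hypothetical helpers:

static int example_gpio_get(struct example_pinctrl *pctl, unsigned offset)
{
	int val;
	bool remux = pctl->irq_read_needs_mux &&
		     example_pin_used_as_irq(pctl, offset);

	if (remux)
		example_pmx_set(pctl, offset, EXAMPLE_FUNC_INPUT);

	val = example_read_pin(pctl, offset);

	if (remux)
		example_pmx_set(pctl, offset, EXAMPLE_FUNC_IRQ);

	return val;
}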
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 97b5e4ee1ca4..63d4033eb683 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c | |||
@@ -73,7 +73,7 @@ | |||
73 | 73 | ||
74 | #define TIME_WINDOW_MAX_MSEC 40000 | 74 | #define TIME_WINDOW_MAX_MSEC 40000 |
75 | #define TIME_WINDOW_MIN_MSEC 250 | 75 | #define TIME_WINDOW_MIN_MSEC 250 |
76 | 76 | #define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */ | |
77 | enum unit_type { | 77 | enum unit_type { |
78 | ARBITRARY_UNIT, /* no translation */ | 78 | ARBITRARY_UNIT, /* no translation */ |
79 | POWER_UNIT, | 79 | POWER_UNIT, |
@@ -158,6 +158,7 @@ struct rapl_domain { | |||
158 | struct rapl_power_limit rpl[NR_POWER_LIMITS]; | 158 | struct rapl_power_limit rpl[NR_POWER_LIMITS]; |
159 | u64 attr_map; /* track capabilities */ | 159 | u64 attr_map; /* track capabilities */ |
160 | unsigned int state; | 160 | unsigned int state; |
161 | unsigned int domain_energy_unit; | ||
161 | int package_id; | 162 | int package_id; |
162 | }; | 163 | }; |
163 | #define power_zone_to_rapl_domain(_zone) \ | 164 | #define power_zone_to_rapl_domain(_zone) \ |
@@ -190,6 +191,7 @@ struct rapl_defaults { | |||
190 | void (*set_floor_freq)(struct rapl_domain *rd, bool mode); | 191 | void (*set_floor_freq)(struct rapl_domain *rd, bool mode); |
191 | u64 (*compute_time_window)(struct rapl_package *rp, u64 val, | 192 | u64 (*compute_time_window)(struct rapl_package *rp, u64 val, |
192 | bool to_raw); | 193 | bool to_raw); |
194 | unsigned int dram_domain_energy_unit; | ||
193 | }; | 195 | }; |
194 | static struct rapl_defaults *rapl_defaults; | 196 | static struct rapl_defaults *rapl_defaults; |
195 | 197 | ||
@@ -227,7 +229,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd, | |||
227 | static int rapl_write_data_raw(struct rapl_domain *rd, | 229 | static int rapl_write_data_raw(struct rapl_domain *rd, |
228 | enum rapl_primitives prim, | 230 | enum rapl_primitives prim, |
229 | unsigned long long value); | 231 | unsigned long long value); |
230 | static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | 232 | static u64 rapl_unit_xlate(struct rapl_domain *rd, int package, |
233 | enum unit_type type, u64 value, | ||
231 | int to_raw); | 234 | int to_raw); |
232 | static void package_power_limit_irq_save(int package_id); | 235 | static void package_power_limit_irq_save(int package_id); |
233 | 236 | ||
@@ -305,7 +308,9 @@ static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw) | |||
305 | 308 | ||
306 | static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) | 309 | static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) |
307 | { | 310 | { |
308 | *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); | 311 | struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev); |
312 | |||
313 | *energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); | ||
309 | return 0; | 314 | return 0; |
310 | } | 315 | } |
311 | 316 | ||
@@ -639,6 +644,11 @@ static void rapl_init_domains(struct rapl_package *rp) | |||
639 | rd->msrs[4] = MSR_DRAM_POWER_INFO; | 644 | rd->msrs[4] = MSR_DRAM_POWER_INFO; |
640 | rd->rpl[0].prim_id = PL1_ENABLE; | 645 | rd->rpl[0].prim_id = PL1_ENABLE; |
641 | rd->rpl[0].name = pl1_name; | 646 | rd->rpl[0].name = pl1_name; |
647 | rd->domain_energy_unit = | ||
648 | rapl_defaults->dram_domain_energy_unit; | ||
649 | if (rd->domain_energy_unit) | ||
650 | pr_info("DRAM domain energy unit %dpj\n", | ||
651 | rd->domain_energy_unit); | ||
642 | break; | 652 | break; |
643 | } | 653 | } |
644 | if (mask) { | 654 | if (mask) { |
@@ -648,11 +658,13 @@ static void rapl_init_domains(struct rapl_package *rp) | |||
648 | } | 658 | } |
649 | } | 659 | } |
650 | 660 | ||
651 | static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | 661 | static u64 rapl_unit_xlate(struct rapl_domain *rd, int package, |
662 | enum unit_type type, u64 value, | ||
652 | int to_raw) | 663 | int to_raw) |
653 | { | 664 | { |
654 | u64 units = 1; | 665 | u64 units = 1; |
655 | struct rapl_package *rp; | 666 | struct rapl_package *rp; |
667 | u64 scale = 1; | ||
656 | 668 | ||
657 | rp = find_package_by_id(package); | 669 | rp = find_package_by_id(package); |
658 | if (!rp) | 670 | if (!rp) |
@@ -663,7 +675,12 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | |||
663 | units = rp->power_unit; | 675 | units = rp->power_unit; |
664 | break; | 676 | break; |
665 | case ENERGY_UNIT: | 677 | case ENERGY_UNIT: |
666 | units = rp->energy_unit; | 678 | scale = ENERGY_UNIT_SCALE; |
679 | /* per domain unit takes precedence */ | ||
680 | if (rd && rd->domain_energy_unit) | ||
681 | units = rd->domain_energy_unit; | ||
682 | else | ||
683 | units = rp->energy_unit; | ||
667 | break; | 684 | break; |
668 | case TIME_UNIT: | 685 | case TIME_UNIT: |
669 | return rapl_defaults->compute_time_window(rp, value, to_raw); | 686 | return rapl_defaults->compute_time_window(rp, value, to_raw); |
@@ -673,11 +690,11 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | |||
673 | }; | 690 | }; |
674 | 691 | ||
675 | if (to_raw) | 692 | if (to_raw) |
676 | return div64_u64(value, units); | 693 | return div64_u64(value, units) * scale; |
677 | 694 | ||
678 | value *= units; | 695 | value *= units; |
679 | 696 | ||
680 | return value; | 697 | return div64_u64(value, scale); |
681 | } | 698 | } |
682 | 699 | ||
683 | /* in the order of enum rapl_primitives */ | 700 | /* in the order of enum rapl_primitives */ |
@@ -773,7 +790,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd, | |||
773 | final = value & rp->mask; | 790 | final = value & rp->mask; |
774 | final = final >> rp->shift; | 791 | final = final >> rp->shift; |
775 | if (xlate) | 792 | if (xlate) |
776 | *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0); | 793 | *data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0); |
777 | else | 794 | else |
778 | *data = final; | 795 | *data = final; |
779 | 796 | ||
@@ -799,7 +816,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd, | |||
799 | "failed to read msr 0x%x on cpu %d\n", msr, cpu); | 816 | "failed to read msr 0x%x on cpu %d\n", msr, cpu); |
800 | return -EIO; | 817 | return -EIO; |
801 | } | 818 | } |
802 | value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1); | 819 | value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1); |
803 | msr_val &= ~rp->mask; | 820 | msr_val &= ~rp->mask; |
804 | msr_val |= value << rp->shift; | 821 | msr_val |= value << rp->shift; |
805 | if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { | 822 | if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { |
@@ -818,7 +835,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd, | |||
818 | * calculate units differ on different CPUs. | 835 | * calculate units differ on different CPUs. |
819 | * We convert the units to below format based on CPUs. | 836 | * We convert the units to below format based on CPUs. |
820 | * i.e. | 837 | * i.e. |
821 | * energy unit: microJoules : Represented in microJoules by default | 838 | * energy unit: picoJoules : Represented in picoJoules by default |
822 | * power unit : microWatts : Represented in milliWatts by default | 839 | * power unit : microWatts : Represented in milliWatts by default |
823 | * time unit : microseconds: Represented in seconds by default | 840 | * time unit : microseconds: Represented in seconds by default |
824 | */ | 841 | */ |
@@ -834,7 +851,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu) | |||
834 | } | 851 | } |
835 | 852 | ||
836 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; | 853 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; |
837 | rp->energy_unit = 1000000 / (1 << value); | 854 | rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value); |
838 | 855 | ||
839 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; | 856 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; |
840 | rp->power_unit = 1000000 / (1 << value); | 857 | rp->power_unit = 1000000 / (1 << value); |
@@ -842,7 +859,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu) | |||
842 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; | 859 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; |
843 | rp->time_unit = 1000000 / (1 << value); | 860 | rp->time_unit = 1000000 / (1 << value); |
844 | 861 | ||
845 | pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n", | 862 | pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n", |
846 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); | 863 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); |
847 | 864 | ||
848 | return 0; | 865 | return 0; |
@@ -859,7 +876,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu) | |||
859 | return -ENODEV; | 876 | return -ENODEV; |
860 | } | 877 | } |
861 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; | 878 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; |
862 | rp->energy_unit = 1 << value; | 879 | rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value; |
863 | 880 | ||
864 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; | 881 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; |
865 | rp->power_unit = (1 << value) * 1000; | 882 | rp->power_unit = (1 << value) * 1000; |
@@ -867,7 +884,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu) | |||
867 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; | 884 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; |
868 | rp->time_unit = 1000000 / (1 << value); | 885 | rp->time_unit = 1000000 / (1 << value); |
869 | 886 | ||
870 | pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n", | 887 | pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n", |
871 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); | 888 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); |
872 | 889 | ||
873 | return 0; | 890 | return 0; |
@@ -1017,6 +1034,13 @@ static const struct rapl_defaults rapl_defaults_core = { | |||
1017 | .compute_time_window = rapl_compute_time_window_core, | 1034 | .compute_time_window = rapl_compute_time_window_core, |
1018 | }; | 1035 | }; |
1019 | 1036 | ||
1037 | static const struct rapl_defaults rapl_defaults_hsw_server = { | ||
1038 | .check_unit = rapl_check_unit_core, | ||
1039 | .set_floor_freq = set_floor_freq_default, | ||
1040 | .compute_time_window = rapl_compute_time_window_core, | ||
1041 | .dram_domain_energy_unit = 15300, | ||
1042 | }; | ||
1043 | |||
1020 | static const struct rapl_defaults rapl_defaults_atom = { | 1044 | static const struct rapl_defaults rapl_defaults_atom = { |
1021 | .check_unit = rapl_check_unit_atom, | 1045 | .check_unit = rapl_check_unit_atom, |
1022 | .set_floor_freq = set_floor_freq_atom, | 1046 | .set_floor_freq = set_floor_freq_atom, |
@@ -1037,7 +1061,7 @@ static const struct x86_cpu_id rapl_ids[] = { | |||
1037 | RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ | 1061 | RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ |
1038 | RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ | 1062 | RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ |
1039 | RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ | 1063 | RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ |
1040 | RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */ | 1064 | RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */ |
1041 | RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ | 1065 | RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ |
1042 | RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ | 1066 | RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ |
1043 | RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ | 1067 | RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ |
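Note: the RAPL hunks keep the per-count energy unit multiplied by ENERGY_UNIT_SCALE (1000) so fractional units stay integers; the Haswell-server DRAM domain gets a fixed unit stored as 15300, and rapl_unit_xlate() divides the scale back out when converting raw counters for powercap, which reports energy in microjoules (15300 / 1000 = 15.3 per counter tick). A small stand-alone arithmetic check with example values only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t scale = 1000;		/* ENERGY_UNIT_SCALE */
	const uint64_t dram_unit = 15300;	/* scaled per-count unit from the hunk */
	const uint64_t raw = 200000;		/* example raw counter value */

	/* rapl_unit_xlate(..., to_raw = 0): value * units, then divide the scale out. */
	printf("%llu uJ\n", (unsigned long long)(raw * dram_unit / scale)); /* 3060000 */
	return 0;
}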
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b899947d839d..a4a8a6dc60c4 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -1839,10 +1839,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev) | |||
1839 | } | 1839 | } |
1840 | 1840 | ||
1841 | if (rdev->ena_pin) { | 1841 | if (rdev->ena_pin) { |
1842 | ret = regulator_ena_gpio_ctrl(rdev, true); | 1842 | if (!rdev->ena_gpio_state) { |
1843 | if (ret < 0) | 1843 | ret = regulator_ena_gpio_ctrl(rdev, true); |
1844 | return ret; | 1844 | if (ret < 0) |
1845 | rdev->ena_gpio_state = 1; | 1845 | return ret; |
1846 | rdev->ena_gpio_state = 1; | ||
1847 | } | ||
1846 | } else if (rdev->desc->ops->enable) { | 1848 | } else if (rdev->desc->ops->enable) { |
1847 | ret = rdev->desc->ops->enable(rdev); | 1849 | ret = rdev->desc->ops->enable(rdev); |
1848 | if (ret < 0) | 1850 | if (ret < 0) |
@@ -1939,10 +1941,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev) | |||
1939 | trace_regulator_disable(rdev_get_name(rdev)); | 1941 | trace_regulator_disable(rdev_get_name(rdev)); |
1940 | 1942 | ||
1941 | if (rdev->ena_pin) { | 1943 | if (rdev->ena_pin) { |
1942 | ret = regulator_ena_gpio_ctrl(rdev, false); | 1944 | if (rdev->ena_gpio_state) { |
1943 | if (ret < 0) | 1945 | ret = regulator_ena_gpio_ctrl(rdev, false); |
1944 | return ret; | 1946 | if (ret < 0) |
1945 | rdev->ena_gpio_state = 0; | 1947 | return ret; |
1948 | rdev->ena_gpio_state = 0; | ||
1949 | } | ||
1946 | 1950 | ||
1947 | } else if (rdev->desc->ops->disable) { | 1951 | } else if (rdev->desc->ops->disable) { |
1948 | ret = rdev->desc->ops->disable(rdev); | 1952 | ret = rdev->desc->ops->disable(rdev); |
@@ -3444,13 +3448,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj, | |||
3444 | if (attr == &dev_attr_requested_microamps.attr) | 3448 | if (attr == &dev_attr_requested_microamps.attr) |
3445 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; | 3449 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; |
3446 | 3450 | ||
3447 | /* all the other attributes exist to support constraints; | ||
3448 | * don't show them if there are no constraints, or if the | ||
3449 | * relevant supporting methods are missing. | ||
3450 | */ | ||
3451 | if (!rdev->constraints) | ||
3452 | return 0; | ||
3453 | |||
3454 | /* constraints need specific supporting methods */ | 3451 | /* constraints need specific supporting methods */ |
3455 | if (attr == &dev_attr_min_microvolts.attr || | 3452 | if (attr == &dev_attr_min_microvolts.attr || |
3456 | attr == &dev_attr_max_microvolts.attr) | 3453 | attr == &dev_attr_max_microvolts.attr) |
@@ -3633,12 +3630,6 @@ regulator_register(const struct regulator_desc *regulator_desc, | |||
3633 | config->ena_gpio, ret); | 3630 | config->ena_gpio, ret); |
3634 | goto wash; | 3631 | goto wash; |
3635 | } | 3632 | } |
3636 | |||
3637 | if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH) | ||
3638 | rdev->ena_gpio_state = 1; | ||
3639 | |||
3640 | if (config->ena_gpio_invert) | ||
3641 | rdev->ena_gpio_state = !rdev->ena_gpio_state; | ||
3642 | } | 3633 | } |
3643 | 3634 | ||
3644 | /* set regulator constraints */ | 3635 | /* set regulator constraints */ |
@@ -3807,9 +3798,11 @@ int regulator_suspend_finish(void) | |||
3807 | list_for_each_entry(rdev, ®ulator_list, list) { | 3798 | list_for_each_entry(rdev, ®ulator_list, list) { |
3808 | mutex_lock(&rdev->mutex); | 3799 | mutex_lock(&rdev->mutex); |
3809 | if (rdev->use_count > 0 || rdev->constraints->always_on) { | 3800 | if (rdev->use_count > 0 || rdev->constraints->always_on) { |
3810 | error = _regulator_do_enable(rdev); | 3801 | if (!_regulator_is_enabled(rdev)) { |
3811 | if (error) | 3802 | error = _regulator_do_enable(rdev); |
3812 | ret = error; | 3803 | if (error) |
3804 | ret = error; | ||
3805 | } | ||
3813 | } else { | 3806 | } else { |
3814 | if (!have_full_constraints()) | 3807 | if (!have_full_constraints()) |
3815 | goto unlock; | 3808 | goto unlock; |
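Note: the regulator core hunks make the cached ena_gpio_state authoritative: the enable GPIO is toggled only when the cached state actually changes, and suspend_finish re-enables a regulator only if it is not already on, avoiding glitches on supplies that stayed up across suspend. Reduced sketch of the guard, with example_* names assumed:

struct example_rdev {
	bool ena_pin;		/* regulator has a dedicated enable GPIO */
	int ena_gpio_state;	/* cached level last written to that GPIO */
};

static int example_ena_gpio_ctrl(struct example_rdev *rdev, bool enable);

static int example_do_enable(struct example_rdev *rdev)
{
	int ret;

	if (rdev->ena_pin && !rdev->ena_gpio_state) {
		ret = example_ena_gpio_ctrl(rdev, true);
		if (ret < 0)
			return ret;
		rdev->ena_gpio_state = 1;	/* remember the level we drove */
	}

	return 0;
}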
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index bc6100103f7f..f0489cb9018b 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c | |||
@@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c, | |||
152 | config.regmap = chip->regmap; | 152 | config.regmap = chip->regmap; |
153 | config.of_node = dev->of_node; | 153 | config.of_node = dev->of_node; |
154 | 154 | ||
155 | /* Mask all interrupt sources to deassert interrupt line */ | ||
156 | error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0); | ||
157 | if (!error) | ||
158 | error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0); | ||
159 | if (error) { | ||
160 | dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error); | ||
161 | return error; | ||
162 | } | ||
163 | |||
155 | rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); | 164 | rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); |
156 | if (IS_ERR(rdev)) { | 165 | if (IS_ERR(rdev)) { |
157 | dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); | 166 | dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); |
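Note: the da9210 change masks every interrupt source before the regulator is registered, so a level-triggered interrupt line that firmware left asserted is deasserted before anything can latch onto it. Sketch of the quiesce step with assumed example register names:

#include <linux/regmap.h>

static int example_quiesce_irqs(struct regmap *map)
{
	int ret;

	/* Writing all-ones to both mask registers silences every source. */
	ret = regmap_write(map, EXAMPLE_REG_MASK_A, ~0);
	if (!ret)
		ret = regmap_write(map, EXAMPLE_REG_MASK_B, ~0);
	return ret;
}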
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index 9205f433573c..18198316b6cf 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c | |||
@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev) | |||
1572 | if (!pmic) | 1572 | if (!pmic) |
1573 | return -ENOMEM; | 1573 | return -ENOMEM; |
1574 | 1574 | ||
1575 | if (of_device_is_compatible(node, "ti,tps659038-pmic")) | ||
1576 | palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr = | ||
1577 | TPS659038_REGEN2_CTRL; | ||
1578 | |||
1575 | pmic->dev = &pdev->dev; | 1579 | pmic->dev = &pdev->dev; |
1576 | pmic->palmas = palmas; | 1580 | pmic->palmas = palmas; |
1577 | palmas->pmic = pmic; | 1581 | palmas->pmic = pmic; |
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 1f93b752a81c..3fd44353cc80 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c | |||
@@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
235 | .vsel_mask = RK808_LDO_VSEL_MASK, | 235 | .vsel_mask = RK808_LDO_VSEL_MASK, |
236 | .enable_reg = RK808_LDO_EN_REG, | 236 | .enable_reg = RK808_LDO_EN_REG, |
237 | .enable_mask = BIT(0), | 237 | .enable_mask = BIT(0), |
238 | .enable_time = 400, | ||
238 | .owner = THIS_MODULE, | 239 | .owner = THIS_MODULE, |
239 | }, { | 240 | }, { |
240 | .name = "LDO_REG2", | 241 | .name = "LDO_REG2", |
@@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
249 | .vsel_mask = RK808_LDO_VSEL_MASK, | 250 | .vsel_mask = RK808_LDO_VSEL_MASK, |
250 | .enable_reg = RK808_LDO_EN_REG, | 251 | .enable_reg = RK808_LDO_EN_REG, |
251 | .enable_mask = BIT(1), | 252 | .enable_mask = BIT(1), |
253 | .enable_time = 400, | ||
252 | .owner = THIS_MODULE, | 254 | .owner = THIS_MODULE, |
253 | }, { | 255 | }, { |
254 | .name = "LDO_REG3", | 256 | .name = "LDO_REG3", |
@@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
263 | .vsel_mask = RK808_BUCK4_VSEL_MASK, | 265 | .vsel_mask = RK808_BUCK4_VSEL_MASK, |
264 | .enable_reg = RK808_LDO_EN_REG, | 266 | .enable_reg = RK808_LDO_EN_REG, |
265 | .enable_mask = BIT(2), | 267 | .enable_mask = BIT(2), |
268 | .enable_time = 400, | ||
266 | .owner = THIS_MODULE, | 269 | .owner = THIS_MODULE, |
267 | }, { | 270 | }, { |
268 | .name = "LDO_REG4", | 271 | .name = "LDO_REG4", |
@@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
277 | .vsel_mask = RK808_LDO_VSEL_MASK, | 280 | .vsel_mask = RK808_LDO_VSEL_MASK, |
278 | .enable_reg = RK808_LDO_EN_REG, | 281 | .enable_reg = RK808_LDO_EN_REG, |
279 | .enable_mask = BIT(3), | 282 | .enable_mask = BIT(3), |
283 | .enable_time = 400, | ||
280 | .owner = THIS_MODULE, | 284 | .owner = THIS_MODULE, |
281 | }, { | 285 | }, { |
282 | .name = "LDO_REG5", | 286 | .name = "LDO_REG5", |
@@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
291 | .vsel_mask = RK808_LDO_VSEL_MASK, | 295 | .vsel_mask = RK808_LDO_VSEL_MASK, |
292 | .enable_reg = RK808_LDO_EN_REG, | 296 | .enable_reg = RK808_LDO_EN_REG, |
293 | .enable_mask = BIT(4), | 297 | .enable_mask = BIT(4), |
298 | .enable_time = 400, | ||
294 | .owner = THIS_MODULE, | 299 | .owner = THIS_MODULE, |
295 | }, { | 300 | }, { |
296 | .name = "LDO_REG6", | 301 | .name = "LDO_REG6", |
@@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
305 | .vsel_mask = RK808_LDO_VSEL_MASK, | 310 | .vsel_mask = RK808_LDO_VSEL_MASK, |
306 | .enable_reg = RK808_LDO_EN_REG, | 311 | .enable_reg = RK808_LDO_EN_REG, |
307 | .enable_mask = BIT(5), | 312 | .enable_mask = BIT(5), |
313 | .enable_time = 400, | ||
308 | .owner = THIS_MODULE, | 314 | .owner = THIS_MODULE, |
309 | }, { | 315 | }, { |
310 | .name = "LDO_REG7", | 316 | .name = "LDO_REG7", |
@@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
319 | .vsel_mask = RK808_LDO_VSEL_MASK, | 325 | .vsel_mask = RK808_LDO_VSEL_MASK, |
320 | .enable_reg = RK808_LDO_EN_REG, | 326 | .enable_reg = RK808_LDO_EN_REG, |
321 | .enable_mask = BIT(6), | 327 | .enable_mask = BIT(6), |
328 | .enable_time = 400, | ||
322 | .owner = THIS_MODULE, | 329 | .owner = THIS_MODULE, |
323 | }, { | 330 | }, { |
324 | .name = "LDO_REG8", | 331 | .name = "LDO_REG8", |
@@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
333 | .vsel_mask = RK808_LDO_VSEL_MASK, | 340 | .vsel_mask = RK808_LDO_VSEL_MASK, |
334 | .enable_reg = RK808_LDO_EN_REG, | 341 | .enable_reg = RK808_LDO_EN_REG, |
335 | .enable_mask = BIT(7), | 342 | .enable_mask = BIT(7), |
343 | .enable_time = 400, | ||
336 | .owner = THIS_MODULE, | 344 | .owner = THIS_MODULE, |
337 | }, { | 345 | }, { |
338 | .name = "SWITCH_REG1", | 346 | .name = "SWITCH_REG1", |
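The rk808 hunks above only add .enable_time = 400 to each LDO descriptor. enable_time is the ramp delay in microseconds that the regulator core waits after flipping the enable bit before it treats the supply as stable, so no driver code change is needed. A minimal sketch of a descriptor that relies on this, assuming <linux/regulator/driver.h> and the framework's regmap helpers; the names and the register offset are illustrative, not taken from the driver:

static const struct regulator_ops example_ldo_ops = {
        .enable         = regulator_enable_regmap,
        .disable        = regulator_disable_regmap,
        .is_enabled     = regulator_is_enabled_regmap,
};

static const struct regulator_desc example_ldo_desc = {
        .name           = "example_ldo",
        .type           = REGULATOR_VOLTAGE,
        .owner          = THIS_MODULE,
        .ops            = &example_ldo_ops,
        .enable_reg     = 0x24,         /* placeholder register */
        .enable_mask    = BIT(0),
        .enable_time    = 400,          /* us; honoured by the regulator core */
};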
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index e2cffe01b807..fb991ec76423 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/of.h> | ||
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
21 | #include <linux/regulator/driver.h> | 22 | #include <linux/regulator/driver.h> |
22 | #include <linux/regulator/machine.h> | 23 | #include <linux/regulator/machine.h> |
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 92f6af6da699..73354ee27877 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c | |||
@@ -951,6 +951,7 @@ static int rpmsg_probe(struct virtio_device *vdev) | |||
951 | void *bufs_va; | 951 | void *bufs_va; |
952 | int err = 0, i; | 952 | int err = 0, i; |
953 | size_t total_buf_space; | 953 | size_t total_buf_space; |
954 | bool notify; | ||
954 | 955 | ||
955 | vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); | 956 | vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); |
956 | if (!vrp) | 957 | if (!vrp) |
@@ -1030,8 +1031,22 @@ static int rpmsg_probe(struct virtio_device *vdev) | |||
1030 | } | 1031 | } |
1031 | } | 1032 | } |
1032 | 1033 | ||
1034 | /* | ||
1035 | * Prepare to kick but don't notify yet - we can't do this before | ||
1036 | * device is ready. | ||
1037 | */ | ||
1038 | notify = virtqueue_kick_prepare(vrp->rvq); | ||
1039 | |||
1040 | /* From this point on, we can notify and get callbacks. */ | ||
1041 | virtio_device_ready(vdev); | ||
1042 | |||
1033 | /* tell the remote processor it can start sending messages */ | 1043 | /* tell the remote processor it can start sending messages */ |
1034 | virtqueue_kick(vrp->rvq); | 1044 | /* |
1045 | * this might be concurrent with callbacks, but we are only | ||
1046 | * doing notify, not a full kick here, so that's ok. | ||
1047 | */ | ||
1048 | if (notify) | ||
1049 | virtqueue_notify(vrp->rvq); | ||
1035 | 1050 | ||
1036 | dev_info(&vdev->dev, "rpmsg host is online\n"); | 1051 | dev_info(&vdev->dev, "rpmsg host is online\n"); |
1037 | 1052 | ||
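The rpmsg change reorders startup so the remote processor is only notified after virtio_device_ready(): the virtio spec forbids notifying a device before DRIVER_OK is set, so virtqueue_kick() is split into its prepare and notify halves, with the prepare step done early and the actual notification deferred until the device is ready. A condensed sketch of that pattern, assuming <linux/virtio.h> and <linux/virtio_config.h>; the function name is hypothetical and vq/vdev stand for the probe routine's locals:

static void example_announce(struct virtio_device *vdev, struct virtqueue *vq)
{
        bool notify;

        /* queue up the notification, but do not send it yet */
        notify = virtqueue_kick_prepare(vq);

        /* DRIVER_OK: from here on the device may raise callbacks */
        virtio_device_ready(vdev);

        /* the notification itself is now permitted */
        if (notify)
                virtqueue_notify(vq);
}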
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 70a5d94cc766..b283a1a573b3 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/io.h> | 31 | #include <linux/io.h> |
32 | #include <linux/of.h> | 32 | #include <linux/of.h> |
33 | #include <linux/of_device.h> | 33 | #include <linux/of_device.h> |
34 | #include <linux/suspend.h> | ||
34 | #include <linux/uaccess.h> | 35 | #include <linux/uaccess.h> |
35 | 36 | ||
36 | #include "rtc-at91rm9200.h" | 37 | #include "rtc-at91rm9200.h" |
@@ -54,6 +55,10 @@ static void __iomem *at91_rtc_regs; | |||
54 | static int irq; | 55 | static int irq; |
55 | static DEFINE_SPINLOCK(at91_rtc_lock); | 56 | static DEFINE_SPINLOCK(at91_rtc_lock); |
56 | static u32 at91_rtc_shadow_imr; | 57 | static u32 at91_rtc_shadow_imr; |
58 | static bool suspended; | ||
59 | static DEFINE_SPINLOCK(suspended_lock); | ||
60 | static unsigned long cached_events; | ||
61 | static u32 at91_rtc_imr; | ||
57 | 62 | ||
58 | static void at91_rtc_write_ier(u32 mask) | 63 | static void at91_rtc_write_ier(u32 mask) |
59 | { | 64 | { |
@@ -290,7 +295,9 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) | |||
290 | struct rtc_device *rtc = platform_get_drvdata(pdev); | 295 | struct rtc_device *rtc = platform_get_drvdata(pdev); |
291 | unsigned int rtsr; | 296 | unsigned int rtsr; |
292 | unsigned long events = 0; | 297 | unsigned long events = 0; |
298 | int ret = IRQ_NONE; | ||
293 | 299 | ||
300 | spin_lock(&suspended_lock); | ||
294 | rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr(); | 301 | rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr(); |
295 | if (rtsr) { /* this interrupt is shared! Is it ours? */ | 302 | if (rtsr) { /* this interrupt is shared! Is it ours? */ |
296 | if (rtsr & AT91_RTC_ALARM) | 303 | if (rtsr & AT91_RTC_ALARM) |
@@ -304,14 +311,22 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) | |||
304 | 311 | ||
305 | at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */ | 312 | at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */ |
306 | 313 | ||
307 | rtc_update_irq(rtc, 1, events); | 314 | if (!suspended) { |
315 | rtc_update_irq(rtc, 1, events); | ||
308 | 316 | ||
309 | dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__, | 317 | dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", |
310 | events >> 8, events & 0x000000FF); | 318 | __func__, events >> 8, events & 0x000000FF); |
319 | } else { | ||
320 | cached_events |= events; | ||
321 | at91_rtc_write_idr(at91_rtc_imr); | ||
322 | pm_system_wakeup(); | ||
323 | } | ||
311 | 324 | ||
312 | return IRQ_HANDLED; | 325 | ret = IRQ_HANDLED; |
313 | } | 326 | } |
314 | return IRQ_NONE; /* not handled */ | 327 | spin_unlock(&suspended_lock); |
328 | |||
329 | return ret; | ||
315 | } | 330 | } |
316 | 331 | ||
317 | static const struct at91_rtc_config at91rm9200_config = { | 332 | static const struct at91_rtc_config at91rm9200_config = { |
@@ -401,8 +416,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev) | |||
401 | AT91_RTC_CALEV); | 416 | AT91_RTC_CALEV); |
402 | 417 | ||
403 | ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt, | 418 | ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt, |
404 | IRQF_SHARED, | 419 | IRQF_SHARED | IRQF_COND_SUSPEND, |
405 | "at91_rtc", pdev); | 420 | "at91_rtc", pdev); |
406 | if (ret) { | 421 | if (ret) { |
407 | dev_err(&pdev->dev, "IRQ %d already in use.\n", irq); | 422 | dev_err(&pdev->dev, "IRQ %d already in use.\n", irq); |
408 | return ret; | 423 | return ret; |
@@ -454,8 +469,6 @@ static void at91_rtc_shutdown(struct platform_device *pdev) | |||
454 | 469 | ||
455 | /* AT91RM9200 RTC Power management control */ | 470 | /* AT91RM9200 RTC Power management control */ |
456 | 471 | ||
457 | static u32 at91_rtc_imr; | ||
458 | |||
459 | static int at91_rtc_suspend(struct device *dev) | 472 | static int at91_rtc_suspend(struct device *dev) |
460 | { | 473 | { |
461 | /* this IRQ is shared with DBGU and other hardware which isn't | 474 | /* this IRQ is shared with DBGU and other hardware which isn't |
@@ -464,21 +477,42 @@ static int at91_rtc_suspend(struct device *dev) | |||
464 | at91_rtc_imr = at91_rtc_read_imr() | 477 | at91_rtc_imr = at91_rtc_read_imr() |
465 | & (AT91_RTC_ALARM|AT91_RTC_SECEV); | 478 | & (AT91_RTC_ALARM|AT91_RTC_SECEV); |
466 | if (at91_rtc_imr) { | 479 | if (at91_rtc_imr) { |
467 | if (device_may_wakeup(dev)) | 480 | if (device_may_wakeup(dev)) { |
481 | unsigned long flags; | ||
482 | |||
468 | enable_irq_wake(irq); | 483 | enable_irq_wake(irq); |
469 | else | 484 | |
485 | spin_lock_irqsave(&suspended_lock, flags); | ||
486 | suspended = true; | ||
487 | spin_unlock_irqrestore(&suspended_lock, flags); | ||
488 | } else { | ||
470 | at91_rtc_write_idr(at91_rtc_imr); | 489 | at91_rtc_write_idr(at91_rtc_imr); |
490 | } | ||
471 | } | 491 | } |
472 | return 0; | 492 | return 0; |
473 | } | 493 | } |
474 | 494 | ||
475 | static int at91_rtc_resume(struct device *dev) | 495 | static int at91_rtc_resume(struct device *dev) |
476 | { | 496 | { |
497 | struct rtc_device *rtc = dev_get_drvdata(dev); | ||
498 | |||
477 | if (at91_rtc_imr) { | 499 | if (at91_rtc_imr) { |
478 | if (device_may_wakeup(dev)) | 500 | if (device_may_wakeup(dev)) { |
501 | unsigned long flags; | ||
502 | |||
503 | spin_lock_irqsave(&suspended_lock, flags); | ||
504 | |||
505 | if (cached_events) { | ||
506 | rtc_update_irq(rtc, 1, cached_events); | ||
507 | cached_events = 0; | ||
508 | } | ||
509 | |||
510 | suspended = false; | ||
511 | spin_unlock_irqrestore(&suspended_lock, flags); | ||
512 | |||
479 | disable_irq_wake(irq); | 513 | disable_irq_wake(irq); |
480 | else | 514 | } |
481 | at91_rtc_write_ier(at91_rtc_imr); | 515 | at91_rtc_write_ier(at91_rtc_imr); |
482 | } | 516 | } |
483 | return 0; | 517 | return 0; |
484 | } | 518 | } |
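The at91rm9200 changes implement the pattern needed for a shared, wakeup-capable interrupt: the handler is registered with IRQF_COND_SUSPEND so it may legitimately run while the system is suspending, and when it fires in that window it must not call rtc_update_irq(); instead it caches the events, masks the RTC's own sources and calls pm_system_wakeup(), and the resume path replays the cached events. A minimal sketch of that handler shape, assuming <linux/rtc.h>, <linux/suspend.h> and <linux/interrupt.h>; read_events() and mask_events() are hypothetical stand-ins for the driver's register helpers:

static DEFINE_SPINLOCK(example_lock);
static bool example_suspended;
static unsigned long example_cached_events;

static irqreturn_t example_rtc_irq(int irq, void *dev_id)
{
        struct rtc_device *rtc = dev_id;
        unsigned long events;
        irqreturn_t ret = IRQ_NONE;

        spin_lock(&example_lock);
        events = read_events();                 /* hypothetical helper */
        if (events) {
                if (!example_suspended) {
                        rtc_update_irq(rtc, 1, events);
                } else {
                        example_cached_events |= events; /* replayed on resume */
                        mask_events();          /* hypothetical helper */
                        pm_system_wakeup();
                }
                ret = IRQ_HANDLED;
        }
        spin_unlock(&example_lock);

        return ret;
}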
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index 2183fd2750ab..5ccaee32df72 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
24 | #include <linux/mfd/syscon.h> | 24 | #include <linux/mfd/syscon.h> |
25 | #include <linux/regmap.h> | 25 | #include <linux/regmap.h> |
26 | #include <linux/suspend.h> | ||
26 | #include <linux/clk.h> | 27 | #include <linux/clk.h> |
27 | 28 | ||
28 | /* | 29 | /* |
@@ -77,6 +78,9 @@ struct sam9_rtc { | |||
77 | unsigned int gpbr_offset; | 78 | unsigned int gpbr_offset; |
78 | int irq; | 79 | int irq; |
79 | struct clk *sclk; | 80 | struct clk *sclk; |
81 | bool suspended; | ||
82 | unsigned long events; | ||
83 | spinlock_t lock; | ||
80 | }; | 84 | }; |
81 | 85 | ||
82 | #define rtt_readl(rtc, field) \ | 86 | #define rtt_readl(rtc, field) \ |
@@ -271,14 +275,9 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq) | |||
271 | return 0; | 275 | return 0; |
272 | } | 276 | } |
273 | 277 | ||
274 | /* | 278 | static irqreturn_t at91_rtc_cache_events(struct sam9_rtc *rtc) |
275 | * IRQ handler for the RTC | ||
276 | */ | ||
277 | static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc) | ||
278 | { | 279 | { |
279 | struct sam9_rtc *rtc = _rtc; | ||
280 | u32 sr, mr; | 280 | u32 sr, mr; |
281 | unsigned long events = 0; | ||
282 | 281 | ||
283 | /* Shared interrupt may be for another device. Note: reading | 282 | /* Shared interrupt may be for another device. Note: reading |
284 | * SR clears it, so we must only read it in this irq handler! | 283 | * SR clears it, so we must only read it in this irq handler! |
@@ -290,18 +289,54 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc) | |||
290 | 289 | ||
291 | /* alarm status */ | 290 | /* alarm status */ |
292 | if (sr & AT91_RTT_ALMS) | 291 | if (sr & AT91_RTT_ALMS) |
293 | events |= (RTC_AF | RTC_IRQF); | 292 | rtc->events |= (RTC_AF | RTC_IRQF); |
294 | 293 | ||
295 | /* timer update/increment */ | 294 | /* timer update/increment */ |
296 | if (sr & AT91_RTT_RTTINC) | 295 | if (sr & AT91_RTT_RTTINC) |
297 | events |= (RTC_UF | RTC_IRQF); | 296 | rtc->events |= (RTC_UF | RTC_IRQF); |
297 | |||
298 | return IRQ_HANDLED; | ||
299 | } | ||
300 | |||
301 | static void at91_rtc_flush_events(struct sam9_rtc *rtc) | ||
302 | { | ||
303 | if (!rtc->events) | ||
304 | return; | ||
298 | 305 | ||
299 | rtc_update_irq(rtc->rtcdev, 1, events); | 306 | rtc_update_irq(rtc->rtcdev, 1, rtc->events); |
307 | rtc->events = 0; | ||
300 | 308 | ||
301 | pr_debug("%s: num=%ld, events=0x%02lx\n", __func__, | 309 | pr_debug("%s: num=%ld, events=0x%02lx\n", __func__, |
302 | events >> 8, events & 0x000000FF); | 310 | rtc->events >> 8, rtc->events & 0x000000FF); |
311 | } | ||
303 | 312 | ||
304 | return IRQ_HANDLED; | 313 | /* |
314 | * IRQ handler for the RTC | ||
315 | */ | ||
316 | static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc) | ||
317 | { | ||
318 | struct sam9_rtc *rtc = _rtc; | ||
319 | int ret; | ||
320 | |||
321 | spin_lock(&rtc->lock); | ||
322 | |||
323 | ret = at91_rtc_cache_events(rtc); | ||
324 | |||
325 | /* We're called in suspended state */ | ||
326 | if (rtc->suspended) { | ||
327 | /* Mask irqs coming from this peripheral */ | ||
328 | rtt_writel(rtc, MR, | ||
329 | rtt_readl(rtc, MR) & | ||
330 | ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); | ||
331 | /* Trigger a system wakeup */ | ||
332 | pm_system_wakeup(); | ||
333 | } else { | ||
334 | at91_rtc_flush_events(rtc); | ||
335 | } | ||
336 | |||
337 | spin_unlock(&rtc->lock); | ||
338 | |||
339 | return ret; | ||
305 | } | 340 | } |
306 | 341 | ||
307 | static const struct rtc_class_ops at91_rtc_ops = { | 342 | static const struct rtc_class_ops at91_rtc_ops = { |
@@ -421,7 +456,8 @@ static int at91_rtc_probe(struct platform_device *pdev) | |||
421 | 456 | ||
422 | /* register irq handler after we know what name we'll use */ | 457 | /* register irq handler after we know what name we'll use */ |
423 | ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt, | 458 | ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt, |
424 | IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); | 459 | IRQF_SHARED | IRQF_COND_SUSPEND, |
460 | dev_name(&rtc->rtcdev->dev), rtc); | ||
425 | if (ret) { | 461 | if (ret) { |
426 | dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); | 462 | dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); |
427 | return ret; | 463 | return ret; |
@@ -482,7 +518,12 @@ static int at91_rtc_suspend(struct device *dev) | |||
482 | rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); | 518 | rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); |
483 | if (rtc->imr) { | 519 | if (rtc->imr) { |
484 | if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) { | 520 | if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) { |
521 | unsigned long flags; | ||
522 | |||
485 | enable_irq_wake(rtc->irq); | 523 | enable_irq_wake(rtc->irq); |
524 | spin_lock_irqsave(&rtc->lock, flags); | ||
525 | rtc->suspended = true; | ||
526 | spin_unlock_irqrestore(&rtc->lock, flags); | ||
486 | /* don't let RTTINC cause wakeups */ | 527 | /* don't let RTTINC cause wakeups */ |
487 | if (mr & AT91_RTT_RTTINCIEN) | 528 | if (mr & AT91_RTT_RTTINCIEN) |
488 | rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); | 529 | rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); |
@@ -499,10 +540,18 @@ static int at91_rtc_resume(struct device *dev) | |||
499 | u32 mr; | 540 | u32 mr; |
500 | 541 | ||
501 | if (rtc->imr) { | 542 | if (rtc->imr) { |
543 | unsigned long flags; | ||
544 | |||
502 | if (device_may_wakeup(dev)) | 545 | if (device_may_wakeup(dev)) |
503 | disable_irq_wake(rtc->irq); | 546 | disable_irq_wake(rtc->irq); |
504 | mr = rtt_readl(rtc, MR); | 547 | mr = rtt_readl(rtc, MR); |
505 | rtt_writel(rtc, MR, mr | rtc->imr); | 548 | rtt_writel(rtc, MR, mr | rtc->imr); |
549 | |||
550 | spin_lock_irqsave(&rtc->lock, flags); | ||
551 | rtc->suspended = false; | ||
552 | at91_rtc_cache_events(rtc); | ||
553 | at91_rtc_flush_events(rtc); | ||
554 | spin_unlock_irqrestore(&rtc->lock, flags); | ||
506 | } | 555 | } |
507 | 556 | ||
508 | return 0; | 557 | return 0; |
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c index 8c3bfcb115b7..803869c7d7c2 100644 --- a/drivers/rtc/rtc-ds1685.c +++ b/drivers/rtc/rtc-ds1685.c | |||
@@ -399,21 +399,21 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
399 | * of this RTC chip. We check for it anyways in case support is | 399 | * of this RTC chip. We check for it anyways in case support is |
400 | * added in the future. | 400 | * added in the future. |
401 | */ | 401 | */ |
402 | if (unlikely((seconds >= 0xc0) && (seconds <= 0xff))) | 402 | if (unlikely(seconds >= 0xc0)) |
403 | alrm->time.tm_sec = -1; | 403 | alrm->time.tm_sec = -1; |
404 | else | 404 | else |
405 | alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds, | 405 | alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds, |
406 | RTC_SECS_BCD_MASK, | 406 | RTC_SECS_BCD_MASK, |
407 | RTC_SECS_BIN_MASK); | 407 | RTC_SECS_BIN_MASK); |
408 | 408 | ||
409 | if (unlikely((minutes >= 0xc0) && (minutes <= 0xff))) | 409 | if (unlikely(minutes >= 0xc0)) |
410 | alrm->time.tm_min = -1; | 410 | alrm->time.tm_min = -1; |
411 | else | 411 | else |
412 | alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes, | 412 | alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes, |
413 | RTC_MINS_BCD_MASK, | 413 | RTC_MINS_BCD_MASK, |
414 | RTC_MINS_BIN_MASK); | 414 | RTC_MINS_BIN_MASK); |
415 | 415 | ||
416 | if (unlikely((hours >= 0xc0) && (hours <= 0xff))) | 416 | if (unlikely(hours >= 0xc0)) |
417 | alrm->time.tm_hour = -1; | 417 | alrm->time.tm_hour = -1; |
418 | else | 418 | else |
419 | alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours, | 419 | alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours, |
@@ -472,13 +472,13 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
472 | * field, and we only support four fields. We put the support | 472 | * field, and we only support four fields. We put the support |
473 | * here anyways for the future. | 473 | * here anyways for the future. |
474 | */ | 474 | */ |
475 | if (unlikely((seconds >= 0xc0) && (seconds <= 0xff))) | 475 | if (unlikely(seconds >= 0xc0)) |
476 | seconds = 0xff; | 476 | seconds = 0xff; |
477 | 477 | ||
478 | if (unlikely((minutes >= 0xc0) && (minutes <= 0xff))) | 478 | if (unlikely(minutes >= 0xc0)) |
479 | minutes = 0xff; | 479 | minutes = 0xff; |
480 | 480 | ||
481 | if (unlikely((hours >= 0xc0) && (hours <= 0xff))) | 481 | if (unlikely(hours >= 0xc0)) |
482 | hours = 0xff; | 482 | hours = 0xff; |
483 | 483 | ||
484 | alrm->time.tm_mon = -1; | 484 | alrm->time.tm_mon = -1; |
@@ -528,7 +528,6 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
528 | /* ----------------------------------------------------------------------- */ | 528 | /* ----------------------------------------------------------------------- */ |
529 | /* /dev/rtcX Interface functions */ | 529 | /* /dev/rtcX Interface functions */ |
530 | 530 | ||
531 | #ifdef CONFIG_RTC_INTF_DEV | ||
532 | /** | 531 | /** |
533 | * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off. | 532 | * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off. |
534 | * @dev: pointer to device structure. | 533 | * @dev: pointer to device structure. |
@@ -557,7 +556,6 @@ ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
557 | 556 | ||
558 | return 0; | 557 | return 0; |
559 | } | 558 | } |
560 | #endif | ||
561 | /* ----------------------------------------------------------------------- */ | 559 | /* ----------------------------------------------------------------------- */ |
562 | 560 | ||
563 | 561 | ||
@@ -1612,7 +1610,7 @@ ds1685_rtc_sysfs_time_regs_show(struct device *dev, | |||
1612 | ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false); | 1610 | ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false); |
1613 | 1611 | ||
1614 | /* Make sure we actually matched something. */ | 1612 | /* Make sure we actually matched something. */ |
1615 | if (!bcd_reg_info && !bin_reg_info) | 1613 | if (!bcd_reg_info || !bin_reg_info) |
1616 | return -EINVAL; | 1614 | return -EINVAL; |
1617 | 1615 | ||
1618 | /* bcd_reg_info->reg == bin_reg_info->reg. */ | 1616 | /* bcd_reg_info->reg == bin_reg_info->reg. */ |
@@ -1650,7 +1648,7 @@ ds1685_rtc_sysfs_time_regs_store(struct device *dev, | |||
1650 | return -EINVAL; | 1648 | return -EINVAL; |
1651 | 1649 | ||
1652 | /* Make sure we actually matched something. */ | 1650 | /* Make sure we actually matched something. */ |
1653 | if (!bcd_reg_info && !bin_reg_info) | 1651 | if (!bcd_reg_info || !bin_reg_info) |
1654 | return -EINVAL; | 1652 | return -EINVAL; |
1655 | 1653 | ||
1656 | /* Check for a valid range. */ | 1654 | /* Check for a valid range. */ |
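Two small logic corrections are folded into the ds1685 diff. The alarm fields are read from 8-bit registers, so a test of the form (x >= 0xc0) && (x <= 0xff) can never fail on the upper bound and collapses to x >= 0xc0. And the sysfs lookup must bail out when either table entry is missing, which requires ||, not &&: the old test only rejected the case where both lookups failed and would otherwise let a NULL pointer through when exactly one of them was missing. In isolation the corrected check is simply:

        /* Sketch: both lookups must succeed; missing either one is an error */
        if (!bcd_reg_info || !bin_reg_info)
                return -EINVAL;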
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index e2436d140175..3a6fd3a8a2ec 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c | |||
@@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev) | |||
413 | mrst->dev = NULL; | 413 | mrst->dev = NULL; |
414 | } | 414 | } |
415 | 415 | ||
416 | #ifdef CONFIG_PM | 416 | #ifdef CONFIG_PM_SLEEP |
417 | static int mrst_suspend(struct device *dev, pm_message_t mesg) | 417 | static int mrst_suspend(struct device *dev) |
418 | { | 418 | { |
419 | struct mrst_rtc *mrst = dev_get_drvdata(dev); | 419 | struct mrst_rtc *mrst = dev_get_drvdata(dev); |
420 | unsigned char tmp; | 420 | unsigned char tmp; |
@@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg) | |||
453 | */ | 453 | */ |
454 | static inline int mrst_poweroff(struct device *dev) | 454 | static inline int mrst_poweroff(struct device *dev) |
455 | { | 455 | { |
456 | return mrst_suspend(dev, PMSG_HIBERNATE); | 456 | return mrst_suspend(dev); |
457 | } | 457 | } |
458 | 458 | ||
459 | static int mrst_resume(struct device *dev) | 459 | static int mrst_resume(struct device *dev) |
@@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev) | |||
490 | return 0; | 490 | return 0; |
491 | } | 491 | } |
492 | 492 | ||
493 | static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume); | ||
494 | #define MRST_PM_OPS (&mrst_pm_ops) | ||
495 | |||
493 | #else | 496 | #else |
494 | #define mrst_suspend NULL | 497 | #define MRST_PM_OPS NULL |
495 | #define mrst_resume NULL | ||
496 | 498 | ||
497 | static inline int mrst_poweroff(struct device *dev) | 499 | static inline int mrst_poweroff(struct device *dev) |
498 | { | 500 | { |
@@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = { | |||
529 | .remove = vrtc_mrst_platform_remove, | 531 | .remove = vrtc_mrst_platform_remove, |
530 | .shutdown = vrtc_mrst_platform_shutdown, | 532 | .shutdown = vrtc_mrst_platform_shutdown, |
531 | .driver = { | 533 | .driver = { |
532 | .name = (char *) driver_name, | 534 | .name = driver_name, |
533 | .suspend = mrst_suspend, | 535 | .pm = MRST_PM_OPS, |
534 | .resume = mrst_resume, | ||
535 | } | 536 | } |
536 | }; | 537 | }; |
537 | 538 | ||
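The rtc-mrst conversion replaces the legacy driver->suspend/resume callbacks (which took a pm_message_t) with a struct dev_pm_ops built by SIMPLE_DEV_PM_OPS and guarded by CONFIG_PM_SLEEP, wired up through driver.pm. The general shape of that conversion looks roughly like this; the names below are placeholders and <linux/pm.h> plus <linux/platform_device.h> are assumed:

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
        /* save state, arm wakeup sources, ... */
        return 0;
}

static int example_resume(struct device *dev)
{
        /* restore state */
        return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#define EXAMPLE_PM_OPS (&example_pm_ops)
#else
#define EXAMPLE_PM_OPS NULL
#endif

static struct platform_driver example_driver = {
        .driver = {
                .name = "example",
                .pm   = EXAMPLE_PM_OPS,
        },
};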
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4241eeab3386..f4cf6851fae9 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = { | |||
849 | 849 | ||
850 | static struct s3c_rtc_data const s3c6410_rtc_data = { | 850 | static struct s3c_rtc_data const s3c6410_rtc_data = { |
851 | .max_user_freq = 32768, | 851 | .max_user_freq = 32768, |
852 | .needs_src_clk = true, | ||
852 | .irq_handler = s3c6410_rtc_irq, | 853 | .irq_handler = s3c6410_rtc_irq, |
853 | .set_freq = s3c6410_rtc_setfreq, | 854 | .set_freq = s3c6410_rtc_setfreq, |
854 | .enable_tick = s3c6410_rtc_enable_tick, | 855 | .enable_tick = s3c6410_rtc_enable_tick, |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 96128cb009f3..da212813f2d5 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
547 | * parse input | 547 | * parse input |
548 | */ | 548 | */ |
549 | num_of_segments = 0; | 549 | num_of_segments = 0; |
550 | for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { | 550 | for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) { |
551 | for (j = i; (buf[j] != ':') && | 551 | for (j = i; (buf[j] != ':') && |
552 | (buf[j] != '\0') && | 552 | (buf[j] != '\0') && |
553 | (buf[j] != '\n') && | 553 | (buf[j] != '\n') && |
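The dcssblk change is purely about evaluation order: with short-circuit &&, the bound check i < count must sit to the left of any buf[i] access, otherwise buf[count] is read once before the loop stops. The scm_blk_cluster hunk below fixes the related off-by-one, iterating pos < msb_count instead of pos <= msb_count. The corrected idiom in isolation, as a self-contained sketch with hypothetical names:

/* Sketch: buf[i] is only read while i < count, thanks to short-circuit && */
static size_t example_token_len(const char *buf, size_t count)
{
        size_t i;

        for (i = 0; i < count && buf[i] != '\0' && buf[i] != '\n'; i++)
                ;
        return i;
}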
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 09db45296eed..7497ddde2dd6 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c | |||
@@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq) | |||
92 | add = 0; | 92 | add = 0; |
93 | continue; | 93 | continue; |
94 | } | 94 | } |
95 | for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { | 95 | for (pos = 0; pos < iter->aob->request.msb_count; pos++) { |
96 | if (clusters_intersect(req, iter->request[pos]) && | 96 | if (clusters_intersect(req, iter->request[pos]) && |
97 | (rq_data_dir(req) == WRITE || | 97 | (rq_data_dir(req) == WRITE || |
98 | rq_data_dir(iter->request[pos]) == WRITE)) { | 98 | rq_data_dir(iter->request[pos]) == WRITE)) { |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index a7cc61837818..923a2b5a2439 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
@@ -5734,9 +5734,9 @@ free_port: | |||
5734 | hba_free: | 5734 | hba_free: |
5735 | if (phba->msix_enabled) | 5735 | if (phba->msix_enabled) |
5736 | pci_disable_msix(phba->pcidev); | 5736 | pci_disable_msix(phba->pcidev); |
5737 | iscsi_host_remove(phba->shost); | ||
5738 | pci_dev_put(phba->pcidev); | 5737 | pci_dev_put(phba->pcidev); |
5739 | iscsi_host_free(phba->shost); | 5738 | iscsi_host_free(phba->shost); |
5739 | pci_set_drvdata(pcidev, NULL); | ||
5740 | disable_pci: | 5740 | disable_pci: |
5741 | pci_disable_device(pcidev); | 5741 | pci_disable_device(pcidev); |
5742 | return ret; | 5742 | return ret; |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 9219953ee949..d9afc51af7d3 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = { | |||
6815 | }; | 6815 | }; |
6816 | 6816 | ||
6817 | static struct ata_port_info sata_port_info = { | 6817 | static struct ata_port_info sata_port_info = { |
6818 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, | 6818 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | |
6819 | ATA_FLAG_SAS_HOST, | ||
6819 | .pio_mask = ATA_PIO4_ONLY, | 6820 | .pio_mask = ATA_PIO4_ONLY, |
6820 | .mwdma_mask = ATA_MWDMA2, | 6821 | .mwdma_mask = ATA_MWDMA2, |
6821 | .udma_mask = ATA_UDMA6, | 6822 | .udma_mask = ATA_UDMA6, |
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 932d9cc98d2f..9c706d8c1441 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = { | |||
547 | }; | 547 | }; |
548 | 548 | ||
549 | static struct ata_port_info sata_port_info = { | 549 | static struct ata_port_info sata_port_info = { |
550 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ, | 550 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | |
551 | ATA_FLAG_SAS_HOST, | ||
551 | .pio_mask = ATA_PIO4, | 552 | .pio_mask = ATA_PIO4, |
552 | .mwdma_mask = ATA_MWDMA2, | 553 | .mwdma_mask = ATA_MWDMA2, |
553 | .udma_mask = ATA_UDMA6, | 554 | .udma_mask = ATA_UDMA6, |
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 62b58d38ce2e..60de66252fa2 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c | |||
@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
500 | struct sas_discovery_event *ev = to_sas_discovery_event(work); | 500 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
501 | struct asd_sas_port *port = ev->port; | 501 | struct asd_sas_port *port = ev->port; |
502 | struct sas_ha_struct *ha = port->ha; | 502 | struct sas_ha_struct *ha = port->ha; |
503 | struct domain_device *ddev = port->port_dev; | ||
503 | 504 | ||
504 | /* prevent revalidation from finding sata links in recovery */ | 505 | /* prevent revalidation from finding sata links in recovery */ |
505 | mutex_lock(&ha->disco_mutex); | 506 | mutex_lock(&ha->disco_mutex); |
@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
514 | SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, | 515 | SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, |
515 | task_pid_nr(current)); | 516 | task_pid_nr(current)); |
516 | 517 | ||
517 | if (port->port_dev) | 518 | if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || |
518 | res = sas_ex_revalidate_domain(port->port_dev); | 519 | ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) |
520 | res = sas_ex_revalidate_domain(ddev); | ||
519 | 521 | ||
520 | SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", | 522 | SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", |
521 | port->id, task_pid_nr(current), res); | 523 | port->id, task_pid_nr(current), res); |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 99f43b7fc9ab..ab4879e12ea7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -1596,7 +1596,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( | |||
1596 | /* | 1596 | /* |
1597 | * Finally register the new FC Nexus with TCM | 1597 | * Finally register the new FC Nexus with TCM |
1598 | */ | 1598 | */ |
1599 | __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); | 1599 | transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); |
1600 | 1600 | ||
1601 | return 0; | 1601 | return 0; |
1602 | } | 1602 | } |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 54d7a6cbb98a..b1a263137a23 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1311,9 +1311,11 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req) | |||
1311 | "rejecting I/O to dead device\n"); | 1311 | "rejecting I/O to dead device\n"); |
1312 | ret = BLKPREP_KILL; | 1312 | ret = BLKPREP_KILL; |
1313 | break; | 1313 | break; |
1314 | case SDEV_QUIESCE: | ||
1315 | case SDEV_BLOCK: | 1314 | case SDEV_BLOCK: |
1316 | case SDEV_CREATED_BLOCK: | 1315 | case SDEV_CREATED_BLOCK: |
1316 | ret = BLKPREP_DEFER; | ||
1317 | break; | ||
1318 | case SDEV_QUIESCE: | ||
1317 | /* | 1319 | /* |
1318 | * If the devices is blocked we defer normal commands. | 1320 | * If the devices is blocked we defer normal commands. |
1319 | */ | 1321 | */ |
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index f3ee439d6f0e..cd4c293f0dd0 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c | |||
@@ -81,7 +81,9 @@ static int __init sh_pm_runtime_init(void) | |||
81 | if (!of_machine_is_compatible("renesas,emev2") && | 81 | if (!of_machine_is_compatible("renesas,emev2") && |
82 | !of_machine_is_compatible("renesas,r7s72100") && | 82 | !of_machine_is_compatible("renesas,r7s72100") && |
83 | !of_machine_is_compatible("renesas,r8a73a4") && | 83 | !of_machine_is_compatible("renesas,r8a73a4") && |
84 | #ifndef CONFIG_PM_GENERIC_DOMAINS_OF | ||
84 | !of_machine_is_compatible("renesas,r8a7740") && | 85 | !of_machine_is_compatible("renesas,r8a7740") && |
86 | #endif | ||
85 | !of_machine_is_compatible("renesas,r8a7778") && | 87 | !of_machine_is_compatible("renesas,r8a7778") && |
86 | !of_machine_is_compatible("renesas,r8a7779") && | 88 | !of_machine_is_compatible("renesas,r8a7779") && |
87 | !of_machine_is_compatible("renesas,r8a7790") && | 89 | !of_machine_is_compatible("renesas,r8a7790") && |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 00cc019ddddf..a132180a9251 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -160,10 +160,9 @@ config SPI_BUTTERFLY | |||
160 | 160 | ||
161 | config SPI_CADENCE | 161 | config SPI_CADENCE |
162 | tristate "Cadence SPI controller" | 162 | tristate "Cadence SPI controller" |
163 | depends on ARM | ||
164 | help | 163 | help |
165 | This selects the Cadence SPI controller master driver | 164 | This selects the Cadence SPI controller master driver |
166 | used by Xilinx Zynq. | 165 | used by Xilinx Zynq and ZynqMP. |
167 | 166 | ||
168 | config SPI_CLPS711X | 167 | config SPI_CLPS711X |
169 | tristate "CLPS711X host SPI controller" | 168 | tristate "CLPS711X host SPI controller" |
@@ -633,7 +632,7 @@ config SPI_DW_PCI | |||
633 | 632 | ||
634 | config SPI_DW_MID_DMA | 633 | config SPI_DW_MID_DMA |
635 | bool "DMA support for DW SPI controller on Intel MID platform" | 634 | bool "DMA support for DW SPI controller on Intel MID platform" |
636 | depends on SPI_DW_PCI && INTEL_MID_DMAC | 635 | depends on SPI_DW_PCI && DW_DMAC_PCI |
637 | 636 | ||
638 | config SPI_DW_MMIO | 637 | config SPI_DW_MMIO |
639 | tristate "Memory-mapped io interface driver for DW SPI core" | 638 | tristate "Memory-mapped io interface driver for DW SPI core" |
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 9af7841f2e8c..a2f40b1b2225 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -180,11 +180,17 @@ | |||
180 | | SPI_BF(name, value)) | 180 | | SPI_BF(name, value)) |
181 | 181 | ||
182 | /* Register access macros */ | 182 | /* Register access macros */ |
183 | #ifdef CONFIG_AVR32 | ||
183 | #define spi_readl(port, reg) \ | 184 | #define spi_readl(port, reg) \ |
184 | __raw_readl((port)->regs + SPI_##reg) | 185 | __raw_readl((port)->regs + SPI_##reg) |
185 | #define spi_writel(port, reg, value) \ | 186 | #define spi_writel(port, reg, value) \ |
186 | __raw_writel((value), (port)->regs + SPI_##reg) | 187 | __raw_writel((value), (port)->regs + SPI_##reg) |
187 | 188 | #else | |
189 | #define spi_readl(port, reg) \ | ||
190 | readl_relaxed((port)->regs + SPI_##reg) | ||
191 | #define spi_writel(port, reg, value) \ | ||
192 | writel_relaxed((value), (port)->regs + SPI_##reg) | ||
193 | #endif | ||
188 | /* use PIO for small transfers, avoiding DMA setup/teardown overhead and | 194 | /* use PIO for small transfers, avoiding DMA setup/teardown overhead and |
189 | * cache operations; better heuristics consider wordsize and bitrate. | 195 | * cache operations; better heuristics consider wordsize and bitrate. |
190 | */ | 196 | */ |
@@ -764,17 +770,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master, | |||
764 | (unsigned long long)xfer->rx_dma); | 770 | (unsigned long long)xfer->rx_dma); |
765 | } | 771 | } |
766 | 772 | ||
767 | /* REVISIT: We're waiting for ENDRX before we start the next | 773 | /* REVISIT: We're waiting for RXBUFF before we start the next |
768 | * transfer because we need to handle some difficult timing | 774 | * transfer because we need to handle some difficult timing |
769 | * issues otherwise. If we wait for ENDTX in one transfer and | 775 | * issues otherwise. If we wait for TXBUFE in one transfer and |
770 | * then starts waiting for ENDRX in the next, it's difficult | 776 | * then starts waiting for RXBUFF in the next, it's difficult |
771 | * to tell the difference between the ENDRX interrupt we're | 777 | * to tell the difference between the RXBUFF interrupt we're |
772 | * actually waiting for and the ENDRX interrupt of the | 778 | * actually waiting for and the RXBUFF interrupt of the |
773 | * previous transfer. | 779 | * previous transfer. |
774 | * | 780 | * |
775 | * It should be doable, though. Just not now... | 781 | * It should be doable, though. Just not now... |
776 | */ | 782 | */ |
777 | spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); | 783 | spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES)); |
778 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); | 784 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); |
779 | } | 785 | } |
780 | 786 | ||
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 419a782ab6d5..37875cf942f7 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2012 Chris Boot | 4 | * Copyright (C) 2012 Chris Boot |
5 | * Copyright (C) 2013 Stephen Warren | 5 | * Copyright (C) 2013 Stephen Warren |
6 | * Copyright (C) 2015 Martin Sperl | ||
6 | * | 7 | * |
7 | * This driver is inspired by: | 8 | * This driver is inspired by: |
8 | * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> | 9 | * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> |
@@ -29,6 +30,7 @@ | |||
29 | #include <linux/module.h> | 30 | #include <linux/module.h> |
30 | #include <linux/of.h> | 31 | #include <linux/of.h> |
31 | #include <linux/of_irq.h> | 32 | #include <linux/of_irq.h> |
33 | #include <linux/of_gpio.h> | ||
32 | #include <linux/of_device.h> | 34 | #include <linux/of_device.h> |
33 | #include <linux/spi/spi.h> | 35 | #include <linux/spi/spi.h> |
34 | 36 | ||
@@ -66,8 +68,10 @@ | |||
66 | #define BCM2835_SPI_CS_CS_10 0x00000002 | 68 | #define BCM2835_SPI_CS_CS_10 0x00000002 |
67 | #define BCM2835_SPI_CS_CS_01 0x00000001 | 69 | #define BCM2835_SPI_CS_CS_01 0x00000001 |
68 | 70 | ||
69 | #define BCM2835_SPI_TIMEOUT_MS 30000 | 71 | #define BCM2835_SPI_POLLING_LIMIT_US 30 |
70 | #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS) | 72 | #define BCM2835_SPI_TIMEOUT_MS 30000 |
73 | #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ | ||
74 | | SPI_NO_CS | SPI_3WIRE) | ||
71 | 75 | ||
72 | #define DRV_NAME "spi-bcm2835" | 76 | #define DRV_NAME "spi-bcm2835" |
73 | 77 | ||
@@ -75,10 +79,10 @@ struct bcm2835_spi { | |||
75 | void __iomem *regs; | 79 | void __iomem *regs; |
76 | struct clk *clk; | 80 | struct clk *clk; |
77 | int irq; | 81 | int irq; |
78 | struct completion done; | ||
79 | const u8 *tx_buf; | 82 | const u8 *tx_buf; |
80 | u8 *rx_buf; | 83 | u8 *rx_buf; |
81 | int len; | 84 | int tx_len; |
85 | int rx_len; | ||
82 | }; | 86 | }; |
83 | 87 | ||
84 | static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) | 88 | static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) |
@@ -91,205 +95,314 @@ static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val) | |||
91 | writel(val, bs->regs + reg); | 95 | writel(val, bs->regs + reg); |
92 | } | 96 | } |
93 | 97 | ||
94 | static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs, int len) | 98 | static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs) |
95 | { | 99 | { |
96 | u8 byte; | 100 | u8 byte; |
97 | 101 | ||
98 | while (len--) { | 102 | while ((bs->rx_len) && |
103 | (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) { | ||
99 | byte = bcm2835_rd(bs, BCM2835_SPI_FIFO); | 104 | byte = bcm2835_rd(bs, BCM2835_SPI_FIFO); |
100 | if (bs->rx_buf) | 105 | if (bs->rx_buf) |
101 | *bs->rx_buf++ = byte; | 106 | *bs->rx_buf++ = byte; |
107 | bs->rx_len--; | ||
102 | } | 108 | } |
103 | } | 109 | } |
104 | 110 | ||
105 | static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs, int len) | 111 | static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs) |
106 | { | 112 | { |
107 | u8 byte; | 113 | u8 byte; |
108 | 114 | ||
109 | if (len > bs->len) | 115 | while ((bs->tx_len) && |
110 | len = bs->len; | 116 | (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) { |
111 | |||
112 | while (len--) { | ||
113 | byte = bs->tx_buf ? *bs->tx_buf++ : 0; | 117 | byte = bs->tx_buf ? *bs->tx_buf++ : 0; |
114 | bcm2835_wr(bs, BCM2835_SPI_FIFO, byte); | 118 | bcm2835_wr(bs, BCM2835_SPI_FIFO, byte); |
115 | bs->len--; | 119 | bs->tx_len--; |
116 | } | 120 | } |
117 | } | 121 | } |
118 | 122 | ||
123 | static void bcm2835_spi_reset_hw(struct spi_master *master) | ||
124 | { | ||
125 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | ||
126 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); | ||
127 | |||
128 | /* Disable SPI interrupts and transfer */ | ||
129 | cs &= ~(BCM2835_SPI_CS_INTR | | ||
130 | BCM2835_SPI_CS_INTD | | ||
131 | BCM2835_SPI_CS_TA); | ||
132 | /* and reset RX/TX FIFOS */ | ||
133 | cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX; | ||
134 | |||
135 | /* and reset the SPI_HW */ | ||
136 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); | ||
137 | } | ||
138 | |||
119 | static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) | 139 | static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) |
120 | { | 140 | { |
121 | struct spi_master *master = dev_id; | 141 | struct spi_master *master = dev_id; |
122 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | 142 | struct bcm2835_spi *bs = spi_master_get_devdata(master); |
123 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); | ||
124 | 143 | ||
125 | /* | 144 | /* Read as many bytes as possible from FIFO */ |
126 | * RXR - RX needs Reading. This means 12 (or more) bytes have been | 145 | bcm2835_rd_fifo(bs); |
127 | * transmitted and hence 12 (or more) bytes have been received. | 146 | /* Write as many bytes as possible to FIFO */ |
128 | * | 147 | bcm2835_wr_fifo(bs); |
129 | * The FIFO is 16-bytes deep. We check for this interrupt to keep the | 148 | |
130 | * FIFO full; we have a 4-byte-time buffer for IRQ latency. We check | 149 | /* based on flags decide if we can finish the transfer */ |
131 | * this before DONE (TX empty) just in case we delayed processing this | 150 | if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) { |
132 | * interrupt for some reason. | 151 | /* Transfer complete - reset SPI HW */ |
133 | * | 152 | bcm2835_spi_reset_hw(master); |
134 | * We only check for this case if we have more bytes to TX; at the end | 153 | /* wake up the framework */ |
135 | * of the transfer, we ignore this pipelining optimization, and let | 154 | complete(&master->xfer_completion); |
136 | * bcm2835_spi_finish_transfer() drain the RX FIFO. | 155 | } |
156 | |||
157 | return IRQ_HANDLED; | ||
158 | } | ||
159 | |||
160 | static int bcm2835_spi_transfer_one_poll(struct spi_master *master, | ||
161 | struct spi_device *spi, | ||
162 | struct spi_transfer *tfr, | ||
163 | u32 cs, | ||
164 | unsigned long xfer_time_us) | ||
165 | { | ||
166 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | ||
167 | /* set timeout to 1 second of maximum polling */ | ||
168 | unsigned long timeout = jiffies + HZ; | ||
169 | |||
170 | /* enable HW block without interrupts */ | ||
171 | bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); | ||
172 | |||
173 | /* loop until finished the transfer */ | ||
174 | while (bs->rx_len) { | ||
175 | /* read from fifo as much as possible */ | ||
176 | bcm2835_rd_fifo(bs); | ||
177 | /* fill in tx fifo as much as possible */ | ||
178 | bcm2835_wr_fifo(bs); | ||
179 | /* if we still expect some data after the read, | ||
180 | * check for a possible timeout | ||
181 | */ | ||
182 | if (bs->rx_len && time_after(jiffies, timeout)) { | ||
183 | /* Transfer complete - reset SPI HW */ | ||
184 | bcm2835_spi_reset_hw(master); | ||
185 | /* and return timeout */ | ||
186 | return -ETIMEDOUT; | ||
187 | } | ||
188 | } | ||
189 | |||
190 | /* Transfer complete - reset SPI HW */ | ||
191 | bcm2835_spi_reset_hw(master); | ||
192 | /* and return without waiting for completion */ | ||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | static int bcm2835_spi_transfer_one_irq(struct spi_master *master, | ||
197 | struct spi_device *spi, | ||
198 | struct spi_transfer *tfr, | ||
199 | u32 cs) | ||
200 | { | ||
201 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | ||
202 | |||
203 | /* fill in fifo if we have gpio-cs | ||
204 | * note that there have been rare events where the native-CS | ||
205 | * flapped for <1us which may change the behaviour | ||
206 | * with gpio-cs this does not happen, so it is implemented | ||
207 | * only for this case | ||
137 | */ | 208 | */ |
138 | if (bs->len && (cs & BCM2835_SPI_CS_RXR)) { | 209 | if (gpio_is_valid(spi->cs_gpio)) { |
139 | /* Read 12 bytes of data */ | 210 | /* enable HW block, but without interrupts enabled |
140 | bcm2835_rd_fifo(bs, 12); | 211 | * this would triggern an immediate interrupt |
140 | bcm2835_rd_fifo(bs, 12); | 211 | * this would trigger an immediate interrupt |
141 | |||
142 | /* Write up to 12 bytes */ | ||
143 | bcm2835_wr_fifo(bs, 12); | ||
144 | |||
145 | /* | ||
146 | * We must have written something to the TX FIFO due to the | ||
147 | * bs->len check above, so cannot be DONE. Hence, return | ||
148 | * early. Note that DONE could also be set if we serviced an | ||
149 | * RXR interrupt really late. | ||
150 | */ | 212 | */ |
151 | return IRQ_HANDLED; | 213 | bcm2835_wr(bs, BCM2835_SPI_CS, |
214 | cs | BCM2835_SPI_CS_TA); | ||
215 | /* fill in tx fifo as much as possible */ | ||
216 | bcm2835_wr_fifo(bs); | ||
152 | } | 217 | } |
153 | 218 | ||
154 | /* | 219 | /* |
155 | * DONE - TX empty. This occurs when we first enable the transfer | 220 | * Enable the HW block. This will immediately trigger a DONE (TX |
156 | * since we do not pre-fill the TX FIFO. At any other time, given that | 221 | * empty) interrupt, upon which we will fill the TX FIFO with the |
157 | * we refill the TX FIFO above based on RXR, and hence ignore DONE if | 222 | * first TX bytes. Pre-filling the TX FIFO here to avoid the |
158 | * RXR is set, DONE really does mean end-of-transfer. | 223 | * interrupt doesn't work:-( |
159 | */ | 224 | */ |
160 | if (cs & BCM2835_SPI_CS_DONE) { | 225 | cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA; |
161 | if (bs->len) { /* First interrupt in a transfer */ | 226 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); |
162 | bcm2835_wr_fifo(bs, 16); | ||
163 | } else { /* Transfer complete */ | ||
164 | /* Disable SPI interrupts */ | ||
165 | cs &= ~(BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD); | ||
166 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); | ||
167 | |||
168 | /* | ||
169 | * Wake up bcm2835_spi_transfer_one(), which will call | ||
170 | * bcm2835_spi_finish_transfer(), to drain the RX FIFO. | ||
171 | */ | ||
172 | complete(&bs->done); | ||
173 | } | ||
174 | |||
175 | return IRQ_HANDLED; | ||
176 | } | ||
177 | 227 | ||
178 | return IRQ_NONE; | 228 | /* signal that we need to wait for completion */ |
229 | return 1; | ||
179 | } | 230 | } |
180 | 231 | ||
181 | static int bcm2835_spi_start_transfer(struct spi_device *spi, | 232 | static int bcm2835_spi_transfer_one(struct spi_master *master, |
182 | struct spi_transfer *tfr) | 233 | struct spi_device *spi, |
234 | struct spi_transfer *tfr) | ||
183 | { | 235 | { |
184 | struct bcm2835_spi *bs = spi_master_get_devdata(spi->master); | 236 | struct bcm2835_spi *bs = spi_master_get_devdata(master); |
185 | unsigned long spi_hz, clk_hz, cdiv; | 237 | unsigned long spi_hz, clk_hz, cdiv; |
186 | u32 cs = BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA; | 238 | unsigned long spi_used_hz, xfer_time_us; |
239 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); | ||
187 | 240 | ||
241 | /* set clock */ | ||
188 | spi_hz = tfr->speed_hz; | 242 | spi_hz = tfr->speed_hz; |
189 | clk_hz = clk_get_rate(bs->clk); | 243 | clk_hz = clk_get_rate(bs->clk); |
190 | 244 | ||
191 | if (spi_hz >= clk_hz / 2) { | 245 | if (spi_hz >= clk_hz / 2) { |
192 | cdiv = 2; /* clk_hz/2 is the fastest we can go */ | 246 | cdiv = 2; /* clk_hz/2 is the fastest we can go */ |
193 | } else if (spi_hz) { | 247 | } else if (spi_hz) { |
194 | /* CDIV must be a power of two */ | 248 | /* CDIV must be a multiple of two */ |
195 | cdiv = roundup_pow_of_two(DIV_ROUND_UP(clk_hz, spi_hz)); | 249 | cdiv = DIV_ROUND_UP(clk_hz, spi_hz); |
250 | cdiv += (cdiv % 2); | ||
196 | 251 | ||
197 | if (cdiv >= 65536) | 252 | if (cdiv >= 65536) |
198 | cdiv = 0; /* 0 is the slowest we can go */ | 253 | cdiv = 0; /* 0 is the slowest we can go */ |
199 | } else | 254 | } else { |
200 | cdiv = 0; /* 0 is the slowest we can go */ | 255 | cdiv = 0; /* 0 is the slowest we can go */ |
256 | } | ||
257 | spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536); | ||
258 | bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); | ||
201 | 259 | ||
260 | /* handle all the modes */ | ||
261 | if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf)) | ||
262 | cs |= BCM2835_SPI_CS_REN; | ||
202 | if (spi->mode & SPI_CPOL) | 263 | if (spi->mode & SPI_CPOL) |
203 | cs |= BCM2835_SPI_CS_CPOL; | 264 | cs |= BCM2835_SPI_CS_CPOL; |
204 | if (spi->mode & SPI_CPHA) | 265 | if (spi->mode & SPI_CPHA) |
205 | cs |= BCM2835_SPI_CS_CPHA; | 266 | cs |= BCM2835_SPI_CS_CPHA; |
206 | 267 | ||
207 | if (!(spi->mode & SPI_NO_CS)) { | 268 | /* for gpio_cs set dummy CS so that no HW-CS get changed |
208 | if (spi->mode & SPI_CS_HIGH) { | 269 | * we can not run this in bcm2835_spi_set_cs, as it does |
209 | cs |= BCM2835_SPI_CS_CSPOL; | 270 | * not get called for cs_gpio cases, so we need to do it here |
210 | cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select; | 271 | */ |
211 | } | 272 | if (gpio_is_valid(spi->cs_gpio) || (spi->mode & SPI_NO_CS)) |
212 | 273 | cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01; | |
213 | cs |= spi->chip_select; | ||
214 | } | ||
215 | 274 | ||
216 | reinit_completion(&bs->done); | 275 | /* set transmit buffers and length */ |
217 | bs->tx_buf = tfr->tx_buf; | 276 | bs->tx_buf = tfr->tx_buf; |
218 | bs->rx_buf = tfr->rx_buf; | 277 | bs->rx_buf = tfr->rx_buf; |
219 | bs->len = tfr->len; | 278 | bs->tx_len = tfr->len; |
279 | bs->rx_len = tfr->len; | ||
220 | 280 | ||
221 | bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); | 281 | /* calculate the estimated time in us the transfer runs */ |
222 | /* | 282 | xfer_time_us = tfr->len |
223 | * Enable the HW block. This will immediately trigger a DONE (TX | 283 | * 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */ |
224 | * empty) interrupt, upon which we will fill the TX FIFO with the | 284 | * 1000000 / spi_used_hz; |
225 | * first TX bytes. Pre-filling the TX FIFO here to avoid the | ||
226 | * interrupt doesn't work:-( | ||
227 | */ | ||
228 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); | ||
229 | 285 | ||
230 | return 0; | 286 | /* for short requests run polling*/ |
287 | if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US) | ||
288 | return bcm2835_spi_transfer_one_poll(master, spi, tfr, | ||
289 | cs, xfer_time_us); | ||
290 | |||
291 | return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs); | ||
231 | } | 292 | } |
232 | 293 | ||
233 | static int bcm2835_spi_finish_transfer(struct spi_device *spi, | 294 | static void bcm2835_spi_handle_err(struct spi_master *master, |
234 | struct spi_transfer *tfr, bool cs_change) | 295 | struct spi_message *msg) |
235 | { | 296 | { |
236 | struct bcm2835_spi *bs = spi_master_get_devdata(spi->master); | 297 | bcm2835_spi_reset_hw(master); |
237 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); | 298 | } |
299 | |||
300 | static void bcm2835_spi_set_cs(struct spi_device *spi, bool gpio_level) | ||
301 | { | ||
302 | /* | ||
303 | * we can assume that we are "native" as per spi_set_cs | ||
304 | * calling us ONLY when cs_gpio is not set | ||
305 | * we can also assume that we are CS < 3 as per bcm2835_spi_setup | ||
306 | * we would not get called because of error handling there. | ||
307 | * the level passed is the electrical level not enabled/disabled | ||
308 | * so it has to get translated back to enable/disable | ||
309 | * see spi_set_cs in spi.c for the implementation | ||
310 | */ | ||
238 | 311 | ||
239 | /* Drain RX FIFO */ | 312 | struct spi_master *master = spi->master; |
240 | while (cs & BCM2835_SPI_CS_RXD) { | 313 | struct bcm2835_spi *bs = spi_master_get_devdata(master); |
241 | bcm2835_rd_fifo(bs, 1); | 314 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); |
242 | cs = bcm2835_rd(bs, BCM2835_SPI_CS); | 315 | bool enable; |
316 | |||
317 | /* calculate the enable flag from the passed gpio_level */ | ||
318 | enable = (spi->mode & SPI_CS_HIGH) ? gpio_level : !gpio_level; | ||
319 | |||
320 | /* set flags for "reverse" polarity in the registers */ | ||
321 | if (spi->mode & SPI_CS_HIGH) { | ||
322 | /* set the correct CS-bits */ | ||
323 | cs |= BCM2835_SPI_CS_CSPOL; | ||
324 | cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select; | ||
325 | } else { | ||
326 | /* clean the CS-bits */ | ||
327 | cs &= ~BCM2835_SPI_CS_CSPOL; | ||
328 | cs &= ~(BCM2835_SPI_CS_CSPOL0 << spi->chip_select); | ||
243 | } | 329 | } |
244 | 330 | ||
245 | if (tfr->delay_usecs) | 331 | /* select the correct chip_select depending on disabled/enabled */ |
246 | udelay(tfr->delay_usecs); | 332 | if (enable) { |
333 | /* set cs correctly */ | ||
334 | if (spi->mode & SPI_NO_CS) { | ||
335 | /* use the "undefined" chip-select */ | ||
336 | cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01; | ||
337 | } else { | ||
338 | /* set the chip select */ | ||
339 | cs &= ~(BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01); | ||
340 | cs |= spi->chip_select; | ||
341 | } | ||
342 | } else { | ||
343 | /* disable CSPOL which puts HW-CS into deselected state */ | ||
344 | cs &= ~BCM2835_SPI_CS_CSPOL; | ||
345 | /* use the "undefined" chip-select as precaution */ | ||
346 | cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01; | ||
347 | } | ||
247 | 348 | ||
248 | if (cs_change) | 349 | /* finally set the calculated flags in SPI_CS */ |
249 | /* Clear TA flag */ | 350 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); |
250 | bcm2835_wr(bs, BCM2835_SPI_CS, cs & ~BCM2835_SPI_CS_TA); | 351 | } |
251 | 352 | ||
252 | return 0; | 353 | static int chip_match_name(struct gpio_chip *chip, void *data) |
354 | { | ||
355 | return !strcmp(chip->label, data); | ||
253 | } | 356 | } |
254 | 357 | ||
255 | static int bcm2835_spi_transfer_one(struct spi_master *master, | 358 | static int bcm2835_spi_setup(struct spi_device *spi) |
256 | struct spi_message *mesg) | ||
257 | { | 359 | { |
258 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | 360 | int err; |
259 | struct spi_transfer *tfr; | 361 | struct gpio_chip *chip; |
260 | struct spi_device *spi = mesg->spi; | 362 | /* |
261 | int err = 0; | 363 | * sanity checking the native-chipselects |
262 | unsigned int timeout; | 364 | */ |
263 | bool cs_change; | 365 | if (spi->mode & SPI_NO_CS) |
264 | 366 | return 0; | |
265 | list_for_each_entry(tfr, &mesg->transfers, transfer_list) { | 367 | if (gpio_is_valid(spi->cs_gpio)) |
266 | err = bcm2835_spi_start_transfer(spi, tfr); | 368 | return 0; |
267 | if (err) | 369 | if (spi->chip_select > 1) { |
268 | goto out; | 370 | /* error in the case of native CS requested with CS > 1 |
269 | 371 | * officially there is a CS2, but it is not documented | |
270 | timeout = wait_for_completion_timeout(&bs->done, | 372 | * which GPIO is connected with that... |
271 | msecs_to_jiffies(BCM2835_SPI_TIMEOUT_MS)); | 373 | */ |
272 | if (!timeout) { | 374 | dev_err(&spi->dev, |
273 | err = -ETIMEDOUT; | 375 | "setup: only two native chip-selects are supported\n"); |
274 | goto out; | 376 | return -EINVAL; |
275 | } | 377 | } |
378 | /* now translate native cs to GPIO */ | ||
276 | 379 | ||
277 | cs_change = tfr->cs_change || | 380 | /* get the gpio chip for the base */ |
278 | list_is_last(&tfr->transfer_list, &mesg->transfers); | 381 | chip = gpiochip_find("pinctrl-bcm2835", chip_match_name); |
382 | if (!chip) | ||
383 | return 0; | ||
279 | 384 | ||
280 | err = bcm2835_spi_finish_transfer(spi, tfr, cs_change); | 385 | /* and calculate the real CS */ |
281 | if (err) | 386 | spi->cs_gpio = chip->base + 8 - spi->chip_select; |
282 | goto out; | ||
283 | 387 | ||
284 | mesg->actual_length += (tfr->len - bs->len); | 388 | /* and set up the "mode" and level */ |
285 | } | 389 | dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n", |
390 | spi->chip_select, spi->cs_gpio); | ||
286 | 391 | ||
287 | out: | 392 | /* set up GPIO as output and pull to the correct level */ |
288 | /* Clear FIFOs, and disable the HW block */ | 393 | err = gpio_direction_output(spi->cs_gpio, |
289 | bcm2835_wr(bs, BCM2835_SPI_CS, | 394 | (spi->mode & SPI_CS_HIGH) ? 0 : 1); |
290 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); | 395 | if (err) { |
291 | mesg->status = err; | 396 | dev_err(&spi->dev, |
292 | spi_finalize_current_message(master); | 397 | "could not set CS%i gpio %i as output: %i", |
398 | spi->chip_select, spi->cs_gpio, err); | ||
399 | return err; | ||
400 | } | ||
401 | /* the implementation of pinctrl-bcm2835 currently does not | ||
402 | * set the GPIO value when using gpio_direction_output | ||
403 | * so we are setting it here explicitly | ||
404 | */ | ||
405 | gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1); | ||
293 | 406 | ||
294 | return 0; | 407 | return 0; |
295 | } | 408 | } |
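The new set_cs()/setup() paths above reduce to two small calculations: the electrical level handed in by spi_set_cs() is folded back into an enable flag via SPI_CS_HIGH, and a native chip-select is remapped onto a pinctrl-bcm2835 GPIO as chip->base + 8 - chip_select. A minimal user-space sketch of both calculations, independent of the kernel headers (MODE_CS_HIGH is an illustrative stand-in for SPI_CS_HIGH, not its real value):

    #include <stdbool.h>
    #include <stdio.h>

    #define MODE_CS_HIGH 0x04    /* stand-in for SPI_CS_HIGH; illustrative value only */

    /* electrical level -> logical enable, mirroring bcm2835_spi_set_cs() */
    static bool cs_enabled(unsigned int mode, bool gpio_level)
    {
        return (mode & MODE_CS_HIGH) ? gpio_level : !gpio_level;
    }

    /* native CS -> GPIO number, mirroring bcm2835_spi_setup() */
    static int cs_to_gpio(int chip_base, int chip_select)
    {
        return chip_base + 8 - chip_select;    /* CS0 -> base+8, CS1 -> base+7 */
    }

    int main(void)
    {
        printf("active-low CS, level 0 -> enabled=%d\n", cs_enabled(0, false));
        printf("active-high CS, level 1 -> enabled=%d\n",
               cs_enabled(MODE_CS_HIGH, true));
        printf("gpiochip base 0, CS1 -> GPIO %d\n", cs_to_gpio(0, 1));
        return 0;
    }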
@@ -312,13 +425,14 @@ static int bcm2835_spi_probe(struct platform_device *pdev) | |||
312 | master->mode_bits = BCM2835_SPI_MODE_BITS; | 425 | master->mode_bits = BCM2835_SPI_MODE_BITS; |
313 | master->bits_per_word_mask = SPI_BPW_MASK(8); | 426 | master->bits_per_word_mask = SPI_BPW_MASK(8); |
314 | master->num_chipselect = 3; | 427 | master->num_chipselect = 3; |
315 | master->transfer_one_message = bcm2835_spi_transfer_one; | 428 | master->setup = bcm2835_spi_setup; |
429 | master->set_cs = bcm2835_spi_set_cs; | ||
430 | master->transfer_one = bcm2835_spi_transfer_one; | ||
431 | master->handle_err = bcm2835_spi_handle_err; | ||
316 | master->dev.of_node = pdev->dev.of_node; | 432 | master->dev.of_node = pdev->dev.of_node; |
317 | 433 | ||
318 | bs = spi_master_get_devdata(master); | 434 | bs = spi_master_get_devdata(master); |
319 | 435 | ||
320 | init_completion(&bs->done); | ||
321 | |||
322 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 436 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
323 | bs->regs = devm_ioremap_resource(&pdev->dev, res); | 437 | bs->regs = devm_ioremap_resource(&pdev->dev, res); |
324 | if (IS_ERR(bs->regs)) { | 438 | if (IS_ERR(bs->regs)) { |
@@ -343,13 +457,13 @@ static int bcm2835_spi_probe(struct platform_device *pdev) | |||
343 | clk_prepare_enable(bs->clk); | 457 | clk_prepare_enable(bs->clk); |
344 | 458 | ||
345 | err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, | 459 | err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, |
346 | dev_name(&pdev->dev), master); | 460 | dev_name(&pdev->dev), master); |
347 | if (err) { | 461 | if (err) { |
348 | dev_err(&pdev->dev, "could not request IRQ: %d\n", err); | 462 | dev_err(&pdev->dev, "could not request IRQ: %d\n", err); |
349 | goto out_clk_disable; | 463 | goto out_clk_disable; |
350 | } | 464 | } |
351 | 465 | ||
352 | /* initialise the hardware */ | 466 | /* initialise the hardware with the default polarities */ |
353 | bcm2835_wr(bs, BCM2835_SPI_CS, | 467 | bcm2835_wr(bs, BCM2835_SPI_CS, |
354 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); | 468 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); |
355 | 469 | ||
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c index 3fb91c81015a..1520554978a3 100644 --- a/drivers/spi/spi-bcm53xx.c +++ b/drivers/spi/spi-bcm53xx.c | |||
@@ -44,7 +44,7 @@ static int bcm53xxspi_wait(struct bcm53xxspi *b53spi, unsigned int timeout_ms) | |||
44 | u32 tmp; | 44 | u32 tmp; |
45 | 45 | ||
46 | /* SPE bit has to be 0 before we read MSPI STATUS */ | 46 | /* SPE bit has to be 0 before we read MSPI STATUS */ |
47 | deadline = jiffies + BCM53XXSPI_SPE_TIMEOUT_MS * HZ / 1000; | 47 | deadline = jiffies + msecs_to_jiffies(BCM53XXSPI_SPE_TIMEOUT_MS); |
48 | do { | 48 | do { |
49 | tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2); | 49 | tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2); |
50 | if (!(tmp & B53SPI_MSPI_SPCR2_SPE)) | 50 | if (!(tmp & B53SPI_MSPI_SPCR2_SPE)) |
@@ -56,7 +56,7 @@ static int bcm53xxspi_wait(struct bcm53xxspi *b53spi, unsigned int timeout_ms) | |||
56 | goto spi_timeout; | 56 | goto spi_timeout; |
57 | 57 | ||
58 | /* Check status */ | 58 | /* Check status */ |
59 | deadline = jiffies + timeout_ms * HZ / 1000; | 59 | deadline = jiffies + msecs_to_jiffies(timeout_ms); |
60 | do { | 60 | do { |
61 | tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_MSPI_STATUS); | 61 | tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_MSPI_STATUS); |
62 | if (tmp & B53SPI_MSPI_MSPI_STATUS_SPIF) { | 62 | if (tmp & B53SPI_MSPI_MSPI_STATUS_SPIF) { |
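The spi-bcm53xx hunks replace the open-coded `timeout_ms * HZ / 1000` with msecs_to_jiffies(). The open-coded form truncates toward zero, so a short timeout can collapse to zero jiffies on a low-HZ kernel, while msecs_to_jiffies() rounds up. A rough user-space model of the difference, with HZ fixed to 250 purely for illustration:

    #include <stdio.h>

    #define HZ 250    /* illustrative tick rate, not a kernel value */

    static unsigned long naive_ms_to_jiffies(unsigned int ms)
    {
        return (unsigned long)ms * HZ / 1000;          /* truncates */
    }

    static unsigned long rounded_ms_to_jiffies(unsigned int ms)
    {
        /* rounds up, roughly what msecs_to_jiffies() guarantees */
        return ((unsigned long)ms * HZ + 999) / 1000;
    }

    int main(void)
    {
        unsigned int ms;

        for (ms = 1; ms <= 5; ms++)
            printf("%u ms -> naive %lu jiffies, rounded-up %lu jiffies\n",
                   ms, naive_ms_to_jiffies(ms), rounded_ms_to_jiffies(ms));
        return 0;
    }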
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c index 37079937d2f7..a3d65b4f4944 100644 --- a/drivers/spi/spi-bfin5xx.c +++ b/drivers/spi/spi-bfin5xx.c | |||
@@ -559,7 +559,7 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
559 | struct spi_transfer *previous = NULL; | 559 | struct spi_transfer *previous = NULL; |
560 | struct bfin_spi_slave_data *chip = NULL; | 560 | struct bfin_spi_slave_data *chip = NULL; |
561 | unsigned int bits_per_word; | 561 | unsigned int bits_per_word; |
562 | u16 cr, cr_width, dma_width, dma_config; | 562 | u16 cr, cr_width = 0, dma_width, dma_config; |
563 | u32 tranf_success = 1; | 563 | u32 tranf_success = 1; |
564 | u8 full_duplex = 0; | 564 | u8 full_duplex = 0; |
565 | 565 | ||
@@ -648,7 +648,6 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
648 | } else if (bits_per_word == 8) { | 648 | } else if (bits_per_word == 8) { |
649 | drv_data->n_bytes = bits_per_word/8; | 649 | drv_data->n_bytes = bits_per_word/8; |
650 | drv_data->len = transfer->len; | 650 | drv_data->len = transfer->len; |
651 | cr_width = 0; | ||
652 | drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; | 651 | drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; |
653 | } | 652 | } |
654 | cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); | 653 | cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); |
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h index c616e41521be..06b34e5bcfa3 100644 --- a/drivers/spi/spi-bitbang-txrx.h +++ b/drivers/spi/spi-bitbang-txrx.h | |||
@@ -49,12 +49,17 @@ bitbang_txrx_be_cpha0(struct spi_device *spi, | |||
49 | { | 49 | { |
50 | /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ | 50 | /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ |
51 | 51 | ||
52 | bool oldbit = !(word & 1); | ||
52 | /* clock starts at inactive polarity */ | 53 | /* clock starts at inactive polarity */ |
53 | for (word <<= (32 - bits); likely(bits); bits--) { | 54 | for (word <<= (32 - bits); likely(bits); bits--) { |
54 | 55 | ||
55 | /* setup MSB (to slave) on trailing edge */ | 56 | /* setup MSB (to slave) on trailing edge */ |
56 | if ((flags & SPI_MASTER_NO_TX) == 0) | 57 | if ((flags & SPI_MASTER_NO_TX) == 0) { |
57 | setmosi(spi, word & (1 << 31)); | 58 | if ((word & (1 << 31)) != oldbit) { |
59 | setmosi(spi, word & (1 << 31)); | ||
60 | oldbit = word & (1 << 31); | ||
61 | } | ||
62 | } | ||
58 | spidelay(nsecs); /* T(setup) */ | 63 | spidelay(nsecs); /* T(setup) */ |
59 | 64 | ||
60 | setsck(spi, !cpol); | 65 | setsck(spi, !cpol); |
@@ -76,13 +81,18 @@ bitbang_txrx_be_cpha1(struct spi_device *spi, | |||
76 | { | 81 | { |
77 | /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ | 82 | /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ |
78 | 83 | ||
84 | bool oldbit = !(word & (1 << 31)); | ||
79 | /* clock starts at inactive polarity */ | 85 | /* clock starts at inactive polarity */ |
80 | for (word <<= (32 - bits); likely(bits); bits--) { | 86 | for (word <<= (32 - bits); likely(bits); bits--) { |
81 | 87 | ||
82 | /* setup MSB (to slave) on leading edge */ | 88 | /* setup MSB (to slave) on leading edge */ |
83 | setsck(spi, !cpol); | 89 | setsck(spi, !cpol); |
84 | if ((flags & SPI_MASTER_NO_TX) == 0) | 90 | if ((flags & SPI_MASTER_NO_TX) == 0) { |
85 | setmosi(spi, word & (1 << 31)); | 91 | if ((word & (1 << 31)) != oldbit) { |
92 | setmosi(spi, word & (1 << 31)); | ||
93 | oldbit = word & (1 << 31); | ||
94 | } | ||
95 | } | ||
86 | spidelay(nsecs); /* T(setup) */ | 96 | spidelay(nsecs); /* T(setup) */ |
87 | 97 | ||
88 | setsck(spi, cpol); | 98 | setsck(spi, cpol); |
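Both spi-bitbang-txrx.h hunks cache the level last written to MOSI and only call setmosi() when the outgoing bit actually changes, saving a GPIO access for every repeated bit. A simplified user-space model of that idea (clock edges and the T(setup)/T(hold) delays are omitted; the counter stands in for the GPIO write):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int mosi_writes;    /* counts simulated setmosi() calls */

    static void set_mosi(bool level)
    {
        (void)level;    /* a real driver would drive the pin here */
        mosi_writes++;
    }

    /* shift one word out MSB-first, touching MOSI only when the bit changes */
    static void tx_word(unsigned int word, unsigned int bits)
    {
        bool oldbit, bit;

        word <<= (32 - bits);
        oldbit = !(word & (1u << 31));    /* complement of the first bit: forces one write */

        for (; bits; bits--, word <<= 1) {
            bit = !!(word & (1u << 31));
            if (bit != oldbit) {
                set_mosi(bit);
                oldbit = bit;
            }
        }
    }

    int main(void)
    {
        tx_word(0xFF00FF00u, 32);
        printf("32 bits shifted with %u MOSI writes\n", mosi_writes);
        return 0;
    }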
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index 5ef6638d5e8a..840a4984d365 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c | |||
@@ -180,7 +180,6 @@ int spi_bitbang_setup(struct spi_device *spi) | |||
180 | { | 180 | { |
181 | struct spi_bitbang_cs *cs = spi->controller_state; | 181 | struct spi_bitbang_cs *cs = spi->controller_state; |
182 | struct spi_bitbang *bitbang; | 182 | struct spi_bitbang *bitbang; |
183 | int retval; | ||
184 | unsigned long flags; | 183 | unsigned long flags; |
185 | 184 | ||
186 | bitbang = spi_master_get_devdata(spi->master); | 185 | bitbang = spi_master_get_devdata(spi->master); |
@@ -197,9 +196,11 @@ int spi_bitbang_setup(struct spi_device *spi) | |||
197 | if (!cs->txrx_word) | 196 | if (!cs->txrx_word) |
198 | return -EINVAL; | 197 | return -EINVAL; |
199 | 198 | ||
200 | retval = bitbang->setup_transfer(spi, NULL); | 199 | if (bitbang->setup_transfer) { |
201 | if (retval < 0) | 200 | int retval = bitbang->setup_transfer(spi, NULL); |
202 | return retval; | 201 | if (retval < 0) |
202 | return retval; | ||
203 | } | ||
203 | 204 | ||
204 | dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); | 205 | dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); |
205 | 206 | ||
@@ -295,9 +296,11 @@ static int spi_bitbang_transfer_one(struct spi_master *master, | |||
295 | 296 | ||
296 | /* init (-1) or override (1) transfer params */ | 297 | /* init (-1) or override (1) transfer params */ |
297 | if (do_setup != 0) { | 298 | if (do_setup != 0) { |
298 | status = bitbang->setup_transfer(spi, t); | 299 | if (bitbang->setup_transfer) { |
299 | if (status < 0) | 300 | status = bitbang->setup_transfer(spi, t); |
300 | break; | 301 | if (status < 0) |
302 | break; | ||
303 | } | ||
301 | if (do_setup == -1) | 304 | if (do_setup == -1) |
302 | do_setup = 0; | 305 | do_setup = 0; |
303 | } | 306 | } |
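The two spi-bitbang.c hunks apply the same fix: setup_transfer is now treated as an optional hook and is NULL-checked before being invoked, so bitbang drivers with nothing to configure no longer need a stub. A stand-alone sketch of that call-if-present pattern (the struct and names are illustrative, not the kernel's spi_bitbang):

    #include <stdio.h>

    struct bitbang_ops {
        /* optional: may be left NULL by drivers with nothing to configure */
        int (*setup_transfer)(unsigned int speed_hz);
    };

    static int call_setup(const struct bitbang_ops *ops, unsigned int speed_hz)
    {
        if (ops->setup_transfer) {
            int ret = ops->setup_transfer(speed_hz);

            if (ret < 0)
                return ret;    /* propagate the hook's error */
        }
        return 0;              /* no hook means nothing to do */
    }

    static int demo_setup(unsigned int speed_hz)
    {
        printf("configuring for %u Hz\n", speed_hz);
        return 0;
    }

    int main(void)
    {
        struct bitbang_ops with_hook = { .setup_transfer = demo_setup };
        struct bitbang_ops without_hook = { 0 };

        printf("with hook: %d\n", call_setup(&with_hook, 500000));
        printf("without hook: %d\n", call_setup(&without_hook, 500000));
        return 0;
    }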
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index a0197fd4e95c..bb1052e748f2 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
@@ -23,29 +23,31 @@ | |||
23 | #include "spi-dw.h" | 23 | #include "spi-dw.h" |
24 | 24 | ||
25 | #ifdef CONFIG_SPI_DW_MID_DMA | 25 | #ifdef CONFIG_SPI_DW_MID_DMA |
26 | #include <linux/intel_mid_dma.h> | ||
27 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
27 | #include <linux/platform_data/dma-dw.h> | ||
28 | 28 | ||
29 | #define RX_BUSY 0 | 29 | #define RX_BUSY 0 |
30 | #define TX_BUSY 1 | 30 | #define TX_BUSY 1 |
31 | 31 | ||
32 | struct mid_dma { | 32 | static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 }; |
33 | struct intel_mid_dma_slave dmas_tx; | 33 | static struct dw_dma_slave mid_dma_rx = { .src_id = 0 }; |
34 | struct intel_mid_dma_slave dmas_rx; | ||
35 | }; | ||
36 | 34 | ||
37 | static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) | 35 | static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) |
38 | { | 36 | { |
39 | struct dw_spi *dws = param; | 37 | struct dw_dma_slave *s = param; |
38 | |||
39 | if (s->dma_dev != chan->device->dev) | ||
40 | return false; | ||
40 | 41 | ||
41 | return dws->dma_dev == chan->device->dev; | 42 | chan->private = s; |
43 | return true; | ||
42 | } | 44 | } |
43 | 45 | ||
44 | static int mid_spi_dma_init(struct dw_spi *dws) | 46 | static int mid_spi_dma_init(struct dw_spi *dws) |
45 | { | 47 | { |
46 | struct mid_dma *dw_dma = dws->dma_priv; | ||
47 | struct pci_dev *dma_dev; | 48 | struct pci_dev *dma_dev; |
48 | struct intel_mid_dma_slave *rxs, *txs; | 49 | struct dw_dma_slave *tx = dws->dma_tx; |
50 | struct dw_dma_slave *rx = dws->dma_rx; | ||
49 | dma_cap_mask_t mask; | 51 | dma_cap_mask_t mask; |
50 | 52 | ||
51 | /* | 53 | /* |
@@ -56,28 +58,22 @@ static int mid_spi_dma_init(struct dw_spi *dws) | |||
56 | if (!dma_dev) | 58 | if (!dma_dev) |
57 | return -ENODEV; | 59 | return -ENODEV; |
58 | 60 | ||
59 | dws->dma_dev = &dma_dev->dev; | ||
60 | |||
61 | dma_cap_zero(mask); | 61 | dma_cap_zero(mask); |
62 | dma_cap_set(DMA_SLAVE, mask); | 62 | dma_cap_set(DMA_SLAVE, mask); |
63 | 63 | ||
64 | /* 1. Init rx channel */ | 64 | /* 1. Init rx channel */ |
65 | dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); | 65 | rx->dma_dev = &dma_dev->dev; |
66 | dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx); | ||
66 | if (!dws->rxchan) | 67 | if (!dws->rxchan) |
67 | goto err_exit; | 68 | goto err_exit; |
68 | rxs = &dw_dma->dmas_rx; | 69 | dws->master->dma_rx = dws->rxchan; |
69 | rxs->hs_mode = LNW_DMA_HW_HS; | ||
70 | rxs->cfg_mode = LNW_DMA_PER_TO_MEM; | ||
71 | dws->rxchan->private = rxs; | ||
72 | 70 | ||
73 | /* 2. Init tx channel */ | 71 | /* 2. Init tx channel */ |
74 | dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); | 72 | tx->dma_dev = &dma_dev->dev; |
73 | dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx); | ||
75 | if (!dws->txchan) | 74 | if (!dws->txchan) |
76 | goto free_rxchan; | 75 | goto free_rxchan; |
77 | txs = &dw_dma->dmas_tx; | 76 | dws->master->dma_tx = dws->txchan; |
78 | txs->hs_mode = LNW_DMA_HW_HS; | ||
79 | txs->cfg_mode = LNW_DMA_MEM_TO_PER; | ||
80 | dws->txchan->private = txs; | ||
81 | 77 | ||
82 | dws->dma_inited = 1; | 78 | dws->dma_inited = 1; |
83 | return 0; | 79 | return 0; |
@@ -100,6 +96,42 @@ static void mid_spi_dma_exit(struct dw_spi *dws) | |||
100 | dma_release_channel(dws->rxchan); | 96 | dma_release_channel(dws->rxchan); |
101 | } | 97 | } |
102 | 98 | ||
99 | static irqreturn_t dma_transfer(struct dw_spi *dws) | ||
100 | { | ||
101 | u16 irq_status = dw_readl(dws, DW_SPI_ISR); | ||
102 | |||
103 | if (!irq_status) | ||
104 | return IRQ_NONE; | ||
105 | |||
106 | dw_readl(dws, DW_SPI_ICR); | ||
107 | spi_reset_chip(dws); | ||
108 | |||
109 | dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__); | ||
110 | dws->master->cur_msg->status = -EIO; | ||
111 | spi_finalize_current_transfer(dws->master); | ||
112 | return IRQ_HANDLED; | ||
113 | } | ||
114 | |||
115 | static bool mid_spi_can_dma(struct spi_master *master, struct spi_device *spi, | ||
116 | struct spi_transfer *xfer) | ||
117 | { | ||
118 | struct dw_spi *dws = spi_master_get_devdata(master); | ||
119 | |||
120 | if (!dws->dma_inited) | ||
121 | return false; | ||
122 | |||
123 | return xfer->len > dws->fifo_len; | ||
124 | } | ||
125 | |||
126 | static enum dma_slave_buswidth convert_dma_width(u32 dma_width) { | ||
127 | if (dma_width == 1) | ||
128 | return DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
129 | else if (dma_width == 2) | ||
130 | return DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
131 | |||
132 | return DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
133 | } | ||
134 | |||
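The two helpers just added encode the driver's DMA policy: mid_spi_can_dma() keeps transfers that fit in the FIFO on PIO, and convert_dma_width() maps the per-frame byte count onto a DMA bus-width value. A tiny user-space model of both decisions (the enum only mirrors the shape of dma_slave_buswidth; the values are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum buswidth {
        BUSWIDTH_UNDEFINED = 0,
        BUSWIDTH_1_BYTE    = 1,
        BUSWIDTH_2_BYTES   = 2,
    };

    /* PIO can absorb anything that fits in the FIFO; DMA pays off beyond that */
    static bool want_dma(unsigned int xfer_len, unsigned int fifo_len)
    {
        return xfer_len > fifo_len;
    }

    static enum buswidth width_from_bytes(unsigned int n_bytes)
    {
        switch (n_bytes) {
        case 1:  return BUSWIDTH_1_BYTE;
        case 2:  return BUSWIDTH_2_BYTES;
        default: return BUSWIDTH_UNDEFINED;
        }
    }

    int main(void)
    {
        printf("16-entry FIFO, 8-byte transfer  -> DMA? %d\n", want_dma(8, 16));
        printf("16-entry FIFO, 64-byte transfer -> DMA? %d\n", want_dma(64, 16));
        printf("1-byte frames -> bus width %d\n", width_from_bytes(1));
        return 0;
    }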
103 | /* | 135 | /* |
104 | * dws->dma_chan_busy is set before the dma transfer starts, callback for tx | 136 | * dws->dma_chan_busy is set before the dma transfer starts, callback for tx |
105 | * channel will clear a corresponding bit. | 137 | * channel will clear a corresponding bit. |
@@ -108,37 +140,38 @@ static void dw_spi_dma_tx_done(void *arg) | |||
108 | { | 140 | { |
109 | struct dw_spi *dws = arg; | 141 | struct dw_spi *dws = arg; |
110 | 142 | ||
111 | if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY)) | 143 | clear_bit(TX_BUSY, &dws->dma_chan_busy); |
144 | if (test_bit(RX_BUSY, &dws->dma_chan_busy)) | ||
112 | return; | 145 | return; |
113 | dw_spi_xfer_done(dws); | 146 | spi_finalize_current_transfer(dws->master); |
114 | } | 147 | } |
115 | 148 | ||
116 | static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws) | 149 | static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, |
150 | struct spi_transfer *xfer) | ||
117 | { | 151 | { |
118 | struct dma_slave_config txconf; | 152 | struct dma_slave_config txconf; |
119 | struct dma_async_tx_descriptor *txdesc; | 153 | struct dma_async_tx_descriptor *txdesc; |
120 | 154 | ||
121 | if (!dws->tx_dma) | 155 | if (!xfer->tx_buf) |
122 | return NULL; | 156 | return NULL; |
123 | 157 | ||
124 | txconf.direction = DMA_MEM_TO_DEV; | 158 | txconf.direction = DMA_MEM_TO_DEV; |
125 | txconf.dst_addr = dws->dma_addr; | 159 | txconf.dst_addr = dws->dma_addr; |
126 | txconf.dst_maxburst = LNW_DMA_MSIZE_16; | 160 | txconf.dst_maxburst = 16; |
127 | txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 161 | txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
128 | txconf.dst_addr_width = dws->dma_width; | 162 | txconf.dst_addr_width = convert_dma_width(dws->dma_width); |
129 | txconf.device_fc = false; | 163 | txconf.device_fc = false; |
130 | 164 | ||
131 | dmaengine_slave_config(dws->txchan, &txconf); | 165 | dmaengine_slave_config(dws->txchan, &txconf); |
132 | 166 | ||
133 | memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl)); | ||
134 | dws->tx_sgl.dma_address = dws->tx_dma; | ||
135 | dws->tx_sgl.length = dws->len; | ||
136 | |||
137 | txdesc = dmaengine_prep_slave_sg(dws->txchan, | 167 | txdesc = dmaengine_prep_slave_sg(dws->txchan, |
138 | &dws->tx_sgl, | 168 | xfer->tx_sg.sgl, |
139 | 1, | 169 | xfer->tx_sg.nents, |
140 | DMA_MEM_TO_DEV, | 170 | DMA_MEM_TO_DEV, |
141 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 171 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
172 | if (!txdesc) | ||
173 | return NULL; | ||
174 | |||
142 | txdesc->callback = dw_spi_dma_tx_done; | 175 | txdesc->callback = dw_spi_dma_tx_done; |
143 | txdesc->callback_param = dws; | 176 | txdesc->callback_param = dws; |
144 | 177 | ||
@@ -153,74 +186,74 @@ static void dw_spi_dma_rx_done(void *arg) | |||
153 | { | 186 | { |
154 | struct dw_spi *dws = arg; | 187 | struct dw_spi *dws = arg; |
155 | 188 | ||
156 | if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY)) | 189 | clear_bit(RX_BUSY, &dws->dma_chan_busy); |
190 | if (test_bit(TX_BUSY, &dws->dma_chan_busy)) | ||
157 | return; | 191 | return; |
158 | dw_spi_xfer_done(dws); | 192 | spi_finalize_current_transfer(dws->master); |
159 | } | 193 | } |
160 | 194 | ||
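Both reworked completion callbacks now clear only their own busy bit and finalize the transfer only once the other direction has finished as well. A single-threaded user-space model of that bookkeeping (the driver uses atomic bitops on dma_chan_busy; this sketch does not attempt to reproduce the concurrency):

    #include <stdio.h>

    #define TX_BUSY 0
    #define RX_BUSY 1

    static unsigned long chan_busy;

    static void finalize(void)
    {
        printf("both channels idle -> transfer finalized\n");
    }

    /* called from the TX-DMA completion; the RX path is symmetric */
    static void tx_done(void)
    {
        chan_busy &= ~(1UL << TX_BUSY);
        if (chan_busy & (1UL << RX_BUSY))
            return;              /* RX still running; it will finalize */
        finalize();
    }

    static void rx_done(void)
    {
        chan_busy &= ~(1UL << RX_BUSY);
        if (chan_busy & (1UL << TX_BUSY))
            return;              /* TX still running; it will finalize */
        finalize();
    }

    int main(void)
    {
        chan_busy = (1UL << TX_BUSY) | (1UL << RX_BUSY);
        tx_done();    /* RX still busy, nothing happens yet */
        rx_done();    /* last completion finalizes the transfer */
        return 0;
    }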
161 | static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws) | 195 | static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, |
196 | struct spi_transfer *xfer) | ||
162 | { | 197 | { |
163 | struct dma_slave_config rxconf; | 198 | struct dma_slave_config rxconf; |
164 | struct dma_async_tx_descriptor *rxdesc; | 199 | struct dma_async_tx_descriptor *rxdesc; |
165 | 200 | ||
166 | if (!dws->rx_dma) | 201 | if (!xfer->rx_buf) |
167 | return NULL; | 202 | return NULL; |
168 | 203 | ||
169 | rxconf.direction = DMA_DEV_TO_MEM; | 204 | rxconf.direction = DMA_DEV_TO_MEM; |
170 | rxconf.src_addr = dws->dma_addr; | 205 | rxconf.src_addr = dws->dma_addr; |
171 | rxconf.src_maxburst = LNW_DMA_MSIZE_16; | 206 | rxconf.src_maxburst = 16; |
172 | rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 207 | rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
173 | rxconf.src_addr_width = dws->dma_width; | 208 | rxconf.src_addr_width = convert_dma_width(dws->dma_width); |
174 | rxconf.device_fc = false; | 209 | rxconf.device_fc = false; |
175 | 210 | ||
176 | dmaengine_slave_config(dws->rxchan, &rxconf); | 211 | dmaengine_slave_config(dws->rxchan, &rxconf); |
177 | 212 | ||
178 | memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl)); | ||
179 | dws->rx_sgl.dma_address = dws->rx_dma; | ||
180 | dws->rx_sgl.length = dws->len; | ||
181 | |||
182 | rxdesc = dmaengine_prep_slave_sg(dws->rxchan, | 213 | rxdesc = dmaengine_prep_slave_sg(dws->rxchan, |
183 | &dws->rx_sgl, | 214 | xfer->rx_sg.sgl, |
184 | 1, | 215 | xfer->rx_sg.nents, |
185 | DMA_DEV_TO_MEM, | 216 | DMA_DEV_TO_MEM, |
186 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 217 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
218 | if (!rxdesc) | ||
219 | return NULL; | ||
220 | |||
187 | rxdesc->callback = dw_spi_dma_rx_done; | 221 | rxdesc->callback = dw_spi_dma_rx_done; |
188 | rxdesc->callback_param = dws; | 222 | rxdesc->callback_param = dws; |
189 | 223 | ||
190 | return rxdesc; | 224 | return rxdesc; |
191 | } | 225 | } |
192 | 226 | ||
193 | static void dw_spi_dma_setup(struct dw_spi *dws) | 227 | static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) |
194 | { | 228 | { |
195 | u16 dma_ctrl = 0; | 229 | u16 dma_ctrl = 0; |
196 | 230 | ||
197 | spi_enable_chip(dws, 0); | 231 | dw_writel(dws, DW_SPI_DMARDLR, 0xf); |
198 | 232 | dw_writel(dws, DW_SPI_DMATDLR, 0x10); | |
199 | dw_writew(dws, DW_SPI_DMARDLR, 0xf); | ||
200 | dw_writew(dws, DW_SPI_DMATDLR, 0x10); | ||
201 | 233 | ||
202 | if (dws->tx_dma) | 234 | if (xfer->tx_buf) |
203 | dma_ctrl |= SPI_DMA_TDMAE; | 235 | dma_ctrl |= SPI_DMA_TDMAE; |
204 | if (dws->rx_dma) | 236 | if (xfer->rx_buf) |
205 | dma_ctrl |= SPI_DMA_RDMAE; | 237 | dma_ctrl |= SPI_DMA_RDMAE; |
206 | dw_writew(dws, DW_SPI_DMACR, dma_ctrl); | 238 | dw_writel(dws, DW_SPI_DMACR, dma_ctrl); |
239 | |||
240 | /* Set the interrupt mask */ | ||
241 | spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); | ||
242 | |||
243 | dws->transfer_handler = dma_transfer; | ||
207 | 244 | ||
208 | spi_enable_chip(dws, 1); | 245 | return 0; |
209 | } | 246 | } |
210 | 247 | ||
211 | static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | 248 | static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) |
212 | { | 249 | { |
213 | struct dma_async_tx_descriptor *txdesc, *rxdesc; | 250 | struct dma_async_tx_descriptor *txdesc, *rxdesc; |
214 | 251 | ||
215 | /* 1. setup DMA related registers */ | 252 | /* Prepare the TX dma transfer */ |
216 | if (cs_change) | 253 | txdesc = dw_spi_dma_prepare_tx(dws, xfer); |
217 | dw_spi_dma_setup(dws); | ||
218 | |||
219 | /* 2. Prepare the TX dma transfer */ | ||
220 | txdesc = dw_spi_dma_prepare_tx(dws); | ||
221 | 254 | ||
222 | /* 3. Prepare the RX dma transfer */ | 255 | /* Prepare the RX dma transfer */ |
223 | rxdesc = dw_spi_dma_prepare_rx(dws); | 256 | rxdesc = dw_spi_dma_prepare_rx(dws, xfer); |
224 | 257 | ||
225 | /* rx must be started before tx due to spi instinct */ | 258 | /* rx must be started before tx due to spi instinct */ |
226 | if (rxdesc) { | 259 | if (rxdesc) { |
@@ -238,10 +271,25 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
238 | return 0; | 271 | return 0; |
239 | } | 272 | } |
240 | 273 | ||
274 | static void mid_spi_dma_stop(struct dw_spi *dws) | ||
275 | { | ||
276 | if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { | ||
277 | dmaengine_terminate_all(dws->txchan); | ||
278 | clear_bit(TX_BUSY, &dws->dma_chan_busy); | ||
279 | } | ||
280 | if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { | ||
281 | dmaengine_terminate_all(dws->rxchan); | ||
282 | clear_bit(RX_BUSY, &dws->dma_chan_busy); | ||
283 | } | ||
284 | } | ||
285 | |||
241 | static struct dw_spi_dma_ops mid_dma_ops = { | 286 | static struct dw_spi_dma_ops mid_dma_ops = { |
242 | .dma_init = mid_spi_dma_init, | 287 | .dma_init = mid_spi_dma_init, |
243 | .dma_exit = mid_spi_dma_exit, | 288 | .dma_exit = mid_spi_dma_exit, |
289 | .dma_setup = mid_spi_dma_setup, | ||
290 | .can_dma = mid_spi_can_dma, | ||
244 | .dma_transfer = mid_spi_dma_transfer, | 291 | .dma_transfer = mid_spi_dma_transfer, |
292 | .dma_stop = mid_spi_dma_stop, | ||
245 | }; | 293 | }; |
246 | #endif | 294 | #endif |
247 | 295 | ||
@@ -274,9 +322,8 @@ int dw_spi_mid_init(struct dw_spi *dws) | |||
274 | iounmap(clk_reg); | 322 | iounmap(clk_reg); |
275 | 323 | ||
276 | #ifdef CONFIG_SPI_DW_MID_DMA | 324 | #ifdef CONFIG_SPI_DW_MID_DMA |
277 | dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); | 325 | dws->dma_tx = &mid_dma_tx; |
278 | if (!dws->dma_priv) | 326 | dws->dma_rx = &mid_dma_rx; |
279 | return -ENOMEM; | ||
280 | dws->dma_ops = &mid_dma_ops; | 327 | dws->dma_ops = &mid_dma_ops; |
281 | #endif | 328 | #endif |
282 | return 0; | 329 | return 0; |
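The reworked Intel MID DMA init hands a dw_dma_slave to dma_request_channel()'s filter callback: the filter rejects channels that do not belong to the wanted DMA controller and stashes the slave parameters in chan->private for the one it accepts. A stripped-down user-space model of that filter pattern (the structs here are stand-ins, not the dmaengine API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct device  { const char *name; };

    struct dma_chan {
        const struct device *dev;    /* controller this channel belongs to */
        void *private;               /* per-channel slave parameters */
    };

    struct dw_dma_slave {
        const struct device *dma_dev;    /* the controller we insist on */
        int id;                          /* request line, e.g. .dst_id = 1 */
    };

    /* mirrors mid_spi_dma_chan_filter(): accept only channels of the wanted device */
    static bool chan_filter(struct dma_chan *chan, void *param)
    {
        struct dw_dma_slave *s = param;

        if (s->dma_dev != chan->dev)
            return false;

        chan->private = s;    /* hand the slave parameters to the DMA driver */
        return true;
    }

    int main(void)
    {
        struct device dw_dma = { "dw_dmac" }, other = { "other-dma" };
        struct dma_chan chans[] = { { &other, NULL }, { &dw_dma, NULL } };
        struct dw_dma_slave tx = { .dma_dev = &dw_dma, .id = 1 };
        size_t i;

        for (i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
            if (chan_filter(&chans[i], &tx))
                printf("picked channel %zu on %s\n", i, chans[i].dev->name);
        return 0;
    }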
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index 5ba331047cbe..6d331e0db331 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c | |||
@@ -36,13 +36,13 @@ struct spi_pci_desc { | |||
36 | 36 | ||
37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { | 37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { |
38 | .setup = dw_spi_mid_init, | 38 | .setup = dw_spi_mid_init, |
39 | .num_cs = 32, | 39 | .num_cs = 5, |
40 | .bus_num = 0, | 40 | .bus_num = 0, |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { | 43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { |
44 | .setup = dw_spi_mid_init, | 44 | .setup = dw_spi_mid_init, |
45 | .num_cs = 4, | 45 | .num_cs = 2, |
46 | .bus_num = 1, | 46 | .bus_num = 1, |
47 | }; | 47 | }; |
48 | 48 | ||
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 5a97a62b298a..8d67d03c71eb 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c | |||
@@ -28,11 +28,6 @@ | |||
28 | #include <linux/debugfs.h> | 28 | #include <linux/debugfs.h> |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | #define START_STATE ((void *)0) | ||
32 | #define RUNNING_STATE ((void *)1) | ||
33 | #define DONE_STATE ((void *)2) | ||
34 | #define ERROR_STATE ((void *)-1) | ||
35 | |||
36 | /* Slave spi_dev related */ | 31 | /* Slave spi_dev related */ |
37 | struct chip_data { | 32 | struct chip_data { |
38 | u16 cr0; | 33 | u16 cr0; |
@@ -143,13 +138,26 @@ static inline void dw_spi_debugfs_remove(struct dw_spi *dws) | |||
143 | } | 138 | } |
144 | #endif /* CONFIG_DEBUG_FS */ | 139 | #endif /* CONFIG_DEBUG_FS */ |
145 | 140 | ||
141 | static void dw_spi_set_cs(struct spi_device *spi, bool enable) | ||
142 | { | ||
143 | struct dw_spi *dws = spi_master_get_devdata(spi->master); | ||
144 | struct chip_data *chip = spi_get_ctldata(spi); | ||
145 | |||
146 | /* Chip select logic is inverted from spi_set_cs() */ | ||
147 | if (chip && chip->cs_control) | ||
148 | chip->cs_control(!enable); | ||
149 | |||
150 | if (!enable) | ||
151 | dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); | ||
152 | } | ||
153 | |||
146 | /* Return the max entries we can fill into tx fifo */ | 154 | /* Return the max entries we can fill into tx fifo */ |
147 | static inline u32 tx_max(struct dw_spi *dws) | 155 | static inline u32 tx_max(struct dw_spi *dws) |
148 | { | 156 | { |
149 | u32 tx_left, tx_room, rxtx_gap; | 157 | u32 tx_left, tx_room, rxtx_gap; |
150 | 158 | ||
151 | tx_left = (dws->tx_end - dws->tx) / dws->n_bytes; | 159 | tx_left = (dws->tx_end - dws->tx) / dws->n_bytes; |
152 | tx_room = dws->fifo_len - dw_readw(dws, DW_SPI_TXFLR); | 160 | tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR); |
153 | 161 | ||
154 | /* | 162 | /* |
155 | * Another concern is about the tx/rx mismatch, we | 163 | * Another concern is about the tx/rx mismatch, we |
@@ -170,7 +178,7 @@ static inline u32 rx_max(struct dw_spi *dws) | |||
170 | { | 178 | { |
171 | u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes; | 179 | u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes; |
172 | 180 | ||
173 | return min_t(u32, rx_left, dw_readw(dws, DW_SPI_RXFLR)); | 181 | return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR)); |
174 | } | 182 | } |
175 | 183 | ||
176 | static void dw_writer(struct dw_spi *dws) | 184 | static void dw_writer(struct dw_spi *dws) |
@@ -186,7 +194,7 @@ static void dw_writer(struct dw_spi *dws) | |||
186 | else | 194 | else |
187 | txw = *(u16 *)(dws->tx); | 195 | txw = *(u16 *)(dws->tx); |
188 | } | 196 | } |
189 | dw_writew(dws, DW_SPI_DR, txw); | 197 | dw_writel(dws, DW_SPI_DR, txw); |
190 | dws->tx += dws->n_bytes; | 198 | dws->tx += dws->n_bytes; |
191 | } | 199 | } |
192 | } | 200 | } |
@@ -197,7 +205,7 @@ static void dw_reader(struct dw_spi *dws) | |||
197 | u16 rxw; | 205 | u16 rxw; |
198 | 206 | ||
199 | while (max--) { | 207 | while (max--) { |
200 | rxw = dw_readw(dws, DW_SPI_DR); | 208 | rxw = dw_readl(dws, DW_SPI_DR); |
201 | /* Care rx only if the transfer's original "rx" is not null */ | 209 | /* Care rx only if the transfer's original "rx" is not null */ |
202 | if (dws->rx_end - dws->len) { | 210 | if (dws->rx_end - dws->len) { |
203 | if (dws->n_bytes == 1) | 211 | if (dws->n_bytes == 1) |
@@ -209,103 +217,22 @@ static void dw_reader(struct dw_spi *dws) | |||
209 | } | 217 | } |
210 | } | 218 | } |
211 | 219 | ||
212 | static void *next_transfer(struct dw_spi *dws) | ||
213 | { | ||
214 | struct spi_message *msg = dws->cur_msg; | ||
215 | struct spi_transfer *trans = dws->cur_transfer; | ||
216 | |||
217 | /* Move to next transfer */ | ||
218 | if (trans->transfer_list.next != &msg->transfers) { | ||
219 | dws->cur_transfer = | ||
220 | list_entry(trans->transfer_list.next, | ||
221 | struct spi_transfer, | ||
222 | transfer_list); | ||
223 | return RUNNING_STATE; | ||
224 | } | ||
225 | |||
226 | return DONE_STATE; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Note: first step is the protocol driver prepares | ||
231 | * a dma-capable memory, and this func just need translate | ||
232 | * the virt addr to physical | ||
233 | */ | ||
234 | static int map_dma_buffers(struct dw_spi *dws) | ||
235 | { | ||
236 | if (!dws->cur_msg->is_dma_mapped | ||
237 | || !dws->dma_inited | ||
238 | || !dws->cur_chip->enable_dma | ||
239 | || !dws->dma_ops) | ||
240 | return 0; | ||
241 | |||
242 | if (dws->cur_transfer->tx_dma) | ||
243 | dws->tx_dma = dws->cur_transfer->tx_dma; | ||
244 | |||
245 | if (dws->cur_transfer->rx_dma) | ||
246 | dws->rx_dma = dws->cur_transfer->rx_dma; | ||
247 | |||
248 | return 1; | ||
249 | } | ||
250 | |||
251 | /* Caller already set message->status; dma and pio irqs are blocked */ | ||
252 | static void giveback(struct dw_spi *dws) | ||
253 | { | ||
254 | struct spi_transfer *last_transfer; | ||
255 | struct spi_message *msg; | ||
256 | |||
257 | msg = dws->cur_msg; | ||
258 | dws->cur_msg = NULL; | ||
259 | dws->cur_transfer = NULL; | ||
260 | dws->prev_chip = dws->cur_chip; | ||
261 | dws->cur_chip = NULL; | ||
262 | dws->dma_mapped = 0; | ||
263 | |||
264 | last_transfer = list_last_entry(&msg->transfers, struct spi_transfer, | ||
265 | transfer_list); | ||
266 | |||
267 | if (!last_transfer->cs_change) | ||
268 | spi_chip_sel(dws, msg->spi, 0); | ||
269 | |||
270 | spi_finalize_current_message(dws->master); | ||
271 | } | ||
272 | |||
273 | static void int_error_stop(struct dw_spi *dws, const char *msg) | 220 | static void int_error_stop(struct dw_spi *dws, const char *msg) |
274 | { | 221 | { |
275 | /* Stop the hw */ | 222 | spi_reset_chip(dws); |
276 | spi_enable_chip(dws, 0); | ||
277 | 223 | ||
278 | dev_err(&dws->master->dev, "%s\n", msg); | 224 | dev_err(&dws->master->dev, "%s\n", msg); |
279 | dws->cur_msg->state = ERROR_STATE; | 225 | dws->master->cur_msg->status = -EIO; |
280 | tasklet_schedule(&dws->pump_transfers); | 226 | spi_finalize_current_transfer(dws->master); |
281 | } | 227 | } |
282 | 228 | ||
283 | void dw_spi_xfer_done(struct dw_spi *dws) | ||
284 | { | ||
285 | /* Update total byte transferred return count actual bytes read */ | ||
286 | dws->cur_msg->actual_length += dws->len; | ||
287 | |||
288 | /* Move to next transfer */ | ||
289 | dws->cur_msg->state = next_transfer(dws); | ||
290 | |||
291 | /* Handle end of message */ | ||
292 | if (dws->cur_msg->state == DONE_STATE) { | ||
293 | dws->cur_msg->status = 0; | ||
294 | giveback(dws); | ||
295 | } else | ||
296 | tasklet_schedule(&dws->pump_transfers); | ||
297 | } | ||
298 | EXPORT_SYMBOL_GPL(dw_spi_xfer_done); | ||
299 | |||
300 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) | 229 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) |
301 | { | 230 | { |
302 | u16 irq_status = dw_readw(dws, DW_SPI_ISR); | 231 | u16 irq_status = dw_readl(dws, DW_SPI_ISR); |
303 | 232 | ||
304 | /* Error handling */ | 233 | /* Error handling */ |
305 | if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { | 234 | if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { |
306 | dw_readw(dws, DW_SPI_TXOICR); | 235 | dw_readl(dws, DW_SPI_ICR); |
307 | dw_readw(dws, DW_SPI_RXOICR); | ||
308 | dw_readw(dws, DW_SPI_RXUICR); | ||
309 | int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun"); | 236 | int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun"); |
310 | return IRQ_HANDLED; | 237 | return IRQ_HANDLED; |
311 | } | 238 | } |
@@ -313,7 +240,7 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) | |||
313 | dw_reader(dws); | 240 | dw_reader(dws); |
314 | if (dws->rx_end == dws->rx) { | 241 | if (dws->rx_end == dws->rx) { |
315 | spi_mask_intr(dws, SPI_INT_TXEI); | 242 | spi_mask_intr(dws, SPI_INT_TXEI); |
316 | dw_spi_xfer_done(dws); | 243 | spi_finalize_current_transfer(dws->master); |
317 | return IRQ_HANDLED; | 244 | return IRQ_HANDLED; |
318 | } | 245 | } |
319 | if (irq_status & SPI_INT_TXEI) { | 246 | if (irq_status & SPI_INT_TXEI) { |
@@ -328,13 +255,14 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) | |||
328 | 255 | ||
329 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) | 256 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) |
330 | { | 257 | { |
331 | struct dw_spi *dws = dev_id; | 258 | struct spi_master *master = dev_id; |
332 | u16 irq_status = dw_readw(dws, DW_SPI_ISR) & 0x3f; | 259 | struct dw_spi *dws = spi_master_get_devdata(master); |
260 | u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f; | ||
333 | 261 | ||
334 | if (!irq_status) | 262 | if (!irq_status) |
335 | return IRQ_NONE; | 263 | return IRQ_NONE; |
336 | 264 | ||
337 | if (!dws->cur_msg) { | 265 | if (!master->cur_msg) { |
338 | spi_mask_intr(dws, SPI_INT_TXEI); | 266 | spi_mask_intr(dws, SPI_INT_TXEI); |
339 | return IRQ_HANDLED; | 267 | return IRQ_HANDLED; |
340 | } | 268 | } |
@@ -343,7 +271,7 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id) | |||
343 | } | 271 | } |
344 | 272 | ||
345 | /* Must be called inside pump_transfers() */ | 273 | /* Must be called inside pump_transfers() */ |
346 | static void poll_transfer(struct dw_spi *dws) | 274 | static int poll_transfer(struct dw_spi *dws) |
347 | { | 275 | { |
348 | do { | 276 | do { |
349 | dw_writer(dws); | 277 | dw_writer(dws); |
@@ -351,64 +279,32 @@ static void poll_transfer(struct dw_spi *dws) | |||
351 | cpu_relax(); | 279 | cpu_relax(); |
352 | } while (dws->rx_end > dws->rx); | 280 | } while (dws->rx_end > dws->rx); |
353 | 281 | ||
354 | dw_spi_xfer_done(dws); | 282 | return 0; |
355 | } | 283 | } |
356 | 284 | ||
357 | static void pump_transfers(unsigned long data) | 285 | static int dw_spi_transfer_one(struct spi_master *master, |
286 | struct spi_device *spi, struct spi_transfer *transfer) | ||
358 | { | 287 | { |
359 | struct dw_spi *dws = (struct dw_spi *)data; | 288 | struct dw_spi *dws = spi_master_get_devdata(master); |
360 | struct spi_message *message = NULL; | 289 | struct chip_data *chip = spi_get_ctldata(spi); |
361 | struct spi_transfer *transfer = NULL; | ||
362 | struct spi_transfer *previous = NULL; | ||
363 | struct spi_device *spi = NULL; | ||
364 | struct chip_data *chip = NULL; | ||
365 | u8 bits = 0; | ||
366 | u8 imask = 0; | 290 | u8 imask = 0; |
367 | u8 cs_change = 0; | 291 | u16 txlevel = 0; |
368 | u16 txint_level = 0; | ||
369 | u16 clk_div = 0; | 292 | u16 clk_div = 0; |
370 | u32 speed = 0; | 293 | u32 speed = 0; |
371 | u32 cr0 = 0; | 294 | u32 cr0 = 0; |
295 | int ret; | ||
372 | 296 | ||
373 | /* Get current state information */ | 297 | dws->dma_mapped = 0; |
374 | message = dws->cur_msg; | ||
375 | transfer = dws->cur_transfer; | ||
376 | chip = dws->cur_chip; | ||
377 | spi = message->spi; | ||
378 | |||
379 | if (message->state == ERROR_STATE) { | ||
380 | message->status = -EIO; | ||
381 | goto early_exit; | ||
382 | } | ||
383 | |||
384 | /* Handle end of message */ | ||
385 | if (message->state == DONE_STATE) { | ||
386 | message->status = 0; | ||
387 | goto early_exit; | ||
388 | } | ||
389 | |||
390 | /* Delay if requested at end of transfer */ | ||
391 | if (message->state == RUNNING_STATE) { | ||
392 | previous = list_entry(transfer->transfer_list.prev, | ||
393 | struct spi_transfer, | ||
394 | transfer_list); | ||
395 | if (previous->delay_usecs) | ||
396 | udelay(previous->delay_usecs); | ||
397 | } | ||
398 | |||
399 | dws->n_bytes = chip->n_bytes; | 298 | dws->n_bytes = chip->n_bytes; |
400 | dws->dma_width = chip->dma_width; | 299 | dws->dma_width = chip->dma_width; |
401 | dws->cs_control = chip->cs_control; | ||
402 | 300 | ||
403 | dws->rx_dma = transfer->rx_dma; | ||
404 | dws->tx_dma = transfer->tx_dma; | ||
405 | dws->tx = (void *)transfer->tx_buf; | 301 | dws->tx = (void *)transfer->tx_buf; |
406 | dws->tx_end = dws->tx + transfer->len; | 302 | dws->tx_end = dws->tx + transfer->len; |
407 | dws->rx = transfer->rx_buf; | 303 | dws->rx = transfer->rx_buf; |
408 | dws->rx_end = dws->rx + transfer->len; | 304 | dws->rx_end = dws->rx + transfer->len; |
409 | dws->len = dws->cur_transfer->len; | 305 | dws->len = transfer->len; |
410 | if (chip != dws->prev_chip) | 306 | |
411 | cs_change = 1; | 307 | spi_enable_chip(dws, 0); |
412 | 308 | ||
413 | cr0 = chip->cr0; | 309 | cr0 = chip->cr0; |
414 | 310 | ||
@@ -416,32 +312,37 @@ static void pump_transfers(unsigned long data) | |||
416 | if (transfer->speed_hz) { | 312 | if (transfer->speed_hz) { |
417 | speed = chip->speed_hz; | 313 | speed = chip->speed_hz; |
418 | 314 | ||
419 | if ((transfer->speed_hz != speed) || (!chip->clk_div)) { | 315 | if ((transfer->speed_hz != speed) || !chip->clk_div) { |
420 | speed = transfer->speed_hz; | 316 | speed = transfer->speed_hz; |
421 | 317 | ||
422 | /* clk_div doesn't support odd number */ | 318 | /* clk_div doesn't support odd number */ |
423 | clk_div = dws->max_freq / speed; | 319 | clk_div = (dws->max_freq / speed + 1) & 0xfffe; |
424 | clk_div = (clk_div + 1) & 0xfffe; | ||
425 | 320 | ||
426 | chip->speed_hz = speed; | 321 | chip->speed_hz = speed; |
427 | chip->clk_div = clk_div; | 322 | chip->clk_div = clk_div; |
323 | |||
324 | spi_set_clk(dws, chip->clk_div); | ||
428 | } | 325 | } |
429 | } | 326 | } |
430 | if (transfer->bits_per_word) { | 327 | if (transfer->bits_per_word) { |
431 | bits = transfer->bits_per_word; | 328 | if (transfer->bits_per_word == 8) { |
432 | dws->n_bytes = dws->dma_width = bits >> 3; | 329 | dws->n_bytes = 1; |
433 | cr0 = (bits - 1) | 330 | dws->dma_width = 1; |
331 | } else if (transfer->bits_per_word == 16) { | ||
332 | dws->n_bytes = 2; | ||
333 | dws->dma_width = 2; | ||
334 | } | ||
335 | cr0 = (transfer->bits_per_word - 1) | ||
434 | | (chip->type << SPI_FRF_OFFSET) | 336 | | (chip->type << SPI_FRF_OFFSET) |
435 | | (spi->mode << SPI_MODE_OFFSET) | 337 | | (spi->mode << SPI_MODE_OFFSET) |
436 | | (chip->tmode << SPI_TMOD_OFFSET); | 338 | | (chip->tmode << SPI_TMOD_OFFSET); |
437 | } | 339 | } |
438 | message->state = RUNNING_STATE; | ||
439 | 340 | ||
440 | /* | 341 | /* |
441 | * Adjust transfer mode if necessary. Requires platform dependent | 342 | * Adjust transfer mode if necessary. Requires platform dependent |
442 | * chipselect mechanism. | 343 | * chipselect mechanism. |
443 | */ | 344 | */ |
444 | if (dws->cs_control) { | 345 | if (chip->cs_control) { |
445 | if (dws->rx && dws->tx) | 346 | if (dws->rx && dws->tx) |
446 | chip->tmode = SPI_TMOD_TR; | 347 | chip->tmode = SPI_TMOD_TR; |
447 | else if (dws->rx) | 348 | else if (dws->rx) |
@@ -453,80 +354,60 @@ static void pump_transfers(unsigned long data) | |||
453 | cr0 |= (chip->tmode << SPI_TMOD_OFFSET); | 354 | cr0 |= (chip->tmode << SPI_TMOD_OFFSET); |
454 | } | 355 | } |
455 | 356 | ||
357 | dw_writel(dws, DW_SPI_CTRL0, cr0); | ||
358 | |||
456 | /* Check if current transfer is a DMA transaction */ | 359 | /* Check if current transfer is a DMA transaction */ |
457 | dws->dma_mapped = map_dma_buffers(dws); | 360 | if (master->can_dma && master->can_dma(master, spi, transfer)) |
361 | dws->dma_mapped = master->cur_msg_mapped; | ||
362 | |||
363 | /* For poll mode just disable all interrupts */ | ||
364 | spi_mask_intr(dws, 0xff); | ||
458 | 365 | ||
459 | /* | 366 | /* |
460 | * Interrupt mode | 367 | * Interrupt mode |
461 | * we only need to set the TXEI IRQ, as TX/RX always happen synchronously | 368 | * we only need to set the TXEI IRQ, as TX/RX always happen synchronously |
462 | */ | 369 | */ |
463 | if (!dws->dma_mapped && !chip->poll_mode) { | 370 | if (dws->dma_mapped) { |
464 | int templen = dws->len / dws->n_bytes; | 371 | ret = dws->dma_ops->dma_setup(dws, transfer); |
465 | 372 | if (ret < 0) { | |
466 | txint_level = dws->fifo_len / 2; | 373 | spi_enable_chip(dws, 1); |
467 | txint_level = (templen > txint_level) ? txint_level : templen; | 374 | return ret; |
375 | } | ||
376 | } else if (!chip->poll_mode) { | ||
377 | txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes); | ||
378 | dw_writel(dws, DW_SPI_TXFLTR, txlevel); | ||
468 | 379 | ||
380 | /* Set the interrupt mask */ | ||
469 | imask |= SPI_INT_TXEI | SPI_INT_TXOI | | 381 | imask |= SPI_INT_TXEI | SPI_INT_TXOI | |
470 | SPI_INT_RXUI | SPI_INT_RXOI; | 382 | SPI_INT_RXUI | SPI_INT_RXOI; |
383 | spi_umask_intr(dws, imask); | ||
384 | |||
471 | dws->transfer_handler = interrupt_transfer; | 385 | dws->transfer_handler = interrupt_transfer; |
472 | } | 386 | } |
473 | 387 | ||
474 | /* | 388 | spi_enable_chip(dws, 1); |
475 | * Reprogram registers only if | ||
476 | * 1. chip select changes | ||
477 | * 2. clk_div is changed | ||
478 | * 3. control value changes | ||
479 | */ | ||
480 | if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) { | ||
481 | spi_enable_chip(dws, 0); | ||
482 | |||
483 | if (dw_readw(dws, DW_SPI_CTRL0) != cr0) | ||
484 | dw_writew(dws, DW_SPI_CTRL0, cr0); | ||
485 | |||
486 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); | ||
487 | spi_chip_sel(dws, spi, 1); | ||
488 | |||
489 | /* Set the interrupt mask, for poll mode just disable all int */ | ||
490 | spi_mask_intr(dws, 0xff); | ||
491 | if (imask) | ||
492 | spi_umask_intr(dws, imask); | ||
493 | if (txint_level) | ||
494 | dw_writew(dws, DW_SPI_TXFLTR, txint_level); | ||
495 | 389 | ||
496 | spi_enable_chip(dws, 1); | 390 | if (dws->dma_mapped) { |
497 | if (cs_change) | 391 | ret = dws->dma_ops->dma_transfer(dws, transfer); |
498 | dws->prev_chip = chip; | 392 | if (ret < 0) |
393 | return ret; | ||
499 | } | 394 | } |
500 | 395 | ||
501 | if (dws->dma_mapped) | ||
502 | dws->dma_ops->dma_transfer(dws, cs_change); | ||
503 | |||
504 | if (chip->poll_mode) | 396 | if (chip->poll_mode) |
505 | poll_transfer(dws); | 397 | return poll_transfer(dws); |
506 | |||
507 | return; | ||
508 | 398 | ||
509 | early_exit: | 399 | return 1; |
510 | giveback(dws); | ||
511 | } | 400 | } |
512 | 401 | ||
513 | static int dw_spi_transfer_one_message(struct spi_master *master, | 402 | static void dw_spi_handle_err(struct spi_master *master, |
514 | struct spi_message *msg) | 403 | struct spi_message *msg) |
515 | { | 404 | { |
516 | struct dw_spi *dws = spi_master_get_devdata(master); | 405 | struct dw_spi *dws = spi_master_get_devdata(master); |
517 | 406 | ||
518 | dws->cur_msg = msg; | 407 | if (dws->dma_mapped) |
519 | /* Initial message state */ | 408 | dws->dma_ops->dma_stop(dws); |
520 | dws->cur_msg->state = START_STATE; | ||
521 | dws->cur_transfer = list_entry(dws->cur_msg->transfers.next, | ||
522 | struct spi_transfer, | ||
523 | transfer_list); | ||
524 | dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi); | ||
525 | |||
526 | /* Launch transfers */ | ||
527 | tasklet_schedule(&dws->pump_transfers); | ||
528 | 409 | ||
529 | return 0; | 410 | spi_reset_chip(dws); |
530 | } | 411 | } |
531 | 412 | ||
532 | /* This may be called twice for each spi dev */ | 413 | /* This may be called twice for each spi dev */ |
@@ -561,8 +442,6 @@ static int dw_spi_setup(struct spi_device *spi) | |||
561 | 442 | ||
562 | chip->rx_threshold = 0; | 443 | chip->rx_threshold = 0; |
563 | chip->tx_threshold = 0; | 444 | chip->tx_threshold = 0; |
564 | |||
565 | chip->enable_dma = chip_info->enable_dma; | ||
566 | } | 445 | } |
567 | 446 | ||
568 | if (spi->bits_per_word == 8) { | 447 | if (spi->bits_per_word == 8) { |
@@ -610,9 +489,7 @@ static void dw_spi_cleanup(struct spi_device *spi) | |||
610 | /* Restart the controller, disable all interrupts, clean rx fifo */ | 489 | /* Restart the controller, disable all interrupts, clean rx fifo */ |
611 | static void spi_hw_init(struct device *dev, struct dw_spi *dws) | 490 | static void spi_hw_init(struct device *dev, struct dw_spi *dws) |
612 | { | 491 | { |
613 | spi_enable_chip(dws, 0); | 492 | spi_reset_chip(dws); |
614 | spi_mask_intr(dws, 0xff); | ||
615 | spi_enable_chip(dws, 1); | ||
616 | 493 | ||
617 | /* | 494 | /* |
618 | * Try to detect the FIFO depth if not set by interface driver, | 495 | * Try to detect the FIFO depth if not set by interface driver, |
@@ -621,14 +498,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws) | |||
621 | if (!dws->fifo_len) { | 498 | if (!dws->fifo_len) { |
622 | u32 fifo; | 499 | u32 fifo; |
623 | 500 | ||
624 | for (fifo = 2; fifo <= 256; fifo++) { | 501 | for (fifo = 1; fifo < 256; fifo++) { |
625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); | 502 | dw_writel(dws, DW_SPI_TXFLTR, fifo); |
626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) | 503 | if (fifo != dw_readl(dws, DW_SPI_TXFLTR)) |
627 | break; | 504 | break; |
628 | } | 505 | } |
629 | dw_writew(dws, DW_SPI_TXFLTR, 0); | 506 | dw_writel(dws, DW_SPI_TXFLTR, 0); |
630 | 507 | ||
631 | dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; | 508 | dws->fifo_len = (fifo == 1) ? 0 : fifo; |
632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); | 509 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); |
633 | } | 510 | } |
634 | } | 511 | } |
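The FIFO-depth probe above writes increasing values into the TX FIFO threshold register until one no longer reads back, then takes the first rejected value as the depth. A user-space model of the same loop, run against a fake register that is assumed to latch only thresholds below a 16-entry FIFO:

    #include <stdio.h>

    #define FIFO_DEPTH 16    /* what the probe should discover; illustrative */

    static unsigned int txfltr;    /* stand-in for the DW_SPI_TXFLTR register */

    static void reg_write(unsigned int val)
    {
        if (val < FIFO_DEPTH)    /* out-of-range thresholds are simply not latched */
            txfltr = val;
    }

    static unsigned int reg_read(void)
    {
        return txfltr;
    }

    /* mirrors the spi_hw_init() loop: stop at the first value that does not stick */
    static unsigned int detect_fifo_len(void)
    {
        unsigned int fifo;

        for (fifo = 1; fifo < 256; fifo++) {
            reg_write(fifo);
            if (fifo != reg_read())
                break;
        }
        reg_write(0);

        return (fifo == 1) ? 0 : fifo;
    }

    int main(void)
    {
        printf("detected FIFO length: %u\n", detect_fifo_len());
        return 0;
    }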
@@ -646,13 +523,12 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) | |||
646 | 523 | ||
647 | dws->master = master; | 524 | dws->master = master; |
648 | dws->type = SSI_MOTO_SPI; | 525 | dws->type = SSI_MOTO_SPI; |
649 | dws->prev_chip = NULL; | ||
650 | dws->dma_inited = 0; | 526 | dws->dma_inited = 0; |
651 | dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); | 527 | dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); |
652 | snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num); | 528 | snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num); |
653 | 529 | ||
654 | ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED, | 530 | ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED, |
655 | dws->name, dws); | 531 | dws->name, master); |
656 | if (ret < 0) { | 532 | if (ret < 0) { |
657 | dev_err(&master->dev, "can not get IRQ\n"); | 533 | dev_err(&master->dev, "can not get IRQ\n"); |
658 | goto err_free_master; | 534 | goto err_free_master; |
@@ -664,7 +540,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) | |||
664 | master->num_chipselect = dws->num_cs; | 540 | master->num_chipselect = dws->num_cs; |
665 | master->setup = dw_spi_setup; | 541 | master->setup = dw_spi_setup; |
666 | master->cleanup = dw_spi_cleanup; | 542 | master->cleanup = dw_spi_cleanup; |
667 | master->transfer_one_message = dw_spi_transfer_one_message; | 543 | master->set_cs = dw_spi_set_cs; |
544 | master->transfer_one = dw_spi_transfer_one; | ||
545 | master->handle_err = dw_spi_handle_err; | ||
668 | master->max_speed_hz = dws->max_freq; | 546 | master->max_speed_hz = dws->max_freq; |
669 | master->dev.of_node = dev->of_node; | 547 | master->dev.of_node = dev->of_node; |
670 | 548 | ||
@@ -676,11 +554,11 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) | |||
676 | if (ret) { | 554 | if (ret) { |
677 | dev_warn(dev, "DMA init failed\n"); | 555 | dev_warn(dev, "DMA init failed\n"); |
678 | dws->dma_inited = 0; | 556 | dws->dma_inited = 0; |
557 | } else { | ||
558 | master->can_dma = dws->dma_ops->can_dma; | ||
679 | } | 559 | } |
680 | } | 560 | } |
681 | 561 | ||
682 | tasklet_init(&dws->pump_transfers, pump_transfers, (unsigned long)dws); | ||
683 | |||
684 | spi_master_set_devdata(master, dws); | 562 | spi_master_set_devdata(master, dws); |
685 | ret = devm_spi_register_master(dev, master); | 563 | ret = devm_spi_register_master(dev, master); |
686 | if (ret) { | 564 | if (ret) { |
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index 3d32be68c142..6c91391c1a4f 100644 --- a/drivers/spi/spi-dw.h +++ b/drivers/spi/spi-dw.h | |||
@@ -91,12 +91,15 @@ struct dw_spi; | |||
91 | struct dw_spi_dma_ops { | 91 | struct dw_spi_dma_ops { |
92 | int (*dma_init)(struct dw_spi *dws); | 92 | int (*dma_init)(struct dw_spi *dws); |
93 | void (*dma_exit)(struct dw_spi *dws); | 93 | void (*dma_exit)(struct dw_spi *dws); |
94 | int (*dma_transfer)(struct dw_spi *dws, int cs_change); | 94 | int (*dma_setup)(struct dw_spi *dws, struct spi_transfer *xfer); |
95 | bool (*can_dma)(struct spi_master *master, struct spi_device *spi, | ||
96 | struct spi_transfer *xfer); | ||
97 | int (*dma_transfer)(struct dw_spi *dws, struct spi_transfer *xfer); | ||
98 | void (*dma_stop)(struct dw_spi *dws); | ||
95 | }; | 99 | }; |
96 | 100 | ||
97 | struct dw_spi { | 101 | struct dw_spi { |
98 | struct spi_master *master; | 102 | struct spi_master *master; |
99 | struct spi_device *cur_dev; | ||
100 | enum dw_ssi_type type; | 103 | enum dw_ssi_type type; |
101 | char name[16]; | 104 | char name[16]; |
102 | 105 | ||
@@ -109,41 +112,26 @@ struct dw_spi { | |||
109 | u16 bus_num; | 112 | u16 bus_num; |
110 | u16 num_cs; /* supported slave numbers */ | 113 | u16 num_cs; /* supported slave numbers */ |
111 | 114 | ||
112 | /* Message Transfer pump */ | ||
113 | struct tasklet_struct pump_transfers; | ||
114 | |||
115 | /* Current message transfer state info */ | 115 | /* Current message transfer state info */ |
116 | struct spi_message *cur_msg; | ||
117 | struct spi_transfer *cur_transfer; | ||
118 | struct chip_data *cur_chip; | ||
119 | struct chip_data *prev_chip; | ||
120 | size_t len; | 116 | size_t len; |
121 | void *tx; | 117 | void *tx; |
122 | void *tx_end; | 118 | void *tx_end; |
123 | void *rx; | 119 | void *rx; |
124 | void *rx_end; | 120 | void *rx_end; |
125 | int dma_mapped; | 121 | int dma_mapped; |
126 | dma_addr_t rx_dma; | ||
127 | dma_addr_t tx_dma; | ||
128 | size_t rx_map_len; | ||
129 | size_t tx_map_len; | ||
130 | u8 n_bytes; /* current is a 1/2 bytes op */ | 122 | u8 n_bytes; /* current is a 1/2 bytes op */ |
131 | u8 max_bits_per_word; /* maxim is 16b */ | ||
132 | u32 dma_width; | 123 | u32 dma_width; |
133 | irqreturn_t (*transfer_handler)(struct dw_spi *dws); | 124 | irqreturn_t (*transfer_handler)(struct dw_spi *dws); |
134 | void (*cs_control)(u32 command); | ||
135 | 125 | ||
136 | /* Dma info */ | 126 | /* DMA info */ |
137 | int dma_inited; | 127 | int dma_inited; |
138 | struct dma_chan *txchan; | 128 | struct dma_chan *txchan; |
139 | struct scatterlist tx_sgl; | ||
140 | struct dma_chan *rxchan; | 129 | struct dma_chan *rxchan; |
141 | struct scatterlist rx_sgl; | ||
142 | unsigned long dma_chan_busy; | 130 | unsigned long dma_chan_busy; |
143 | struct device *dma_dev; | ||
144 | dma_addr_t dma_addr; /* phy address of the Data register */ | 131 | dma_addr_t dma_addr; /* phy address of the Data register */ |
145 | struct dw_spi_dma_ops *dma_ops; | 132 | struct dw_spi_dma_ops *dma_ops; |
146 | void *dma_priv; /* platform relate info */ | 133 | void *dma_tx; |
134 | void *dma_rx; | ||
147 | 135 | ||
148 | /* Bus interface info */ | 136 | /* Bus interface info */ |
149 | void *priv; | 137 | void *priv; |
@@ -162,16 +150,6 @@ static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val) | |||
162 | __raw_writel(val, dws->regs + offset); | 150 | __raw_writel(val, dws->regs + offset); |
163 | } | 151 | } |
164 | 152 | ||
165 | static inline u16 dw_readw(struct dw_spi *dws, u32 offset) | ||
166 | { | ||
167 | return __raw_readw(dws->regs + offset); | ||
168 | } | ||
169 | |||
170 | static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val) | ||
171 | { | ||
172 | __raw_writew(val, dws->regs + offset); | ||
173 | } | ||
174 | |||
175 | static inline void spi_enable_chip(struct dw_spi *dws, int enable) | 153 | static inline void spi_enable_chip(struct dw_spi *dws, int enable) |
176 | { | 154 | { |
177 | dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0)); | 155 | dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0)); |
@@ -182,22 +160,6 @@ static inline void spi_set_clk(struct dw_spi *dws, u16 div) | |||
182 | dw_writel(dws, DW_SPI_BAUDR, div); | 160 | dw_writel(dws, DW_SPI_BAUDR, div); |
183 | } | 161 | } |
184 | 162 | ||
185 | static inline void spi_chip_sel(struct dw_spi *dws, struct spi_device *spi, | ||
186 | int active) | ||
187 | { | ||
188 | u16 cs = spi->chip_select; | ||
189 | int gpio_val = active ? (spi->mode & SPI_CS_HIGH) : | ||
190 | !(spi->mode & SPI_CS_HIGH); | ||
191 | |||
192 | if (dws->cs_control) | ||
193 | dws->cs_control(active); | ||
194 | if (gpio_is_valid(spi->cs_gpio)) | ||
195 | gpio_set_value(spi->cs_gpio, gpio_val); | ||
196 | |||
197 | if (active) | ||
198 | dw_writel(dws, DW_SPI_SER, 1 << cs); | ||
199 | } | ||
200 | |||
201 | /* Disable IRQ bits */ | 163 | /* Disable IRQ bits */ |
202 | static inline void spi_mask_intr(struct dw_spi *dws, u32 mask) | 164 | static inline void spi_mask_intr(struct dw_spi *dws, u32 mask) |
203 | { | 165 | { |
@@ -217,15 +179,26 @@ static inline void spi_umask_intr(struct dw_spi *dws, u32 mask) | |||
217 | } | 179 | } |
218 | 180 | ||
219 | /* | 181 | /* |
182 | * This disables the SPI controller and its interrupts, then re-enables | ||
183 | * the controller. The transmit and receive FIFO buffers are cleared | ||
184 | * while the device is disabled. | ||
185 | */ | ||
186 | static inline void spi_reset_chip(struct dw_spi *dws) | ||
187 | { | ||
188 | spi_enable_chip(dws, 0); | ||
189 | spi_mask_intr(dws, 0xff); | ||
190 | spi_enable_chip(dws, 1); | ||
191 | } | ||
192 | |||
193 | /* | ||
220 | * Each SPI slave device working with the dw_api controller should | 194 | * Each SPI slave device working with the dw_api controller should |
221 | * have such a structure claiming its working mode (PIO/DMA etc), | 195 | * have such a structure claiming its working mode (poll or PIO/DMA), |
222 | * which can be saved in the "controller_data" member of the | 196 | * which can be saved in the "controller_data" member of the |
223 | * struct spi_device. | 197 | * struct spi_device. |
224 | */ | 198 | */ |
225 | struct dw_spi_chip { | 199 | struct dw_spi_chip { |
226 | u8 poll_mode; /* 1 for controller polling mode */ | 200 | u8 poll_mode; /* 1 for controller polling mode */ |
227 | u8 type; /* SPI/SSP/MicroWire */ | 201 | u8 type; /* SPI/SSP/MicroWire */ |
228 | u8 enable_dma; | ||
229 | void (*cs_control)(u32 command); | 202 | void (*cs_control)(u32 command); |
230 | }; | 203 | }; |
231 | 204 | ||
@@ -233,7 +206,6 @@ extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws); | |||
233 | extern void dw_spi_remove_host(struct dw_spi *dws); | 206 | extern void dw_spi_remove_host(struct dw_spi *dws); |
234 | extern int dw_spi_suspend_host(struct dw_spi *dws); | 207 | extern int dw_spi_suspend_host(struct dw_spi *dws); |
235 | extern int dw_spi_resume_host(struct dw_spi *dws); | 208 | extern int dw_spi_resume_host(struct dw_spi *dws); |
236 | extern void dw_spi_xfer_done(struct dw_spi *dws); | ||
237 | 209 | ||
238 | /* platform related setup */ | 210 | /* platform related setup */ |
239 | extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */ | 211 | extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */ |
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c index 9c46a3058743..6f466ab1201a 100644 --- a/drivers/spi/spi-fsl-cpm.c +++ b/drivers/spi/spi-fsl-cpm.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
25 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/platform_device.h> | ||
27 | 28 | ||
28 | #include "spi-fsl-cpm.h" | 29 | #include "spi-fsl-cpm.h" |
29 | #include "spi-fsl-lib.h" | 30 | #include "spi-fsl-lib.h" |
@@ -269,17 +270,6 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) | |||
269 | if (mspi->flags & SPI_CPM2) { | 270 | if (mspi->flags & SPI_CPM2) { |
270 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); | 271 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); |
271 | out_be16(spi_base, pram_ofs); | 272 | out_be16(spi_base, pram_ofs); |
272 | } else { | ||
273 | struct spi_pram __iomem *pram = spi_base; | ||
274 | u16 rpbase = in_be16(&pram->rpbase); | ||
275 | |||
276 | /* Microcode relocation patch applied? */ | ||
277 | if (rpbase) { | ||
278 | pram_ofs = rpbase; | ||
279 | } else { | ||
280 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); | ||
281 | out_be16(spi_base, pram_ofs); | ||
282 | } | ||
283 | } | 273 | } |
284 | 274 | ||
285 | iounmap(spi_base); | 275 | iounmap(spi_base); |
@@ -292,7 +282,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) | |||
292 | struct device_node *np = dev->of_node; | 282 | struct device_node *np = dev->of_node; |
293 | const u32 *iprop; | 283 | const u32 *iprop; |
294 | int size; | 284 | int size; |
295 | unsigned long pram_ofs; | ||
296 | unsigned long bds_ofs; | 285 | unsigned long bds_ofs; |
297 | 286 | ||
298 | if (!(mspi->flags & SPI_CPM_MODE)) | 287 | if (!(mspi->flags & SPI_CPM_MODE)) |
@@ -319,8 +308,21 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) | |||
319 | } | 308 | } |
320 | } | 309 | } |
321 | 310 | ||
322 | pram_ofs = fsl_spi_cpm_get_pram(mspi); | 311 | if (mspi->flags & SPI_CPM1) { |
323 | if (IS_ERR_VALUE(pram_ofs)) { | 312 | struct resource *res; |
313 | |||
314 | res = platform_get_resource(to_platform_device(dev), | ||
315 | IORESOURCE_MEM, 1); | ||
316 | mspi->pram = devm_ioremap_resource(dev, res); | ||
317 | } else { | ||
318 | unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi); | ||
319 | |||
320 | if (IS_ERR_VALUE(pram_ofs)) | ||
321 | mspi->pram = NULL; | ||
322 | else | ||
323 | mspi->pram = cpm_muram_addr(pram_ofs); | ||
324 | } | ||
325 | if (mspi->pram == NULL) { | ||
324 | dev_err(dev, "can't allocate spi parameter ram\n"); | 326 | dev_err(dev, "can't allocate spi parameter ram\n"); |
325 | goto err_pram; | 327 | goto err_pram; |
326 | } | 328 | } |
@@ -346,8 +348,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) | |||
346 | goto err_dummy_rx; | 348 | goto err_dummy_rx; |
347 | } | 349 | } |
348 | 350 | ||
349 | mspi->pram = cpm_muram_addr(pram_ofs); | ||
350 | |||
351 | mspi->tx_bd = cpm_muram_addr(bds_ofs); | 351 | mspi->tx_bd = cpm_muram_addr(bds_ofs); |
352 | mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); | 352 | mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); |
353 | 353 | ||
@@ -375,7 +375,8 @@ err_dummy_rx: | |||
375 | err_dummy_tx: | 375 | err_dummy_tx: |
376 | cpm_muram_free(bds_ofs); | 376 | cpm_muram_free(bds_ofs); |
377 | err_bds: | 377 | err_bds: |
378 | cpm_muram_free(pram_ofs); | 378 | if (!(mspi->flags & SPI_CPM1)) |
379 | cpm_muram_free(cpm_muram_offset(mspi->pram)); | ||
379 | err_pram: | 380 | err_pram: |
380 | fsl_spi_free_dummy_rx(); | 381 | fsl_spi_free_dummy_rx(); |
381 | return -ENOMEM; | 382 | return -ENOMEM; |
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index d1a39249704a..5fe54cda309f 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/math64.h> | ||
23 | #include <linux/module.h> | 24 | #include <linux/module.h> |
24 | #include <linux/of.h> | 25 | #include <linux/of.h> |
25 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
@@ -29,6 +30,7 @@ | |||
29 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
30 | #include <linux/spi/spi.h> | 31 | #include <linux/spi/spi.h> |
31 | #include <linux/spi/spi_bitbang.h> | 32 | #include <linux/spi/spi_bitbang.h> |
33 | #include <linux/time.h> | ||
32 | 34 | ||
33 | #define DRIVER_NAME "fsl-dspi" | 35 | #define DRIVER_NAME "fsl-dspi" |
34 | 36 | ||
@@ -51,7 +53,7 @@ | |||
51 | #define SPI_CTAR_CPOL(x) ((x) << 26) | 53 | #define SPI_CTAR_CPOL(x) ((x) << 26) |
52 | #define SPI_CTAR_CPHA(x) ((x) << 25) | 54 | #define SPI_CTAR_CPHA(x) ((x) << 25) |
53 | #define SPI_CTAR_LSBFE(x) ((x) << 24) | 55 | #define SPI_CTAR_LSBFE(x) ((x) << 24) |
54 | #define SPI_CTAR_PCSSCR(x) (((x) & 0x00000003) << 22) | 56 | #define SPI_CTAR_PCSSCK(x) (((x) & 0x00000003) << 22) |
55 | #define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20) | 57 | #define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20) |
56 | #define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18) | 58 | #define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18) |
57 | #define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16) | 59 | #define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16) |
@@ -59,6 +61,7 @@ | |||
59 | #define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8) | 61 | #define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8) |
60 | #define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4) | 62 | #define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4) |
61 | #define SPI_CTAR_BR(x) ((x) & 0x0000000f) | 63 | #define SPI_CTAR_BR(x) ((x) & 0x0000000f) |
64 | #define SPI_CTAR_SCALE_BITS 0xf | ||
62 | 65 | ||
63 | #define SPI_CTAR0_SLAVE 0x0c | 66 | #define SPI_CTAR0_SLAVE 0x0c |
64 | 67 | ||
@@ -148,23 +151,66 @@ static void hz_to_spi_baud(char *pbr, char *br, int speed_hz, | |||
148 | 16, 32, 64, 128, | 151 | 16, 32, 64, 128, |
149 | 256, 512, 1024, 2048, | 152 | 256, 512, 1024, 2048, |
150 | 4096, 8192, 16384, 32768 }; | 153 | 4096, 8192, 16384, 32768 }; |
151 | int temp, i = 0, j = 0; | 154 | int scale_needed, scale, minscale = INT_MAX; |
155 | int i, j; | ||
156 | |||
157 | scale_needed = clkrate / speed_hz; | ||
158 | if (clkrate % speed_hz) | ||
159 | scale_needed++; | ||
160 | |||
161 | for (i = 0; i < ARRAY_SIZE(brs); i++) | ||
162 | for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) { | ||
163 | scale = brs[i] * pbr_tbl[j]; | ||
164 | if (scale >= scale_needed) { | ||
165 | if (scale < minscale) { | ||
166 | minscale = scale; | ||
167 | *br = i; | ||
168 | *pbr = j; | ||
169 | } | ||
170 | break; | ||
171 | } | ||
172 | } | ||
152 | 173 | ||
153 | temp = clkrate / 2 / speed_hz; | 174 | if (minscale == INT_MAX) { |
175 | pr_warn("Cannot find a valid baud rate: speed_hz is %d, clkrate is %ld; using the max prescaler value.\n", | ||
176 | speed_hz, clkrate); | ||
177 | *pbr = ARRAY_SIZE(pbr_tbl) - 1; | ||
178 | *br = ARRAY_SIZE(brs) - 1; | ||
179 | } | ||
180 | } | ||
154 | 181 | ||
155 | for (i = 0; i < ARRAY_SIZE(pbr_tbl); i++) | 182 | static void ns_delay_scale(char *psc, char *sc, int delay_ns, |
156 | for (j = 0; j < ARRAY_SIZE(brs); j++) { | 183 | unsigned long clkrate) |
157 | if (pbr_tbl[i] * brs[j] >= temp) { | 184 | { |
158 | *pbr = i; | 185 | int pscale_tbl[4] = {1, 3, 5, 7}; |
159 | *br = j; | 186 | int scale_needed, scale, minscale = INT_MAX; |
160 | return; | 187 | int i, j; |
188 | u32 remainder; | ||
189 | |||
190 | scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC, | ||
191 | &remainder); | ||
192 | if (remainder) | ||
193 | scale_needed++; | ||
194 | |||
195 | for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++) | ||
196 | for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) { | ||
197 | scale = pscale_tbl[i] * (2 << j); | ||
198 | if (scale >= scale_needed) { | ||
199 | if (scale < minscale) { | ||
200 | minscale = scale; | ||
201 | *psc = i; | ||
202 | *sc = j; | ||
203 | } | ||
204 | break; | ||
161 | } | 205 | } |
162 | } | 206 | } |
163 | 207 | ||
164 | pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld\ | 208 | if (minscale == INT_MAX) { |
165 | ,we use the max prescaler value.\n", speed_hz, clkrate); | 209 | pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value", |
166 | *pbr = ARRAY_SIZE(pbr_tbl) - 1; | 210 | delay_ns, clkrate); |
167 | *br = ARRAY_SIZE(brs) - 1; | 211 | *psc = ARRAY_SIZE(pscale_tbl) - 1; |
212 | *sc = SPI_CTAR_SCALE_BITS; | ||
213 | } | ||
168 | } | 214 | } |
169 | 215 | ||
170 | static int dspi_transfer_write(struct fsl_dspi *dspi) | 216 | static int dspi_transfer_write(struct fsl_dspi *dspi) |
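Both helpers above follow the same pattern: work out the total scale factor required (clock cycles per bit, or clock cycles per requested delay), then scan every prescaler/scaler combination and keep the smallest product that still meets the requirement, warning and falling back to the maximum values when nothing fits. A minimal stand-alone sketch of that search, with hypothetical table contents chosen only for illustration:

/*
 * Illustrative sketch of the minimal-scale search used by hz_to_spi_baud()
 * and ns_delay_scale() above; the table values here are assumptions.
 */
#include <limits.h>
#include <stdio.h>

static const int pre_tbl[] = { 2, 3, 5, 7 };            /* assumed prescalers */
static const int scl_tbl[] = { 2, 4, 8, 16, 32, 64 };   /* assumed scalers */

/* Find the smallest pre * scl product that is >= needed. */
static int find_scale(int needed, int *pre_idx, int *scl_idx)
{
        int i, j, best = INT_MAX;

        for (i = 0; i < (int)(sizeof(pre_tbl) / sizeof(pre_tbl[0])); i++)
                for (j = 0; j < (int)(sizeof(scl_tbl) / sizeof(scl_tbl[0])); j++) {
                        int scale = pre_tbl[i] * scl_tbl[j];

                        if (scale >= needed && scale < best) {
                                best = scale;
                                *pre_idx = i;
                                *scl_idx = j;
                        }
                }

        return best == INT_MAX ? -1 : 0;
}

int main(void)
{
        int pre = 0, scl = 0;

        /* e.g. a 100 MHz clock and a 1.5 MHz target need a factor of 67 */
        if (!find_scale(67, &pre, &scl))
                printf("prescaler %d, scaler %d\n", pre_tbl[pre], scl_tbl[scl]);
        return 0;
}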
@@ -345,7 +391,10 @@ static int dspi_setup(struct spi_device *spi) | |||
345 | { | 391 | { |
346 | struct chip_data *chip; | 392 | struct chip_data *chip; |
347 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); | 393 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); |
348 | unsigned char br = 0, pbr = 0, fmsz = 0; | 394 | u32 cs_sck_delay = 0, sck_cs_delay = 0; |
395 | unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0; | ||
396 | unsigned char pasc = 0, asc = 0, fmsz = 0; | ||
397 | unsigned long clkrate; | ||
349 | 398 | ||
350 | if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) { | 399 | if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) { |
351 | fmsz = spi->bits_per_word - 1; | 400 | fmsz = spi->bits_per_word - 1; |
@@ -362,18 +411,34 @@ static int dspi_setup(struct spi_device *spi) | |||
362 | return -ENOMEM; | 411 | return -ENOMEM; |
363 | } | 412 | } |
364 | 413 | ||
414 | of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay", | ||
415 | &cs_sck_delay); | ||
416 | |||
417 | of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay", | ||
418 | &sck_cs_delay); | ||
419 | |||
365 | chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | | 420 | chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | |
366 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; | 421 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; |
367 | 422 | ||
368 | chip->void_write_data = 0; | 423 | chip->void_write_data = 0; |
369 | 424 | ||
370 | hz_to_spi_baud(&pbr, &br, | 425 | clkrate = clk_get_rate(dspi->clk); |
371 | spi->max_speed_hz, clk_get_rate(dspi->clk)); | 426 | hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate); |
427 | |||
428 | /* Set PCS to SCK delay scale values */ | ||
429 | ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate); | ||
430 | |||
431 | /* Set After SCK delay scale values */ | ||
432 | ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate); | ||
372 | 433 | ||
373 | chip->ctar_val = SPI_CTAR_FMSZ(fmsz) | 434 | chip->ctar_val = SPI_CTAR_FMSZ(fmsz) |
374 | | SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0) | 435 | | SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0) |
375 | | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0) | 436 | | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0) |
376 | | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0) | 437 | | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0) |
438 | | SPI_CTAR_PCSSCK(pcssck) | ||
439 | | SPI_CTAR_CSSCK(cssck) | ||
440 | | SPI_CTAR_PASC(pasc) | ||
441 | | SPI_CTAR_ASC(asc) | ||
377 | | SPI_CTAR_PBR(pbr) | 442 | | SPI_CTAR_PBR(pbr) |
378 | | SPI_CTAR_BR(br); | 443 | | SPI_CTAR_BR(br); |
379 | 444 | ||
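The two new device-tree properties feed straight into those delay scalers. For a hypothetical fsl,spi-cs-sck-delay of 100 ns on a 100 MHz module clock, ns_delay_scale() needs a factor of at least 10 clock cycles; scanning pscale_tbl {1, 3, 5, 7} against 2 << j finds 5 * 2 = 10 exactly, so *psc = 2 (the index of 5) and *sc = 0. A small check of that arithmetic, with all numbers assumed for illustration:

/*
 * Illustrative check of the ns_delay_scale() arithmetic; the 100 ns delay
 * and 100 MHz clock are assumed example values, not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
        unsigned long clkrate = 100000000UL;    /* 100 MHz, assumed */
        unsigned int delay_ns = 100;            /* requested CS-to-SCK delay */
        unsigned int psc_val = 5, sc = 0;       /* picked by the scan above */
        unsigned long long scale = psc_val * (2ULL << sc);

        /* delay actually produced by these scaler values, in ns */
        unsigned long long actual = scale * 1000000000ULL / clkrate;

        printf("needed >= %u ns, scalers give %llu ns\n", delay_ns, actual);
        return 0;
}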
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index c01567d53581..788e2b176a4f 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/clk.h> | 12 | #include <linux/clk.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/dmaengine.h> | 14 | #include <linux/dmaengine.h> |
15 | #include <linux/gpio.h> | ||
15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
16 | #include <linux/io.h> | 17 | #include <linux/io.h> |
17 | #include <linux/irq.h> | 18 | #include <linux/irq.h> |
@@ -122,36 +123,31 @@ static inline void spfi_start(struct img_spfi *spfi) | |||
122 | spfi_writel(spfi, val, SPFI_CONTROL); | 123 | spfi_writel(spfi, val, SPFI_CONTROL); |
123 | } | 124 | } |
124 | 125 | ||
125 | static inline void spfi_stop(struct img_spfi *spfi) | ||
126 | { | ||
127 | u32 val; | ||
128 | |||
129 | val = spfi_readl(spfi, SPFI_CONTROL); | ||
130 | val &= ~SPFI_CONTROL_SPFI_EN; | ||
131 | spfi_writel(spfi, val, SPFI_CONTROL); | ||
132 | } | ||
133 | |||
134 | static inline void spfi_reset(struct img_spfi *spfi) | 126 | static inline void spfi_reset(struct img_spfi *spfi) |
135 | { | 127 | { |
136 | spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL); | 128 | spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL); |
137 | udelay(1); | ||
138 | spfi_writel(spfi, 0, SPFI_CONTROL); | 129 | spfi_writel(spfi, 0, SPFI_CONTROL); |
139 | } | 130 | } |
140 | 131 | ||
141 | static void spfi_flush_tx_fifo(struct img_spfi *spfi) | 132 | static int spfi_wait_all_done(struct img_spfi *spfi) |
142 | { | 133 | { |
143 | unsigned long timeout = jiffies + msecs_to_jiffies(10); | 134 | unsigned long timeout = jiffies + msecs_to_jiffies(50); |
144 | 135 | ||
145 | spfi_writel(spfi, SPFI_INTERRUPT_SDE, SPFI_INTERRUPT_CLEAR); | ||
146 | while (time_before(jiffies, timeout)) { | 136 | while (time_before(jiffies, timeout)) { |
147 | if (spfi_readl(spfi, SPFI_INTERRUPT_STATUS) & | 137 | u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); |
148 | SPFI_INTERRUPT_SDE) | 138 | |
149 | return; | 139 | if (status & SPFI_INTERRUPT_ALLDONETRIG) { |
140 | spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG, | ||
141 | SPFI_INTERRUPT_CLEAR); | ||
142 | return 0; | ||
143 | } | ||
150 | cpu_relax(); | 144 | cpu_relax(); |
151 | } | 145 | } |
152 | 146 | ||
153 | dev_err(spfi->dev, "Timed out waiting for FIFO to drain\n"); | 147 | dev_err(spfi->dev, "Timed out waiting for transaction to complete\n"); |
154 | spfi_reset(spfi); | 148 | spfi_reset(spfi); |
149 | |||
150 | return -ETIMEDOUT; | ||
155 | } | 151 | } |
156 | 152 | ||
157 | static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf, | 153 | static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf, |
@@ -237,6 +233,7 @@ static int img_spfi_start_pio(struct spi_master *master, | |||
237 | const void *tx_buf = xfer->tx_buf; | 233 | const void *tx_buf = xfer->tx_buf; |
238 | void *rx_buf = xfer->rx_buf; | 234 | void *rx_buf = xfer->rx_buf; |
239 | unsigned long timeout; | 235 | unsigned long timeout; |
236 | int ret; | ||
240 | 237 | ||
241 | if (tx_buf) | 238 | if (tx_buf) |
242 | tx_bytes = xfer->len; | 239 | tx_bytes = xfer->len; |
@@ -269,16 +266,15 @@ static int img_spfi_start_pio(struct spi_master *master, | |||
269 | cpu_relax(); | 266 | cpu_relax(); |
270 | } | 267 | } |
271 | 268 | ||
269 | ret = spfi_wait_all_done(spfi); | ||
270 | if (ret < 0) | ||
271 | return ret; | ||
272 | |||
272 | if (rx_bytes > 0 || tx_bytes > 0) { | 273 | if (rx_bytes > 0 || tx_bytes > 0) { |
273 | dev_err(spfi->dev, "PIO transfer timed out\n"); | 274 | dev_err(spfi->dev, "PIO transfer timed out\n"); |
274 | spfi_reset(spfi); | ||
275 | return -ETIMEDOUT; | 275 | return -ETIMEDOUT; |
276 | } | 276 | } |
277 | 277 | ||
278 | if (tx_buf) | ||
279 | spfi_flush_tx_fifo(spfi); | ||
280 | spfi_stop(spfi); | ||
281 | |||
282 | return 0; | 278 | return 0; |
283 | } | 279 | } |
284 | 280 | ||
@@ -287,14 +283,12 @@ static void img_spfi_dma_rx_cb(void *data) | |||
287 | struct img_spfi *spfi = data; | 283 | struct img_spfi *spfi = data; |
288 | unsigned long flags; | 284 | unsigned long flags; |
289 | 285 | ||
290 | spin_lock_irqsave(&spfi->lock, flags); | 286 | spfi_wait_all_done(spfi); |
291 | 287 | ||
288 | spin_lock_irqsave(&spfi->lock, flags); | ||
292 | spfi->rx_dma_busy = false; | 289 | spfi->rx_dma_busy = false; |
293 | if (!spfi->tx_dma_busy) { | 290 | if (!spfi->tx_dma_busy) |
294 | spfi_stop(spfi); | ||
295 | spi_finalize_current_transfer(spfi->master); | 291 | spi_finalize_current_transfer(spfi->master); |
296 | } | ||
297 | |||
298 | spin_unlock_irqrestore(&spfi->lock, flags); | 292 | spin_unlock_irqrestore(&spfi->lock, flags); |
299 | } | 293 | } |
300 | 294 | ||
@@ -303,16 +297,12 @@ static void img_spfi_dma_tx_cb(void *data) | |||
303 | struct img_spfi *spfi = data; | 297 | struct img_spfi *spfi = data; |
304 | unsigned long flags; | 298 | unsigned long flags; |
305 | 299 | ||
306 | spfi_flush_tx_fifo(spfi); | 300 | spfi_wait_all_done(spfi); |
307 | 301 | ||
308 | spin_lock_irqsave(&spfi->lock, flags); | 302 | spin_lock_irqsave(&spfi->lock, flags); |
309 | |||
310 | spfi->tx_dma_busy = false; | 303 | spfi->tx_dma_busy = false; |
311 | if (!spfi->rx_dma_busy) { | 304 | if (!spfi->rx_dma_busy) |
312 | spfi_stop(spfi); | ||
313 | spi_finalize_current_transfer(spfi->master); | 305 | spi_finalize_current_transfer(spfi->master); |
314 | } | ||
315 | |||
316 | spin_unlock_irqrestore(&spfi->lock, flags); | 306 | spin_unlock_irqrestore(&spfi->lock, flags); |
317 | } | 307 | } |
318 | 308 | ||
@@ -397,6 +387,75 @@ stop_dma: | |||
397 | return -EIO; | 387 | return -EIO; |
398 | } | 388 | } |
399 | 389 | ||
390 | static void img_spfi_handle_err(struct spi_master *master, | ||
391 | struct spi_message *msg) | ||
392 | { | ||
393 | struct img_spfi *spfi = spi_master_get_devdata(master); | ||
394 | unsigned long flags; | ||
395 | |||
396 | /* | ||
397 | * Stop all DMA and reset the controller if the previous transaction | ||
398 | * timed out and never completed its DMA. | ||
399 | */ | ||
400 | spin_lock_irqsave(&spfi->lock, flags); | ||
401 | if (spfi->tx_dma_busy || spfi->rx_dma_busy) { | ||
402 | spfi->tx_dma_busy = false; | ||
403 | spfi->rx_dma_busy = false; | ||
404 | |||
405 | dmaengine_terminate_all(spfi->tx_ch); | ||
406 | dmaengine_terminate_all(spfi->rx_ch); | ||
407 | } | ||
408 | spin_unlock_irqrestore(&spfi->lock, flags); | ||
409 | } | ||
410 | |||
411 | static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg) | ||
412 | { | ||
413 | struct img_spfi *spfi = spi_master_get_devdata(master); | ||
414 | u32 val; | ||
415 | |||
416 | val = spfi_readl(spfi, SPFI_PORT_STATE); | ||
417 | if (msg->spi->mode & SPI_CPHA) | ||
418 | val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select); | ||
419 | else | ||
420 | val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select); | ||
421 | if (msg->spi->mode & SPI_CPOL) | ||
422 | val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select); | ||
423 | else | ||
424 | val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select); | ||
425 | spfi_writel(spfi, val, SPFI_PORT_STATE); | ||
426 | |||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | static int img_spfi_unprepare(struct spi_master *master, | ||
431 | struct spi_message *msg) | ||
432 | { | ||
433 | struct img_spfi *spfi = spi_master_get_devdata(master); | ||
434 | |||
435 | spfi_reset(spfi); | ||
436 | |||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | static int img_spfi_setup(struct spi_device *spi) | ||
441 | { | ||
442 | int ret; | ||
443 | |||
444 | ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? | ||
445 | GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH, | ||
446 | dev_name(&spi->dev)); | ||
447 | if (ret) | ||
448 | dev_err(&spi->dev, "can't request chipselect gpio %d\n", | ||
449 | spi->cs_gpio); | ||
450 | |||
451 | return ret; | ||
452 | } | ||
453 | |||
454 | static void img_spfi_cleanup(struct spi_device *spi) | ||
455 | { | ||
456 | gpio_free(spi->cs_gpio); | ||
457 | } | ||
458 | |||
400 | static void img_spfi_config(struct spi_master *master, struct spi_device *spi, | 459 | static void img_spfi_config(struct spi_master *master, struct spi_device *spi, |
401 | struct spi_transfer *xfer) | 460 | struct spi_transfer *xfer) |
402 | { | 461 | { |
@@ -405,10 +464,10 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi, | |||
405 | 464 | ||
406 | /* | 465 | /* |
407 | * output = spfi_clk * (BITCLK / 512), where BITCLK must be a | 466 | * output = spfi_clk * (BITCLK / 512), where BITCLK must be a |
408 | * power of 2 up to 256 (where 255 == 256 since BITCLK is 8 bits) | 467 | * power of 2 up to 128 |
409 | */ | 468 | */ |
410 | div = DIV_ROUND_UP(master->max_speed_hz, xfer->speed_hz); | 469 | div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz); |
411 | div = clamp(512 / (1 << get_count_order(div)), 1, 255); | 470 | div = clamp(512 / (1 << get_count_order(div)), 1, 128); |
412 | 471 | ||
413 | val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select)); | 472 | val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select)); |
414 | val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK << | 473 | val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK << |
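The divider above works backwards from the "output = spfi_clk * (BITCLK / 512)" relation: it rounds the clock-to-rate ratio up, takes the next power of two, and divides 512 by it, clamped to the 1..128 range, so the resulting bit clock never exceeds the requested speed. A stand-alone sketch of that calculation with an assumed 128 MHz input clock and a 10 MHz request:

/*
 * Illustrative sketch of the BITCLK selection above; the 128 MHz input clock
 * and 10 MHz request are assumed example values.
 */
#include <stdio.h>

/* smallest exponent e with (1 << e) >= n, similar to get_count_order() */
static unsigned int count_order(unsigned int n)
{
        unsigned int e = 0;

        while ((1U << e) < n)
                e++;
        return e;
}

int main(void)
{
        unsigned long spfi_clk = 128000000UL;   /* assumed input clock */
        unsigned long speed_hz = 10000000UL;    /* requested bit rate */
        unsigned int div = (spfi_clk + speed_hz - 1) / speed_hz; /* round up */

        div = 512 / (1U << count_order(div));
        if (div < 1)
                div = 1;
        if (div > 128)
                div = 128;

        /* resulting bit clock: spfi_clk * (BITCLK / 512) */
        printf("BITCLK = %u -> %llu Hz\n", div,
               (unsigned long long)spfi_clk * div / 512);
        return 0;
}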
@@ -416,6 +475,9 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi, | |||
416 | val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT; | 475 | val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT; |
417 | spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select)); | 476 | spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select)); |
418 | 477 | ||
478 | spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT, | ||
479 | SPFI_TRANSACTION); | ||
480 | |||
419 | val = spfi_readl(spfi, SPFI_CONTROL); | 481 | val = spfi_readl(spfi, SPFI_CONTROL); |
420 | val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA); | 482 | val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA); |
421 | if (xfer->tx_buf) | 483 | if (xfer->tx_buf) |
@@ -429,25 +491,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi, | |||
429 | else if (xfer->tx_nbits == SPI_NBITS_QUAD && | 491 | else if (xfer->tx_nbits == SPI_NBITS_QUAD && |
430 | xfer->rx_nbits == SPI_NBITS_QUAD) | 492 | xfer->rx_nbits == SPI_NBITS_QUAD) |
431 | val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; | 493 | val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; |
432 | val &= ~SPFI_CONTROL_CONTINUE; | ||
433 | if (!xfer->cs_change && !list_is_last(&xfer->transfer_list, | ||
434 | &master->cur_msg->transfers)) | ||
435 | val |= SPFI_CONTROL_CONTINUE; | ||
436 | spfi_writel(spfi, val, SPFI_CONTROL); | 494 | spfi_writel(spfi, val, SPFI_CONTROL); |
437 | |||
438 | val = spfi_readl(spfi, SPFI_PORT_STATE); | ||
439 | if (spi->mode & SPI_CPHA) | ||
440 | val |= SPFI_PORT_STATE_CK_PHASE(spi->chip_select); | ||
441 | else | ||
442 | val &= ~SPFI_PORT_STATE_CK_PHASE(spi->chip_select); | ||
443 | if (spi->mode & SPI_CPOL) | ||
444 | val |= SPFI_PORT_STATE_CK_POL(spi->chip_select); | ||
445 | else | ||
446 | val &= ~SPFI_PORT_STATE_CK_POL(spi->chip_select); | ||
447 | spfi_writel(spfi, val, SPFI_PORT_STATE); | ||
448 | |||
449 | spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT, | ||
450 | SPFI_TRANSACTION); | ||
451 | } | 495 | } |
452 | 496 | ||
453 | static int img_spfi_transfer_one(struct spi_master *master, | 497 | static int img_spfi_transfer_one(struct spi_master *master, |
@@ -455,25 +499,13 @@ static int img_spfi_transfer_one(struct spi_master *master, | |||
455 | struct spi_transfer *xfer) | 499 | struct spi_transfer *xfer) |
456 | { | 500 | { |
457 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); | 501 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); |
458 | bool dma_reset = false; | ||
459 | unsigned long flags; | ||
460 | int ret; | 502 | int ret; |
461 | 503 | ||
462 | /* | 504 | if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) { |
463 | * Stop all DMA and reset the controller if the previous transaction | 505 | dev_err(spfi->dev, |
464 | * timed-out and never completed it's DMA. | 506 | "Transfer length (%d) is greater than the max supported (%d)", |
465 | */ | 507 | xfer->len, SPFI_TRANSACTION_TSIZE_MASK); |
466 | spin_lock_irqsave(&spfi->lock, flags); | 508 | return -EINVAL; |
467 | if (spfi->tx_dma_busy || spfi->rx_dma_busy) { | ||
468 | dev_err(spfi->dev, "SPI DMA still busy\n"); | ||
469 | dma_reset = true; | ||
470 | } | ||
471 | spin_unlock_irqrestore(&spfi->lock, flags); | ||
472 | |||
473 | if (dma_reset) { | ||
474 | dmaengine_terminate_all(spfi->tx_ch); | ||
475 | dmaengine_terminate_all(spfi->rx_ch); | ||
476 | spfi_reset(spfi); | ||
477 | } | 509 | } |
478 | 510 | ||
479 | img_spfi_config(master, spi, xfer); | 511 | img_spfi_config(master, spi, xfer); |
@@ -485,17 +517,6 @@ static int img_spfi_transfer_one(struct spi_master *master, | |||
485 | return ret; | 517 | return ret; |
486 | } | 518 | } |
487 | 519 | ||
488 | static void img_spfi_set_cs(struct spi_device *spi, bool enable) | ||
489 | { | ||
490 | struct img_spfi *spfi = spi_master_get_devdata(spi->master); | ||
491 | u32 val; | ||
492 | |||
493 | val = spfi_readl(spfi, SPFI_PORT_STATE); | ||
494 | val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK << SPFI_PORT_STATE_DEV_SEL_SHIFT); | ||
495 | val |= spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT; | ||
496 | spfi_writel(spfi, val, SPFI_PORT_STATE); | ||
497 | } | ||
498 | |||
499 | static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, | 520 | static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, |
500 | struct spi_transfer *xfer) | 521 | struct spi_transfer *xfer) |
501 | { | 522 | { |
@@ -584,14 +605,17 @@ static int img_spfi_probe(struct platform_device *pdev) | |||
584 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL; | 605 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL; |
585 | if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode")) | 606 | if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode")) |
586 | master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD; | 607 | master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD; |
587 | master->num_chipselect = 5; | ||
588 | master->dev.of_node = pdev->dev.of_node; | 608 | master->dev.of_node = pdev->dev.of_node; |
589 | master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8); | 609 | master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8); |
590 | master->max_speed_hz = clk_get_rate(spfi->spfi_clk); | 610 | master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4; |
591 | master->min_speed_hz = master->max_speed_hz / 512; | 611 | master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512; |
592 | 612 | ||
593 | master->set_cs = img_spfi_set_cs; | 613 | master->setup = img_spfi_setup; |
614 | master->cleanup = img_spfi_cleanup; | ||
594 | master->transfer_one = img_spfi_transfer_one; | 615 | master->transfer_one = img_spfi_transfer_one; |
616 | master->prepare_message = img_spfi_prepare; | ||
617 | master->unprepare_message = img_spfi_unprepare; | ||
618 | master->handle_err = img_spfi_handle_err; | ||
595 | 619 | ||
596 | spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx"); | 620 | spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx"); |
597 | spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx"); | 621 | spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx"); |
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 6fea4af51c41..f08e812b2984 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
@@ -370,8 +370,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, | |||
370 | if (spi_imx->dma_is_inited) { | 370 | if (spi_imx->dma_is_inited) { |
371 | dma = readl(spi_imx->base + MX51_ECSPI_DMA); | 371 | dma = readl(spi_imx->base + MX51_ECSPI_DMA); |
372 | 372 | ||
373 | spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2; | ||
374 | spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2; | ||
375 | spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2; | 373 | spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2; |
376 | rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET; | 374 | rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET; |
377 | tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET; | 375 | tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET; |
@@ -868,6 +866,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, | |||
868 | master->max_dma_len = MAX_SDMA_BD_BYTES; | 866 | master->max_dma_len = MAX_SDMA_BD_BYTES; |
869 | spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX | | 867 | spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX | |
870 | SPI_MASTER_MUST_TX; | 868 | SPI_MASTER_MUST_TX; |
869 | spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2; | ||
870 | spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2; | ||
871 | spi_imx->dma_is_inited = 1; | 871 | spi_imx->dma_is_inited = 1; |
872 | 872 | ||
873 | return 0; | 873 | return 0; |
@@ -903,7 +903,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, | |||
903 | 903 | ||
904 | if (tx) { | 904 | if (tx) { |
905 | desc_tx = dmaengine_prep_slave_sg(master->dma_tx, | 905 | desc_tx = dmaengine_prep_slave_sg(master->dma_tx, |
906 | tx->sgl, tx->nents, DMA_TO_DEVICE, | 906 | tx->sgl, tx->nents, DMA_MEM_TO_DEV, |
907 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 907 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
908 | if (!desc_tx) | 908 | if (!desc_tx) |
909 | goto no_dma; | 909 | goto no_dma; |
@@ -915,7 +915,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, | |||
915 | 915 | ||
916 | if (rx) { | 916 | if (rx) { |
917 | desc_rx = dmaengine_prep_slave_sg(master->dma_rx, | 917 | desc_rx = dmaengine_prep_slave_sg(master->dma_rx, |
918 | rx->sgl, rx->nents, DMA_FROM_DEVICE, | 918 | rx->sgl, rx->nents, DMA_DEV_TO_MEM, |
919 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 919 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
920 | if (!desc_rx) | 920 | if (!desc_rx) |
921 | goto no_dma; | 921 | goto no_dma; |
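The spi-imx hunks above swap DMA_TO_DEVICE/DMA_FROM_DEVICE for DMA_MEM_TO_DEV/DMA_DEV_TO_MEM: the former belong to enum dma_data_direction and are meant for buffer mapping, while dmaengine_prep_slave_sg() takes an enum dma_transfer_direction. A minimal sketch of the corrected calls (the example_prep_* wrappers are invented for illustration):

/*
 * Illustrative only: dmaengine_prep_slave_sg() takes enum dma_transfer_direction
 * (DMA_MEM_TO_DEV / DMA_DEV_TO_MEM), not the dma_data_direction values used for
 * mapping (DMA_TO_DEVICE / DMA_FROM_DEVICE), which is what this hunk corrects.
 */
#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
example_prep_tx(struct dma_chan *chan, struct scatterlist *sgl, unsigned int nents)
{
        /* TX path: memory -> peripheral */
        return dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

static struct dma_async_tx_descriptor *
example_prep_rx(struct dma_chan *chan, struct scatterlist *sgl, unsigned int nents)
{
        /* RX path: peripheral -> memory */
        return dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}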
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index ecae0d4e2945..965d2bdcfdcc 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c | |||
@@ -588,7 +588,7 @@ static int mpc512x_psc_spi_of_remove(struct platform_device *op) | |||
588 | return mpc512x_psc_spi_do_remove(&op->dev); | 588 | return mpc512x_psc_spi_do_remove(&op->dev); |
589 | } | 589 | } |
590 | 590 | ||
591 | static struct of_device_id mpc512x_psc_spi_of_match[] = { | 591 | static const struct of_device_id mpc512x_psc_spi_of_match[] = { |
592 | { .compatible = "fsl,mpc5121-psc-spi", }, | 592 | { .compatible = "fsl,mpc5121-psc-spi", }, |
593 | {}, | 593 | {}, |
594 | }; | 594 | }; |
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c index b283d537d16a..e99d6a93d394 100644 --- a/drivers/spi/spi-octeon.c +++ b/drivers/spi/spi-octeon.c | |||
@@ -238,7 +238,7 @@ static int octeon_spi_remove(struct platform_device *pdev) | |||
238 | return 0; | 238 | return 0; |
239 | } | 239 | } |
240 | 240 | ||
241 | static struct of_device_id octeon_spi_match[] = { | 241 | static const struct of_device_id octeon_spi_match[] = { |
242 | { .compatible = "cavium,octeon-3010-spi", }, | 242 | { .compatible = "cavium,octeon-3010-spi", }, |
243 | {}, | 243 | {}, |
244 | }; | 244 | }; |
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c index d890d309dff9..35b332dacb13 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/pm_runtime.h> | ||
27 | #include <linux/err.h> | 28 | #include <linux/err.h> |
28 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
29 | #include <linux/io.h> | 30 | #include <linux/io.h> |
@@ -294,16 +295,6 @@ static int omap1_spi100k_setup(struct spi_device *spi) | |||
294 | return ret; | 295 | return ret; |
295 | } | 296 | } |
296 | 297 | ||
297 | static int omap1_spi100k_prepare_hardware(struct spi_master *master) | ||
298 | { | ||
299 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); | ||
300 | |||
301 | clk_prepare_enable(spi100k->ick); | ||
302 | clk_prepare_enable(spi100k->fck); | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static int omap1_spi100k_transfer_one_message(struct spi_master *master, | 298 | static int omap1_spi100k_transfer_one_message(struct spi_master *master, |
308 | struct spi_message *m) | 299 | struct spi_message *m) |
309 | { | 300 | { |
@@ -372,16 +363,6 @@ static int omap1_spi100k_transfer_one_message(struct spi_master *master, | |||
372 | return status; | 363 | return status; |
373 | } | 364 | } |
374 | 365 | ||
375 | static int omap1_spi100k_unprepare_hardware(struct spi_master *master) | ||
376 | { | ||
377 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); | ||
378 | |||
379 | clk_disable_unprepare(spi100k->ick); | ||
380 | clk_disable_unprepare(spi100k->fck); | ||
381 | |||
382 | return 0; | ||
383 | } | ||
384 | |||
385 | static int omap1_spi100k_probe(struct platform_device *pdev) | 366 | static int omap1_spi100k_probe(struct platform_device *pdev) |
386 | { | 367 | { |
387 | struct spi_master *master; | 368 | struct spi_master *master; |
@@ -402,14 +383,12 @@ static int omap1_spi100k_probe(struct platform_device *pdev) | |||
402 | 383 | ||
403 | master->setup = omap1_spi100k_setup; | 384 | master->setup = omap1_spi100k_setup; |
404 | master->transfer_one_message = omap1_spi100k_transfer_one_message; | 385 | master->transfer_one_message = omap1_spi100k_transfer_one_message; |
405 | master->prepare_transfer_hardware = omap1_spi100k_prepare_hardware; | ||
406 | master->unprepare_transfer_hardware = omap1_spi100k_unprepare_hardware; | ||
407 | master->cleanup = NULL; | ||
408 | master->num_chipselect = 2; | 386 | master->num_chipselect = 2; |
409 | master->mode_bits = MODEBITS; | 387 | master->mode_bits = MODEBITS; |
410 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); | 388 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); |
411 | master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16); | 389 | master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16); |
412 | master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ; | 390 | master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ; |
391 | master->auto_runtime_pm = true; | ||
413 | 392 | ||
414 | spi100k = spi_master_get_devdata(master); | 393 | spi100k = spi_master_get_devdata(master); |
415 | 394 | ||
@@ -434,22 +413,96 @@ static int omap1_spi100k_probe(struct platform_device *pdev) | |||
434 | goto err; | 413 | goto err; |
435 | } | 414 | } |
436 | 415 | ||
416 | status = clk_prepare_enable(spi100k->ick); | ||
417 | if (status != 0) { | ||
418 | dev_err(&pdev->dev, "failed to enable ick: %d\n", status); | ||
419 | goto err; | ||
420 | } | ||
421 | |||
422 | status = clk_prepare_enable(spi100k->fck); | ||
423 | if (status != 0) { | ||
424 | dev_err(&pdev->dev, "failed to enable fck: %d\n", status); | ||
425 | goto err_ick; | ||
426 | } | ||
427 | |||
428 | pm_runtime_enable(&pdev->dev); | ||
429 | pm_runtime_set_active(&pdev->dev); | ||
430 | |||
437 | status = devm_spi_register_master(&pdev->dev, master); | 431 | status = devm_spi_register_master(&pdev->dev, master); |
438 | if (status < 0) | 432 | if (status < 0) |
439 | goto err; | 433 | goto err_fck; |
440 | 434 | ||
441 | return status; | 435 | return status; |
442 | 436 | ||
437 | err_fck: | ||
438 | clk_disable_unprepare(spi100k->fck); | ||
439 | err_ick: | ||
440 | clk_disable_unprepare(spi100k->ick); | ||
443 | err: | 441 | err: |
444 | spi_master_put(master); | 442 | spi_master_put(master); |
445 | return status; | 443 | return status; |
446 | } | 444 | } |
447 | 445 | ||
446 | static int omap1_spi100k_remove(struct platform_device *pdev) | ||
447 | { | ||
448 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | ||
449 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); | ||
450 | |||
451 | pm_runtime_disable(&pdev->dev); | ||
452 | |||
453 | clk_disable_unprepare(spi100k->fck); | ||
454 | clk_disable_unprepare(spi100k->ick); | ||
455 | |||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | #ifdef CONFIG_PM | ||
460 | static int omap1_spi100k_runtime_suspend(struct device *dev) | ||
461 | { | ||
462 | struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); | ||
463 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); | ||
464 | |||
465 | clk_disable_unprepare(spi100k->ick); | ||
466 | clk_disable_unprepare(spi100k->fck); | ||
467 | |||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | static int omap1_spi100k_runtime_resume(struct device *dev) | ||
472 | { | ||
473 | struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); | ||
474 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); | ||
475 | int ret; | ||
476 | |||
477 | ret = clk_prepare_enable(spi100k->ick); | ||
478 | if (ret != 0) { | ||
479 | dev_err(dev, "Failed to enable ick: %d\n", ret); | ||
480 | return ret; | ||
481 | } | ||
482 | |||
483 | ret = clk_prepare_enable(spi100k->fck); | ||
484 | if (ret != 0) { | ||
485 | dev_err(dev, "Failed to enable fck: %d\n", ret); | ||
486 | clk_disable_unprepare(spi100k->ick); | ||
487 | return ret; | ||
488 | } | ||
489 | |||
490 | return 0; | ||
491 | } | ||
492 | #endif | ||
493 | |||
494 | static const struct dev_pm_ops omap1_spi100k_pm = { | ||
495 | SET_RUNTIME_PM_OPS(omap1_spi100k_runtime_suspend, | ||
496 | omap1_spi100k_runtime_resume, NULL) | ||
497 | }; | ||
498 | |||
448 | static struct platform_driver omap1_spi100k_driver = { | 499 | static struct platform_driver omap1_spi100k_driver = { |
449 | .driver = { | 500 | .driver = { |
450 | .name = "omap1_spi100k", | 501 | .name = "omap1_spi100k", |
502 | .pm = &omap1_spi100k_pm, | ||
451 | }, | 503 | }, |
452 | .probe = omap1_spi100k_probe, | 504 | .probe = omap1_spi100k_probe, |
505 | .remove = omap1_spi100k_remove, | ||
453 | }; | 506 | }; |
454 | 507 | ||
455 | module_platform_driver(omap1_spi100k_driver); | 508 | module_platform_driver(omap1_spi100k_driver); |
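With this conversion the ick/fck clocks are enabled once at probe, the new runtime_suspend/resume callbacks gate them afterwards, and auto_runtime_pm lets the SPI core take a runtime-PM reference on the controller's parent device around message pumping. A hedged sketch of how that reference counting looks from a caller's side, using the generic runtime-PM API rather than code from this driver:

/*
 * Generic illustration of the runtime-PM pattern enabled above; this is not
 * code from spi-omap-100k.c, and example_do_io() is an invented helper.
 */
#include <linux/pm_runtime.h>

static int example_do_io(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev); /* triggers runtime_resume(): clocks on */
        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... perform the transfer while the device is powered ... */

        pm_runtime_put(dev);            /* may trigger runtime_suspend() */
        return 0;
}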
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c index 3c0844457c07..55576db31549 100644 --- a/drivers/spi/spi-omap-uwire.c +++ b/drivers/spi/spi-omap-uwire.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <linux/module.h> | 44 | #include <linux/module.h> |
45 | #include <linux/io.h> | 45 | #include <linux/io.h> |
46 | 46 | ||
47 | #include <asm/irq.h> | ||
48 | #include <mach/hardware.h> | 47 | #include <mach/hardware.h> |
49 | #include <asm/mach-types.h> | 48 | #include <asm/mach-types.h> |
50 | 49 | ||
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 4df8942058de..d1a5b9fc3eba 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c | |||
@@ -1210,6 +1210,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, | |||
1210 | struct omap2_mcspi *mcspi; | 1210 | struct omap2_mcspi *mcspi; |
1211 | struct omap2_mcspi_dma *mcspi_dma; | 1211 | struct omap2_mcspi_dma *mcspi_dma; |
1212 | struct spi_transfer *t; | 1212 | struct spi_transfer *t; |
1213 | int status; | ||
1213 | 1214 | ||
1214 | spi = m->spi; | 1215 | spi = m->spi; |
1215 | mcspi = spi_master_get_devdata(master); | 1216 | mcspi = spi_master_get_devdata(master); |
@@ -1229,7 +1230,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, | |||
1229 | tx_buf ? "tx" : "", | 1230 | tx_buf ? "tx" : "", |
1230 | rx_buf ? "rx" : "", | 1231 | rx_buf ? "rx" : "", |
1231 | t->bits_per_word); | 1232 | t->bits_per_word); |
1232 | return -EINVAL; | 1233 | status = -EINVAL; |
1234 | goto out; | ||
1233 | } | 1235 | } |
1234 | 1236 | ||
1235 | if (m->is_dma_mapped || len < DMA_MIN_BYTES) | 1237 | if (m->is_dma_mapped || len < DMA_MIN_BYTES) |
@@ -1241,7 +1243,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, | |||
1241 | if (dma_mapping_error(mcspi->dev, t->tx_dma)) { | 1243 | if (dma_mapping_error(mcspi->dev, t->tx_dma)) { |
1242 | dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", | 1244 | dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", |
1243 | 'T', len); | 1245 | 'T', len); |
1244 | return -EINVAL; | 1246 | status = -EINVAL; |
1247 | goto out; | ||
1245 | } | 1248 | } |
1246 | } | 1249 | } |
1247 | if (mcspi_dma->dma_rx && rx_buf != NULL) { | 1250 | if (mcspi_dma->dma_rx && rx_buf != NULL) { |
@@ -1253,14 +1256,19 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, | |||
1253 | if (tx_buf != NULL) | 1256 | if (tx_buf != NULL) |
1254 | dma_unmap_single(mcspi->dev, t->tx_dma, | 1257 | dma_unmap_single(mcspi->dev, t->tx_dma, |
1255 | len, DMA_TO_DEVICE); | 1258 | len, DMA_TO_DEVICE); |
1256 | return -EINVAL; | 1259 | status = -EINVAL; |
1260 | goto out; | ||
1257 | } | 1261 | } |
1258 | } | 1262 | } |
1259 | } | 1263 | } |
1260 | 1264 | ||
1261 | omap2_mcspi_work(mcspi, m); | 1265 | omap2_mcspi_work(mcspi, m); |
1266 | /* spi_finalize_current_message() changes the status inside the | ||
1267 | * spi_message, so save the status here. */ | ||
1268 | status = m->status; | ||
1269 | out: | ||
1262 | spi_finalize_current_message(master); | 1270 | spi_finalize_current_message(master); |
1263 | return 0; | 1271 | return status; |
1264 | } | 1272 | } |
1265 | 1273 | ||
1266 | static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) | 1274 | static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) |
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 89ca162801da..94af80676684 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
@@ -285,7 +285,12 @@ | |||
285 | */ | 285 | */ |
286 | #define DEFAULT_SSP_REG_IMSC 0x0UL | 286 | #define DEFAULT_SSP_REG_IMSC 0x0UL |
287 | #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC | 287 | #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC |
288 | #define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC) | 288 | #define ENABLE_ALL_INTERRUPTS ( \ |
289 | SSP_IMSC_MASK_RORIM | \ | ||
290 | SSP_IMSC_MASK_RTIM | \ | ||
291 | SSP_IMSC_MASK_RXIM | \ | ||
292 | SSP_IMSC_MASK_TXIM \ | ||
293 | ) | ||
289 | 294 | ||
290 | #define CLEAR_ALL_INTERRUPTS 0x3 | 295 | #define CLEAR_ALL_INTERRUPTS 0x3 |
291 | 296 | ||
@@ -534,12 +539,12 @@ static void giveback(struct pl022 *pl022) | |||
534 | pl022->cur_msg = NULL; | 539 | pl022->cur_msg = NULL; |
535 | pl022->cur_transfer = NULL; | 540 | pl022->cur_transfer = NULL; |
536 | pl022->cur_chip = NULL; | 541 | pl022->cur_chip = NULL; |
537 | spi_finalize_current_message(pl022->master); | ||
538 | 542 | ||
539 | /* disable the SPI/SSP operation */ | 543 | /* disable the SPI/SSP operation */ |
540 | writew((readw(SSP_CR1(pl022->virtbase)) & | 544 | writew((readw(SSP_CR1(pl022->virtbase)) & |
541 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); | 545 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); |
542 | 546 | ||
547 | spi_finalize_current_message(pl022->master); | ||
543 | } | 548 | } |
544 | 549 | ||
545 | /** | 550 | /** |
@@ -1251,7 +1256,6 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) | |||
1251 | struct pl022 *pl022 = dev_id; | 1256 | struct pl022 *pl022 = dev_id; |
1252 | struct spi_message *msg = pl022->cur_msg; | 1257 | struct spi_message *msg = pl022->cur_msg; |
1253 | u16 irq_status = 0; | 1258 | u16 irq_status = 0; |
1254 | u16 flag = 0; | ||
1255 | 1259 | ||
1256 | if (unlikely(!msg)) { | 1260 | if (unlikely(!msg)) { |
1257 | dev_err(&pl022->adev->dev, | 1261 | dev_err(&pl022->adev->dev, |
@@ -1280,9 +1284,6 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) | |||
1280 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) | 1284 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) |
1281 | dev_err(&pl022->adev->dev, | 1285 | dev_err(&pl022->adev->dev, |
1282 | "RXFIFO is full\n"); | 1286 | "RXFIFO is full\n"); |
1283 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) | ||
1284 | dev_err(&pl022->adev->dev, | ||
1285 | "TXFIFO is full\n"); | ||
1286 | 1287 | ||
1287 | /* | 1288 | /* |
1288 | * Disable and clear interrupts, disable SSP, | 1289 | * Disable and clear interrupts, disable SSP, |
@@ -1303,8 +1304,7 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) | |||
1303 | 1304 | ||
1304 | readwriter(pl022); | 1305 | readwriter(pl022); |
1305 | 1306 | ||
1306 | if ((pl022->tx == pl022->tx_end) && (flag == 0)) { | 1307 | if (pl022->tx == pl022->tx_end) { |
1307 | flag = 1; | ||
1308 | /* Disable Transmit interrupt, enable receive interrupt */ | 1308 | /* Disable Transmit interrupt, enable receive interrupt */ |
1309 | writew((readw(SSP_IMSC(pl022->virtbase)) & | 1309 | writew((readw(SSP_IMSC(pl022->virtbase)) & |
1310 | ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM, | 1310 | ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM, |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 6f72ad01e041..e3223ac75a7c 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/kernel.h> | ||
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
24 | #include <linux/spi/pxa2xx_spi.h> | 25 | #include <linux/spi/pxa2xx_spi.h> |
25 | #include <linux/spi/spi.h> | 26 | #include <linux/spi/spi.h> |
@@ -30,10 +31,6 @@ | |||
30 | #include <linux/pm_runtime.h> | 31 | #include <linux/pm_runtime.h> |
31 | #include <linux/acpi.h> | 32 | #include <linux/acpi.h> |
32 | 33 | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/irq.h> | ||
35 | #include <asm/delay.h> | ||
36 | |||
37 | #include "spi-pxa2xx.h" | 34 | #include "spi-pxa2xx.h" |
38 | 35 | ||
39 | MODULE_AUTHOR("Stephen Street"); | 36 | MODULE_AUTHOR("Stephen Street"); |
@@ -67,54 +64,6 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | |||
67 | #define LPSS_TX_LOTHRESH_DFLT 160 | 64 | #define LPSS_TX_LOTHRESH_DFLT 160 |
68 | #define LPSS_TX_HITHRESH_DFLT 224 | 65 | #define LPSS_TX_HITHRESH_DFLT 224 |
69 | 66 | ||
70 | struct quark_spi_rate { | ||
71 | u32 bitrate; | ||
72 | u32 dds_clk_rate; | ||
73 | u32 clk_div; | ||
74 | }; | ||
75 | |||
76 | /* | ||
77 | * 'rate', 'dds', 'clk_div' lookup table, which is defined in | ||
78 | * the Quark SPI datasheet. | ||
79 | */ | ||
80 | static const struct quark_spi_rate quark_spi_rate_table[] = { | ||
81 | /* bitrate, dds_clk_rate, clk_div */ | ||
82 | {50000000, 0x800000, 0}, | ||
83 | {40000000, 0x666666, 0}, | ||
84 | {25000000, 0x400000, 0}, | ||
85 | {20000000, 0x666666, 1}, | ||
86 | {16667000, 0x800000, 2}, | ||
87 | {13333000, 0x666666, 2}, | ||
88 | {12500000, 0x200000, 0}, | ||
89 | {10000000, 0x800000, 4}, | ||
90 | {8000000, 0x666666, 4}, | ||
91 | {6250000, 0x400000, 3}, | ||
92 | {5000000, 0x400000, 4}, | ||
93 | {4000000, 0x666666, 9}, | ||
94 | {3125000, 0x80000, 0}, | ||
95 | {2500000, 0x400000, 9}, | ||
96 | {2000000, 0x666666, 19}, | ||
97 | {1563000, 0x40000, 0}, | ||
98 | {1250000, 0x200000, 9}, | ||
99 | {1000000, 0x400000, 24}, | ||
100 | {800000, 0x666666, 49}, | ||
101 | {781250, 0x20000, 0}, | ||
102 | {625000, 0x200000, 19}, | ||
103 | {500000, 0x400000, 49}, | ||
104 | {400000, 0x666666, 99}, | ||
105 | {390625, 0x10000, 0}, | ||
106 | {250000, 0x400000, 99}, | ||
107 | {200000, 0x666666, 199}, | ||
108 | {195313, 0x8000, 0}, | ||
109 | {125000, 0x100000, 49}, | ||
110 | {100000, 0x200000, 124}, | ||
111 | {50000, 0x100000, 124}, | ||
112 | {25000, 0x80000, 124}, | ||
113 | {10016, 0x20000, 77}, | ||
114 | {5040, 0x20000, 154}, | ||
115 | {1002, 0x8000, 194}, | ||
116 | }; | ||
117 | |||
118 | /* Offset from drv_data->lpss_base */ | 67 | /* Offset from drv_data->lpss_base */ |
119 | #define GENERAL_REG 0x08 | 68 | #define GENERAL_REG 0x08 |
120 | #define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24) | 69 | #define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24) |
@@ -701,25 +650,124 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | |||
701 | } | 650 | } |
702 | 651 | ||
703 | /* | 652 | /* |
704 | * The Quark SPI data sheet gives a table, and for the given 'rate', | 653 | * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply |
705 | * the 'dds' and 'clk_div' can be found in the table. | 654 | * input frequency by fractions of 2^24. It also has a divider by 5. |
655 | * | ||
656 | * There are formulas to get baud rate value for given input frequency and | ||
657 | * divider parameters, such as DDS_CLK_RATE and SCR: | ||
658 | * | ||
659 | * Fsys = 200MHz | ||
660 | * | ||
661 | * Fssp = Fsys * DDS_CLK_RATE / 2^24 (1) | ||
662 | * Baud rate = Fsclk = Fssp / (2 * (SCR + 1)) (2) | ||
663 | * | ||
664 | * DDS_CLK_RATE is either 2^n or 2^n / 5. | ||
665 | * SCR is in range 0 .. 255 | ||
666 | * | ||
667 | * Divisor = 5^i * 2^j * 2 * k | ||
668 | * i = [0, 1] i = 1 iff j = 0 or j > 3 | ||
669 | * j = [0, 23] j = 0 iff i = 1 | ||
670 | * k = [1, 256] | ||
671 | * Special case: j = 0, i = 1: Divisor = 2 / 5 | ||
672 | * | ||
673 | * According to the specification, the recommended values for DDS_CLK_RATE | ||
674 | * are: | ||
675 | * Case 1: 2^n, n = [0, 23] | ||
676 | * Case 2: 2^24 * 2 / 5 (0x666666) | ||
677 | * Case 3: less than or equal to 2^24 / 5 / 16 (0x33333) | ||
678 | * | ||
679 | * In all cases the lowest possible value is better. | ||
680 | * | ||
681 | * The function calculates parameters for all cases and chooses the one closest | ||
682 | * to the requested baud rate. | ||
706 | */ | 683 | */ |
707 | static u32 quark_x1000_set_clk_regvals(u32 rate, u32 *dds, u32 *clk_div) | 684 | static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds) |
708 | { | 685 | { |
709 | unsigned int i; | 686 | unsigned long xtal = 200000000; |
710 | 687 | unsigned long fref = xtal / 2; /* mandatory division by 2, | |
711 | for (i = 0; i < ARRAY_SIZE(quark_spi_rate_table); i++) { | 688 | see (2) */ |
712 | if (rate >= quark_spi_rate_table[i].bitrate) { | 689 | /* case 3 */ |
713 | *dds = quark_spi_rate_table[i].dds_clk_rate; | 690 | unsigned long fref1 = fref / 2; /* case 1 */ |
714 | *clk_div = quark_spi_rate_table[i].clk_div; | 691 | unsigned long fref2 = fref * 2 / 5; /* case 2 */ |
715 | return quark_spi_rate_table[i].bitrate; | 692 | unsigned long scale; |
693 | unsigned long q, q1, q2; | ||
694 | long r, r1, r2; | ||
695 | u32 mul; | ||
696 | |||
697 | /* Case 1 */ | ||
698 | |||
699 | /* Set initial value for DDS_CLK_RATE */ | ||
700 | mul = (1 << 24) >> 1; | ||
701 | |||
702 | /* Calculate initial quot */ | ||
703 | q1 = DIV_ROUND_CLOSEST(fref1, rate); | ||
704 | |||
705 | /* Scale q1 if it's too big */ | ||
706 | if (q1 > 256) { | ||
707 | /* Scale q1 to range [1, 512] */ | ||
708 | scale = fls_long(q1 - 1); | ||
709 | if (scale > 9) { | ||
710 | q1 >>= scale - 9; | ||
711 | mul >>= scale - 9; | ||
716 | } | 712 | } |
713 | |||
714 | /* Round the result if we have a remainder */ | ||
715 | q1 += q1 & 1; | ||
717 | } | 716 | } |
718 | 717 | ||
719 | *dds = quark_spi_rate_table[i-1].dds_clk_rate; | 718 | /* Decrease DDS_CLK_RATE as much as we can without loss in precision */ |
720 | *clk_div = quark_spi_rate_table[i-1].clk_div; | 719 | scale = __ffs(q1); |
720 | q1 >>= scale; | ||
721 | mul >>= scale; | ||
722 | |||
723 | /* Get the remainder */ | ||
724 | r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate); | ||
725 | |||
726 | /* Case 2 */ | ||
727 | |||
728 | q2 = DIV_ROUND_CLOSEST(fref2, rate); | ||
729 | r2 = abs(fref2 / q2 - rate); | ||
721 | 730 | ||
722 | return quark_spi_rate_table[i-1].bitrate; | 731 | /* |
732 | * Choose the better of the two: the smaller the remainder, the better. We | ||
733 | * can't use case 2 if q2 is greater than 256, since the SCR register can | ||
734 | * only hold values 0 .. 255. | ||
735 | */ | ||
736 | if (r2 >= r1 || q2 > 256) { | ||
737 | /* case 1 is better */ | ||
738 | r = r1; | ||
739 | q = q1; | ||
740 | } else { | ||
741 | /* case 2 is better */ | ||
742 | r = r2; | ||
743 | q = q2; | ||
744 | mul = (1 << 24) * 2 / 5; | ||
745 | } | ||
746 | |||
747 | /* Check case 3 only if the divisor is big enough */ | ||
748 | if (fref / rate >= 80) { | ||
749 | u64 fssp; | ||
750 | u32 m; | ||
751 | |||
752 | /* Calculate initial quot */ | ||
753 | q1 = DIV_ROUND_CLOSEST(fref, rate); | ||
754 | m = (1 << 24) / q1; | ||
755 | |||
756 | /* Get the remainder */ | ||
757 | fssp = (u64)fref * m; | ||
758 | do_div(fssp, 1 << 24); | ||
759 | r1 = abs(fssp - rate); | ||
760 | |||
761 | /* Choose this one if it fits better */ | ||
762 | if (r1 < r) { | ||
763 | /* case 3 is better */ | ||
764 | q = 1; | ||
765 | mul = m; | ||
766 | } | ||
767 | } | ||
768 | |||
769 | *dds = mul; | ||
770 | return q - 1; | ||
723 | } | 771 | } |
724 | 772 | ||
725 | static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) | 773 | static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) |
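Formulas (1) and (2) in the comment above fully determine the resulting bit rate once DDS_CLK_RATE and SCR are chosen, so any (dds, scr) pair can be checked against the requested rate directly. A small user-space sketch of that check; the dds = 2^23, scr = 9 pair is just one assumed combination that yields 5 MHz, not necessarily the one the driver picks:

/*
 * Illustrative check of formulas (1) and (2): Fssp = 200 MHz * dds / 2^24,
 * bit rate = Fssp / (2 * (scr + 1)). The example values are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t quark_rate(uint32_t dds, uint32_t scr)
{
        uint64_t fssp = (200000000ULL * dds) >> 24;     /* formula (1) */

        return (uint32_t)(fssp / (2 * (scr + 1)));      /* formula (2) */
}

int main(void)
{
        printf("%u Hz\n", quark_rate(1U << 23, 9));     /* prints 5000000 */
        return 0;
}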
@@ -730,23 +778,25 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) | |||
730 | rate = min_t(int, ssp_clk, rate); | 778 | rate = min_t(int, ssp_clk, rate); |
731 | 779 | ||
732 | if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) | 780 | if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) |
733 | return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; | 781 | return (ssp_clk / (2 * rate) - 1) & 0xff; |
734 | else | 782 | else |
735 | return ((ssp_clk / rate - 1) & 0xfff) << 8; | 783 | return (ssp_clk / rate - 1) & 0xfff; |
736 | } | 784 | } |
737 | 785 | ||
738 | static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, | 786 | static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, |
739 | struct chip_data *chip, int rate) | 787 | struct chip_data *chip, int rate) |
740 | { | 788 | { |
741 | u32 clk_div; | 789 | unsigned int clk_div; |
742 | 790 | ||
743 | switch (drv_data->ssp_type) { | 791 | switch (drv_data->ssp_type) { |
744 | case QUARK_X1000_SSP: | 792 | case QUARK_X1000_SSP: |
745 | quark_x1000_set_clk_regvals(rate, &chip->dds_rate, &clk_div); | 793 | clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate); |
746 | return clk_div << 8; | 794 | break; |
747 | default: | 795 | default: |
748 | return ssp_get_clk_div(drv_data, rate); | 796 | clk_div = ssp_get_clk_div(drv_data, rate); |
797 | break; | ||
749 | } | 798 | } |
799 | return clk_div << 8; | ||
750 | } | 800 | } |
751 | 801 | ||
752 | static void pump_transfers(unsigned long data) | 802 | static void pump_transfers(unsigned long data) |
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index ff9cdbdb6672..810a7fae3479 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/pm_runtime.h> | 23 | #include <linux/pm_runtime.h> |
24 | #include <linux/spi/spi.h> | 24 | #include <linux/spi/spi.h> |
25 | #include <linux/dmaengine.h> | ||
26 | #include <linux/dma-mapping.h> | ||
25 | 27 | ||
26 | #define QUP_CONFIG 0x0000 | 28 | #define QUP_CONFIG 0x0000 |
27 | #define QUP_STATE 0x0004 | 29 | #define QUP_STATE 0x0004 |
@@ -116,6 +118,8 @@ | |||
116 | 118 | ||
117 | #define SPI_NUM_CHIPSELECTS 4 | 119 | #define SPI_NUM_CHIPSELECTS 4 |
118 | 120 | ||
121 | #define SPI_MAX_DMA_XFER (SZ_64K - 64) | ||
122 | |||
119 | /* high speed mode is when bus rate is greater than 26MHz */ | 123 | /* high speed mode is when bus rate is greater than 26MHz */ |
120 | #define SPI_HS_MIN_RATE 26000000 | 124 | #define SPI_HS_MIN_RATE 26000000 |
121 | #define SPI_MAX_RATE 50000000 | 125 | #define SPI_MAX_RATE 50000000 |
@@ -140,9 +144,14 @@ struct spi_qup { | |||
140 | struct completion done; | 144 | struct completion done; |
141 | int error; | 145 | int error; |
142 | int w_size; /* bytes per SPI word */ | 146 | int w_size; /* bytes per SPI word */ |
147 | int n_words; | ||
143 | int tx_bytes; | 148 | int tx_bytes; |
144 | int rx_bytes; | 149 | int rx_bytes; |
145 | int qup_v1; | 150 | int qup_v1; |
151 | |||
152 | int use_dma; | ||
153 | struct dma_slave_config rx_conf; | ||
154 | struct dma_slave_config tx_conf; | ||
146 | }; | 155 | }; |
147 | 156 | ||
148 | 157 | ||
@@ -198,7 +207,6 @@ static int spi_qup_set_state(struct spi_qup *controller, u32 state) | |||
198 | return 0; | 207 | return 0; |
199 | } | 208 | } |
200 | 209 | ||
201 | |||
202 | static void spi_qup_fifo_read(struct spi_qup *controller, | 210 | static void spi_qup_fifo_read(struct spi_qup *controller, |
203 | struct spi_transfer *xfer) | 211 | struct spi_transfer *xfer) |
204 | { | 212 | { |
@@ -266,6 +274,107 @@ static void spi_qup_fifo_write(struct spi_qup *controller, | |||
266 | } | 274 | } |
267 | } | 275 | } |
268 | 276 | ||
277 | static void spi_qup_dma_done(void *data) | ||
278 | { | ||
279 | struct spi_qup *qup = data; | ||
280 | |||
281 | complete(&qup->done); | ||
282 | } | ||
283 | |||
284 | static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer, | ||
285 | enum dma_transfer_direction dir, | ||
286 | dma_async_tx_callback callback) | ||
287 | { | ||
288 | struct spi_qup *qup = spi_master_get_devdata(master); | ||
289 | unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE; | ||
290 | struct dma_async_tx_descriptor *desc; | ||
291 | struct scatterlist *sgl; | ||
292 | struct dma_chan *chan; | ||
293 | dma_cookie_t cookie; | ||
294 | unsigned int nents; | ||
295 | |||
296 | if (dir == DMA_MEM_TO_DEV) { | ||
297 | chan = master->dma_tx; | ||
298 | nents = xfer->tx_sg.nents; | ||
299 | sgl = xfer->tx_sg.sgl; | ||
300 | } else { | ||
301 | chan = master->dma_rx; | ||
302 | nents = xfer->rx_sg.nents; | ||
303 | sgl = xfer->rx_sg.sgl; | ||
304 | } | ||
305 | |||
306 | desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags); | ||
307 | if (!desc) | ||
308 | return -EINVAL; | ||
309 | |||
310 | desc->callback = callback; | ||
311 | desc->callback_param = qup; | ||
312 | |||
313 | cookie = dmaengine_submit(desc); | ||
314 | |||
315 | return dma_submit_error(cookie); | ||
316 | } | ||
317 | |||
318 | static void spi_qup_dma_terminate(struct spi_master *master, | ||
319 | struct spi_transfer *xfer) | ||
320 | { | ||
321 | if (xfer->tx_buf) | ||
322 | dmaengine_terminate_all(master->dma_tx); | ||
323 | if (xfer->rx_buf) | ||
324 | dmaengine_terminate_all(master->dma_rx); | ||
325 | } | ||
326 | |||
327 | static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer) | ||
328 | { | ||
329 | dma_async_tx_callback rx_done = NULL, tx_done = NULL; | ||
330 | int ret; | ||
331 | |||
332 | if (xfer->rx_buf) | ||
333 | rx_done = spi_qup_dma_done; | ||
334 | else if (xfer->tx_buf) | ||
335 | tx_done = spi_qup_dma_done; | ||
336 | |||
337 | if (xfer->rx_buf) { | ||
338 | ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done); | ||
339 | if (ret) | ||
340 | return ret; | ||
341 | |||
342 | dma_async_issue_pending(master->dma_rx); | ||
343 | } | ||
344 | |||
345 | if (xfer->tx_buf) { | ||
346 | ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done); | ||
347 | if (ret) | ||
348 | return ret; | ||
349 | |||
350 | dma_async_issue_pending(master->dma_tx); | ||
351 | } | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer) | ||
357 | { | ||
358 | struct spi_qup *qup = spi_master_get_devdata(master); | ||
359 | int ret; | ||
360 | |||
361 | ret = spi_qup_set_state(qup, QUP_STATE_RUN); | ||
362 | if (ret) { | ||
363 | dev_warn(qup->dev, "cannot set RUN state\n"); | ||
364 | return ret; | ||
365 | } | ||
366 | |||
367 | ret = spi_qup_set_state(qup, QUP_STATE_PAUSE); | ||
368 | if (ret) { | ||
369 | dev_warn(qup->dev, "cannot set PAUSE state\n"); | ||
370 | return ret; | ||
371 | } | ||
372 | |||
373 | spi_qup_fifo_write(qup, xfer); | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
269 | static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) | 378 | static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) |
270 | { | 379 | { |
271 | struct spi_qup *controller = dev_id; | 380 | struct spi_qup *controller = dev_id; |
@@ -315,11 +424,13 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) | |||
315 | error = -EIO; | 424 | error = -EIO; |
316 | } | 425 | } |
317 | 426 | ||
318 | if (opflags & QUP_OP_IN_SERVICE_FLAG) | 427 | if (!controller->use_dma) { |
319 | spi_qup_fifo_read(controller, xfer); | 428 | if (opflags & QUP_OP_IN_SERVICE_FLAG) |
429 | spi_qup_fifo_read(controller, xfer); | ||
320 | 430 | ||
321 | if (opflags & QUP_OP_OUT_SERVICE_FLAG) | 431 | if (opflags & QUP_OP_OUT_SERVICE_FLAG) |
322 | spi_qup_fifo_write(controller, xfer); | 432 | spi_qup_fifo_write(controller, xfer); |
433 | } | ||
323 | 434 | ||
324 | spin_lock_irqsave(&controller->lock, flags); | 435 | spin_lock_irqsave(&controller->lock, flags); |
325 | controller->error = error; | 436 | controller->error = error; |
@@ -332,13 +443,35 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) | |||
332 | return IRQ_HANDLED; | 443 | return IRQ_HANDLED; |
333 | } | 444 | } |
334 | 445 | ||
446 | static u32 | ||
447 | spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer) | ||
448 | { | ||
449 | struct spi_qup *qup = spi_master_get_devdata(master); | ||
450 | u32 mode; | ||
451 | |||
452 | qup->w_size = 4; | ||
453 | |||
454 | if (xfer->bits_per_word <= 8) | ||
455 | qup->w_size = 1; | ||
456 | else if (xfer->bits_per_word <= 16) | ||
457 | qup->w_size = 2; | ||
458 | |||
459 | qup->n_words = xfer->len / qup->w_size; | ||
460 | |||
461 | if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32))) | ||
462 | mode = QUP_IO_M_MODE_FIFO; | ||
463 | else | ||
464 | mode = QUP_IO_M_MODE_BLOCK; | ||
465 | |||
466 | return mode; | ||
467 | } | ||
335 | 468 | ||
336 | /* set clock freq ... bits per word */ | 469 | /* set clock freq ... bits per word */ |
337 | static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) | 470 | static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) |
338 | { | 471 | { |
339 | struct spi_qup *controller = spi_master_get_devdata(spi->master); | 472 | struct spi_qup *controller = spi_master_get_devdata(spi->master); |
340 | u32 config, iomode, mode, control; | 473 | u32 config, iomode, mode, control; |
341 | int ret, n_words, w_size; | 474 | int ret, n_words; |
342 | 475 | ||
343 | if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { | 476 | if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { |
344 | dev_err(controller->dev, "too big size for loopback %d > %d\n", | 477 | dev_err(controller->dev, "too big size for loopback %d > %d\n", |
@@ -358,35 +491,54 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) | |||
358 | return -EIO; | 491 | return -EIO; |
359 | } | 492 | } |
360 | 493 | ||
361 | w_size = 4; | 494 | mode = spi_qup_get_mode(spi->master, xfer); |
362 | if (xfer->bits_per_word <= 8) | 495 | n_words = controller->n_words; |
363 | w_size = 1; | ||
364 | else if (xfer->bits_per_word <= 16) | ||
365 | w_size = 2; | ||
366 | 496 | ||
367 | n_words = xfer->len / w_size; | 497 | if (mode == QUP_IO_M_MODE_FIFO) { |
368 | controller->w_size = w_size; | ||
369 | |||
370 | if (n_words <= (controller->in_fifo_sz / sizeof(u32))) { | ||
371 | mode = QUP_IO_M_MODE_FIFO; | ||
372 | writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT); | 498 | writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT); |
373 | writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT); | 499 | writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT); |
374 | /* must be zero for FIFO */ | 500 | /* must be zero for FIFO */ |
375 | writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); | 501 | writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); |
376 | writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); | 502 | writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); |
377 | } else { | 503 | } else if (!controller->use_dma) { |
378 | mode = QUP_IO_M_MODE_BLOCK; | ||
379 | writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT); | 504 | writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT); |
380 | writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT); | 505 | writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT); |
381 | /* must be zero for BLOCK and BAM */ | 506 | /* must be zero for BLOCK and BAM */ |
382 | writel_relaxed(0, controller->base + QUP_MX_READ_CNT); | 507 | writel_relaxed(0, controller->base + QUP_MX_READ_CNT); |
383 | writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); | 508 | writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); |
509 | } else { | ||
510 | mode = QUP_IO_M_MODE_BAM; | ||
511 | writel_relaxed(0, controller->base + QUP_MX_READ_CNT); | ||
512 | writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); | ||
513 | |||
514 | if (!controller->qup_v1) { | ||
515 | void __iomem *input_cnt; | ||
516 | |||
517 | input_cnt = controller->base + QUP_MX_INPUT_CNT; | ||
518 | /* | ||
519 | * for DMA transfers, both QUP_MX_INPUT_CNT and | ||
520 | * QUP_MX_OUTPUT_CNT must be zero to all cases but one. | ||
521 | * That case is a non-balanced transfer when there is | ||
522 | * only a rx_buf. | ||
523 | */ | ||
524 | if (xfer->tx_buf) | ||
525 | writel_relaxed(0, input_cnt); | ||
526 | else | ||
527 | writel_relaxed(n_words, input_cnt); | ||
528 | |||
529 | writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); | ||
530 | } | ||
384 | } | 531 | } |
385 | 532 | ||
386 | iomode = readl_relaxed(controller->base + QUP_IO_M_MODES); | 533 | iomode = readl_relaxed(controller->base + QUP_IO_M_MODES); |
387 | /* Set input and output transfer mode */ | 534 | /* Set input and output transfer mode */ |
388 | iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK); | 535 | iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK); |
389 | iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); | 536 | |
537 | if (!controller->use_dma) | ||
538 | iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); | ||
539 | else | ||
540 | iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN; | ||
541 | |||
390 | iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT); | 542 | iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT); |
391 | iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT); | 543 | iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT); |
392 | 544 | ||
@@ -428,11 +580,31 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) | |||
428 | config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N); | 580 | config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N); |
429 | config |= xfer->bits_per_word - 1; | 581 | config |= xfer->bits_per_word - 1; |
430 | config |= QUP_CONFIG_SPI_MODE; | 582 | config |= QUP_CONFIG_SPI_MODE; |
583 | |||
584 | if (controller->use_dma) { | ||
585 | if (!xfer->tx_buf) | ||
586 | config |= QUP_CONFIG_NO_OUTPUT; | ||
587 | if (!xfer->rx_buf) | ||
588 | config |= QUP_CONFIG_NO_INPUT; | ||
589 | } | ||
590 | |||
431 | writel_relaxed(config, controller->base + QUP_CONFIG); | 591 | writel_relaxed(config, controller->base + QUP_CONFIG); |
432 | 592 | ||
433 | /* only write to OPERATIONAL_MASK when register is present */ | 593 | /* only write to OPERATIONAL_MASK when register is present */ |
434 | if (!controller->qup_v1) | 594 | if (!controller->qup_v1) { |
435 | writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK); | 595 | u32 mask = 0; |
596 | |||
597 | /* | ||
598 | * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO | ||
599 | * status change in BAM mode | ||
600 | */ | ||
601 | |||
602 | if (mode == QUP_IO_M_MODE_BAM) | ||
603 | mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG; | ||
604 | |||
605 | writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK); | ||
606 | } | ||
607 | |||
436 | return 0; | 608 | return 0; |
437 | } | 609 | } |
438 | 610 | ||
@@ -461,17 +633,13 @@ static int spi_qup_transfer_one(struct spi_master *master, | |||
461 | controller->tx_bytes = 0; | 633 | controller->tx_bytes = 0; |
462 | spin_unlock_irqrestore(&controller->lock, flags); | 634 | spin_unlock_irqrestore(&controller->lock, flags); |
463 | 635 | ||
464 | if (spi_qup_set_state(controller, QUP_STATE_RUN)) { | 636 | if (controller->use_dma) |
465 | dev_warn(controller->dev, "cannot set RUN state\n"); | 637 | ret = spi_qup_do_dma(master, xfer); |
466 | goto exit; | 638 | else |
467 | } | 639 | ret = spi_qup_do_pio(master, xfer); |
468 | 640 | ||
469 | if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) { | 641 | if (ret) |
470 | dev_warn(controller->dev, "cannot set PAUSE state\n"); | ||
471 | goto exit; | 642 | goto exit; |
472 | } | ||
473 | |||
474 | spi_qup_fifo_write(controller, xfer); | ||
475 | 643 | ||
476 | if (spi_qup_set_state(controller, QUP_STATE_RUN)) { | 644 | if (spi_qup_set_state(controller, QUP_STATE_RUN)) { |
477 | dev_warn(controller->dev, "cannot set EXECUTE state\n"); | 645 | dev_warn(controller->dev, "cannot set EXECUTE state\n"); |
@@ -480,6 +648,7 @@ static int spi_qup_transfer_one(struct spi_master *master, | |||
480 | 648 | ||
481 | if (!wait_for_completion_timeout(&controller->done, timeout)) | 649 | if (!wait_for_completion_timeout(&controller->done, timeout)) |
482 | ret = -ETIMEDOUT; | 650 | ret = -ETIMEDOUT; |
651 | |||
483 | exit: | 652 | exit: |
484 | spi_qup_set_state(controller, QUP_STATE_RESET); | 653 | spi_qup_set_state(controller, QUP_STATE_RESET); |
485 | spin_lock_irqsave(&controller->lock, flags); | 654 | spin_lock_irqsave(&controller->lock, flags); |
@@ -487,6 +656,97 @@ exit: | |||
487 | if (!ret) | 656 | if (!ret) |
488 | ret = controller->error; | 657 | ret = controller->error; |
489 | spin_unlock_irqrestore(&controller->lock, flags); | 658 | spin_unlock_irqrestore(&controller->lock, flags); |
659 | |||
660 | if (ret && controller->use_dma) | ||
661 | spi_qup_dma_terminate(master, xfer); | ||
662 | |||
663 | return ret; | ||
664 | } | ||
665 | |||
666 | static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi, | ||
667 | struct spi_transfer *xfer) | ||
668 | { | ||
669 | struct spi_qup *qup = spi_master_get_devdata(master); | ||
670 | size_t dma_align = dma_get_cache_alignment(); | ||
671 | u32 mode; | ||
672 | |||
673 | qup->use_dma = 0; | ||
674 | |||
675 | if (xfer->rx_buf && (xfer->len % qup->in_blk_sz || | ||
676 | IS_ERR_OR_NULL(master->dma_rx) || | ||
677 | !IS_ALIGNED((size_t)xfer->rx_buf, dma_align))) | ||
678 | return false; | ||
679 | |||
680 | if (xfer->tx_buf && (xfer->len % qup->out_blk_sz || | ||
681 | IS_ERR_OR_NULL(master->dma_tx) || | ||
682 | !IS_ALIGNED((size_t)xfer->tx_buf, dma_align))) | ||
683 | return false; | ||
684 | |||
685 | mode = spi_qup_get_mode(master, xfer); | ||
686 | if (mode == QUP_IO_M_MODE_FIFO) | ||
687 | return false; | ||
688 | |||
689 | qup->use_dma = 1; | ||
690 | |||
691 | return true; | ||
692 | } | ||
693 | |||
694 | static void spi_qup_release_dma(struct spi_master *master) | ||
695 | { | ||
696 | if (!IS_ERR_OR_NULL(master->dma_rx)) | ||
697 | dma_release_channel(master->dma_rx); | ||
698 | if (!IS_ERR_OR_NULL(master->dma_tx)) | ||
699 | dma_release_channel(master->dma_tx); | ||
700 | } | ||
701 | |||
702 | static int spi_qup_init_dma(struct spi_master *master, resource_size_t base) | ||
703 | { | ||
704 | struct spi_qup *spi = spi_master_get_devdata(master); | ||
705 | struct dma_slave_config *rx_conf = &spi->rx_conf, | ||
706 | *tx_conf = &spi->tx_conf; | ||
707 | struct device *dev = spi->dev; | ||
708 | int ret; | ||
709 | |||
710 | /* allocate dma resources, if available */ | ||
711 | master->dma_rx = dma_request_slave_channel_reason(dev, "rx"); | ||
712 | if (IS_ERR(master->dma_rx)) | ||
713 | return PTR_ERR(master->dma_rx); | ||
714 | |||
715 | master->dma_tx = dma_request_slave_channel_reason(dev, "tx"); | ||
716 | if (IS_ERR(master->dma_tx)) { | ||
717 | ret = PTR_ERR(master->dma_tx); | ||
718 | goto err_tx; | ||
719 | } | ||
720 | |||
721 | /* set DMA parameters */ | ||
722 | rx_conf->direction = DMA_DEV_TO_MEM; | ||
723 | rx_conf->device_fc = 1; | ||
724 | rx_conf->src_addr = base + QUP_INPUT_FIFO; | ||
725 | rx_conf->src_maxburst = spi->in_blk_sz; | ||
726 | |||
727 | tx_conf->direction = DMA_MEM_TO_DEV; | ||
728 | tx_conf->device_fc = 1; | ||
729 | tx_conf->dst_addr = base + QUP_OUTPUT_FIFO; | ||
730 | tx_conf->dst_maxburst = spi->out_blk_sz; | ||
731 | |||
732 | ret = dmaengine_slave_config(master->dma_rx, rx_conf); | ||
733 | if (ret) { | ||
734 | dev_err(dev, "failed to configure RX channel\n"); | ||
735 | goto err; | ||
736 | } | ||
737 | |||
738 | ret = dmaengine_slave_config(master->dma_tx, tx_conf); | ||
739 | if (ret) { | ||
740 | dev_err(dev, "failed to configure TX channel\n"); | ||
741 | goto err; | ||
742 | } | ||
743 | |||
744 | return 0; | ||
745 | |||
746 | err: | ||
747 | dma_release_channel(master->dma_tx); | ||
748 | err_tx: | ||
749 | dma_release_channel(master->dma_rx); | ||
490 | return ret; | 750 | return ret; |
491 | } | 751 | } |
492 | 752 | ||
@@ -498,7 +758,7 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
498 | struct resource *res; | 758 | struct resource *res; |
499 | struct device *dev; | 759 | struct device *dev; |
500 | void __iomem *base; | 760 | void __iomem *base; |
501 | u32 max_freq, iomode; | 761 | u32 max_freq, iomode, num_cs; |
502 | int ret, irq, size; | 762 | int ret, irq, size; |
503 | 763 | ||
504 | dev = &pdev->dev; | 764 | dev = &pdev->dev; |
@@ -550,10 +810,11 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
550 | } | 810 | } |
551 | 811 | ||
552 | /* use num-cs unless not present or out of range */ | 812 | /* use num-cs unless not present or out of range */ |
553 | if (of_property_read_u16(dev->of_node, "num-cs", | 813 | if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) || |
554 | &master->num_chipselect) || | 814 | num_cs > SPI_NUM_CHIPSELECTS) |
555 | (master->num_chipselect > SPI_NUM_CHIPSELECTS)) | ||
556 | master->num_chipselect = SPI_NUM_CHIPSELECTS; | 815 | master->num_chipselect = SPI_NUM_CHIPSELECTS; |
816 | else | ||
817 | master->num_chipselect = num_cs; | ||
557 | 818 | ||
558 | master->bus_num = pdev->id; | 819 | master->bus_num = pdev->id; |
559 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; | 820 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; |
@@ -562,6 +823,8 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
562 | master->transfer_one = spi_qup_transfer_one; | 823 | master->transfer_one = spi_qup_transfer_one; |
563 | master->dev.of_node = pdev->dev.of_node; | 824 | master->dev.of_node = pdev->dev.of_node; |
564 | master->auto_runtime_pm = true; | 825 | master->auto_runtime_pm = true; |
826 | master->dma_alignment = dma_get_cache_alignment(); | ||
827 | master->max_dma_len = SPI_MAX_DMA_XFER; | ||
565 | 828 | ||
566 | platform_set_drvdata(pdev, master); | 829 | platform_set_drvdata(pdev, master); |
567 | 830 | ||
@@ -573,6 +836,12 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
573 | controller->cclk = cclk; | 836 | controller->cclk = cclk; |
574 | controller->irq = irq; | 837 | controller->irq = irq; |
575 | 838 | ||
839 | ret = spi_qup_init_dma(master, res->start); | ||
840 | if (ret == -EPROBE_DEFER) | ||
841 | goto error; | ||
842 | else if (!ret) | ||
843 | master->can_dma = spi_qup_can_dma; | ||
844 | |||
576 | /* set v1 flag if device is version 1 */ | 845 | /* set v1 flag if device is version 1 */ |
577 | if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1")) | 846 | if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1")) |
578 | controller->qup_v1 = 1; | 847 | controller->qup_v1 = 1; |
@@ -609,7 +878,7 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
609 | ret = spi_qup_set_state(controller, QUP_STATE_RESET); | 878 | ret = spi_qup_set_state(controller, QUP_STATE_RESET); |
610 | if (ret) { | 879 | if (ret) { |
611 | dev_err(dev, "cannot set RESET state\n"); | 880 | dev_err(dev, "cannot set RESET state\n"); |
612 | goto error; | 881 | goto error_dma; |
613 | } | 882 | } |
614 | 883 | ||
615 | writel_relaxed(0, base + QUP_OPERATIONAL); | 884 | writel_relaxed(0, base + QUP_OPERATIONAL); |
@@ -633,7 +902,7 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
633 | ret = devm_request_irq(dev, irq, spi_qup_qup_irq, | 902 | ret = devm_request_irq(dev, irq, spi_qup_qup_irq, |
634 | IRQF_TRIGGER_HIGH, pdev->name, controller); | 903 | IRQF_TRIGGER_HIGH, pdev->name, controller); |
635 | if (ret) | 904 | if (ret) |
636 | goto error; | 905 | goto error_dma; |
637 | 906 | ||
638 | pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); | 907 | pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); |
639 | pm_runtime_use_autosuspend(dev); | 908 | pm_runtime_use_autosuspend(dev); |
@@ -648,6 +917,8 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
648 | 917 | ||
649 | disable_pm: | 918 | disable_pm: |
650 | pm_runtime_disable(&pdev->dev); | 919 | pm_runtime_disable(&pdev->dev); |
920 | error_dma: | ||
921 | spi_qup_release_dma(master); | ||
651 | error: | 922 | error: |
652 | clk_disable_unprepare(cclk); | 923 | clk_disable_unprepare(cclk); |
653 | clk_disable_unprepare(iclk); | 924 | clk_disable_unprepare(iclk); |
@@ -739,6 +1010,8 @@ static int spi_qup_remove(struct platform_device *pdev) | |||
739 | if (ret) | 1010 | if (ret) |
740 | return ret; | 1011 | return ret; |
741 | 1012 | ||
1013 | spi_qup_release_dma(master); | ||
1014 | |||
742 | clk_disable_unprepare(controller->cclk); | 1015 | clk_disable_unprepare(controller->cclk); |
743 | clk_disable_unprepare(controller->iclk); | 1016 | clk_disable_unprepare(controller->iclk); |
744 | 1017 | ||
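spi_qup_can_dma() above only opts into DMA when a channel was acquired, the buffer is cache-line aligned, the length is a whole number of block-size bytes, and the transfer would not fit in the FIFO anyway. A minimal stand-alone sketch of the per-direction test, assuming dma_align is a power of two; the addresses, block size and alignment below are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool dir_ok_for_dma(uintptr_t buf, size_t len,
			   size_t blk_sz, size_t dma_align)
{
	if (!buf)
		return true;			/* nothing to map this way */
	if (len % blk_sz)
		return false;			/* whole blocks only */
	if (buf & (dma_align - 1))
		return false;			/* cache-line aligned only */
	return true;
}

int main(void)
{
	/* assumed 16-byte block size and 64-byte cache alignment */
	printf("%d\n", dir_ok_for_dma(0x1000, 256, 16, 64));	/* 1: eligible */
	printf("%d\n", dir_ok_for_dma(0x1004, 256, 16, 64));	/* 0: misaligned */
	return 0;
}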
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 1a777dc261d6..68e7efeb9a27 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c | |||
@@ -179,6 +179,7 @@ struct rockchip_spi { | |||
179 | u8 tmode; | 179 | u8 tmode; |
180 | u8 bpw; | 180 | u8 bpw; |
181 | u8 n_bytes; | 181 | u8 n_bytes; |
182 | u8 rsd_nsecs; | ||
182 | unsigned len; | 183 | unsigned len; |
183 | u32 speed; | 184 | u32 speed; |
184 | 185 | ||
@@ -302,8 +303,8 @@ static int rockchip_spi_prepare_message(struct spi_master *master, | |||
302 | return 0; | 303 | return 0; |
303 | } | 304 | } |
304 | 305 | ||
305 | static int rockchip_spi_unprepare_message(struct spi_master *master, | 306 | static void rockchip_spi_handle_err(struct spi_master *master, |
306 | struct spi_message *msg) | 307 | struct spi_message *msg) |
307 | { | 308 | { |
308 | unsigned long flags; | 309 | unsigned long flags; |
309 | struct rockchip_spi *rs = spi_master_get_devdata(master); | 310 | struct rockchip_spi *rs = spi_master_get_devdata(master); |
@@ -313,8 +314,8 @@ static int rockchip_spi_unprepare_message(struct spi_master *master, | |||
313 | /* | 314 | /* |
314 | * For DMA mode, we need to terminate the DMA channel and flush | 315 | * For DMA mode, we need to terminate the DMA channel and flush |
315 | * the fifo for the next transfer if the DMA transfer times out. | 316 | * the fifo for the next transfer if the DMA transfer times out. |
316 | * unprepare_message() was called by core if transfer complete | 317 | * handle_err() is called by the core if the transfer failed, |
317 | * or timeout. Maybe it is reasonable for error handling here. | 318 | * so this is a reasonable place for the error handling. |
318 | */ | 319 | */ |
319 | if (rs->use_dma) { | 320 | if (rs->use_dma) { |
320 | if (rs->state & RXBUSY) { | 321 | if (rs->state & RXBUSY) { |
@@ -327,6 +328,12 @@ static int rockchip_spi_unprepare_message(struct spi_master *master, | |||
327 | } | 328 | } |
328 | 329 | ||
329 | spin_unlock_irqrestore(&rs->lock, flags); | 330 | spin_unlock_irqrestore(&rs->lock, flags); |
331 | } | ||
332 | |||
333 | static int rockchip_spi_unprepare_message(struct spi_master *master, | ||
334 | struct spi_message *msg) | ||
335 | { | ||
336 | struct rockchip_spi *rs = spi_master_get_devdata(master); | ||
330 | 337 | ||
331 | spi_enable_chip(rs, 0); | 338 | spi_enable_chip(rs, 0); |
332 | 339 | ||
@@ -493,6 +500,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs) | |||
493 | { | 500 | { |
494 | u32 div = 0; | 501 | u32 div = 0; |
495 | u32 dmacr = 0; | 502 | u32 dmacr = 0; |
503 | int rsd = 0; | ||
496 | 504 | ||
497 | u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET) | 505 | u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET) |
498 | | (CR0_SSD_ONE << CR0_SSD_OFFSET); | 506 | | (CR0_SSD_ONE << CR0_SSD_OFFSET); |
@@ -519,9 +527,23 @@ static void rockchip_spi_config(struct rockchip_spi *rs) | |||
519 | } | 527 | } |
520 | 528 | ||
521 | /* div doesn't support odd number */ | 529 | /* div doesn't support odd number */ |
522 | div = max_t(u32, rs->max_freq / rs->speed, 1); | 530 | div = DIV_ROUND_UP(rs->max_freq, rs->speed); |
523 | div = (div + 1) & 0xfffe; | 531 | div = (div + 1) & 0xfffe; |
524 | 532 | ||
533 | /* Rx sample delay is expressed in parent clock cycles (max 3) */ | ||
534 | rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8), | ||
535 | 1000000000 >> 8); | ||
536 | if (!rsd && rs->rsd_nsecs) { | ||
537 | pr_warn_once("rockchip-spi: %u Hz are too slow to express %u ns delay\n", | ||
538 | rs->max_freq, rs->rsd_nsecs); | ||
539 | } else if (rsd > 3) { | ||
540 | rsd = 3; | ||
541 | pr_warn_once("rockchip-spi: %u Hz are too fast to express %u ns delay, clamping at %u ns\n", | ||
542 | rs->max_freq, rs->rsd_nsecs, | ||
543 | rsd * 1000000000U / rs->max_freq); | ||
544 | } | ||
545 | cr0 |= rsd << CR0_RSD_OFFSET; | ||
546 | |||
525 | writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0); | 547 | writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0); |
526 | 548 | ||
527 | writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1); | 549 | writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1); |
@@ -614,6 +636,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) | |||
614 | struct rockchip_spi *rs; | 636 | struct rockchip_spi *rs; |
615 | struct spi_master *master; | 637 | struct spi_master *master; |
616 | struct resource *mem; | 638 | struct resource *mem; |
639 | u32 rsd_nsecs; | ||
617 | 640 | ||
618 | master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi)); | 641 | master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi)); |
619 | if (!master) | 642 | if (!master) |
@@ -665,6 +688,10 @@ static int rockchip_spi_probe(struct platform_device *pdev) | |||
665 | rs->dev = &pdev->dev; | 688 | rs->dev = &pdev->dev; |
666 | rs->max_freq = clk_get_rate(rs->spiclk); | 689 | rs->max_freq = clk_get_rate(rs->spiclk); |
667 | 690 | ||
691 | if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns", | ||
692 | &rsd_nsecs)) | ||
693 | rs->rsd_nsecs = rsd_nsecs; | ||
694 | |||
668 | rs->fifo_len = get_fifo_len(rs); | 695 | rs->fifo_len = get_fifo_len(rs); |
669 | if (!rs->fifo_len) { | 696 | if (!rs->fifo_len) { |
670 | dev_err(&pdev->dev, "Failed to get fifo length\n"); | 697 | dev_err(&pdev->dev, "Failed to get fifo length\n"); |
@@ -688,6 +715,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) | |||
688 | master->prepare_message = rockchip_spi_prepare_message; | 715 | master->prepare_message = rockchip_spi_prepare_message; |
689 | master->unprepare_message = rockchip_spi_unprepare_message; | 716 | master->unprepare_message = rockchip_spi_unprepare_message; |
690 | master->transfer_one = rockchip_spi_transfer_one; | 717 | master->transfer_one = rockchip_spi_transfer_one; |
718 | master->handle_err = rockchip_spi_handle_err; | ||
691 | 719 | ||
692 | rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx"); | 720 | rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx"); |
693 | if (!rs->dma_tx.ch) | 721 | if (!rs->dma_tx.ch) |
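The rx-sample-delay hunk above converts a nanosecond DT property into parent-clock cycles, shifting both operands right by 8 so the product stays within 32 bits, and then clamps the result to the 0..3 range CR0 can hold. A worked sketch of that arithmetic with an assumed 99 MHz spiclk and a 10 ns delay; the values are illustrative only:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int max_freq = 99000000;	/* assumed spiclk rate, Hz */
	unsigned int rsd_nsecs = 10;		/* rx-sample-delay-ns from DT */

	/* scale both sides by >> 8 to keep the product inside 32 bits */
	unsigned int rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (max_freq >> 8),
					     1000000000U >> 8);

	printf("rx sample delay = %u cycle(s)\n", rsd);	/* clamped to 0..3 */
	return 0;
}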
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 46ce47076e63..186924aa4740 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c | |||
@@ -177,6 +177,13 @@ | |||
177 | #define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */ | 177 | #define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */ |
178 | #define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */ | 178 | #define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */ |
179 | #define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */ | 179 | #define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */ |
180 | /* QSPI on R-Car Gen2 */ | ||
181 | #define SPBFCR_TXTRG_1B 0x00 /* 31 bytes (1 byte available) */ | ||
182 | #define SPBFCR_TXTRG_32B 0x30 /* 0 byte (32 bytes available) */ | ||
183 | #define SPBFCR_RXTRG_1B 0x00 /* 1 byte (31 bytes available) */ | ||
184 | #define SPBFCR_RXTRG_32B 0x07 /* 32 bytes (0 byte available) */ | ||
185 | |||
186 | #define QSPI_BUFFER_SIZE 32u | ||
180 | 187 | ||
181 | struct rspi_data { | 188 | struct rspi_data { |
182 | void __iomem *addr; | 189 | void __iomem *addr; |
@@ -366,6 +373,52 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size) | |||
366 | return 0; | 373 | return 0; |
367 | } | 374 | } |
368 | 375 | ||
376 | static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg) | ||
377 | { | ||
378 | u8 data; | ||
379 | |||
380 | data = rspi_read8(rspi, reg); | ||
381 | data &= ~mask; | ||
382 | data |= (val & mask); | ||
383 | rspi_write8(rspi, data, reg); | ||
384 | } | ||
385 | |||
386 | static int qspi_set_send_trigger(struct rspi_data *rspi, unsigned int len) | ||
387 | { | ||
388 | unsigned int n; | ||
389 | |||
390 | n = min(len, QSPI_BUFFER_SIZE); | ||
391 | |||
392 | if (len >= QSPI_BUFFER_SIZE) { | ||
393 | /* sets triggering number to 32 bytes */ | ||
394 | qspi_update(rspi, SPBFCR_TXTRG_MASK, | ||
395 | SPBFCR_TXTRG_32B, QSPI_SPBFCR); | ||
396 | } else { | ||
397 | /* sets triggering number to 1 byte */ | ||
398 | qspi_update(rspi, SPBFCR_TXTRG_MASK, | ||
399 | SPBFCR_TXTRG_1B, QSPI_SPBFCR); | ||
400 | } | ||
401 | |||
402 | return n; | ||
403 | } | ||
404 | |||
405 | static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len) | ||
406 | { | ||
407 | unsigned int n; | ||
408 | |||
409 | n = min(len, QSPI_BUFFER_SIZE); | ||
410 | |||
411 | if (len >= QSPI_BUFFER_SIZE) { | ||
412 | /* sets triggering number to 32 bytes */ | ||
413 | qspi_update(rspi, SPBFCR_RXTRG_MASK, | ||
414 | SPBFCR_RXTRG_32B, QSPI_SPBFCR); | ||
415 | } else { | ||
416 | /* sets triggering number to 1 byte */ | ||
417 | qspi_update(rspi, SPBFCR_RXTRG_MASK, | ||
418 | SPBFCR_RXTRG_1B, QSPI_SPBFCR); | ||
419 | } | ||
420 | } | ||
421 | |||
369 | #define set_config_register(spi, n) spi->ops->set_config_register(spi, n) | 422 | #define set_config_register(spi, n) spi->ops->set_config_register(spi, n) |
370 | 423 | ||
371 | static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable) | 424 | static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable) |
@@ -609,19 +662,29 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi, | |||
609 | return __rspi_can_dma(rspi, xfer); | 662 | return __rspi_can_dma(rspi, xfer); |
610 | } | 663 | } |
611 | 664 | ||
612 | static int rspi_common_transfer(struct rspi_data *rspi, | 665 | static int rspi_dma_check_then_transfer(struct rspi_data *rspi, |
613 | struct spi_transfer *xfer) | 666 | struct spi_transfer *xfer) |
614 | { | 667 | { |
615 | int ret; | ||
616 | |||
617 | if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { | 668 | if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { |
618 | /* rx_buf can be NULL on RSPI on SH in TX-only Mode */ | 669 | /* rx_buf can be NULL on RSPI on SH in TX-only Mode */ |
619 | ret = rspi_dma_transfer(rspi, &xfer->tx_sg, | 670 | int ret = rspi_dma_transfer(rspi, &xfer->tx_sg, |
620 | xfer->rx_buf ? &xfer->rx_sg : NULL); | 671 | xfer->rx_buf ? &xfer->rx_sg : NULL); |
621 | if (ret != -EAGAIN) | 672 | if (ret != -EAGAIN) |
622 | return ret; | 673 | return 0; |
623 | } | 674 | } |
624 | 675 | ||
676 | return -EAGAIN; | ||
677 | } | ||
678 | |||
679 | static int rspi_common_transfer(struct rspi_data *rspi, | ||
680 | struct spi_transfer *xfer) | ||
681 | { | ||
682 | int ret; | ||
683 | |||
684 | ret = rspi_dma_check_then_transfer(rspi, xfer); | ||
685 | if (ret != -EAGAIN) | ||
686 | return ret; | ||
687 | |||
625 | ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len); | 688 | ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len); |
626 | if (ret < 0) | 689 | if (ret < 0) |
627 | return ret; | 690 | return ret; |
@@ -661,12 +724,59 @@ static int rspi_rz_transfer_one(struct spi_master *master, | |||
661 | return rspi_common_transfer(rspi, xfer); | 724 | return rspi_common_transfer(rspi, xfer); |
662 | } | 725 | } |
663 | 726 | ||
727 | static int qspi_trigger_transfer_out_int(struct rspi_data *rspi, const u8 *tx, | ||
728 | u8 *rx, unsigned int len) | ||
729 | { | ||
730 | int i, n, ret; | ||
731 | int error; | ||
732 | |||
733 | while (len > 0) { | ||
734 | n = qspi_set_send_trigger(rspi, len); | ||
735 | qspi_set_receive_trigger(rspi, len); | ||
736 | if (n == QSPI_BUFFER_SIZE) { | ||
737 | error = rspi_wait_for_tx_empty(rspi); | ||
738 | if (error < 0) { | ||
739 | dev_err(&rspi->master->dev, "transmit timeout\n"); | ||
740 | return error; | ||
741 | } | ||
742 | for (i = 0; i < n; i++) | ||
743 | rspi_write_data(rspi, *tx++); | ||
744 | |||
745 | error = rspi_wait_for_rx_full(rspi); | ||
746 | if (error < 0) { | ||
747 | dev_err(&rspi->master->dev, "receive timeout\n"); | ||
748 | return error; | ||
749 | } | ||
750 | for (i = 0; i < n; i++) | ||
751 | *rx++ = rspi_read_data(rspi); | ||
752 | } else { | ||
753 | ret = rspi_pio_transfer(rspi, tx, rx, n); | ||
754 | if (ret < 0) | ||
755 | return ret; | ||
756 | } | ||
757 | len -= n; | ||
758 | } | ||
759 | |||
760 | return 0; | ||
761 | } | ||
762 | |||
664 | static int qspi_transfer_out_in(struct rspi_data *rspi, | 763 | static int qspi_transfer_out_in(struct rspi_data *rspi, |
665 | struct spi_transfer *xfer) | 764 | struct spi_transfer *xfer) |
666 | { | 765 | { |
766 | int ret; | ||
767 | |||
667 | qspi_receive_init(rspi); | 768 | qspi_receive_init(rspi); |
668 | 769 | ||
669 | return rspi_common_transfer(rspi, xfer); | 770 | ret = rspi_dma_check_then_transfer(rspi, xfer); |
771 | if (ret != -EAGAIN) | ||
772 | return ret; | ||
773 | |||
774 | ret = qspi_trigger_transfer_out_int(rspi, xfer->tx_buf, | ||
775 | xfer->rx_buf, xfer->len); | ||
776 | if (ret < 0) | ||
777 | return ret; | ||
778 | |||
779 | return 0; | ||
670 | } | 780 | } |
671 | 781 | ||
672 | static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) | 782 | static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) |
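qspi_trigger_transfer_out_int() above walks the transfer in chunks of min(len, 32): full 32-byte chunks raise the FIFO trigger to 32 bytes and move a whole burst per wait, while a shorter tail drops back to the byte-wide PIO path. A trivial sketch of that chunking loop for an assumed 70-byte transfer:

#include <stdio.h>

#define QSPI_BUFFER_SIZE 32u

int main(void)
{
	unsigned int len = 70;	/* assumed transfer length, bytes */

	while (len > 0) {
		unsigned int n = len < QSPI_BUFFER_SIZE ? len : QSPI_BUFFER_SIZE;

		if (n == QSPI_BUFFER_SIZE)
			printf("32-byte burst via the 32-byte trigger\n");
		else
			printf("%u-byte tail via PIO\n", n);
		len -= n;
	}
	return 0;
}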
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 9231c34b5a5c..b1c6731fbf27 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c | |||
@@ -324,7 +324,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) | |||
324 | 324 | ||
325 | /* Acquire DMA channels */ | 325 | /* Acquire DMA channels */ |
326 | sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter, | 326 | sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter, |
327 | (void *)sdd->rx_dma.dmach, dev, "rx"); | 327 | (void *)(long)sdd->rx_dma.dmach, dev, "rx"); |
328 | if (!sdd->rx_dma.ch) { | 328 | if (!sdd->rx_dma.ch) { |
329 | dev_err(dev, "Failed to get RX DMA channel\n"); | 329 | dev_err(dev, "Failed to get RX DMA channel\n"); |
330 | ret = -EBUSY; | 330 | ret = -EBUSY; |
@@ -333,7 +333,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) | |||
333 | spi->dma_rx = sdd->rx_dma.ch; | 333 | spi->dma_rx = sdd->rx_dma.ch; |
334 | 334 | ||
335 | sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter, | 335 | sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter, |
336 | (void *)sdd->tx_dma.dmach, dev, "tx"); | 336 | (void *)(long)sdd->tx_dma.dmach, dev, "tx"); |
337 | if (!sdd->tx_dma.ch) { | 337 | if (!sdd->tx_dma.ch) { |
338 | dev_err(dev, "Failed to get TX DMA channel\n"); | 338 | dev_err(dev, "Failed to get TX DMA channel\n"); |
339 | ret = -EBUSY; | 339 | ret = -EBUSY; |
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c index 5a56acf8a43e..36af4d48a700 100644 --- a/drivers/spi/spi-sc18is602.c +++ b/drivers/spi/spi-sc18is602.c | |||
@@ -286,7 +286,7 @@ static int sc18is602_probe(struct i2c_client *client, | |||
286 | hw->freq = SC18IS602_CLOCK; | 286 | hw->freq = SC18IS602_CLOCK; |
287 | break; | 287 | break; |
288 | } | 288 | } |
289 | master->bus_num = client->adapter->nr; | 289 | master->bus_num = np ? -1 : client->adapter->nr; |
290 | master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST; | 290 | master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST; |
291 | master->bits_per_word_mask = SPI_BPW_MASK(8); | 291 | master->bits_per_word_mask = SPI_BPW_MASK(8); |
292 | master->setup = sc18is602_setup; | 292 | master->setup = sc18is602_setup; |
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index 2faeaa7b57a8..f17c0abe299f 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c | |||
@@ -482,7 +482,7 @@ static const struct dev_pm_ops spi_st_pm = { | |||
482 | SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL) | 482 | SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL) |
483 | }; | 483 | }; |
484 | 484 | ||
485 | static struct of_device_id stm_spi_match[] = { | 485 | static const struct of_device_id stm_spi_match[] = { |
486 | { .compatible = "st,comms-ssc4-spi", }, | 486 | { .compatible = "st,comms-ssc4-spi", }, |
487 | {}, | 487 | {}, |
488 | }; | 488 | }; |
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 884a716e50cb..5c0616870358 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c | |||
@@ -101,6 +101,7 @@ struct ti_qspi { | |||
101 | #define QSPI_FLEN(n) ((n - 1) << 0) | 101 | #define QSPI_FLEN(n) ((n - 1) << 0) |
102 | 102 | ||
103 | /* STATUS REGISTER */ | 103 | /* STATUS REGISTER */ |
104 | #define BUSY 0x01 | ||
104 | #define WC 0x02 | 105 | #define WC 0x02 |
105 | 106 | ||
106 | /* INTERRUPT REGISTER */ | 107 | /* INTERRUPT REGISTER */ |
@@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi) | |||
199 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); | 200 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); |
200 | } | 201 | } |
201 | 202 | ||
203 | static inline u32 qspi_is_busy(struct ti_qspi *qspi) | ||
204 | { | ||
205 | u32 stat; | ||
206 | unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT; | ||
207 | |||
208 | stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
209 | while ((stat & BUSY) && time_after(timeout, jiffies)) { | ||
210 | cpu_relax(); | ||
211 | stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
212 | } | ||
213 | |||
214 | WARN(stat & BUSY, "qspi busy\n"); | ||
215 | return stat & BUSY; | ||
216 | } | ||
217 | |||
202 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | 218 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) |
203 | { | 219 | { |
204 | int wlen, count; | 220 | int wlen, count; |
@@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
211 | wlen = t->bits_per_word >> 3; /* in bytes */ | 227 | wlen = t->bits_per_word >> 3; /* in bytes */ |
212 | 228 | ||
213 | while (count) { | 229 | while (count) { |
230 | if (qspi_is_busy(qspi)) | ||
231 | return -EBUSY; | ||
232 | |||
214 | switch (wlen) { | 233 | switch (wlen) { |
215 | case 1: | 234 | case 1: |
216 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", | 235 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", |
@@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
266 | 285 | ||
267 | while (count) { | 286 | while (count) { |
268 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); | 287 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); |
288 | if (qspi_is_busy(qspi)) | ||
289 | return -EBUSY; | ||
290 | |||
269 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | 291 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); |
270 | if (!wait_for_completion_timeout(&qspi->transfer_complete, | 292 | if (!wait_for_completion_timeout(&qspi->transfer_complete, |
271 | QSPI_COMPLETION_TIMEOUT)) { | 293 | QSPI_COMPLETION_TIMEOUT)) { |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index c64a3e59fce3..50910d85df5a 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -16,7 +16,6 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/kmod.h> | ||
20 | #include <linux/device.h> | 19 | #include <linux/device.h> |
21 | #include <linux/init.h> | 20 | #include <linux/init.h> |
22 | #include <linux/cache.h> | 21 | #include <linux/cache.h> |
@@ -129,125 +128,11 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
129 | return 0; | 128 | return 0; |
130 | } | 129 | } |
131 | 130 | ||
132 | #ifdef CONFIG_PM_SLEEP | ||
133 | static int spi_legacy_suspend(struct device *dev, pm_message_t message) | ||
134 | { | ||
135 | int value = 0; | ||
136 | struct spi_driver *drv = to_spi_driver(dev->driver); | ||
137 | |||
138 | /* suspend will stop irqs and dma; no more i/o */ | ||
139 | if (drv) { | ||
140 | if (drv->suspend) | ||
141 | value = drv->suspend(to_spi_device(dev), message); | ||
142 | else | ||
143 | dev_dbg(dev, "... can't suspend\n"); | ||
144 | } | ||
145 | return value; | ||
146 | } | ||
147 | |||
148 | static int spi_legacy_resume(struct device *dev) | ||
149 | { | ||
150 | int value = 0; | ||
151 | struct spi_driver *drv = to_spi_driver(dev->driver); | ||
152 | |||
153 | /* resume may restart the i/o queue */ | ||
154 | if (drv) { | ||
155 | if (drv->resume) | ||
156 | value = drv->resume(to_spi_device(dev)); | ||
157 | else | ||
158 | dev_dbg(dev, "... can't resume\n"); | ||
159 | } | ||
160 | return value; | ||
161 | } | ||
162 | |||
163 | static int spi_pm_suspend(struct device *dev) | ||
164 | { | ||
165 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
166 | |||
167 | if (pm) | ||
168 | return pm_generic_suspend(dev); | ||
169 | else | ||
170 | return spi_legacy_suspend(dev, PMSG_SUSPEND); | ||
171 | } | ||
172 | |||
173 | static int spi_pm_resume(struct device *dev) | ||
174 | { | ||
175 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
176 | |||
177 | if (pm) | ||
178 | return pm_generic_resume(dev); | ||
179 | else | ||
180 | return spi_legacy_resume(dev); | ||
181 | } | ||
182 | |||
183 | static int spi_pm_freeze(struct device *dev) | ||
184 | { | ||
185 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
186 | |||
187 | if (pm) | ||
188 | return pm_generic_freeze(dev); | ||
189 | else | ||
190 | return spi_legacy_suspend(dev, PMSG_FREEZE); | ||
191 | } | ||
192 | |||
193 | static int spi_pm_thaw(struct device *dev) | ||
194 | { | ||
195 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
196 | |||
197 | if (pm) | ||
198 | return pm_generic_thaw(dev); | ||
199 | else | ||
200 | return spi_legacy_resume(dev); | ||
201 | } | ||
202 | |||
203 | static int spi_pm_poweroff(struct device *dev) | ||
204 | { | ||
205 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
206 | |||
207 | if (pm) | ||
208 | return pm_generic_poweroff(dev); | ||
209 | else | ||
210 | return spi_legacy_suspend(dev, PMSG_HIBERNATE); | ||
211 | } | ||
212 | |||
213 | static int spi_pm_restore(struct device *dev) | ||
214 | { | ||
215 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
216 | |||
217 | if (pm) | ||
218 | return pm_generic_restore(dev); | ||
219 | else | ||
220 | return spi_legacy_resume(dev); | ||
221 | } | ||
222 | #else | ||
223 | #define spi_pm_suspend NULL | ||
224 | #define spi_pm_resume NULL | ||
225 | #define spi_pm_freeze NULL | ||
226 | #define spi_pm_thaw NULL | ||
227 | #define spi_pm_poweroff NULL | ||
228 | #define spi_pm_restore NULL | ||
229 | #endif | ||
230 | |||
231 | static const struct dev_pm_ops spi_pm = { | ||
232 | .suspend = spi_pm_suspend, | ||
233 | .resume = spi_pm_resume, | ||
234 | .freeze = spi_pm_freeze, | ||
235 | .thaw = spi_pm_thaw, | ||
236 | .poweroff = spi_pm_poweroff, | ||
237 | .restore = spi_pm_restore, | ||
238 | SET_RUNTIME_PM_OPS( | ||
239 | pm_generic_runtime_suspend, | ||
240 | pm_generic_runtime_resume, | ||
241 | NULL | ||
242 | ) | ||
243 | }; | ||
244 | |||
245 | struct bus_type spi_bus_type = { | 131 | struct bus_type spi_bus_type = { |
246 | .name = "spi", | 132 | .name = "spi", |
247 | .dev_groups = spi_dev_groups, | 133 | .dev_groups = spi_dev_groups, |
248 | .match = spi_match_device, | 134 | .match = spi_match_device, |
249 | .uevent = spi_uevent, | 135 | .uevent = spi_uevent, |
250 | .pm = &spi_pm, | ||
251 | }; | 136 | }; |
252 | EXPORT_SYMBOL_GPL(spi_bus_type); | 137 | EXPORT_SYMBOL_GPL(spi_bus_type); |
253 | 138 | ||
@@ -698,6 +583,15 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) | |||
698 | rx_dev = master->dma_rx->device->dev; | 583 | rx_dev = master->dma_rx->device->dev; |
699 | 584 | ||
700 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 585 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
586 | /* | ||
587 | * Restore tx_buf and rx_buf to their original NULL values if | ||
588 | * they were replaced with the dummy buffers for this transfer. | ||
589 | */ | ||
590 | if (xfer->tx_buf == master->dummy_tx) | ||
591 | xfer->tx_buf = NULL; | ||
592 | if (xfer->rx_buf == master->dummy_rx) | ||
593 | xfer->rx_buf = NULL; | ||
594 | |||
701 | if (!master->can_dma(master, msg->spi, xfer)) | 595 | if (!master->can_dma(master, msg->spi, xfer)) |
702 | continue; | 596 | continue; |
703 | 597 | ||
@@ -851,6 +745,9 @@ out: | |||
851 | if (msg->status == -EINPROGRESS) | 745 | if (msg->status == -EINPROGRESS) |
852 | msg->status = ret; | 746 | msg->status = ret; |
853 | 747 | ||
748 | if (msg->status && master->handle_err) | ||
749 | master->handle_err(master, msg); | ||
750 | |||
854 | spi_finalize_current_message(master); | 751 | spi_finalize_current_message(master); |
855 | 752 | ||
856 | return ret; | 753 | return ret; |
@@ -1105,13 +1002,14 @@ void spi_finalize_current_message(struct spi_master *master) | |||
1105 | "failed to unprepare message: %d\n", ret); | 1002 | "failed to unprepare message: %d\n", ret); |
1106 | } | 1003 | } |
1107 | } | 1004 | } |
1005 | |||
1006 | trace_spi_message_done(mesg); | ||
1007 | |||
1108 | master->cur_msg_prepared = false; | 1008 | master->cur_msg_prepared = false; |
1109 | 1009 | ||
1110 | mesg->state = NULL; | 1010 | mesg->state = NULL; |
1111 | if (mesg->complete) | 1011 | if (mesg->complete) |
1112 | mesg->complete(mesg->context); | 1012 | mesg->complete(mesg->context); |
1113 | |||
1114 | trace_spi_message_done(mesg); | ||
1115 | } | 1013 | } |
1116 | EXPORT_SYMBOL_GPL(spi_finalize_current_message); | 1014 | EXPORT_SYMBOL_GPL(spi_finalize_current_message); |
1117 | 1015 | ||
@@ -1359,7 +1257,6 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc) | |||
1359 | spi->dev.of_node = nc; | 1257 | spi->dev.of_node = nc; |
1360 | 1258 | ||
1361 | /* Register the new device */ | 1259 | /* Register the new device */ |
1362 | request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias); | ||
1363 | rc = spi_add_device(spi); | 1260 | rc = spi_add_device(spi); |
1364 | if (rc) { | 1261 | if (rc) { |
1365 | dev_err(&master->dev, "spi_device register error %s\n", | 1262 | dev_err(&master->dev, "spi_device register error %s\n", |
@@ -1893,6 +1790,8 @@ int spi_setup(struct spi_device *spi) | |||
1893 | if (!spi->max_speed_hz) | 1790 | if (!spi->max_speed_hz) |
1894 | spi->max_speed_hz = spi->master->max_speed_hz; | 1791 | spi->max_speed_hz = spi->master->max_speed_hz; |
1895 | 1792 | ||
1793 | spi_set_cs(spi, false); | ||
1794 | |||
1896 | if (spi->master->setup) | 1795 | if (spi->master->setup) |
1897 | status = spi->master->setup(spi); | 1796 | status = spi->master->setup(spi); |
1898 | 1797 | ||
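The core changes above add an optional handle_err() callback that is invoked when a message ends with a non-zero status, just before the message is finalized; the spi-rockchip hunk earlier uses it to terminate its DMA channels. A skeletal, purely hypothetical controller hook showing the expected shape (kernel-style, not a stand-alone program):

#include <linux/spi/spi.h>

/* hypothetical driver callback; the name and body are illustrative only */
static void demo_spi_handle_err(struct spi_master *master,
				struct spi_message *msg)
{
	/* runs after msg->status has been set to the failing error code */
	dev_warn(&master->dev, "message failed with %d, cleaning up\n",
		 msg->status);
	/* a real driver would terminate DMA and/or flush its FIFOs here */
}

/* wired up at probe time alongside the other callbacks:
 *	master->handle_err = demo_spi_handle_err;
 */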
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 4eb7a980e670..92c909eed6b5 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -223,7 +223,7 @@ static int spidev_message(struct spidev_data *spidev, | |||
223 | struct spi_transfer *k_xfers; | 223 | struct spi_transfer *k_xfers; |
224 | struct spi_transfer *k_tmp; | 224 | struct spi_transfer *k_tmp; |
225 | struct spi_ioc_transfer *u_tmp; | 225 | struct spi_ioc_transfer *u_tmp; |
226 | unsigned n, total; | 226 | unsigned n, total, tx_total, rx_total; |
227 | u8 *tx_buf, *rx_buf; | 227 | u8 *tx_buf, *rx_buf; |
228 | int status = -EFAULT; | 228 | int status = -EFAULT; |
229 | 229 | ||
@@ -239,33 +239,52 @@ static int spidev_message(struct spidev_data *spidev, | |||
239 | tx_buf = spidev->tx_buffer; | 239 | tx_buf = spidev->tx_buffer; |
240 | rx_buf = spidev->rx_buffer; | 240 | rx_buf = spidev->rx_buffer; |
241 | total = 0; | 241 | total = 0; |
242 | tx_total = 0; | ||
243 | rx_total = 0; | ||
242 | for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; | 244 | for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; |
243 | n; | 245 | n; |
244 | n--, k_tmp++, u_tmp++) { | 246 | n--, k_tmp++, u_tmp++) { |
245 | k_tmp->len = u_tmp->len; | 247 | k_tmp->len = u_tmp->len; |
246 | 248 | ||
247 | total += k_tmp->len; | 249 | total += k_tmp->len; |
248 | if (total > bufsiz) { | 250 | /* Since the function returns the total length of transfers |
251 | * on success, restrict the total to positive int values to | ||
252 | * avoid the return value looking like an error. Also check | ||
253 | * each transfer length to avoid arithmetic overflow. | ||
254 | */ | ||
255 | if (total > INT_MAX || k_tmp->len > INT_MAX) { | ||
249 | status = -EMSGSIZE; | 256 | status = -EMSGSIZE; |
250 | goto done; | 257 | goto done; |
251 | } | 258 | } |
252 | 259 | ||
253 | if (u_tmp->rx_buf) { | 260 | if (u_tmp->rx_buf) { |
261 | /* this transfer needs space in RX bounce buffer */ | ||
262 | rx_total += k_tmp->len; | ||
263 | if (rx_total > bufsiz) { | ||
264 | status = -EMSGSIZE; | ||
265 | goto done; | ||
266 | } | ||
254 | k_tmp->rx_buf = rx_buf; | 267 | k_tmp->rx_buf = rx_buf; |
255 | if (!access_ok(VERIFY_WRITE, (u8 __user *) | 268 | if (!access_ok(VERIFY_WRITE, (u8 __user *) |
256 | (uintptr_t) u_tmp->rx_buf, | 269 | (uintptr_t) u_tmp->rx_buf, |
257 | u_tmp->len)) | 270 | u_tmp->len)) |
258 | goto done; | 271 | goto done; |
272 | rx_buf += k_tmp->len; | ||
259 | } | 273 | } |
260 | if (u_tmp->tx_buf) { | 274 | if (u_tmp->tx_buf) { |
275 | /* this transfer needs space in TX bounce buffer */ | ||
276 | tx_total += k_tmp->len; | ||
277 | if (tx_total > bufsiz) { | ||
278 | status = -EMSGSIZE; | ||
279 | goto done; | ||
280 | } | ||
261 | k_tmp->tx_buf = tx_buf; | 281 | k_tmp->tx_buf = tx_buf; |
262 | if (copy_from_user(tx_buf, (const u8 __user *) | 282 | if (copy_from_user(tx_buf, (const u8 __user *) |
263 | (uintptr_t) u_tmp->tx_buf, | 283 | (uintptr_t) u_tmp->tx_buf, |
264 | u_tmp->len)) | 284 | u_tmp->len)) |
265 | goto done; | 285 | goto done; |
286 | tx_buf += k_tmp->len; | ||
266 | } | 287 | } |
267 | tx_buf += k_tmp->len; | ||
268 | rx_buf += k_tmp->len; | ||
269 | 288 | ||
270 | k_tmp->cs_change = !!u_tmp->cs_change; | 289 | k_tmp->cs_change = !!u_tmp->cs_change; |
271 | k_tmp->tx_nbits = u_tmp->tx_nbits; | 290 | k_tmp->tx_nbits = u_tmp->tx_nbits; |
@@ -303,8 +322,8 @@ static int spidev_message(struct spidev_data *spidev, | |||
303 | status = -EFAULT; | 322 | status = -EFAULT; |
304 | goto done; | 323 | goto done; |
305 | } | 324 | } |
325 | rx_buf += u_tmp->len; | ||
306 | } | 326 | } |
307 | rx_buf += u_tmp->len; | ||
308 | } | 327 | } |
309 | status = total; | 328 | status = total; |
310 | 329 | ||
@@ -684,6 +703,14 @@ static const struct file_operations spidev_fops = { | |||
684 | 703 | ||
685 | static struct class *spidev_class; | 704 | static struct class *spidev_class; |
686 | 705 | ||
706 | #ifdef CONFIG_OF | ||
707 | static const struct of_device_id spidev_dt_ids[] = { | ||
708 | { .compatible = "rohm,dh2228fv" }, | ||
709 | {}, | ||
710 | }; | ||
711 | MODULE_DEVICE_TABLE(of, spidev_dt_ids); | ||
712 | #endif | ||
713 | |||
687 | /*-------------------------------------------------------------------------*/ | 714 | /*-------------------------------------------------------------------------*/ |
688 | 715 | ||
689 | static int spidev_probe(struct spi_device *spi) | 716 | static int spidev_probe(struct spi_device *spi) |
@@ -692,6 +719,17 @@ static int spidev_probe(struct spi_device *spi) | |||
692 | int status; | 719 | int status; |
693 | unsigned long minor; | 720 | unsigned long minor; |
694 | 721 | ||
722 | /* | ||
723 | * spidev should never be referenced in DT without a specific | ||
724 | * compatible string; it is a Linux implementation detail | ||
725 | * rather than a description of the hardware. | ||
726 | */ | ||
727 | if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) { | ||
728 | dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n"); | ||
729 | WARN_ON(spi->dev.of_node && | ||
730 | !of_match_device(spidev_dt_ids, &spi->dev)); | ||
731 | } | ||
732 | |||
695 | /* Allocate driver data */ | 733 | /* Allocate driver data */ |
696 | spidev = kzalloc(sizeof(*spidev), GFP_KERNEL); | 734 | spidev = kzalloc(sizeof(*spidev), GFP_KERNEL); |
697 | if (!spidev) | 735 | if (!spidev) |
@@ -758,13 +796,6 @@ static int spidev_remove(struct spi_device *spi) | |||
758 | return 0; | 796 | return 0; |
759 | } | 797 | } |
760 | 798 | ||
761 | static const struct of_device_id spidev_dt_ids[] = { | ||
762 | { .compatible = "rohm,dh2228fv" }, | ||
763 | {}, | ||
764 | }; | ||
765 | |||
766 | MODULE_DEVICE_TABLE(of, spidev_dt_ids); | ||
767 | |||
768 | static struct spi_driver spidev_spi_driver = { | 799 | static struct spi_driver spidev_spi_driver = { |
769 | .driver = { | 800 | .driver = { |
770 | .name = "spidev", | 801 | .name = "spidev", |
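The spidev changes above stop charging every transfer against a single bounce-buffer budget: TX and RX usage are now accumulated separately (a half-duplex transfer only consumes one of the two buffers), and the grand total is capped at INT_MAX so the returned length can never be mistaken for a negative error code. A small sketch of the new accounting with two assumed transfers:

#include <stdio.h>

int main(void)
{
	struct { unsigned int len, has_tx, has_rx; } xfers[] = {
		{ 4096, 1, 0 },		/* write-only: command plus data */
		{ 2048, 0, 1 },		/* read-back only */
	};
	unsigned int total = 0, tx_total = 0, rx_total = 0;

	for (unsigned int i = 0; i < 2; i++) {
		total += xfers[i].len;
		if (xfers[i].has_tx)
			tx_total += xfers[i].len;
		if (xfers[i].has_rx)
			rx_total += xfers[i].len;
	}

	/* old code compared total (6144) against bufsiz; now each bounce
	 * buffer is checked only against what it actually has to hold */
	printf("total=%u tx=%u rx=%u\n", total, tx_total, rx_total);
	return 0;
}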
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index 9800c01e6fb9..3f72451d2de0 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c | |||
@@ -426,7 +426,6 @@ static int pci171x_ai_insn_read(struct comedi_device *dev, | |||
426 | unsigned int *data) | 426 | unsigned int *data) |
427 | { | 427 | { |
428 | struct pci1710_private *devpriv = dev->private; | 428 | struct pci1710_private *devpriv = dev->private; |
429 | unsigned int chan = CR_CHAN(insn->chanspec); | ||
430 | int ret = 0; | 429 | int ret = 0; |
431 | int i; | 430 | int i; |
432 | 431 | ||
@@ -447,7 +446,7 @@ static int pci171x_ai_insn_read(struct comedi_device *dev, | |||
447 | if (ret) | 446 | if (ret) |
448 | break; | 447 | break; |
449 | 448 | ||
450 | ret = pci171x_ai_read_sample(dev, s, chan, &val); | 449 | ret = pci171x_ai_read_sample(dev, s, 0, &val); |
451 | if (ret) | 450 | if (ret) |
452 | break; | 451 | break; |
453 | 452 | ||
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c index dbdea71d6b95..e856f01ca077 100644 --- a/drivers/staging/comedi/drivers/comedi_isadma.c +++ b/drivers/staging/comedi/drivers/comedi_isadma.c | |||
@@ -91,9 +91,10 @@ unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan, | |||
91 | stalled++; | 91 | stalled++; |
92 | if (stalled > 10) | 92 | if (stalled > 10) |
93 | break; | 93 | break; |
94 | } else { | ||
95 | residue = new_residue; | ||
96 | stalled = 0; | ||
94 | } | 97 | } |
95 | residue = new_residue; | ||
96 | stalled = 0; | ||
97 | } | 98 | } |
98 | return residue; | 99 | return residue; |
99 | } | 100 | } |
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index e37118321a27..a0906685e27f 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c | |||
@@ -103,11 +103,6 @@ enum vmk80xx_model { | |||
103 | VMK8061_MODEL | 103 | VMK8061_MODEL |
104 | }; | 104 | }; |
105 | 105 | ||
106 | struct firmware_version { | ||
107 | unsigned char ic3_vers[32]; /* USB-Controller */ | ||
108 | unsigned char ic6_vers[32]; /* CPU */ | ||
109 | }; | ||
110 | |||
111 | static const struct comedi_lrange vmk8061_range = { | 106 | static const struct comedi_lrange vmk8061_range = { |
112 | 2, { | 107 | 2, { |
113 | UNI_RANGE(5), | 108 | UNI_RANGE(5), |
@@ -156,68 +151,12 @@ static const struct vmk80xx_board vmk80xx_boardinfo[] = { | |||
156 | struct vmk80xx_private { | 151 | struct vmk80xx_private { |
157 | struct usb_endpoint_descriptor *ep_rx; | 152 | struct usb_endpoint_descriptor *ep_rx; |
158 | struct usb_endpoint_descriptor *ep_tx; | 153 | struct usb_endpoint_descriptor *ep_tx; |
159 | struct firmware_version fw; | ||
160 | struct semaphore limit_sem; | 154 | struct semaphore limit_sem; |
161 | unsigned char *usb_rx_buf; | 155 | unsigned char *usb_rx_buf; |
162 | unsigned char *usb_tx_buf; | 156 | unsigned char *usb_tx_buf; |
163 | enum vmk80xx_model model; | 157 | enum vmk80xx_model model; |
164 | }; | 158 | }; |
165 | 159 | ||
166 | static int vmk80xx_check_data_link(struct comedi_device *dev) | ||
167 | { | ||
168 | struct vmk80xx_private *devpriv = dev->private; | ||
169 | struct usb_device *usb = comedi_to_usb_dev(dev); | ||
170 | unsigned int tx_pipe; | ||
171 | unsigned int rx_pipe; | ||
172 | unsigned char tx[1]; | ||
173 | unsigned char rx[2]; | ||
174 | |||
175 | tx_pipe = usb_sndbulkpipe(usb, 0x01); | ||
176 | rx_pipe = usb_rcvbulkpipe(usb, 0x81); | ||
177 | |||
178 | tx[0] = VMK8061_CMD_RD_PWR_STAT; | ||
179 | |||
180 | /* | ||
181 | * Check that IC6 (PIC16F871) is powered and | ||
182 | * running and the data link between IC3 and | ||
183 | * IC6 is working properly | ||
184 | */ | ||
185 | usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval); | ||
186 | usb_bulk_msg(usb, rx_pipe, rx, 2, NULL, HZ * 10); | ||
187 | |||
188 | return (int)rx[1]; | ||
189 | } | ||
190 | |||
191 | static void vmk80xx_read_eeprom(struct comedi_device *dev, int flag) | ||
192 | { | ||
193 | struct vmk80xx_private *devpriv = dev->private; | ||
194 | struct usb_device *usb = comedi_to_usb_dev(dev); | ||
195 | unsigned int tx_pipe; | ||
196 | unsigned int rx_pipe; | ||
197 | unsigned char tx[1]; | ||
198 | unsigned char rx[64]; | ||
199 | int cnt; | ||
200 | |||
201 | tx_pipe = usb_sndbulkpipe(usb, 0x01); | ||
202 | rx_pipe = usb_rcvbulkpipe(usb, 0x81); | ||
203 | |||
204 | tx[0] = VMK8061_CMD_RD_VERSION; | ||
205 | |||
206 | /* | ||
207 | * Read the firmware version info of IC3 and | ||
208 | * IC6 from the internal EEPROM of the IC | ||
209 | */ | ||
210 | usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval); | ||
211 | usb_bulk_msg(usb, rx_pipe, rx, 64, &cnt, HZ * 10); | ||
212 | |||
213 | rx[cnt] = '\0'; | ||
214 | |||
215 | if (flag & IC3_VERSION) | ||
216 | strncpy(devpriv->fw.ic3_vers, rx + 1, 24); | ||
217 | else /* IC6_VERSION */ | ||
218 | strncpy(devpriv->fw.ic6_vers, rx + 25, 24); | ||
219 | } | ||
220 | |||
221 | static void vmk80xx_do_bulk_msg(struct comedi_device *dev) | 160 | static void vmk80xx_do_bulk_msg(struct comedi_device *dev) |
222 | { | 161 | { |
223 | struct vmk80xx_private *devpriv = dev->private; | 162 | struct vmk80xx_private *devpriv = dev->private; |
@@ -878,16 +817,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev, | |||
878 | 817 | ||
879 | usb_set_intfdata(intf, devpriv); | 818 | usb_set_intfdata(intf, devpriv); |
880 | 819 | ||
881 | if (devpriv->model == VMK8061_MODEL) { | ||
882 | vmk80xx_read_eeprom(dev, IC3_VERSION); | ||
883 | dev_info(&intf->dev, "%s\n", devpriv->fw.ic3_vers); | ||
884 | |||
885 | if (vmk80xx_check_data_link(dev)) { | ||
886 | vmk80xx_read_eeprom(dev, IC6_VERSION); | ||
887 | dev_info(&intf->dev, "%s\n", devpriv->fw.ic6_vers); | ||
888 | } | ||
889 | } | ||
890 | |||
891 | if (devpriv->model == VMK8055_MODEL) | 820 | if (devpriv->model == VMK8055_MODEL) |
892 | vmk80xx_reset_device(dev); | 821 | vmk80xx_reset_device(dev); |
893 | 822 | ||
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig index 24183028bd71..6d5b38d69578 100644 --- a/drivers/staging/iio/Kconfig +++ b/drivers/staging/iio/Kconfig | |||
@@ -38,6 +38,7 @@ config IIO_SIMPLE_DUMMY_EVENTS | |||
38 | config IIO_SIMPLE_DUMMY_BUFFER | 38 | config IIO_SIMPLE_DUMMY_BUFFER |
39 | bool "Buffered capture support" | 39 | bool "Buffered capture support" |
40 | select IIO_BUFFER | 40 | select IIO_BUFFER |
41 | select IIO_TRIGGER | ||
41 | select IIO_KFIFO_BUF | 42 | select IIO_KFIFO_BUF |
42 | help | 43 | help |
43 | Add buffered data capture to the simple dummy driver. | 44 | Add buffered data capture to the simple dummy driver. |
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index d9d6fad7cb00..816174388f13 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c | |||
@@ -214,11 +214,17 @@ struct mxs_lradc { | |||
214 | unsigned long is_divided; | 214 | unsigned long is_divided; |
215 | 215 | ||
216 | /* | 216 | /* |
217 | * Touchscreen LRADC channels receives a private slot in the CTRL4 | 217 | * When the touchscreen is enabled, we give it two private virtual |
218 | * register, the slot #7. Therefore only 7 slots instead of 8 in the | 218 | * channels: #6 and #7. This means that only 6 virtual channels (instead |
219 | * CTRL4 register can be mapped to LRADC channels when using the | 219 | * of 8) will be available for buffered capture. |
220 | * touchscreen. | 220 | */ |
221 | * | 221 | #define TOUCHSCREEN_VCHANNEL1 7 |
222 | #define TOUCHSCREEN_VCHANNEL2 6 | ||
223 | #define BUFFER_VCHANS_LIMITED 0x3f | ||
224 | #define BUFFER_VCHANS_ALL 0xff | ||
225 | u8 buffer_vchans; | ||
226 | |||
227 | /* | ||
222 | * Furthermore, certain LRADC channels are shared between touchscreen | 228 | * Furthermore, certain LRADC channels are shared between touchscreen |
223 | * and/or touch-buttons and generic LRADC block. Therefore when using | 229 | * and/or touch-buttons and generic LRADC block. Therefore when using |
224 | * either of these, these channels are not available for the regular | 230 | * either of these, these channels are not available for the regular |
@@ -342,6 +348,9 @@ struct mxs_lradc { | |||
342 | #define LRADC_CTRL4 0x140 | 348 | #define LRADC_CTRL4 0x140 |
343 | #define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) | 349 | #define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) |
344 | #define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) | 350 | #define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) |
351 | #define LRADC_CTRL4_LRADCSELECT(n, x) \ | ||
352 | (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \ | ||
353 | LRADC_CTRL4_LRADCSELECT_MASK(n)) | ||
345 | 354 | ||
346 | #define LRADC_RESOLUTION 12 | 355 | #define LRADC_RESOLUTION 12 |
347 | #define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1) | 356 | #define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1) |
@@ -416,6 +425,14 @@ static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc) | |||
416 | LRADC_STATUS_TOUCH_DETECT_RAW); | 425 | LRADC_STATUS_TOUCH_DETECT_RAW); |
417 | } | 426 | } |
418 | 427 | ||
428 | static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch, | ||
429 | unsigned ch) | ||
430 | { | ||
431 | mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch), | ||
432 | LRADC_CTRL4); | ||
433 | mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4); | ||
434 | } | ||
435 | |||
419 | static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) | 436 | static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) |
420 | { | 437 | { |
421 | /* | 438 | /* |
@@ -450,12 +467,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) | |||
450 | LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), | 467 | LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), |
451 | LRADC_DELAY(3)); | 468 | LRADC_DELAY(3)); |
452 | 469 | ||
453 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | | 470 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1); |
454 | LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) | | ||
455 | LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); | ||
456 | 471 | ||
457 | /* wake us again, when the complete conversion is done */ | ||
458 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1); | ||
459 | /* | 472 | /* |
460 | * after changing the touchscreen plates setting | 473 | * after changing the touchscreen plates setting |
461 | * the signals need some initial time to settle. Start the | 474 | * the signals need some initial time to settle. Start the |
@@ -509,12 +522,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1, | |||
509 | LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), | 522 | LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), |
510 | LRADC_DELAY(3)); | 523 | LRADC_DELAY(3)); |
511 | 524 | ||
512 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | | 525 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1); |
513 | LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) | | ||
514 | LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); | ||
515 | 526 | ||
516 | /* wake us again, when the conversions are done */ | ||
517 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1); | ||
518 | /* | 527 | /* |
519 | * after changing the touchscreen plates setting | 528 | * after changing the touchscreen plates setting |
520 | * the signals need some initial time to settle. Start the | 529 | * the signals need some initial time to settle. Start the |
@@ -580,36 +589,6 @@ static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc, | |||
580 | #define TS_CH_XM 4 | 589 | #define TS_CH_XM 4 |
581 | #define TS_CH_YM 5 | 590 | #define TS_CH_YM 5 |
582 | 591 | ||
583 | static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc) | ||
584 | { | ||
585 | u32 reg; | ||
586 | int val; | ||
587 | |||
588 | reg = readl(lradc->base + LRADC_CTRL1); | ||
589 | |||
590 | /* only channels 3 to 5 are of interest here */ | ||
591 | if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) { | ||
592 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) | | ||
593 | LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1); | ||
594 | val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP); | ||
595 | } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) { | ||
596 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) | | ||
597 | LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1); | ||
598 | val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM); | ||
599 | } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) { | ||
600 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) | | ||
601 | LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1); | ||
602 | val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM); | ||
603 | } else { | ||
604 | return -EIO; | ||
605 | } | ||
606 | |||
607 | mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); | ||
608 | mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); | ||
609 | |||
610 | return val; | ||
611 | } | ||
612 | |||
613 | /* | 592 | /* |
614 | * YP(open)--+-------------+ | 593 | * YP(open)--+-------------+ |
615 | * | |--+ | 594 | * | |--+ |
@@ -653,7 +632,8 @@ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc) | |||
653 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0); | 632 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0); |
654 | 633 | ||
655 | lradc->cur_plate = LRADC_SAMPLE_X; | 634 | lradc->cur_plate = LRADC_SAMPLE_X; |
656 | mxs_lradc_setup_ts_channel(lradc, TS_CH_YP); | 635 | mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP); |
636 | mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1); | ||
657 | } | 637 | } |
658 | 638 | ||
659 | /* | 639 | /* |
@@ -674,7 +654,8 @@ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc) | |||
674 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0); | 654 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0); |
675 | 655 | ||
676 | lradc->cur_plate = LRADC_SAMPLE_Y; | 656 | lradc->cur_plate = LRADC_SAMPLE_Y; |
677 | mxs_lradc_setup_ts_channel(lradc, TS_CH_XM); | 657 | mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM); |
658 | mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1); | ||
678 | } | 659 | } |
679 | 660 | ||
680 | /* | 661 | /* |
@@ -695,7 +676,10 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc) | |||
695 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0); | 676 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0); |
696 | 677 | ||
697 | lradc->cur_plate = LRADC_SAMPLE_PRESSURE; | 678 | lradc->cur_plate = LRADC_SAMPLE_PRESSURE; |
698 | mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); | 679 | mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM); |
680 | mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP); | ||
681 | mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2, | ||
682 | TOUCHSCREEN_VCHANNEL1); | ||
699 | } | 683 | } |
700 | 684 | ||
701 | static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) | 685 | static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) |
@@ -708,6 +692,19 @@ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) | |||
708 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); | 692 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); |
709 | } | 693 | } |
710 | 694 | ||
695 | static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc) | ||
696 | { | ||
697 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, | ||
698 | LRADC_CTRL1); | ||
699 | mxs_lradc_reg_set(lradc, | ||
700 | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1); | ||
701 | /* | ||
702 | * start with the Y-pos, because it uses nearly the same plate | ||
703 | * settings like the touch detection | ||
704 | */ | ||
705 | mxs_lradc_prepare_y_pos(lradc); | ||
706 | } | ||
707 | |||
711 | static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc) | 708 | static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc) |
712 | { | 709 | { |
713 | input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos); | 710 | input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos); |
@@ -725,10 +722,12 @@ static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc) | |||
725 | * start a dummy conversion to burn time to settle the signals | 722 | * start a dummy conversion to burn time to settle the signals |
726 | * note: we are not interested in the conversion's value | 723 | * note: we are not interested in the conversion's value |
727 | */ | 724 | */ |
728 | mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5)); | 725 | mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1)); |
729 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); | 726 | mxs_lradc_reg_clear(lradc, |
730 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1); | 727 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | |
731 | mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) | | 728 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1); |
729 | mxs_lradc_reg_wrt(lradc, | ||
730 | LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) | | ||
732 | LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */ | 731 | LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */ |
733 | LRADC_DELAY(2)); | 732 | LRADC_DELAY(2)); |
734 | } | 733 | } |
@@ -760,59 +759,45 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid) | |||
760 | 759 | ||
761 | /* if it is released, wait for the next touch via IRQ */ | 760 | /* if it is released, wait for the next touch via IRQ */ |
762 | lradc->cur_plate = LRADC_TOUCH; | 761 | lradc->cur_plate = LRADC_TOUCH; |
763 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1); | 762 | mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); |
763 | mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); | ||
764 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ | | ||
765 | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) | | ||
766 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1); | ||
764 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); | 767 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); |
765 | } | 768 | } |
766 | 769 | ||
767 | /* touchscreen's state machine */ | 770 | /* touchscreen's state machine */ |
768 | static void mxs_lradc_handle_touch(struct mxs_lradc *lradc) | 771 | static void mxs_lradc_handle_touch(struct mxs_lradc *lradc) |
769 | { | 772 | { |
770 | int val; | ||
771 | |||
772 | switch (lradc->cur_plate) { | 773 | switch (lradc->cur_plate) { |
773 | case LRADC_TOUCH: | 774 | case LRADC_TOUCH: |
774 | /* | 775 | if (mxs_lradc_check_touch_event(lradc)) |
775 | * start with the Y-pos, because it uses nearly the same plate | 776 | mxs_lradc_start_touch_event(lradc); |
776 | * settings like the touch detection | ||
777 | */ | ||
778 | if (mxs_lradc_check_touch_event(lradc)) { | ||
779 | mxs_lradc_reg_clear(lradc, | ||
780 | LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, | ||
781 | LRADC_CTRL1); | ||
782 | mxs_lradc_prepare_y_pos(lradc); | ||
783 | } | ||
784 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, | 777 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, |
785 | LRADC_CTRL1); | 778 | LRADC_CTRL1); |
786 | return; | 779 | return; |
787 | 780 | ||
788 | case LRADC_SAMPLE_Y: | 781 | case LRADC_SAMPLE_Y: |
789 | val = mxs_lradc_read_ts_channel(lradc); | 782 | lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc, |
790 | if (val < 0) { | 783 | TOUCHSCREEN_VCHANNEL1); |
791 | mxs_lradc_enable_touch_detection(lradc); /* re-start */ | ||
792 | return; | ||
793 | } | ||
794 | lradc->ts_y_pos = val; | ||
795 | mxs_lradc_prepare_x_pos(lradc); | 784 | mxs_lradc_prepare_x_pos(lradc); |
796 | return; | 785 | return; |
797 | 786 | ||
798 | case LRADC_SAMPLE_X: | 787 | case LRADC_SAMPLE_X: |
799 | val = mxs_lradc_read_ts_channel(lradc); | 788 | lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc, |
800 | if (val < 0) { | 789 | TOUCHSCREEN_VCHANNEL1); |
801 | mxs_lradc_enable_touch_detection(lradc); /* re-start */ | ||
802 | return; | ||
803 | } | ||
804 | lradc->ts_x_pos = val; | ||
805 | mxs_lradc_prepare_pressure(lradc); | 790 | mxs_lradc_prepare_pressure(lradc); |
806 | return; | 791 | return; |
807 | 792 | ||
808 | case LRADC_SAMPLE_PRESSURE: | 793 | case LRADC_SAMPLE_PRESSURE: |
809 | lradc->ts_pressure = | 794 | lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc, |
810 | mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); | 795 | TOUCHSCREEN_VCHANNEL2, |
796 | TOUCHSCREEN_VCHANNEL1); | ||
811 | mxs_lradc_complete_touch_event(lradc); | 797 | mxs_lradc_complete_touch_event(lradc); |
812 | return; | 798 | return; |
813 | 799 | ||
814 | case LRADC_SAMPLE_VALID: | 800 | case LRADC_SAMPLE_VALID: |
815 | val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */ | ||
816 | mxs_lradc_finish_touch_event(lradc, 1); | 801 | mxs_lradc_finish_touch_event(lradc, 1); |
817 | break; | 802 | break; |
818 | } | 803 | } |
@@ -844,9 +829,9 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val) | |||
844 | * used if doing raw sampling. | 829 | * used if doing raw sampling. |
845 | */ | 830 | */ |
846 | if (lradc->soc == IMX28_LRADC) | 831 | if (lradc->soc == IMX28_LRADC) |
847 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, | 832 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), |
848 | LRADC_CTRL1); | 833 | LRADC_CTRL1); |
849 | mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); | 834 | mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0); |
850 | 835 | ||
851 | /* Enable / disable the divider per requirement */ | 836 | /* Enable / disable the divider per requirement */ |
852 | if (test_bit(chan, &lradc->is_divided)) | 837 | if (test_bit(chan, &lradc->is_divided)) |
@@ -1090,9 +1075,8 @@ static void mxs_lradc_disable_ts(struct mxs_lradc *lradc) | |||
1090 | { | 1075 | { |
1091 | /* stop all interrupts from firing */ | 1076 | /* stop all interrupts from firing */ |
1092 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN | | 1077 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN | |
1093 | LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) | | 1078 | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) | |
1094 | LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5), | 1079 | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1); |
1095 | LRADC_CTRL1); | ||
1096 | 1080 | ||
1097 | /* Power-down touchscreen touch-detect circuitry. */ | 1081 | /* Power-down touchscreen touch-detect circuitry. */ |
1098 | mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); | 1082 | mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); |
@@ -1158,26 +1142,31 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data) | |||
1158 | struct iio_dev *iio = data; | 1142 | struct iio_dev *iio = data; |
1159 | struct mxs_lradc *lradc = iio_priv(iio); | 1143 | struct mxs_lradc *lradc = iio_priv(iio); |
1160 | unsigned long reg = readl(lradc->base + LRADC_CTRL1); | 1144 | unsigned long reg = readl(lradc->base + LRADC_CTRL1); |
1145 | uint32_t clr_irq = mxs_lradc_irq_mask(lradc); | ||
1161 | const uint32_t ts_irq_mask = | 1146 | const uint32_t ts_irq_mask = |
1162 | LRADC_CTRL1_TOUCH_DETECT_IRQ | | 1147 | LRADC_CTRL1_TOUCH_DETECT_IRQ | |
1163 | LRADC_CTRL1_LRADC_IRQ(2) | | 1148 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | |
1164 | LRADC_CTRL1_LRADC_IRQ(3) | | 1149 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2); |
1165 | LRADC_CTRL1_LRADC_IRQ(4) | | ||
1166 | LRADC_CTRL1_LRADC_IRQ(5); | ||
1167 | 1150 | ||
1168 | if (!(reg & mxs_lradc_irq_mask(lradc))) | 1151 | if (!(reg & mxs_lradc_irq_mask(lradc))) |
1169 | return IRQ_NONE; | 1152 | return IRQ_NONE; |
1170 | 1153 | ||
1171 | if (lradc->use_touchscreen && (reg & ts_irq_mask)) | 1154 | if (lradc->use_touchscreen && (reg & ts_irq_mask)) { |
1172 | mxs_lradc_handle_touch(lradc); | 1155 | mxs_lradc_handle_touch(lradc); |
1173 | 1156 | ||
1174 | if (iio_buffer_enabled(iio)) | 1157 | /* Make sure we don't clear the next conversion's interrupt. */ |
1175 | iio_trigger_poll(iio->trig); | 1158 | clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | |
1176 | else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) | 1159 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2)); |
1160 | } | ||
1161 | |||
1162 | if (iio_buffer_enabled(iio)) { | ||
1163 | if (reg & lradc->buffer_vchans) | ||
1164 | iio_trigger_poll(iio->trig); | ||
1165 | } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) { | ||
1177 | complete(&lradc->completion); | 1166 | complete(&lradc->completion); |
1167 | } | ||
1178 | 1168 | ||
1179 | mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc), | 1169 | mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1); |
1180 | LRADC_CTRL1); | ||
1181 | 1170 | ||
1182 | return IRQ_HANDLED; | 1171 | return IRQ_HANDLED; |
1183 | } | 1172 | } |
@@ -1289,9 +1278,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio) | |||
1289 | } | 1278 | } |
1290 | 1279 | ||
1291 | if (lradc->soc == IMX28_LRADC) | 1280 | if (lradc->soc == IMX28_LRADC) |
1292 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, | 1281 | mxs_lradc_reg_clear(lradc, |
1293 | LRADC_CTRL1); | 1282 | lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, |
1294 | mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); | 1283 | LRADC_CTRL1); |
1284 | mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0); | ||
1295 | 1285 | ||
1296 | for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { | 1286 | for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { |
1297 | ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); | 1287 | ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); |
@@ -1324,10 +1314,11 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio) | |||
1324 | mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK | | 1314 | mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK | |
1325 | LRADC_DELAY_KICK, LRADC_DELAY(0)); | 1315 | LRADC_DELAY_KICK, LRADC_DELAY(0)); |
1326 | 1316 | ||
1327 | mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); | 1317 | mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0); |
1328 | if (lradc->soc == IMX28_LRADC) | 1318 | if (lradc->soc == IMX28_LRADC) |
1329 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, | 1319 | mxs_lradc_reg_clear(lradc, |
1330 | LRADC_CTRL1); | 1320 | lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, |
1321 | LRADC_CTRL1); | ||
1331 | 1322 | ||
1332 | kfree(lradc->buffer); | 1323 | kfree(lradc->buffer); |
1333 | mutex_unlock(&lradc->lock); | 1324 | mutex_unlock(&lradc->lock); |
@@ -1353,7 +1344,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio, | |||
1353 | if (lradc->use_touchbutton) | 1344 | if (lradc->use_touchbutton) |
1354 | rsvd_chans++; | 1345 | rsvd_chans++; |
1355 | if (lradc->use_touchscreen) | 1346 | if (lradc->use_touchscreen) |
1356 | rsvd_chans++; | 1347 | rsvd_chans += 2; |
1357 | 1348 | ||
1358 | /* Test for attempts to map channels with special mode of operation. */ | 1349 | /* Test for attempts to map channels with special mode of operation. */ |
1359 | if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS)) | 1350 | if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS)) |
@@ -1413,6 +1404,13 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = { | |||
1413 | .channel = 8, | 1404 | .channel = 8, |
1414 | .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,}, | 1405 | .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,}, |
1415 | }, | 1406 | }, |
1407 | /* Hidden channel to keep indexes */ | ||
1408 | { | ||
1409 | .type = IIO_TEMP, | ||
1410 | .indexed = 1, | ||
1411 | .scan_index = -1, | ||
1412 | .channel = 9, | ||
1413 | }, | ||
1416 | MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */ | 1414 | MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */ |
1417 | MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */ | 1415 | MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */ |
1418 | MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */ | 1416 | MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */ |
@@ -1583,6 +1581,11 @@ static int mxs_lradc_probe(struct platform_device *pdev) | |||
1583 | 1581 | ||
1584 | touch_ret = mxs_lradc_probe_touchscreen(lradc, node); | 1582 | touch_ret = mxs_lradc_probe_touchscreen(lradc, node); |
1585 | 1583 | ||
1584 | if (touch_ret == 0) | ||
1585 | lradc->buffer_vchans = BUFFER_VCHANS_LIMITED; | ||
1586 | else | ||
1587 | lradc->buffer_vchans = BUFFER_VCHANS_ALL; | ||
1588 | |||
1586 | /* Grab all IRQ sources */ | 1589 | /* Grab all IRQ sources */ |
1587 | for (i = 0; i < of_cfg->irq_count; i++) { | 1590 | for (i = 0; i < of_cfg->irq_count; i++) { |
1588 | lradc->irq[i] = platform_get_irq(pdev, i); | 1591 | lradc->irq[i] = platform_get_irq(pdev, i); |
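
The mxs-lradc hunks above route the touchscreen through dedicated virtual channels by reprogramming the 4-bit source-select fields in LRADC_CTRL4 via the new LRADC_CTRL4_LRADCSELECT() macro and mxs_lradc_map_channel(). A small userspace sketch of the same clear-then-set field update follows; the macro and function names below are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* One 4-bit source-select field per virtual channel, as in LRADC_CTRL4. */
#define SELECT_MASK(vch)	(0xfu << ((vch) * 4))
#define SELECT(vch, phys)	(((uint32_t)(phys) << ((vch) * 4)) & SELECT_MASK(vch))

/* Map physical channel 'phys' onto virtual channel slot 'vch'. */
static uint32_t map_channel(uint32_t ctrl4, unsigned int vch, unsigned int phys)
{
	ctrl4 &= ~SELECT_MASK(vch);	/* clear the old mapping */
	ctrl4 |= SELECT(vch, phys);	/* install the new one */
	return ctrl4;
}

int main(void)
{
	uint32_t ctrl4 = 0;

	/* e.g. route physical channel 3 (the YP plate) to virtual channel 7 */
	ctrl4 = map_channel(ctrl4, 7, 3);
	printf("CTRL4 = 0x%08x\n", (unsigned int)ctrl4);	/* 0x30000000 */
	return 0;
}
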
diff --git a/drivers/staging/iio/magnetometer/hmc5843_core.c b/drivers/staging/iio/magnetometer/hmc5843_core.c index fd171d8b38fb..90cc18b703cf 100644 --- a/drivers/staging/iio/magnetometer/hmc5843_core.c +++ b/drivers/staging/iio/magnetometer/hmc5843_core.c | |||
@@ -592,6 +592,7 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap, | |||
592 | mutex_init(&data->lock); | 592 | mutex_init(&data->lock); |
593 | 593 | ||
594 | indio_dev->dev.parent = dev; | 594 | indio_dev->dev.parent = dev; |
595 | indio_dev->name = dev->driver->name; | ||
595 | indio_dev->info = &hmc5843_info; | 596 | indio_dev->info = &hmc5843_info; |
596 | indio_dev->modes = INDIO_DIRECT_MODE; | 597 | indio_dev->modes = INDIO_DIRECT_MODE; |
597 | indio_dev->channels = data->variant->channels; | 598 | indio_dev->channels = data->variant->channels; |
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c index 017d2f8379b7..c17893b4918c 100644 --- a/drivers/staging/iio/resolver/ad2s1200.c +++ b/drivers/staging/iio/resolver/ad2s1200.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/bitops.h> | ||
21 | 22 | ||
22 | #include <linux/iio/iio.h> | 23 | #include <linux/iio/iio.h> |
23 | #include <linux/iio/sysfs.h> | 24 | #include <linux/iio/sysfs.h> |
@@ -68,7 +69,7 @@ static int ad2s1200_read_raw(struct iio_dev *indio_dev, | |||
68 | break; | 69 | break; |
69 | case IIO_ANGL_VEL: | 70 | case IIO_ANGL_VEL: |
70 | vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); | 71 | vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); |
71 | vel = (vel << 4) >> 4; | 72 | vel = sign_extend32(vel, 11); |
72 | *val = vel; | 73 | *val = vel; |
73 | break; | 74 | break; |
74 | default: | 75 | default: |
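
The ad2s1200 hunk above replaces the shift-left/shift-right pair with sign_extend32(vel, 11), where 11 is the zero-based index of the sign bit of the 12-bit velocity word. The sketch below mirrors the usual kernel implementation of sign_extend32() so the behaviour can be checked in userspace.

#include <stdint.h>
#include <stdio.h>

/*
 * Same semantics as the kernel's sign_extend32(): 'index' is the
 * zero-based position of the sign bit of the narrow value.
 */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* a 12-bit two's-complement velocity word of -1 from the resolver */
	uint32_t raw = 0xfff;

	printf("%d\n", (int)sign_extend32(raw, 11));	/* prints -1 */
	return 0;
}
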
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 4324282afe49..03b2a90b9ac0 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c | |||
@@ -330,16 +330,6 @@ static void device_init_registers(struct vnt_private *pDevice) | |||
330 | /* zonetype initial */ | 330 | /* zonetype initial */ |
331 | pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; | 331 | pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; |
332 | 332 | ||
333 | /* Get RFType */ | ||
334 | pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE); | ||
335 | |||
336 | /* force change RevID for VT3253 emu */ | ||
337 | if ((pDevice->byRFType & RF_EMU) != 0) | ||
338 | pDevice->byRevId = 0x80; | ||
339 | |||
340 | pDevice->byRFType &= RF_MASK; | ||
341 | pr_debug("pDevice->byRFType = %x\n", pDevice->byRFType); | ||
342 | |||
343 | if (!pDevice->bZoneRegExist) | 333 | if (!pDevice->bZoneRegExist) |
344 | pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; | 334 | pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; |
345 | 335 | ||
@@ -1187,12 +1177,14 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) | |||
1187 | { | 1177 | { |
1188 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 1178 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
1189 | PSTxDesc head_td; | 1179 | PSTxDesc head_td; |
1190 | u32 dma_idx = TYPE_AC0DMA; | 1180 | u32 dma_idx; |
1191 | unsigned long flags; | 1181 | unsigned long flags; |
1192 | 1182 | ||
1193 | spin_lock_irqsave(&priv->lock, flags); | 1183 | spin_lock_irqsave(&priv->lock, flags); |
1194 | 1184 | ||
1195 | if (!ieee80211_is_data(hdr->frame_control)) | 1185 | if (ieee80211_is_data(hdr->frame_control)) |
1186 | dma_idx = TYPE_AC0DMA; | ||
1187 | else | ||
1196 | dma_idx = TYPE_TXDMA0; | 1188 | dma_idx = TYPE_TXDMA0; |
1197 | 1189 | ||
1198 | if (AVAIL_TD(priv, dma_idx) < 1) { | 1190 | if (AVAIL_TD(priv, dma_idx) < 1) { |
@@ -1206,6 +1198,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) | |||
1206 | 1198 | ||
1207 | head_td->pTDInfo->skb = skb; | 1199 | head_td->pTDInfo->skb = skb; |
1208 | 1200 | ||
1201 | if (dma_idx == TYPE_AC0DMA) | ||
1202 | head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; | ||
1203 | |||
1209 | priv->iTDUsed[dma_idx]++; | 1204 | priv->iTDUsed[dma_idx]++; |
1210 | 1205 | ||
1211 | /* Take ownership */ | 1206 | /* Take ownership */ |
@@ -1234,13 +1229,10 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) | |||
1234 | 1229 | ||
1235 | head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); | 1230 | head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); |
1236 | 1231 | ||
1237 | if (dma_idx == TYPE_AC0DMA) { | 1232 | if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) |
1238 | head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; | ||
1239 | |||
1240 | MACvTransmitAC0(priv->PortOffset); | 1233 | MACvTransmitAC0(priv->PortOffset); |
1241 | } else { | 1234 | else |
1242 | MACvTransmit0(priv->PortOffset); | 1235 | MACvTransmit0(priv->PortOffset); |
1243 | } | ||
1244 | 1236 | ||
1245 | spin_unlock_irqrestore(&priv->lock, flags); | 1237 | spin_unlock_irqrestore(&priv->lock, flags); |
1246 | 1238 | ||
@@ -1778,6 +1770,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent) | |||
1778 | MACvInitialize(priv->PortOffset); | 1770 | MACvInitialize(priv->PortOffset); |
1779 | MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); | 1771 | MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); |
1780 | 1772 | ||
1773 | /* Get RFType */ | ||
1774 | priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE); | ||
1775 | priv->byRFType &= RF_MASK; | ||
1776 | |||
1777 | dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType); | ||
1778 | |||
1781 | device_get_options(priv); | 1779 | device_get_options(priv); |
1782 | device_set_options(priv); | 1780 | device_set_options(priv); |
1783 | /* Mask out the options cannot be set to the chip */ | 1781 | /* Mask out the options cannot be set to the chip */ |
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c index 941b2adca95a..7626f635f160 100644 --- a/drivers/staging/vt6655/rf.c +++ b/drivers/staging/vt6655/rf.c | |||
@@ -794,6 +794,7 @@ bool RFbSetPower( | |||
794 | break; | 794 | break; |
795 | case RATE_6M: | 795 | case RATE_6M: |
796 | case RATE_9M: | 796 | case RATE_9M: |
797 | case RATE_12M: | ||
797 | case RATE_18M: | 798 | case RATE_18M: |
798 | byPwr = priv->abyOFDMPwrTbl[uCH]; | 799 | byPwr = priv->abyOFDMPwrTbl[uCH]; |
799 | if (priv->byRFType == RF_UW2452) | 800 | if (priv->byRFType == RF_UW2452) |
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c index c42cde59f598..c4286ccac320 100644 --- a/drivers/staging/vt6656/rf.c +++ b/drivers/staging/vt6656/rf.c | |||
@@ -640,6 +640,7 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel) | |||
640 | break; | 640 | break; |
641 | case RATE_6M: | 641 | case RATE_6M: |
642 | case RATE_9M: | 642 | case RATE_9M: |
643 | case RATE_12M: | ||
643 | case RATE_18M: | 644 | case RATE_18M: |
644 | case RATE_24M: | 645 | case RATE_24M: |
645 | case RATE_36M: | 646 | case RATE_36M: |
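
Both rf.c hunks above add RATE_12M to the group of OFDM rates, so 12 Mb/s transmissions take their TX power from the same table as the other OFDM rates instead of falling through to the default branch. A rough sketch of that kind of selection is shown below; the rate grouping and table parameters are illustrative only, not the drivers' exact power tables.

enum rate { RATE_1M, RATE_2M, RATE_5M, RATE_11M,	/* CCK rates */
	    RATE_6M, RATE_9M, RATE_12M, RATE_18M,	/* OFDM rates */
	    RATE_24M, RATE_36M, RATE_48M, RATE_54M };

/* Pick the per-channel TX power byte; the tables here are illustrative. */
static unsigned char tx_power(enum rate rate, unsigned int ch,
			      const unsigned char *cck_tbl,
			      const unsigned char *ofdm_tbl)
{
	switch (rate) {
	case RATE_6M:
	case RATE_9M:
	case RATE_12M:	/* previously missing, so 12M hit the default branch */
	case RATE_18M:
	case RATE_24M:
	case RATE_36M:
	case RATE_48M:
	case RATE_54M:
		return ofdm_tbl[ch];
	default:
		return cck_tbl[ch];
	}
}
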
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 50bad55a0c42..77d64251af40 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -1181,7 +1181,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1181 | * traditional iSCSI block I/O. | 1181 | * traditional iSCSI block I/O. |
1182 | */ | 1182 | */ |
1183 | if (iscsit_allocate_iovecs(cmd) < 0) { | 1183 | if (iscsit_allocate_iovecs(cmd) < 0) { |
1184 | return iscsit_add_reject_cmd(cmd, | 1184 | return iscsit_reject_cmd(cmd, |
1185 | ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); | 1185 | ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); |
1186 | } | 1186 | } |
1187 | immed_data = cmd->immediate_data; | 1187 | immed_data = cmd->immediate_data; |
@@ -3468,6 +3468,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, | |||
3468 | tpg_np_list) { | 3468 | tpg_np_list) { |
3469 | struct iscsi_np *np = tpg_np->tpg_np; | 3469 | struct iscsi_np *np = tpg_np->tpg_np; |
3470 | bool inaddr_any = iscsit_check_inaddr_any(np); | 3470 | bool inaddr_any = iscsit_check_inaddr_any(np); |
3471 | char *fmt_str; | ||
3471 | 3472 | ||
3472 | if (np->np_network_transport != network_transport) | 3473 | if (np->np_network_transport != network_transport) |
3473 | continue; | 3474 | continue; |
@@ -3495,8 +3496,12 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, | |||
3495 | } | 3496 | } |
3496 | } | 3497 | } |
3497 | 3498 | ||
3498 | len = sprintf(buf, "TargetAddress=" | 3499 | if (np->np_sockaddr.ss_family == AF_INET6) |
3499 | "%s:%hu,%hu", | 3500 | fmt_str = "TargetAddress=[%s]:%hu,%hu"; |
3501 | else | ||
3502 | fmt_str = "TargetAddress=%s:%hu,%hu"; | ||
3503 | |||
3504 | len = sprintf(buf, fmt_str, | ||
3500 | inaddr_any ? conn->local_ip : np->np_ip, | 3505 | inaddr_any ? conn->local_ip : np->np_ip, |
3501 | np->np_port, | 3506 | np->np_port, |
3502 | tpg->tpgt); | 3507 | tpg->tpgt); |
@@ -4256,11 +4261,17 @@ int iscsit_close_connection( | |||
4256 | pr_debug("Closing iSCSI connection CID %hu on SID:" | 4261 | pr_debug("Closing iSCSI connection CID %hu on SID:" |
4257 | " %u\n", conn->cid, sess->sid); | 4262 | " %u\n", conn->cid, sess->sid); |
4258 | /* | 4263 | /* |
4259 | * Always up conn_logout_comp just in case the RX Thread is sleeping | 4264 | * Always up conn_logout_comp for the traditional TCP case just in case |
4260 | * and the logout response never got sent because the connection | 4265 | * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout |
4261 | * failed. | 4266 | * response never got sent because the connection failed. |
4267 | * | ||
4268 | * However for iser-target, isert_wait4logout() is using conn_logout_comp | ||
4269 | * to signal logout response TX interrupt completion. Go ahead and skip | ||
4270 | * this for iser since isert_rx_opcode() does not wait on logout failure, | ||
4271 | * and to avoid iscsi_conn pointer dereference in iser-target code. | ||
4262 | */ | 4272 | */ |
4263 | complete(&conn->conn_logout_comp); | 4273 | if (conn->conn_transport->transport_type == ISCSI_TCP) |
4274 | complete(&conn->conn_logout_comp); | ||
4264 | 4275 | ||
4265 | iscsi_release_thread_set(conn); | 4276 | iscsi_release_thread_set(conn); |
4266 | 4277 | ||
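
The sendtargets hunk above picks a bracketed format string for IPv6, so a TargetAddress response such as [fe80::1]:3260,1 parses unambiguously (the colons inside the address would otherwise collide with the port separator). A self-contained sketch of the same selection, assuming plain snprintf() rather than the target code's buffer handling:

#include <stdio.h>
#include <sys/socket.h>

/* Format an iSCSI TargetAddress= value; IPv6 literals need brackets. */
static int format_target_address(char *buf, size_t len, int family,
				 const char *ip, unsigned short port,
				 unsigned short tpgt)
{
	const char *fmt = (family == AF_INET6) ?
			  "TargetAddress=[%s]:%hu,%hu" :
			  "TargetAddress=%s:%hu,%hu";

	return snprintf(buf, len, fmt, ip, port, tpgt);
}

int main(void)
{
	char buf[128];

	format_target_address(buf, sizeof(buf), AF_INET6, "fe80::1", 3260, 1);
	puts(buf);	/* TargetAddress=[fe80::1]:3260,1 */
	return 0;
}
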
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 1c197bad6132..bdd8731a4daa 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <target/target_core_fabric.h> | 22 | #include <target/target_core_fabric.h> |
23 | 23 | ||
24 | #include <target/iscsi/iscsi_target_core.h> | 24 | #include <target/iscsi/iscsi_target_core.h> |
25 | #include <target/iscsi/iscsi_transport.h> | ||
26 | #include "iscsi_target_seq_pdu_list.h" | 25 | #include "iscsi_target_seq_pdu_list.h" |
27 | #include "iscsi_target_tq.h" | 26 | #include "iscsi_target_tq.h" |
28 | #include "iscsi_target_erl0.h" | 27 | #include "iscsi_target_erl0.h" |
@@ -940,8 +939,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) | |||
940 | 939 | ||
941 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { | 940 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { |
942 | spin_unlock_bh(&conn->state_lock); | 941 | spin_unlock_bh(&conn->state_lock); |
943 | if (conn->conn_transport->transport_type == ISCSI_TCP) | 942 | iscsit_close_connection(conn); |
944 | iscsit_close_connection(conn); | ||
945 | return; | 943 | return; |
946 | } | 944 | } |
947 | 945 | ||
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 6b3c32954689..c36bd7c29136 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -953,11 +953,8 @@ static int tcm_loop_make_nexus( | |||
953 | transport_free_session(tl_nexus->se_sess); | 953 | transport_free_session(tl_nexus->se_sess); |
954 | goto out; | 954 | goto out; |
955 | } | 955 | } |
956 | /* | 956 | /* Now, register the SAS I_T Nexus as active. */ |
957 | * Now, register the SAS I_T Nexus as active with the call to | 957 | transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, |
958 | * transport_register_session() | ||
959 | */ | ||
960 | __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, | ||
961 | tl_nexus->se_sess, tl_nexus); | 958 | tl_nexus->se_sess, tl_nexus); |
962 | tl_tpg->tl_nexus = tl_nexus; | 959 | tl_tpg->tl_nexus = tl_nexus; |
963 | pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" | 960 | pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 58f49ff69b14..7faa6aef9a4d 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -650,6 +650,18 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) | |||
650 | return aligned_max_sectors; | 650 | return aligned_max_sectors; |
651 | } | 651 | } |
652 | 652 | ||
653 | bool se_dev_check_wce(struct se_device *dev) | ||
654 | { | ||
655 | bool wce = false; | ||
656 | |||
657 | if (dev->transport->get_write_cache) | ||
658 | wce = dev->transport->get_write_cache(dev); | ||
659 | else if (dev->dev_attrib.emulate_write_cache > 0) | ||
660 | wce = true; | ||
661 | |||
662 | return wce; | ||
663 | } | ||
664 | |||
653 | int se_dev_set_max_unmap_lba_count( | 665 | int se_dev_set_max_unmap_lba_count( |
654 | struct se_device *dev, | 666 | struct se_device *dev, |
655 | u32 max_unmap_lba_count) | 667 | u32 max_unmap_lba_count) |
@@ -767,6 +779,16 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | |||
767 | pr_err("Illegal value %d\n", flag); | 779 | pr_err("Illegal value %d\n", flag); |
768 | return -EINVAL; | 780 | return -EINVAL; |
769 | } | 781 | } |
782 | if (flag && | ||
783 | dev->transport->get_write_cache) { | ||
784 | pr_warn("emulate_fua_write not supported for this device, ignoring\n"); | ||
785 | return 0; | ||
786 | } | ||
787 | if (dev->export_count) { | ||
788 | pr_err("emulate_fua_write cannot be changed with active" | ||
789 | " exports: %d\n", dev->export_count); | ||
790 | return -EINVAL; | ||
791 | } | ||
770 | dev->dev_attrib.emulate_fua_write = flag; | 792 | dev->dev_attrib.emulate_fua_write = flag; |
771 | pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | 793 | pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", |
772 | dev, dev->dev_attrib.emulate_fua_write); | 794 | dev, dev->dev_attrib.emulate_fua_write); |
@@ -801,7 +823,11 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | |||
801 | pr_err("emulate_write_cache not supported for this device\n"); | 823 | pr_err("emulate_write_cache not supported for this device\n"); |
802 | return -EINVAL; | 824 | return -EINVAL; |
803 | } | 825 | } |
804 | 826 | if (dev->export_count) { | |
827 | pr_err("emulate_write_cache cannot be changed with active" | ||
828 | " exports: %d\n", dev->export_count); | ||
829 | return -EINVAL; | ||
830 | } | ||
805 | dev->dev_attrib.emulate_write_cache = flag; | 831 | dev->dev_attrib.emulate_write_cache = flag; |
806 | pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | 832 | pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", |
807 | dev, dev->dev_attrib.emulate_write_cache); | 833 | dev, dev->dev_attrib.emulate_write_cache); |
@@ -1534,8 +1560,6 @@ int target_configure_device(struct se_device *dev) | |||
1534 | ret = dev->transport->configure_device(dev); | 1560 | ret = dev->transport->configure_device(dev); |
1535 | if (ret) | 1561 | if (ret) |
1536 | goto out; | 1562 | goto out; |
1537 | dev->dev_flags |= DF_CONFIGURED; | ||
1538 | |||
1539 | /* | 1563 | /* |
1540 | * XXX: there is not much point to have two different values here.. | 1564 | * XXX: there is not much point to have two different values here.. |
1541 | */ | 1565 | */ |
@@ -1597,6 +1621,8 @@ int target_configure_device(struct se_device *dev) | |||
1597 | list_add_tail(&dev->g_dev_node, &g_device_list); | 1621 | list_add_tail(&dev->g_dev_node, &g_device_list); |
1598 | mutex_unlock(&g_device_mutex); | 1622 | mutex_unlock(&g_device_mutex); |
1599 | 1623 | ||
1624 | dev->dev_flags |= DF_CONFIGURED; | ||
1625 | |||
1600 | return 0; | 1626 | return 0; |
1601 | 1627 | ||
1602 | out_free_alua: | 1628 | out_free_alua: |
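
The new se_dev_check_wce() helper above gives the backend's get_write_cache() callback precedence over the emulate_write_cache attribute, and the later sbc.c and spc.c hunks switch their callers to it. A stripped-down sketch of the precedence rule; the structure below is a simplified stand-in for struct se_device, not the real type.

#include <stdbool.h>
#include <stddef.h>

struct dev {
	/* backend callback; NULL for purely emulated devices */
	bool (*get_write_cache)(struct dev *dev);
	int emulate_write_cache;
};

static bool dev_check_wce(struct dev *dev)
{
	/* a real backend answer takes precedence over the emulation knob */
	if (dev->get_write_cache)
		return dev->get_write_cache(dev);
	return dev->emulate_write_cache > 0;
}
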
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 1045dcd7bf65..f6c954c4635f 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -1121,7 +1121,7 @@ static u32 pscsi_get_device_type(struct se_device *dev) | |||
1121 | struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); | 1121 | struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); |
1122 | struct scsi_device *sd = pdv->pdv_sd; | 1122 | struct scsi_device *sd = pdv->pdv_sd; |
1123 | 1123 | ||
1124 | return sd->type; | 1124 | return (sd) ? sd->type : TYPE_NO_LUN; |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | static sector_t pscsi_get_blocks(struct se_device *dev) | 1127 | static sector_t pscsi_get_blocks(struct se_device *dev) |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 9a2f9d3a6e70..3e7297411110 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -708,8 +708,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) | |||
708 | } | 708 | } |
709 | } | 709 | } |
710 | if (cdb[1] & 0x8) { | 710 | if (cdb[1] & 0x8) { |
711 | if (!dev->dev_attrib.emulate_fua_write || | 711 | if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) { |
712 | !dev->dev_attrib.emulate_write_cache) { | ||
713 | pr_err("Got CDB: 0x%02x with FUA bit set, but device" | 712 | pr_err("Got CDB: 0x%02x with FUA bit set, but device" |
714 | " does not advertise support for FUA write\n", | 713 | " does not advertise support for FUA write\n", |
715 | cdb[0]); | 714 | cdb[0]); |
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 460e93109473..6c8bd6bc175c 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c | |||
@@ -454,19 +454,6 @@ check_scsi_name: | |||
454 | } | 454 | } |
455 | EXPORT_SYMBOL(spc_emulate_evpd_83); | 455 | EXPORT_SYMBOL(spc_emulate_evpd_83); |
456 | 456 | ||
457 | static bool | ||
458 | spc_check_dev_wce(struct se_device *dev) | ||
459 | { | ||
460 | bool wce = false; | ||
461 | |||
462 | if (dev->transport->get_write_cache) | ||
463 | wce = dev->transport->get_write_cache(dev); | ||
464 | else if (dev->dev_attrib.emulate_write_cache > 0) | ||
465 | wce = true; | ||
466 | |||
467 | return wce; | ||
468 | } | ||
469 | |||
470 | /* Extended INQUIRY Data VPD Page */ | 457 | /* Extended INQUIRY Data VPD Page */ |
471 | static sense_reason_t | 458 | static sense_reason_t |
472 | spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | 459 | spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) |
@@ -490,7 +477,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | |||
490 | buf[5] = 0x07; | 477 | buf[5] = 0x07; |
491 | 478 | ||
492 | /* If WriteCache emulation is enabled, set V_SUP */ | 479 | /* If WriteCache emulation is enabled, set V_SUP */ |
493 | if (spc_check_dev_wce(dev)) | 480 | if (se_dev_check_wce(dev)) |
494 | buf[6] = 0x01; | 481 | buf[6] = 0x01; |
495 | /* If an LBA map is present set R_SUP */ | 482 | /* If an LBA map is present set R_SUP */ |
496 | spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); | 483 | spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); |
@@ -897,7 +884,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p) | |||
897 | if (pc == 1) | 884 | if (pc == 1) |
898 | goto out; | 885 | goto out; |
899 | 886 | ||
900 | if (spc_check_dev_wce(dev)) | 887 | if (se_dev_check_wce(dev)) |
901 | p[2] = 0x04; /* Write Cache Enable */ | 888 | p[2] = 0x04; /* Write Cache Enable */ |
902 | p[12] = 0x20; /* Disabled Read Ahead */ | 889 | p[12] = 0x20; /* Disabled Read Ahead */ |
903 | 890 | ||
@@ -1009,7 +996,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) | |||
1009 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | 996 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) |
1010 | spc_modesense_write_protect(&buf[length], type); | 997 | spc_modesense_write_protect(&buf[length], type); |
1011 | 998 | ||
1012 | if ((spc_check_dev_wce(dev)) && | 999 | if ((se_dev_check_wce(dev)) && |
1013 | (dev->dev_attrib.emulate_fua_write > 0)) | 1000 | (dev->dev_attrib.emulate_fua_write > 0)) |
1014 | spc_modesense_dpofua(&buf[length], type); | 1001 | spc_modesense_dpofua(&buf[length], type); |
1015 | 1002 | ||
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0adc0f650213..ac3cbabdbdf0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | |||
2389 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); | 2389 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); |
2390 | out: | 2390 | out: |
2391 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2391 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2392 | |||
2393 | if (ret && ack_kref) | ||
2394 | target_put_sess_cmd(se_sess, se_cmd); | ||
2395 | |||
2392 | return ret; | 2396 | return ret; |
2393 | } | 2397 | } |
2394 | EXPORT_SYMBOL(target_get_sess_cmd); | 2398 | EXPORT_SYMBOL(target_get_sess_cmd); |
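
The target_get_sess_cmd() hunk above drops the extra command reference again when the call fails, so an error return never leaks the reference taken for ack_kref. A simplified sketch of that acquire-then-undo pattern; the structures and the plain counter below stand in for the real kref machinery.

#include <errno.h>
#include <stdbool.h>

struct cmd {
	int refs;		/* simplified reference count */
};

struct session {
	bool tearing_down;
};

static void cmd_put(struct cmd *cmd)
{
	cmd->refs--;
}

/*
 * Take an extra reference up front, but give it back if the session is
 * already being torn down, so a failed call never leaks the count.
 */
static int get_sess_cmd(struct session *sess, struct cmd *cmd, bool ack_kref)
{
	int ret = 0;

	if (ack_kref)
		cmd->refs++;

	if (sess->tearing_down)
		ret = -ESHUTDOWN;

	if (ret && ack_kref)
		cmd_put(cmd);	/* undo the reference taken above */

	return ret;
}
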
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 97b486c3dda1..583e755d8091 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -359,7 +359,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd) | |||
359 | ep = fc_seq_exch(seq); | 359 | ep = fc_seq_exch(seq); |
360 | if (ep) { | 360 | if (ep) { |
361 | lport = ep->lp; | 361 | lport = ep->lp; |
362 | if (lport && (ep->xid <= lport->lro_xid)) | 362 | if (lport && (ep->xid <= lport->lro_xid)) { |
363 | /* | 363 | /* |
364 | * "ddp_done" trigger invalidation of HW | 364 | * "ddp_done" trigger invalidation of HW |
365 | * specific DDP context | 365 | * specific DDP context |
@@ -374,6 +374,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd) | |||
374 | * identified using ep->xid) | 374 | * identified using ep->xid) |
375 | */ | 375 | */ |
376 | cmd->was_ddp_setup = 0; | 376 | cmd->was_ddp_setup = 0; |
377 | } | ||
377 | } | 378 | } |
378 | } | 379 | } |
379 | } | 380 | } |
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c index 25d244cbbe8f..031018e7a65b 100644 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c | |||
@@ -262,13 +262,12 @@ static int int3400_thermal_probe(struct platform_device *pdev) | |||
262 | result = acpi_parse_art(priv->adev->handle, &priv->art_count, | 262 | result = acpi_parse_art(priv->adev->handle, &priv->art_count, |
263 | &priv->arts, true); | 263 | &priv->arts, true); |
264 | if (result) | 264 | if (result) |
265 | goto free_priv; | 265 | dev_dbg(&pdev->dev, "_ART table parsing error\n"); |
266 | |||
267 | 266 | ||
268 | result = acpi_parse_trt(priv->adev->handle, &priv->trt_count, | 267 | result = acpi_parse_trt(priv->adev->handle, &priv->trt_count, |
269 | &priv->trts, true); | 268 | &priv->trts, true); |
270 | if (result) | 269 | if (result) |
271 | goto free_art; | 270 | dev_dbg(&pdev->dev, "_TRT table parsing error\n"); |
272 | 271 | ||
273 | platform_set_drvdata(pdev, priv); | 272 | platform_set_drvdata(pdev, priv); |
274 | 273 | ||
@@ -281,7 +280,7 @@ static int int3400_thermal_probe(struct platform_device *pdev) | |||
281 | &int3400_thermal_params, 0, 0); | 280 | &int3400_thermal_params, 0, 0); |
282 | if (IS_ERR(priv->thermal)) { | 281 | if (IS_ERR(priv->thermal)) { |
283 | result = PTR_ERR(priv->thermal); | 282 | result = PTR_ERR(priv->thermal); |
284 | goto free_trt; | 283 | goto free_art_trt; |
285 | } | 284 | } |
286 | 285 | ||
287 | priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add( | 286 | priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add( |
@@ -295,9 +294,8 @@ static int int3400_thermal_probe(struct platform_device *pdev) | |||
295 | 294 | ||
296 | free_zone: | 295 | free_zone: |
297 | thermal_zone_device_unregister(priv->thermal); | 296 | thermal_zone_device_unregister(priv->thermal); |
298 | free_trt: | 297 | free_art_trt: |
299 | kfree(priv->trts); | 298 | kfree(priv->trts); |
300 | free_art: | ||
301 | kfree(priv->arts); | 299 | kfree(priv->arts); |
302 | free_priv: | 300 | free_priv: |
303 | kfree(priv); | 301 | kfree(priv); |
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c index f88b08877025..1e25133d35e2 100644 --- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c | |||
@@ -208,7 +208,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, | |||
208 | trip_cnt, GFP_KERNEL); | 208 | trip_cnt, GFP_KERNEL); |
209 | if (!int34x_thermal_zone->aux_trips) { | 209 | if (!int34x_thermal_zone->aux_trips) { |
210 | ret = -ENOMEM; | 210 | ret = -ENOMEM; |
211 | goto free_mem; | 211 | goto err_trip_alloc; |
212 | } | 212 | } |
213 | trip_mask = BIT(trip_cnt) - 1; | 213 | trip_mask = BIT(trip_cnt) - 1; |
214 | int34x_thermal_zone->aux_trip_nr = trip_cnt; | 214 | int34x_thermal_zone->aux_trip_nr = trip_cnt; |
@@ -248,14 +248,15 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, | |||
248 | 0, 0); | 248 | 0, 0); |
249 | if (IS_ERR(int34x_thermal_zone->zone)) { | 249 | if (IS_ERR(int34x_thermal_zone->zone)) { |
250 | ret = PTR_ERR(int34x_thermal_zone->zone); | 250 | ret = PTR_ERR(int34x_thermal_zone->zone); |
251 | goto free_lpat; | 251 | goto err_thermal_zone; |
252 | } | 252 | } |
253 | 253 | ||
254 | return int34x_thermal_zone; | 254 | return int34x_thermal_zone; |
255 | 255 | ||
256 | free_lpat: | 256 | err_thermal_zone: |
257 | acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); | 257 | acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); |
258 | free_mem: | 258 | kfree(int34x_thermal_zone->aux_trips); |
259 | err_trip_alloc: | ||
259 | kfree(int34x_thermal_zone); | 260 | kfree(int34x_thermal_zone); |
260 | return ERR_PTR(ret); | 261 | return ERR_PTR(ret); |
261 | } | 262 | } |
@@ -266,6 +267,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone | |||
266 | { | 267 | { |
267 | thermal_zone_device_unregister(int34x_thermal_zone->zone); | 268 | thermal_zone_device_unregister(int34x_thermal_zone->zone); |
268 | acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); | 269 | acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); |
270 | kfree(int34x_thermal_zone->aux_trips); | ||
269 | kfree(int34x_thermal_zone); | 271 | kfree(int34x_thermal_zone); |
270 | } | 272 | } |
271 | EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); | 273 | EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); |
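
The int340x hunks above rename the error labels and add the previously missing free of aux_trips on both the failure path and in int340x_thermal_zone_remove(). A compact sketch of the underlying pattern, with goto labels unwinding allocations in reverse order and the remove path mirroring the same frees; the structure and sizes are illustrative.

#include <stdlib.h>

struct zone {
	int *aux_trips;
	void *lpat_table;
};

static struct zone *zone_add(unsigned int trip_cnt)
{
	struct zone *z;

	z = calloc(1, sizeof(*z));
	if (!z)
		return NULL;

	z->aux_trips = calloc(trip_cnt, sizeof(*z->aux_trips));
	if (!z->aux_trips)
		goto err_trip_alloc;

	z->lpat_table = malloc(64);	/* stands in for the LPAT table */
	if (!z->lpat_table)
		goto err_lpat;

	return z;

err_lpat:
	free(z->aux_trips);		/* undo in reverse order */
err_trip_alloc:
	free(z);
	return NULL;
}

/* the remove path mirrors every allocation made in zone_add() */
static void zone_remove(struct zone *z)
{
	free(z->lpat_table);
	free(z->aux_trips);
	free(z);
}
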
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index 6ceebd659dd4..12623bc02f46 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c | |||
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = { | |||
688 | { X86_VENDOR_INTEL, 6, 0x45}, | 688 | { X86_VENDOR_INTEL, 6, 0x45}, |
689 | { X86_VENDOR_INTEL, 6, 0x46}, | 689 | { X86_VENDOR_INTEL, 6, 0x46}, |
690 | { X86_VENDOR_INTEL, 6, 0x4c}, | 690 | { X86_VENDOR_INTEL, 6, 0x4c}, |
691 | { X86_VENDOR_INTEL, 6, 0x4d}, | ||
691 | { X86_VENDOR_INTEL, 6, 0x56}, | 692 | { X86_VENDOR_INTEL, 6, 0x56}, |
692 | {} | 693 | {} |
693 | }; | 694 | }; |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 2580a4872f90..fe4e767018c4 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
@@ -387,21 +387,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
387 | 387 | ||
388 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 388 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
389 | if (irq) { | 389 | if (irq) { |
390 | int ret; | ||
391 | |||
392 | /* | 390 | /* |
393 | * platform has IRQ support. | 391 | * platform has IRQ support. |
394 | * Then, driver uses common registers | 392 | * Then, driver uses common registers |
395 | */ | ||
396 | |||
397 | ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, | ||
398 | dev_name(dev), common); | ||
399 | if (ret) { | ||
400 | dev_err(dev, "irq request failed\n "); | ||
401 | return ret; | ||
402 | } | ||
403 | |||
404 | /* | ||
405 | * rcar_has_irq_support() will be enabled | 393 | * rcar_has_irq_support() will be enabled |
406 | */ | 394 | */ |
407 | res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); | 395 | res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); |
@@ -456,8 +444,16 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
456 | } | 444 | } |
457 | 445 | ||
458 | /* enable temperature comparation */ | 446 | /* enable temperature comparation */ |
459 | if (irq) | 447 | if (irq) { |
448 | ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, | ||
449 | dev_name(dev), common); | ||
450 | if (ret) { | ||
451 | dev_err(dev, "irq request failed\n "); | ||
452 | goto error_unregister; | ||
453 | } | ||
454 | |||
460 | rcar_thermal_common_write(common, ENR, enr_bits); | 455 | rcar_thermal_common_write(common, ENR, enr_bits); |
456 | } | ||
461 | 457 | ||
462 | platform_set_drvdata(pdev, common); | 458 | platform_set_drvdata(pdev, common); |
463 | 459 | ||
@@ -467,9 +463,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
467 | 463 | ||
468 | error_unregister: | 464 | error_unregister: |
469 | rcar_thermal_for_each_priv(priv, common) { | 465 | rcar_thermal_for_each_priv(priv, common) { |
470 | thermal_zone_device_unregister(priv->zone); | ||
471 | if (rcar_has_irq_support(priv)) | 466 | if (rcar_has_irq_support(priv)) |
472 | rcar_thermal_irq_disable(priv); | 467 | rcar_thermal_irq_disable(priv); |
468 | thermal_zone_device_unregister(priv->zone); | ||
473 | } | 469 | } |
474 | 470 | ||
475 | pm_runtime_put(dev); | 471 | pm_runtime_put(dev); |
@@ -485,9 +481,9 @@ static int rcar_thermal_remove(struct platform_device *pdev) | |||
485 | struct rcar_thermal_priv *priv; | 481 | struct rcar_thermal_priv *priv; |
486 | 482 | ||
487 | rcar_thermal_for_each_priv(priv, common) { | 483 | rcar_thermal_for_each_priv(priv, common) { |
488 | thermal_zone_device_unregister(priv->zone); | ||
489 | if (rcar_has_irq_support(priv)) | 484 | if (rcar_has_irq_support(priv)) |
490 | rcar_thermal_irq_disable(priv); | 485 | rcar_thermal_irq_disable(priv); |
486 | thermal_zone_device_unregister(priv->zone); | ||
491 | } | 487 | } |
492 | 488 | ||
493 | pm_runtime_put(dev); | 489 | pm_runtime_put(dev); |
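Note on the rcar_thermal hunks above: devm_request_irq() is delayed until the thermal zones exist, and the error/remove paths are flipped so the interrupt source is silenced before a zone is unregistered. A minimal sketch of that teardown rule, using a hypothetical foo_priv and foo_irq_disable() helper:

    #include <linux/platform_device.h>
    #include <linux/thermal.h>

    /* Sketch: quiesce the interrupt source before removing what it touches. */
    static int foo_remove(struct platform_device *pdev)
    {
        struct foo_priv *priv = platform_get_drvdata(pdev);

        foo_irq_disable(priv);                      /* no new interrupts arrive  */
        thermal_zone_device_unregister(priv->zone); /* now safe to drop the zone */

        return 0;
    }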
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 933cd80a6bc5..1d30b0975651 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c | |||
@@ -682,6 +682,7 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on) | |||
682 | 682 | ||
683 | if (on) { | 683 | if (on) { |
684 | con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); | 684 | con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); |
685 | con |= (1 << EXYNOS7_PD_DET_EN_SHIFT); | ||
685 | interrupt_en = | 686 | interrupt_en = |
686 | (of_thermal_is_trip_valid(tz, 7) | 687 | (of_thermal_is_trip_valid(tz, 7) |
687 | << EXYNOS7_TMU_INTEN_RISE7_SHIFT) | | 688 | << EXYNOS7_TMU_INTEN_RISE7_SHIFT) | |
@@ -704,9 +705,9 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on) | |||
704 | interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; | 705 | interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; |
705 | } else { | 706 | } else { |
706 | con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); | 707 | con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); |
708 | con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT); | ||
707 | interrupt_en = 0; /* Disable all interrupts */ | 709 | interrupt_en = 0; /* Disable all interrupts */ |
708 | } | 710 | } |
709 | con |= 1 << EXYNOS7_PD_DET_EN_SHIFT; | ||
710 | 711 | ||
711 | writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN); | 712 | writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN); |
712 | writel(con, data->base + EXYNOS_TMU_REG_CONTROL); | 713 | writel(con, data->base + EXYNOS_TMU_REG_CONTROL); |
@@ -918,34 +919,16 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id) | |||
918 | } | 919 | } |
919 | 920 | ||
920 | static const struct of_device_id exynos_tmu_match[] = { | 921 | static const struct of_device_id exynos_tmu_match[] = { |
921 | { | 922 | { .compatible = "samsung,exynos3250-tmu", }, |
922 | .compatible = "samsung,exynos3250-tmu", | 923 | { .compatible = "samsung,exynos4210-tmu", }, |
923 | }, | 924 | { .compatible = "samsung,exynos4412-tmu", }, |
924 | { | 925 | { .compatible = "samsung,exynos5250-tmu", }, |
925 | .compatible = "samsung,exynos4210-tmu", | 926 | { .compatible = "samsung,exynos5260-tmu", }, |
926 | }, | 927 | { .compatible = "samsung,exynos5420-tmu", }, |
927 | { | 928 | { .compatible = "samsung,exynos5420-tmu-ext-triminfo", }, |
928 | .compatible = "samsung,exynos4412-tmu", | 929 | { .compatible = "samsung,exynos5440-tmu", }, |
929 | }, | 930 | { .compatible = "samsung,exynos7-tmu", }, |
930 | { | 931 | { /* sentinel */ }, |
931 | .compatible = "samsung,exynos5250-tmu", | ||
932 | }, | ||
933 | { | ||
934 | .compatible = "samsung,exynos5260-tmu", | ||
935 | }, | ||
936 | { | ||
937 | .compatible = "samsung,exynos5420-tmu", | ||
938 | }, | ||
939 | { | ||
940 | .compatible = "samsung,exynos5420-tmu-ext-triminfo", | ||
941 | }, | ||
942 | { | ||
943 | .compatible = "samsung,exynos5440-tmu", | ||
944 | }, | ||
945 | { | ||
946 | .compatible = "samsung,exynos7-tmu", | ||
947 | }, | ||
948 | {}, | ||
949 | }; | 932 | }; |
950 | MODULE_DEVICE_TABLE(of, exynos_tmu_match); | 933 | MODULE_DEVICE_TABLE(of, exynos_tmu_match); |
951 | 934 | ||
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c index d1ec5804c0bb..76c515dd802b 100644 --- a/drivers/thermal/st/st_thermal.c +++ b/drivers/thermal/st/st_thermal.c | |||
@@ -25,7 +25,7 @@ | |||
25 | * Function to allocate regfields which are common | 25 | * Function to allocate regfields which are common |
26 | * between syscfg and memory mapped based sensors | 26 | * between syscfg and memory mapped based sensors |
27 | */ | 27 | */ |
28 | int st_thermal_alloc_regfields(struct st_thermal_sensor *sensor) | 28 | static int st_thermal_alloc_regfields(struct st_thermal_sensor *sensor) |
29 | { | 29 | { |
30 | struct device *dev = sensor->dev; | 30 | struct device *dev = sensor->dev; |
31 | struct regmap *regmap = sensor->regmap; | 31 | struct regmap *regmap = sensor->regmap; |
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c index 067bfcdb91d6..fc0c9e198710 100644 --- a/drivers/thermal/st/st_thermal_memmap.c +++ b/drivers/thermal/st/st_thermal_memmap.c | |||
@@ -157,7 +157,7 @@ static const struct st_thermal_sensor_ops st_mmap_sensor_ops = { | |||
157 | }; | 157 | }; |
158 | 158 | ||
159 | /* Compatible device data stih416 mpe thermal sensor */ | 159 | /* Compatible device data stih416 mpe thermal sensor */ |
160 | const struct st_thermal_compat_data st_416mpe_cdata = { | 160 | static const struct st_thermal_compat_data st_416mpe_cdata = { |
161 | .reg_fields = st_mmap_thermal_regfields, | 161 | .reg_fields = st_mmap_thermal_regfields, |
162 | .ops = &st_mmap_sensor_ops, | 162 | .ops = &st_mmap_sensor_ops, |
163 | .calibration_val = 14, | 163 | .calibration_val = 14, |
@@ -166,7 +166,7 @@ const struct st_thermal_compat_data st_416mpe_cdata = { | |||
166 | }; | 166 | }; |
167 | 167 | ||
168 | /* Compatible device data stih407 thermal sensor */ | 168 | /* Compatible device data stih407 thermal sensor */ |
169 | const struct st_thermal_compat_data st_407_cdata = { | 169 | static const struct st_thermal_compat_data st_407_cdata = { |
170 | .reg_fields = st_mmap_thermal_regfields, | 170 | .reg_fields = st_mmap_thermal_regfields, |
171 | .ops = &st_mmap_sensor_ops, | 171 | .ops = &st_mmap_sensor_ops, |
172 | .calibration_val = 16, | 172 | .calibration_val = 16, |
@@ -174,19 +174,19 @@ const struct st_thermal_compat_data st_407_cdata = { | |||
174 | .crit_temp = 120, | 174 | .crit_temp = 120, |
175 | }; | 175 | }; |
176 | 176 | ||
177 | static struct of_device_id st_mmap_thermal_of_match[] = { | 177 | static const struct of_device_id st_mmap_thermal_of_match[] = { |
178 | { .compatible = "st,stih416-mpe-thermal", .data = &st_416mpe_cdata }, | 178 | { .compatible = "st,stih416-mpe-thermal", .data = &st_416mpe_cdata }, |
179 | { .compatible = "st,stih407-thermal", .data = &st_407_cdata }, | 179 | { .compatible = "st,stih407-thermal", .data = &st_407_cdata }, |
180 | { /* sentinel */ } | 180 | { /* sentinel */ } |
181 | }; | 181 | }; |
182 | MODULE_DEVICE_TABLE(of, st_mmap_thermal_of_match); | 182 | MODULE_DEVICE_TABLE(of, st_mmap_thermal_of_match); |
183 | 183 | ||
184 | int st_mmap_probe(struct platform_device *pdev) | 184 | static int st_mmap_probe(struct platform_device *pdev) |
185 | { | 185 | { |
186 | return st_thermal_register(pdev, st_mmap_thermal_of_match); | 186 | return st_thermal_register(pdev, st_mmap_thermal_of_match); |
187 | } | 187 | } |
188 | 188 | ||
189 | int st_mmap_remove(struct platform_device *pdev) | 189 | static int st_mmap_remove(struct platform_device *pdev) |
190 | { | 190 | { |
191 | return st_thermal_unregister(pdev); | 191 | return st_thermal_unregister(pdev); |
192 | } | 192 | } |
diff --git a/drivers/thermal/st/st_thermal_syscfg.c b/drivers/thermal/st/st_thermal_syscfg.c index 26d36a242bb8..3df5b7890703 100644 --- a/drivers/thermal/st/st_thermal_syscfg.c +++ b/drivers/thermal/st/st_thermal_syscfg.c | |||
@@ -104,7 +104,7 @@ static const struct st_thermal_sensor_ops st_syscfg_sensor_ops = { | |||
104 | }; | 104 | }; |
105 | 105 | ||
106 | /* Compatible device data for stih415 sas thermal sensor */ | 106 | /* Compatible device data for stih415 sas thermal sensor */ |
107 | const struct st_thermal_compat_data st_415sas_cdata = { | 107 | static const struct st_thermal_compat_data st_415sas_cdata = { |
108 | .sys_compat = "st,stih415-front-syscfg", | 108 | .sys_compat = "st,stih415-front-syscfg", |
109 | .reg_fields = st_415sas_regfields, | 109 | .reg_fields = st_415sas_regfields, |
110 | .ops = &st_syscfg_sensor_ops, | 110 | .ops = &st_syscfg_sensor_ops, |
@@ -114,7 +114,7 @@ const struct st_thermal_compat_data st_415sas_cdata = { | |||
114 | }; | 114 | }; |
115 | 115 | ||
116 | /* Compatible device data for stih415 mpe thermal sensor */ | 116 | /* Compatible device data for stih415 mpe thermal sensor */ |
117 | const struct st_thermal_compat_data st_415mpe_cdata = { | 117 | static const struct st_thermal_compat_data st_415mpe_cdata = { |
118 | .sys_compat = "st,stih415-system-syscfg", | 118 | .sys_compat = "st,stih415-system-syscfg", |
119 | .reg_fields = st_415mpe_regfields, | 119 | .reg_fields = st_415mpe_regfields, |
120 | .ops = &st_syscfg_sensor_ops, | 120 | .ops = &st_syscfg_sensor_ops, |
@@ -124,7 +124,7 @@ const struct st_thermal_compat_data st_415mpe_cdata = { | |||
124 | }; | 124 | }; |
125 | 125 | ||
126 | /* Compatible device data for stih416 sas thermal sensor */ | 126 | /* Compatible device data for stih416 sas thermal sensor */ |
127 | const struct st_thermal_compat_data st_416sas_cdata = { | 127 | static const struct st_thermal_compat_data st_416sas_cdata = { |
128 | .sys_compat = "st,stih416-front-syscfg", | 128 | .sys_compat = "st,stih416-front-syscfg", |
129 | .reg_fields = st_416sas_regfields, | 129 | .reg_fields = st_416sas_regfields, |
130 | .ops = &st_syscfg_sensor_ops, | 130 | .ops = &st_syscfg_sensor_ops, |
@@ -134,7 +134,7 @@ const struct st_thermal_compat_data st_416sas_cdata = { | |||
134 | }; | 134 | }; |
135 | 135 | ||
136 | /* Compatible device data for stid127 thermal sensor */ | 136 | /* Compatible device data for stid127 thermal sensor */ |
137 | const struct st_thermal_compat_data st_127_cdata = { | 137 | static const struct st_thermal_compat_data st_127_cdata = { |
138 | .sys_compat = "st,stid127-cpu-syscfg", | 138 | .sys_compat = "st,stid127-cpu-syscfg", |
139 | .reg_fields = st_127_regfields, | 139 | .reg_fields = st_127_regfields, |
140 | .ops = &st_syscfg_sensor_ops, | 140 | .ops = &st_syscfg_sensor_ops, |
@@ -143,7 +143,7 @@ const struct st_thermal_compat_data st_127_cdata = { | |||
143 | .crit_temp = 120, | 143 | .crit_temp = 120, |
144 | }; | 144 | }; |
145 | 145 | ||
146 | static struct of_device_id st_syscfg_thermal_of_match[] = { | 146 | static const struct of_device_id st_syscfg_thermal_of_match[] = { |
147 | { .compatible = "st,stih415-sas-thermal", .data = &st_415sas_cdata }, | 147 | { .compatible = "st,stih415-sas-thermal", .data = &st_415sas_cdata }, |
148 | { .compatible = "st,stih415-mpe-thermal", .data = &st_415mpe_cdata }, | 148 | { .compatible = "st,stih415-mpe-thermal", .data = &st_415mpe_cdata }, |
149 | { .compatible = "st,stih416-sas-thermal", .data = &st_416sas_cdata }, | 149 | { .compatible = "st,stih416-sas-thermal", .data = &st_416sas_cdata }, |
@@ -152,12 +152,12 @@ static struct of_device_id st_syscfg_thermal_of_match[] = { | |||
152 | }; | 152 | }; |
153 | MODULE_DEVICE_TABLE(of, st_syscfg_thermal_of_match); | 153 | MODULE_DEVICE_TABLE(of, st_syscfg_thermal_of_match); |
154 | 154 | ||
155 | int st_syscfg_probe(struct platform_device *pdev) | 155 | static int st_syscfg_probe(struct platform_device *pdev) |
156 | { | 156 | { |
157 | return st_thermal_register(pdev, st_syscfg_thermal_of_match); | 157 | return st_thermal_register(pdev, st_syscfg_thermal_of_match); |
158 | } | 158 | } |
159 | 159 | ||
160 | int st_syscfg_remove(struct platform_device *pdev) | 160 | static int st_syscfg_remove(struct platform_device *pdev) |
161 | { | 161 | { |
162 | return st_thermal_unregister(pdev); | 162 | return st_thermal_unregister(pdev); |
163 | } | 163 | } |
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 48491d1a81d6..4108db7e10c1 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c | |||
@@ -458,8 +458,10 @@ static void update_temperature(struct thermal_zone_device *tz) | |||
458 | 458 | ||
459 | ret = thermal_zone_get_temp(tz, &temp); | 459 | ret = thermal_zone_get_temp(tz, &temp); |
460 | if (ret) { | 460 | if (ret) { |
461 | dev_warn(&tz->device, "failed to read out thermal zone %d\n", | 461 | if (ret != -EAGAIN) |
462 | tz->id); | 462 | dev_warn(&tz->device, |
463 | "failed to read out thermal zone (%d)\n", | ||
464 | ret); | ||
463 | return; | 465 | return; |
464 | } | 466 | } |
465 | 467 | ||
@@ -899,6 +901,22 @@ thermal_cooling_device_trip_point_show(struct device *dev, | |||
899 | return sprintf(buf, "%d\n", instance->trip); | 901 | return sprintf(buf, "%d\n", instance->trip); |
900 | } | 902 | } |
901 | 903 | ||
904 | static struct attribute *cooling_device_attrs[] = { | ||
905 | &dev_attr_cdev_type.attr, | ||
906 | &dev_attr_max_state.attr, | ||
907 | &dev_attr_cur_state.attr, | ||
908 | NULL, | ||
909 | }; | ||
910 | |||
911 | static const struct attribute_group cooling_device_attr_group = { | ||
912 | .attrs = cooling_device_attrs, | ||
913 | }; | ||
914 | |||
915 | static const struct attribute_group *cooling_device_attr_groups[] = { | ||
916 | &cooling_device_attr_group, | ||
917 | NULL, | ||
918 | }; | ||
919 | |||
902 | /* Device management */ | 920 | /* Device management */ |
903 | 921 | ||
904 | /** | 922 | /** |
@@ -1130,6 +1148,7 @@ __thermal_cooling_device_register(struct device_node *np, | |||
1130 | cdev->ops = ops; | 1148 | cdev->ops = ops; |
1131 | cdev->updated = false; | 1149 | cdev->updated = false; |
1132 | cdev->device.class = &thermal_class; | 1150 | cdev->device.class = &thermal_class; |
1151 | cdev->device.groups = cooling_device_attr_groups; | ||
1133 | cdev->devdata = devdata; | 1152 | cdev->devdata = devdata; |
1134 | dev_set_name(&cdev->device, "cooling_device%d", cdev->id); | 1153 | dev_set_name(&cdev->device, "cooling_device%d", cdev->id); |
1135 | result = device_register(&cdev->device); | 1154 | result = device_register(&cdev->device); |
@@ -1139,21 +1158,6 @@ __thermal_cooling_device_register(struct device_node *np, | |||
1139 | return ERR_PTR(result); | 1158 | return ERR_PTR(result); |
1140 | } | 1159 | } |
1141 | 1160 | ||
1142 | /* sys I/F */ | ||
1143 | if (type) { | ||
1144 | result = device_create_file(&cdev->device, &dev_attr_cdev_type); | ||
1145 | if (result) | ||
1146 | goto unregister; | ||
1147 | } | ||
1148 | |||
1149 | result = device_create_file(&cdev->device, &dev_attr_max_state); | ||
1150 | if (result) | ||
1151 | goto unregister; | ||
1152 | |||
1153 | result = device_create_file(&cdev->device, &dev_attr_cur_state); | ||
1154 | if (result) | ||
1155 | goto unregister; | ||
1156 | |||
1157 | /* Add 'this' new cdev to the global cdev list */ | 1161 | /* Add 'this' new cdev to the global cdev list */ |
1158 | mutex_lock(&thermal_list_lock); | 1162 | mutex_lock(&thermal_list_lock); |
1159 | list_add(&cdev->node, &thermal_cdev_list); | 1163 | list_add(&cdev->node, &thermal_cdev_list); |
@@ -1163,11 +1167,6 @@ __thermal_cooling_device_register(struct device_node *np, | |||
1163 | bind_cdev(cdev); | 1167 | bind_cdev(cdev); |
1164 | 1168 | ||
1165 | return cdev; | 1169 | return cdev; |
1166 | |||
1167 | unregister: | ||
1168 | release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); | ||
1169 | device_unregister(&cdev->device); | ||
1170 | return ERR_PTR(result); | ||
1171 | } | 1170 | } |
1172 | 1171 | ||
1173 | /** | 1172 | /** |
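Note on the thermal_core hunks: the three device_create_file() calls and their unwind path are replaced by a static attribute group wired into cdev->device.groups before device_register(), so the driver core creates and removes the sysfs files together with the device. A minimal sketch of the pattern with placeholder "foo" names:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* Sketch only: names and the returned value are placeholders. */
    static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
    {
        return sprintf(buf, "%d\n", 42);
    }
    static DEVICE_ATTR_RO(foo);

    static struct attribute *foo_attrs[] = {
        &dev_attr_foo.attr,
        NULL,
    };

    static const struct attribute_group foo_group = {
        .attrs = foo_attrs,
    };

    static const struct attribute_group *foo_groups[] = {
        &foo_group,
        NULL,
    };

    /* before device_register(): dev->groups = foo_groups; */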
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index 634b6ce0e63a..62a5d449c388 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c | |||
@@ -1402,7 +1402,7 @@ int ti_bandgap_remove(struct platform_device *pdev) | |||
1402 | return 0; | 1402 | return 0; |
1403 | } | 1403 | } |
1404 | 1404 | ||
1405 | #ifdef CONFIG_PM | 1405 | #ifdef CONFIG_PM_SLEEP |
1406 | static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp) | 1406 | static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp) |
1407 | { | 1407 | { |
1408 | int i; | 1408 | int i; |
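Note on the ti-bandgap hunk: guarding the context save/restore helpers with CONFIG_PM_SLEEP instead of CONFIG_PM matches the guard used by SET_SYSTEM_SLEEP_PM_OPS(), so the functions are only compiled when something can reference them and CONFIG_PM=y, CONFIG_PM_SLEEP=n builds stop warning about unused functions. A short sketch of the idiom, assuming hypothetical foo_* callbacks:

    #include <linux/pm.h>

    /* Sketch: sleep callbacks only exist when system sleep does. */
    #ifdef CONFIG_PM_SLEEP
    static int foo_suspend(struct device *dev)
    {
        /* save controller context */
        return 0;
    }

    static int foo_resume(struct device *dev)
    {
        /* restore controller context */
        return 0;
    }
    #endif

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);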
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index 3fb054a10f6a..a38c1756442a 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c | |||
@@ -429,7 +429,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id) | |||
429 | 429 | ||
430 | data = ti_bandgap_get_sensor_data(bgp, id); | 430 | data = ti_bandgap_get_sensor_data(bgp, id); |
431 | 431 | ||
432 | if (data && data->cool_dev) | 432 | if (data) |
433 | cpufreq_cooling_unregister(data->cool_dev); | 433 | cpufreq_cooling_unregister(data->cool_dev); |
434 | 434 | ||
435 | return 0; | 435 | return 0; |
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c index d7b198c400c7..ce24182f8514 100644 --- a/drivers/tty/bfin_jtag_comm.c +++ b/drivers/tty/bfin_jtag_comm.c | |||
@@ -210,18 +210,6 @@ bfin_jc_chars_in_buffer(struct tty_struct *tty) | |||
210 | return circ_cnt(&bfin_jc_write_buf); | 210 | return circ_cnt(&bfin_jc_write_buf); |
211 | } | 211 | } |
212 | 212 | ||
213 | static void | ||
214 | bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout) | ||
215 | { | ||
216 | unsigned long expire = jiffies + timeout; | ||
217 | while (!circ_empty(&bfin_jc_write_buf)) { | ||
218 | if (signal_pending(current)) | ||
219 | break; | ||
220 | if (time_after(jiffies, expire)) | ||
221 | break; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | static const struct tty_operations bfin_jc_ops = { | 213 | static const struct tty_operations bfin_jc_ops = { |
226 | .open = bfin_jc_open, | 214 | .open = bfin_jc_open, |
227 | .close = bfin_jc_close, | 215 | .close = bfin_jc_close, |
@@ -230,7 +218,6 @@ static const struct tty_operations bfin_jc_ops = { | |||
230 | .flush_chars = bfin_jc_flush_chars, | 218 | .flush_chars = bfin_jc_flush_chars, |
231 | .write_room = bfin_jc_write_room, | 219 | .write_room = bfin_jc_write_room, |
232 | .chars_in_buffer = bfin_jc_chars_in_buffer, | 220 | .chars_in_buffer = bfin_jc_chars_in_buffer, |
233 | .wait_until_sent = bfin_jc_wait_until_sent, | ||
234 | }; | 221 | }; |
235 | 222 | ||
236 | static int __init bfin_jc_init(void) | 223 | static int __init bfin_jc_init(void) |
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index e3b9570a1eff..deae122c9c4b 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c | |||
@@ -2138,8 +2138,8 @@ int serial8250_do_startup(struct uart_port *port) | |||
2138 | /* | 2138 | /* |
2139 | * Clear the interrupt registers. | 2139 | * Clear the interrupt registers. |
2140 | */ | 2140 | */ |
2141 | if (serial_port_in(port, UART_LSR) & UART_LSR_DR) | 2141 | serial_port_in(port, UART_LSR); |
2142 | serial_port_in(port, UART_RX); | 2142 | serial_port_in(port, UART_RX); |
2143 | serial_port_in(port, UART_IIR); | 2143 | serial_port_in(port, UART_IIR); |
2144 | serial_port_in(port, UART_MSR); | 2144 | serial_port_in(port, UART_MSR); |
2145 | 2145 | ||
@@ -2300,8 +2300,8 @@ dont_test_tx_en: | |||
2300 | * saved flags to avoid getting false values from polling | 2300 | * saved flags to avoid getting false values from polling |
2301 | * routines or the previous session. | 2301 | * routines or the previous session. |
2302 | */ | 2302 | */ |
2303 | if (serial_port_in(port, UART_LSR) & UART_LSR_DR) | 2303 | serial_port_in(port, UART_LSR); |
2304 | serial_port_in(port, UART_RX); | 2304 | serial_port_in(port, UART_RX); |
2305 | serial_port_in(port, UART_IIR); | 2305 | serial_port_in(port, UART_IIR); |
2306 | serial_port_in(port, UART_MSR); | 2306 | serial_port_in(port, UART_MSR); |
2307 | up->lsr_saved_flags = 0; | 2307 | up->lsr_saved_flags = 0; |
@@ -2394,8 +2394,7 @@ void serial8250_do_shutdown(struct uart_port *port) | |||
2394 | * Read data port to reset things, and then unlink from | 2394 | * Read data port to reset things, and then unlink from |
2395 | * the IRQ chain. | 2395 | * the IRQ chain. |
2396 | */ | 2396 | */ |
2397 | if (serial_port_in(port, UART_LSR) & UART_LSR_DR) | 2397 | serial_port_in(port, UART_RX); |
2398 | serial_port_in(port, UART_RX); | ||
2399 | serial8250_rpm_put(up); | 2398 | serial8250_rpm_put(up); |
2400 | 2399 | ||
2401 | del_timer_sync(&up->timer); | 2400 | del_timer_sync(&up->timer); |
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index e60116235836..6ae5b8560e4d 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
@@ -59,6 +59,8 @@ struct dw8250_data { | |||
59 | u8 usr_reg; | 59 | u8 usr_reg; |
60 | int last_mcr; | 60 | int last_mcr; |
61 | int line; | 61 | int line; |
62 | int msr_mask_on; | ||
63 | int msr_mask_off; | ||
62 | struct clk *clk; | 64 | struct clk *clk; |
63 | struct clk *pclk; | 65 | struct clk *pclk; |
64 | struct reset_control *rst; | 66 | struct reset_control *rst; |
@@ -81,6 +83,12 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value) | |||
81 | value &= ~UART_MSR_DCTS; | 83 | value &= ~UART_MSR_DCTS; |
82 | } | 84 | } |
83 | 85 | ||
86 | /* Override any modem control signals if needed */ | ||
87 | if (offset == UART_MSR) { | ||
88 | value |= d->msr_mask_on; | ||
89 | value &= ~d->msr_mask_off; | ||
90 | } | ||
91 | |||
84 | return value; | 92 | return value; |
85 | } | 93 | } |
86 | 94 | ||
@@ -111,7 +119,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value) | |||
111 | dw8250_force_idle(p); | 119 | dw8250_force_idle(p); |
112 | writeb(value, p->membase + (UART_LCR << p->regshift)); | 120 | writeb(value, p->membase + (UART_LCR << p->regshift)); |
113 | } | 121 | } |
114 | dev_err(p->dev, "Couldn't set LCR to %d\n", value); | 122 | /* |
123 | * FIXME: this deadlocks if port->lock is already held | ||
124 | * dev_err(p->dev, "Couldn't set LCR to %d\n", value); | ||
125 | */ | ||
115 | } | 126 | } |
116 | } | 127 | } |
117 | 128 | ||
@@ -155,7 +166,10 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value) | |||
155 | __raw_writeq(value & 0xff, | 166 | __raw_writeq(value & 0xff, |
156 | p->membase + (UART_LCR << p->regshift)); | 167 | p->membase + (UART_LCR << p->regshift)); |
157 | } | 168 | } |
158 | dev_err(p->dev, "Couldn't set LCR to %d\n", value); | 169 | /* |
170 | * FIXME: this deadlocks if port->lock is already held | ||
171 | * dev_err(p->dev, "Couldn't set LCR to %d\n", value); | ||
172 | */ | ||
159 | } | 173 | } |
160 | } | 174 | } |
161 | #endif /* CONFIG_64BIT */ | 175 | #endif /* CONFIG_64BIT */ |
@@ -179,7 +193,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value) | |||
179 | dw8250_force_idle(p); | 193 | dw8250_force_idle(p); |
180 | writel(value, p->membase + (UART_LCR << p->regshift)); | 194 | writel(value, p->membase + (UART_LCR << p->regshift)); |
181 | } | 195 | } |
182 | dev_err(p->dev, "Couldn't set LCR to %d\n", value); | 196 | /* |
197 | * FIXME: this deadlocks if port->lock is already held | ||
198 | * dev_err(p->dev, "Couldn't set LCR to %d\n", value); | ||
199 | */ | ||
183 | } | 200 | } |
184 | } | 201 | } |
185 | 202 | ||
@@ -334,6 +351,30 @@ static int dw8250_probe_of(struct uart_port *p, | |||
334 | if (id >= 0) | 351 | if (id >= 0) |
335 | p->line = id; | 352 | p->line = id; |
336 | 353 | ||
354 | if (of_property_read_bool(np, "dcd-override")) { | ||
355 | /* Always report DCD as active */ | ||
356 | data->msr_mask_on |= UART_MSR_DCD; | ||
357 | data->msr_mask_off |= UART_MSR_DDCD; | ||
358 | } | ||
359 | |||
360 | if (of_property_read_bool(np, "dsr-override")) { | ||
361 | /* Always report DSR as active */ | ||
362 | data->msr_mask_on |= UART_MSR_DSR; | ||
363 | data->msr_mask_off |= UART_MSR_DDSR; | ||
364 | } | ||
365 | |||
366 | if (of_property_read_bool(np, "cts-override")) { | ||
367 | /* Always report CTS as active */ | ||
368 | data->msr_mask_on |= UART_MSR_CTS; | ||
369 | data->msr_mask_off |= UART_MSR_DCTS; | ||
370 | } | ||
371 | |||
372 | if (of_property_read_bool(np, "ri-override")) { | ||
373 | /* Always report Ring indicator as inactive */ | ||
374 | data->msr_mask_off |= UART_MSR_RI; | ||
375 | data->msr_mask_off |= UART_MSR_TERI; | ||
376 | } | ||
377 | |||
337 | /* clock got configured through clk api, all done */ | 378 | /* clock got configured through clk api, all done */ |
338 | if (p->uartclk) | 379 | if (p->uartclk) |
339 | return 0; | 380 | return 0; |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index daf2c82984e9..892eb32cdef4 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -69,7 +69,7 @@ static void moan_device(const char *str, struct pci_dev *dev) | |||
69 | "Please send the output of lspci -vv, this\n" | 69 | "Please send the output of lspci -vv, this\n" |
70 | "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" | 70 | "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" |
71 | "manufacturer and name of serial board or\n" | 71 | "manufacturer and name of serial board or\n" |
72 | "modem board to rmk+serial@arm.linux.org.uk.\n", | 72 | "modem board to <linux-serial@vger.kernel.org>.\n", |
73 | pci_name(dev), str, dev->vendor, dev->device, | 73 | pci_name(dev), str, dev->vendor, dev->device, |
74 | dev->subsystem_vendor, dev->subsystem_device); | 74 | dev->subsystem_vendor, dev->subsystem_device); |
75 | } | 75 | } |
@@ -1989,13 +1989,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
1989 | }, | 1989 | }, |
1990 | { | 1990 | { |
1991 | .vendor = PCI_VENDOR_ID_INTEL, | 1991 | .vendor = PCI_VENDOR_ID_INTEL, |
1992 | .device = PCI_DEVICE_ID_INTEL_QRK_UART, | ||
1993 | .subvendor = PCI_ANY_ID, | ||
1994 | .subdevice = PCI_ANY_ID, | ||
1995 | .setup = pci_default_setup, | ||
1996 | }, | ||
1997 | { | ||
1998 | .vendor = PCI_VENDOR_ID_INTEL, | ||
1999 | .device = PCI_DEVICE_ID_INTEL_BSW_UART1, | 1992 | .device = PCI_DEVICE_ID_INTEL_BSW_UART1, |
2000 | .subvendor = PCI_ANY_ID, | 1993 | .subvendor = PCI_ANY_ID, |
2001 | .subdevice = PCI_ANY_ID, | 1994 | .subdevice = PCI_ANY_ID, |
@@ -2201,13 +2194,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
2201 | */ | 2194 | */ |
2202 | { | 2195 | { |
2203 | .vendor = PCI_VENDOR_ID_PLX, | 2196 | .vendor = PCI_VENDOR_ID_PLX, |
2204 | .device = PCI_DEVICE_ID_PLX_9030, | ||
2205 | .subvendor = PCI_SUBVENDOR_ID_PERLE, | ||
2206 | .subdevice = PCI_ANY_ID, | ||
2207 | .setup = pci_default_setup, | ||
2208 | }, | ||
2209 | { | ||
2210 | .vendor = PCI_VENDOR_ID_PLX, | ||
2211 | .device = PCI_DEVICE_ID_PLX_9050, | 2197 | .device = PCI_DEVICE_ID_PLX_9050, |
2212 | .subvendor = PCI_SUBVENDOR_ID_EXSYS, | 2198 | .subvendor = PCI_SUBVENDOR_ID_EXSYS, |
2213 | .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055, | 2199 | .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055, |
@@ -5415,10 +5401,6 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
5415 | PCI_ANY_ID, PCI_ANY_ID, | 5401 | PCI_ANY_ID, PCI_ANY_ID, |
5416 | 0, 0, pbn_b0_bt_2_115200 }, | 5402 | 0, 0, pbn_b0_bt_2_115200 }, |
5417 | 5403 | ||
5418 | { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S, | ||
5419 | PCI_ANY_ID, PCI_ANY_ID, | ||
5420 | 0, 0, pbn_b0_bt_2_115200 }, | ||
5421 | |||
5422 | { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, | 5404 | { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, |
5423 | PCI_ANY_ID, PCI_ANY_ID, | 5405 | PCI_ANY_ID, PCI_ANY_ID, |
5424 | 0, 0, pbn_wch384_4 }, | 5406 | 0, 0, pbn_wch384_4 }, |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 846552bff67d..4e959c43f680 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/gpio/consumer.h> | 47 | #include <linux/gpio/consumer.h> |
48 | #include <linux/err.h> | 48 | #include <linux/err.h> |
49 | #include <linux/irq.h> | 49 | #include <linux/irq.h> |
50 | #include <linux/suspend.h> | ||
50 | 51 | ||
51 | #include <asm/io.h> | 52 | #include <asm/io.h> |
52 | #include <asm/ioctls.h> | 53 | #include <asm/ioctls.h> |
@@ -173,6 +174,12 @@ struct atmel_uart_port { | |||
173 | bool ms_irq_enabled; | 174 | bool ms_irq_enabled; |
174 | bool is_usart; /* usart or uart */ | 175 | bool is_usart; /* usart or uart */ |
175 | struct timer_list uart_timer; /* uart timer */ | 176 | struct timer_list uart_timer; /* uart timer */ |
177 | |||
178 | bool suspended; | ||
179 | unsigned int pending; | ||
180 | unsigned int pending_status; | ||
181 | spinlock_t lock_suspended; | ||
182 | |||
176 | int (*prepare_rx)(struct uart_port *port); | 183 | int (*prepare_rx)(struct uart_port *port); |
177 | int (*prepare_tx)(struct uart_port *port); | 184 | int (*prepare_tx)(struct uart_port *port); |
178 | void (*schedule_rx)(struct uart_port *port); | 185 | void (*schedule_rx)(struct uart_port *port); |
@@ -1179,12 +1186,15 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) | |||
1179 | { | 1186 | { |
1180 | struct uart_port *port = dev_id; | 1187 | struct uart_port *port = dev_id; |
1181 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | 1188 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
1182 | unsigned int status, pending, pass_counter = 0; | 1189 | unsigned int status, pending, mask, pass_counter = 0; |
1183 | bool gpio_handled = false; | 1190 | bool gpio_handled = false; |
1184 | 1191 | ||
1192 | spin_lock(&atmel_port->lock_suspended); | ||
1193 | |||
1185 | do { | 1194 | do { |
1186 | status = atmel_get_lines_status(port); | 1195 | status = atmel_get_lines_status(port); |
1187 | pending = status & UART_GET_IMR(port); | 1196 | mask = UART_GET_IMR(port); |
1197 | pending = status & mask; | ||
1188 | if (!gpio_handled) { | 1198 | if (!gpio_handled) { |
1189 | /* | 1199 | /* |
1190 | * Dealing with GPIO interrupt | 1200 | * Dealing with GPIO interrupt |
@@ -1206,11 +1216,21 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) | |||
1206 | if (!pending) | 1216 | if (!pending) |
1207 | break; | 1217 | break; |
1208 | 1218 | ||
1219 | if (atmel_port->suspended) { | ||
1220 | atmel_port->pending |= pending; | ||
1221 | atmel_port->pending_status = status; | ||
1222 | UART_PUT_IDR(port, mask); | ||
1223 | pm_system_wakeup(); | ||
1224 | break; | ||
1225 | } | ||
1226 | |||
1209 | atmel_handle_receive(port, pending); | 1227 | atmel_handle_receive(port, pending); |
1210 | atmel_handle_status(port, pending, status); | 1228 | atmel_handle_status(port, pending, status); |
1211 | atmel_handle_transmit(port, pending); | 1229 | atmel_handle_transmit(port, pending); |
1212 | } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); | 1230 | } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); |
1213 | 1231 | ||
1232 | spin_unlock(&atmel_port->lock_suspended); | ||
1233 | |||
1214 | return pass_counter ? IRQ_HANDLED : IRQ_NONE; | 1234 | return pass_counter ? IRQ_HANDLED : IRQ_NONE; |
1215 | } | 1235 | } |
1216 | 1236 | ||
@@ -1742,7 +1762,8 @@ static int atmel_startup(struct uart_port *port) | |||
1742 | /* | 1762 | /* |
1743 | * Allocate the IRQ | 1763 | * Allocate the IRQ |
1744 | */ | 1764 | */ |
1745 | retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED, | 1765 | retval = request_irq(port->irq, atmel_interrupt, |
1766 | IRQF_SHARED | IRQF_COND_SUSPEND, | ||
1746 | tty ? tty->name : "atmel_serial", port); | 1767 | tty ? tty->name : "atmel_serial", port); |
1747 | if (retval) { | 1768 | if (retval) { |
1748 | dev_err(port->dev, "atmel_startup - Can't get irq\n"); | 1769 | dev_err(port->dev, "atmel_startup - Can't get irq\n"); |
@@ -2513,8 +2534,14 @@ static int atmel_serial_suspend(struct platform_device *pdev, | |||
2513 | 2534 | ||
2514 | /* we can not wake up if we're running on slow clock */ | 2535 | /* we can not wake up if we're running on slow clock */ |
2515 | atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); | 2536 | atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); |
2516 | if (atmel_serial_clk_will_stop()) | 2537 | if (atmel_serial_clk_will_stop()) { |
2538 | unsigned long flags; | ||
2539 | |||
2540 | spin_lock_irqsave(&atmel_port->lock_suspended, flags); | ||
2541 | atmel_port->suspended = true; | ||
2542 | spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); | ||
2517 | device_set_wakeup_enable(&pdev->dev, 0); | 2543 | device_set_wakeup_enable(&pdev->dev, 0); |
2544 | } | ||
2518 | 2545 | ||
2519 | uart_suspend_port(&atmel_uart, port); | 2546 | uart_suspend_port(&atmel_uart, port); |
2520 | 2547 | ||
@@ -2525,6 +2552,18 @@ static int atmel_serial_resume(struct platform_device *pdev) | |||
2525 | { | 2552 | { |
2526 | struct uart_port *port = platform_get_drvdata(pdev); | 2553 | struct uart_port *port = platform_get_drvdata(pdev); |
2527 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | 2554 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
2555 | unsigned long flags; | ||
2556 | |||
2557 | spin_lock_irqsave(&atmel_port->lock_suspended, flags); | ||
2558 | if (atmel_port->pending) { | ||
2559 | atmel_handle_receive(port, atmel_port->pending); | ||
2560 | atmel_handle_status(port, atmel_port->pending, | ||
2561 | atmel_port->pending_status); | ||
2562 | atmel_handle_transmit(port, atmel_port->pending); | ||
2563 | atmel_port->pending = 0; | ||
2564 | } | ||
2565 | atmel_port->suspended = false; | ||
2566 | spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); | ||
2528 | 2567 | ||
2529 | uart_resume_port(&atmel_uart, port); | 2568 | uart_resume_port(&atmel_uart, port); |
2530 | device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); | 2569 | device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); |
@@ -2593,6 +2632,8 @@ static int atmel_serial_probe(struct platform_device *pdev) | |||
2593 | port->backup_imr = 0; | 2632 | port->backup_imr = 0; |
2594 | port->uart.line = ret; | 2633 | port->uart.line = ret; |
2595 | 2634 | ||
2635 | spin_lock_init(&port->lock_suspended); | ||
2636 | |||
2596 | ret = atmel_init_gpios(port, &pdev->dev); | 2637 | ret = atmel_init_gpios(port, &pdev->dev); |
2597 | if (ret < 0) | 2638 | if (ret < 0) |
2598 | dev_err(&pdev->dev, "%s", | 2639 | dev_err(&pdev->dev, "%s", |
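Note on the atmel_serial hunks: the shared UART interrupt may now fire while the system is suspending. Under lock_suspended the handler either processes the events normally or, if the port is already marked suspended, stashes the pending bits, masks them, and calls pm_system_wakeup(); resume replays the stashed work. IRQF_COND_SUSPEND marks the shared handler as safe to run in that window. A minimal sketch of the handler side, with hypothetical foo_port and foo_* helpers:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>
    #include <linux/suspend.h>

    /* Sketch only: foo_port and the foo_* helpers are hypothetical. */
    static irqreturn_t foo_interrupt(int irq, void *dev_id)
    {
        struct foo_port *fp = dev_id;
        unsigned int pending = foo_read_pending(fp);

        spin_lock(&fp->lock_suspended);
        if (fp->suspended) {
            fp->pending |= pending;      /* replayed on resume       */
            foo_mask_irqs(fp, pending);  /* keep the line quiet      */
            pm_system_wakeup();          /* treat this as a wakeup   */
        } else {
            foo_handle(fp, pending);
        }
        spin_unlock(&fp->lock_suspended);

        return pending ? IRQ_HANDLED : IRQ_NONE;
    }

    /* requested with:
     *   request_irq(irq, foo_interrupt, IRQF_SHARED | IRQF_COND_SUSPEND,
     *               "foo", fp);
     */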
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index b1893f3f88f1..3ad1458bfeb0 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c | |||
@@ -921,6 +921,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport) | |||
921 | writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE, | 921 | writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE, |
922 | sport->port.membase + UARTPFIFO); | 922 | sport->port.membase + UARTPFIFO); |
923 | 923 | ||
924 | /* explicitly clear RDRF */ | ||
925 | readb(sport->port.membase + UARTSR1); | ||
926 | |||
924 | /* flush Tx and Rx FIFO */ | 927 | /* flush Tx and Rx FIFO */ |
925 | writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, | 928 | writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, |
926 | sport->port.membase + UARTCFIFO); | 929 | sport->port.membase + UARTCFIFO); |
@@ -1076,6 +1079,8 @@ static int lpuart_startup(struct uart_port *port) | |||
1076 | sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) & | 1079 | sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) & |
1077 | UARTPFIFO_FIFOSIZE_MASK) + 1); | 1080 | UARTPFIFO_FIFOSIZE_MASK) + 1); |
1078 | 1081 | ||
1082 | sport->port.fifosize = sport->txfifo_size; | ||
1083 | |||
1079 | sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & | 1084 | sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & |
1080 | UARTPFIFO_FIFOSIZE_MASK) + 1); | 1085 | UARTPFIFO_FIFOSIZE_MASK) + 1); |
1081 | 1086 | ||
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 7ff61e24a195..33fb94f78967 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c | |||
@@ -133,10 +133,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev, | |||
133 | if (of_find_property(np, "no-loopback-test", NULL)) | 133 | if (of_find_property(np, "no-loopback-test", NULL)) |
134 | port->flags |= UPF_SKIP_TEST; | 134 | port->flags |= UPF_SKIP_TEST; |
135 | 135 | ||
136 | ret = of_alias_get_id(np, "serial"); | ||
137 | if (ret >= 0) | ||
138 | port->line = ret; | ||
139 | |||
140 | port->dev = &ofdev->dev; | 136 | port->dev = &ofdev->dev; |
141 | 137 | ||
142 | switch (type) { | 138 | switch (type) { |
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index af821a908720..cf08876922f1 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c | |||
@@ -963,6 +963,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port) | |||
963 | free_irq(ourport->tx_irq, ourport); | 963 | free_irq(ourport->tx_irq, ourport); |
964 | tx_enabled(port) = 0; | 964 | tx_enabled(port) = 0; |
965 | ourport->tx_claimed = 0; | 965 | ourport->tx_claimed = 0; |
966 | ourport->tx_mode = 0; | ||
966 | } | 967 | } |
967 | 968 | ||
968 | if (ourport->rx_claimed) { | 969 | if (ourport->rx_claimed) { |
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 594b63331ef4..bca975f5093b 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c | |||
@@ -293,8 +293,10 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id) | |||
293 | 293 | ||
294 | ims = serial_in(port, SPRD_IMSR); | 294 | ims = serial_in(port, SPRD_IMSR); |
295 | 295 | ||
296 | if (!ims) | 296 | if (!ims) { |
297 | spin_unlock(&port->lock); | ||
297 | return IRQ_NONE; | 298 | return IRQ_NONE; |
299 | } | ||
298 | 300 | ||
299 | serial_out(port, SPRD_ICLR, ~0); | 301 | serial_out(port, SPRD_ICLR, ~0); |
300 | 302 | ||
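Note on the sprd_serial hunk: this is a lock-balance fix. The handler holds port->lock when it reads SPRD_IMSR, so the early IRQ_NONE return has to drop the lock as well, otherwise the next acquisition deadlocks. Sketch of the rule that every exit from the locked region releases the lock (foo_* names are hypothetical):

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    /* Sketch: every exit from the locked region drops the lock. */
    static irqreturn_t foo_irq(int irq, void *dev_id)
    {
        struct foo_port *p = dev_id;   /* hypothetical driver state */
        unsigned int status;

        spin_lock(&p->lock);

        status = foo_read_status(p);
        if (!status) {
            spin_unlock(&p->lock);     /* early exit still unlocks */
            return IRQ_NONE;
        }

        foo_handle(p, status);
        spin_unlock(&p->lock);

        return IRQ_HANDLED;
    }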
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 51f066aa375e..2bb4dfc02873 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -1028,8 +1028,8 @@ EXPORT_SYMBOL(start_tty); | |||
1028 | /* We limit tty time update visibility to every 8 seconds or so. */ | 1028 | /* We limit tty time update visibility to every 8 seconds or so. */ |
1029 | static void tty_update_time(struct timespec *time) | 1029 | static void tty_update_time(struct timespec *time) |
1030 | { | 1030 | { |
1031 | unsigned long sec = get_seconds() & ~7; | 1031 | unsigned long sec = get_seconds(); |
1032 | if ((long)(sec - time->tv_sec) > 0) | 1032 | if (abs(sec - time->tv_sec) & ~7) |
1033 | time->tv_sec = sec; | 1033 | time->tv_sec = sec; |
1034 | } | 1034 | } |
1035 | 1035 | ||
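Note on the tty_update_time() hunk: the old code rounded the current time down to a multiple of 8 seconds and only ever moved the stored timestamp forward; the new predicate keeps the exact seconds but only rewrites the timestamp when it differs by 8 seconds or more in either direction, so a clock that stepped backwards is noticed too. A small standalone sketch of the predicate:

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch of the new predicate: update when the times differ by >= 8s. */
    static int should_update(long now, long stored)
    {
        return (labs(now - stored) & ~7L) != 0;
    }

    int main(void)
    {
        printf("%d\n", should_update(1005, 1000));  /* 0: within 8 seconds */
        printf("%d\n", should_update(1009, 1000));  /* 1: 9s ahead         */
        printf("%d\n", should_update(1000, 1012));  /* 1: clock went back  */
        return 0;
    }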
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index a5cf253b2544..632fc8152061 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c | |||
@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout) | |||
217 | #endif | 217 | #endif |
218 | if (!timeout) | 218 | if (!timeout) |
219 | timeout = MAX_SCHEDULE_TIMEOUT; | 219 | timeout = MAX_SCHEDULE_TIMEOUT; |
220 | if (wait_event_interruptible_timeout(tty->write_wait, | 220 | |
221 | !tty_chars_in_buffer(tty), timeout) >= 0) { | 221 | timeout = wait_event_interruptible_timeout(tty->write_wait, |
222 | if (tty->ops->wait_until_sent) | 222 | !tty_chars_in_buffer(tty), timeout); |
223 | tty->ops->wait_until_sent(tty, timeout); | 223 | if (timeout <= 0) |
224 | } | 224 | return; |
225 | |||
226 | if (timeout == MAX_SCHEDULE_TIMEOUT) | ||
227 | timeout = 0; | ||
228 | |||
229 | if (tty->ops->wait_until_sent) | ||
230 | tty->ops->wait_until_sent(tty, timeout); | ||
225 | } | 231 | } |
226 | EXPORT_SYMBOL(tty_wait_until_sent); | 232 | EXPORT_SYMBOL(tty_wait_until_sent); |
227 | 233 | ||
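Note on the tty_wait_until_sent() hunk: wait_event_interruptible_timeout() returns the remaining jiffies when the condition came true, 0 on timeout, and a negative value on a signal. The rewritten function therefore bails out on a result <= 0 and otherwise hands ->wait_until_sent() only the time that is actually left, mapping the MAX_SCHEDULE_TIMEOUT case back to 0, which that hook treats as "wait as long as needed". Sketch of consuming the return value the same way (foo_* names are hypothetical):

    #include <linux/sched.h>
    #include <linux/wait.h>

    /* Sketch: propagate only the *remaining* timeout to the next waiter. */
    static void foo_wait_drained(struct foo *f, long timeout)
    {
        if (!timeout)
            timeout = MAX_SCHEDULE_TIMEOUT;

        timeout = wait_event_interruptible_timeout(f->wait, foo_done(f),
                                                   timeout);
        if (timeout <= 0)
            return;                     /* signal or timed out */

        if (timeout == MAX_SCHEDULE_TIMEOUT)
            timeout = 0;                /* convention: 0 == no limit */

        foo_hw_drain(f, timeout);       /* pass on what is left */
    }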
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index ff451048c1ac..4bfb7ac0239f 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
@@ -929,6 +929,13 @@ __acquires(hwep->lock) | |||
929 | return retval; | 929 | return retval; |
930 | } | 930 | } |
931 | 931 | ||
932 | static int otg_a_alt_hnp_support(struct ci_hdrc *ci) | ||
933 | { | ||
934 | dev_warn(&ci->gadget.dev, | ||
935 | "connect the device to an alternate port if you want HNP\n"); | ||
936 | return isr_setup_status_phase(ci); | ||
937 | } | ||
938 | |||
932 | /** | 939 | /** |
933 | * isr_setup_packet_handler: setup packet handler | 940 | * isr_setup_packet_handler: setup packet handler |
934 | * @ci: UDC descriptor | 941 | * @ci: UDC descriptor |
@@ -1061,6 +1068,10 @@ __acquires(ci->lock) | |||
1061 | ci); | 1068 | ci); |
1062 | } | 1069 | } |
1063 | break; | 1070 | break; |
1071 | case USB_DEVICE_A_ALT_HNP_SUPPORT: | ||
1072 | if (ci_otg_is_fsm_mode(ci)) | ||
1073 | err = otg_a_alt_hnp_support(ci); | ||
1074 | break; | ||
1064 | default: | 1075 | default: |
1065 | goto delegate; | 1076 | goto delegate; |
1066 | } | 1077 | } |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index e78720b59d67..683617714e7c 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1650,6 +1650,8 @@ static int acm_reset_resume(struct usb_interface *intf) | |||
1650 | 1650 | ||
1651 | static const struct usb_device_id acm_ids[] = { | 1651 | static const struct usb_device_id acm_ids[] = { |
1652 | /* quirky and broken devices */ | 1652 | /* quirky and broken devices */ |
1653 | { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */ | ||
1654 | .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ | ||
1653 | { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ | 1655 | { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ |
1654 | .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ | 1656 | .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ |
1655 | { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ | 1657 | { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ |
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c index c6b35b77dab7..61d538aa2346 100644 --- a/drivers/usb/common/usb-otg-fsm.c +++ b/drivers/usb/common/usb-otg-fsm.c | |||
@@ -150,9 +150,9 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state) | |||
150 | break; | 150 | break; |
151 | case OTG_STATE_B_PERIPHERAL: | 151 | case OTG_STATE_B_PERIPHERAL: |
152 | otg_chrg_vbus(fsm, 0); | 152 | otg_chrg_vbus(fsm, 0); |
153 | otg_loc_conn(fsm, 1); | ||
154 | otg_loc_sof(fsm, 0); | 153 | otg_loc_sof(fsm, 0); |
155 | otg_set_protocol(fsm, PROTO_GADGET); | 154 | otg_set_protocol(fsm, PROTO_GADGET); |
155 | otg_loc_conn(fsm, 1); | ||
156 | break; | 156 | break; |
157 | case OTG_STATE_B_WAIT_ACON: | 157 | case OTG_STATE_B_WAIT_ACON: |
158 | otg_chrg_vbus(fsm, 0); | 158 | otg_chrg_vbus(fsm, 0); |
@@ -213,10 +213,10 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state) | |||
213 | 213 | ||
214 | break; | 214 | break; |
215 | case OTG_STATE_A_PERIPHERAL: | 215 | case OTG_STATE_A_PERIPHERAL: |
216 | otg_loc_conn(fsm, 1); | ||
217 | otg_loc_sof(fsm, 0); | 216 | otg_loc_sof(fsm, 0); |
218 | otg_set_protocol(fsm, PROTO_GADGET); | 217 | otg_set_protocol(fsm, PROTO_GADGET); |
219 | otg_drv_vbus(fsm, 1); | 218 | otg_drv_vbus(fsm, 1); |
219 | otg_loc_conn(fsm, 1); | ||
220 | otg_add_timer(fsm, A_BIDL_ADIS); | 220 | otg_add_timer(fsm, A_BIDL_ADIS); |
221 | break; | 221 | break; |
222 | case OTG_STATE_A_WAIT_VFALL: | 222 | case OTG_STATE_A_WAIT_VFALL: |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 66abdbcfbfa5..11635537c052 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -501,6 +501,7 @@ static void async_completed(struct urb *urb) | |||
501 | as->status = urb->status; | 501 | as->status = urb->status; |
502 | signr = as->signr; | 502 | signr = as->signr; |
503 | if (signr) { | 503 | if (signr) { |
504 | memset(&sinfo, 0, sizeof(sinfo)); | ||
504 | sinfo.si_signo = as->signr; | 505 | sinfo.si_signo = as->signr; |
505 | sinfo.si_errno = as->status; | 506 | sinfo.si_errno = as->status; |
506 | sinfo.si_code = SI_ASYNCIO; | 507 | sinfo.si_code = SI_ASYNCIO; |
@@ -2382,6 +2383,7 @@ static void usbdev_remove(struct usb_device *udev) | |||
2382 | wake_up_all(&ps->wait); | 2383 | wake_up_all(&ps->wait); |
2383 | list_del_init(&ps->list); | 2384 | list_del_init(&ps->list); |
2384 | if (ps->discsignr) { | 2385 | if (ps->discsignr) { |
2386 | memset(&sinfo, 0, sizeof(sinfo)); | ||
2385 | sinfo.si_signo = ps->discsignr; | 2387 | sinfo.si_signo = ps->discsignr; |
2386 | sinfo.si_errno = EPIPE; | 2388 | sinfo.si_errno = EPIPE; |
2387 | sinfo.si_code = SI_ASYNCIO; | 2389 | sinfo.si_code = SI_ASYNCIO; |
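Note on the devio.c hunks: struct siginfo contains a union and padding, so a stack copy that is only partially assigned before delivery leaks uninitialized kernel stack bytes to the signal handler; zeroing the whole structure first closes the leak. Minimal sketch of the pattern (foo_notify is hypothetical):

    #include <linux/sched.h>
    #include <linux/signal.h>
    #include <linux/string.h>

    /* Sketch: zero the whole siginfo before filling the few used fields. */
    static void foo_notify(struct task_struct *task, int signr, int err)
    {
        struct siginfo info;

        memset(&info, 0, sizeof(info)); /* no stale stack bytes escape */
        info.si_signo = signr;
        info.si_errno = err;
        info.si_code  = SI_ASYNCIO;

        send_sig_info(signr, &info, task);
    }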
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index 02e3e2d4ea56..6cf047878dba 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c | |||
@@ -377,6 +377,9 @@ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg) | |||
377 | dwc2_is_host_mode(hsotg) ? "Host" : "Device", | 377 | dwc2_is_host_mode(hsotg) ? "Host" : "Device", |
378 | dwc2_op_state_str(hsotg)); | 378 | dwc2_op_state_str(hsotg)); |
379 | 379 | ||
380 | if (hsotg->op_state == OTG_STATE_A_HOST) | ||
381 | dwc2_hcd_disconnect(hsotg); | ||
382 | |||
380 | /* Change to L3 (OFF) state */ | 383 | /* Change to L3 (OFF) state */ |
381 | hsotg->lx_state = DWC2_L3; | 384 | hsotg->lx_state = DWC2_L3; |
382 | 385 | ||
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 172d64e585b6..52e0c4e5e48e 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c | |||
@@ -205,6 +205,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value) | |||
205 | omap->irq0_offset, value); | 205 | omap->irq0_offset, value); |
206 | } | 206 | } |
207 | 207 | ||
208 | static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value) | ||
209 | { | ||
210 | dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC + | ||
211 | omap->irqmisc_offset, value); | ||
212 | } | ||
213 | |||
214 | static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value) | ||
215 | { | ||
216 | dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 - | ||
217 | omap->irq0_offset, value); | ||
218 | } | ||
219 | |||
208 | static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, | 220 | static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, |
209 | enum omap_dwc3_vbus_id_status status) | 221 | enum omap_dwc3_vbus_id_status status) |
210 | { | 222 | { |
@@ -345,9 +357,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap) | |||
345 | 357 | ||
346 | static void dwc3_omap_disable_irqs(struct dwc3_omap *omap) | 358 | static void dwc3_omap_disable_irqs(struct dwc3_omap *omap) |
347 | { | 359 | { |
360 | u32 reg; | ||
361 | |||
348 | /* disable all IRQs */ | 362 | /* disable all IRQs */ |
349 | dwc3_omap_write_irqmisc_set(omap, 0x00); | 363 | reg = USBOTGSS_IRQO_COREIRQ_ST; |
350 | dwc3_omap_write_irq0_set(omap, 0x00); | 364 | dwc3_omap_write_irq0_clr(omap, reg); |
365 | |||
366 | reg = (USBOTGSS_IRQMISC_OEVT | | ||
367 | USBOTGSS_IRQMISC_DRVVBUS_RISE | | ||
368 | USBOTGSS_IRQMISC_CHRGVBUS_RISE | | ||
369 | USBOTGSS_IRQMISC_DISCHRGVBUS_RISE | | ||
370 | USBOTGSS_IRQMISC_IDPULLUP_RISE | | ||
371 | USBOTGSS_IRQMISC_DRVVBUS_FALL | | ||
372 | USBOTGSS_IRQMISC_CHRGVBUS_FALL | | ||
373 | USBOTGSS_IRQMISC_DISCHRGVBUS_FALL | | ||
374 | USBOTGSS_IRQMISC_IDPULLUP_FALL); | ||
375 | |||
376 | dwc3_omap_write_irqmisc_clr(omap, reg); | ||
351 | } | 377 | } |
352 | 378 | ||
353 | static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32); | 379 | static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32); |
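Note on the dwc3-omap hunks: the fix relies on the paired SET/CLR interrupt-enable registers. Writing a mask to the SET register turns those bits on, writing it to the CLR register turns them off, and writing 0 to either is a no-op, which is why the old "write 0x00 to the SET register" code never disabled anything. Sketch of the write-one-to-set / write-one-to-clear idiom with hypothetical offsets:

    #include <linux/io.h>

    /* Hypothetical register offsets for a SET/CLR style interrupt enable. */
    #define FOO_IRQENABLE_SET   0x10
    #define FOO_IRQENABLE_CLR   0x14

    static void foo_irq_enable(void __iomem *base, u32 mask)
    {
        writel(mask, base + FOO_IRQENABLE_SET);  /* 1 bits become enabled */
    }

    static void foo_irq_disable(void __iomem *base, u32 mask)
    {
        /*
         * Writing 0 to the SET register changes nothing; the CLR
         * register is the only way to drop an enable bit.
         */
        writel(mask, base + FOO_IRQENABLE_CLR);
    }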
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 75648145dc1b..c42765b3a060 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
@@ -1161,7 +1161,6 @@ static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc, | |||
1161 | if (desc->opts_mutex) | 1161 | if (desc->opts_mutex) |
1162 | mutex_lock(desc->opts_mutex); | 1162 | mutex_lock(desc->opts_mutex); |
1163 | memcpy(desc->ext_compat_id, page, l); | 1163 | memcpy(desc->ext_compat_id, page, l); |
1164 | desc->ext_compat_id[l] = '\0'; | ||
1165 | 1164 | ||
1166 | if (desc->opts_mutex) | 1165 | if (desc->opts_mutex) |
1167 | mutex_unlock(desc->opts_mutex); | 1166 | mutex_unlock(desc->opts_mutex); |
@@ -1192,7 +1191,6 @@ static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc, | |||
1192 | if (desc->opts_mutex) | 1191 | if (desc->opts_mutex) |
1193 | mutex_lock(desc->opts_mutex); | 1192 | mutex_lock(desc->opts_mutex); |
1194 | memcpy(desc->ext_compat_id + 8, page, l); | 1193 | memcpy(desc->ext_compat_id + 8, page, l); |
1195 | desc->ext_compat_id[l + 8] = '\0'; | ||
1196 | 1194 | ||
1197 | if (desc->opts_mutex) | 1195 | if (desc->opts_mutex) |
1198 | mutex_unlock(desc->opts_mutex); | 1196 | mutex_unlock(desc->opts_mutex); |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index af98b096af2f..175c9956cbe3 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -144,10 +144,9 @@ struct ffs_io_data { | |||
144 | bool read; | 144 | bool read; |
145 | 145 | ||
146 | struct kiocb *kiocb; | 146 | struct kiocb *kiocb; |
147 | const struct iovec *iovec; | 147 | struct iov_iter data; |
148 | unsigned long nr_segs; | 148 | const void *to_free; |
149 | char __user *buf; | 149 | char *buf; |
150 | size_t len; | ||
151 | 150 | ||
152 | struct mm_struct *mm; | 151 | struct mm_struct *mm; |
153 | struct work_struct work; | 152 | struct work_struct work; |
@@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work) | |||
649 | io_data->req->actual; | 648 | io_data->req->actual; |
650 | 649 | ||
651 | if (io_data->read && ret > 0) { | 650 | if (io_data->read && ret > 0) { |
652 | int i; | ||
653 | size_t pos = 0; | ||
654 | |||
655 | /* | ||
656 | * Since req->length may be bigger than io_data->len (after | ||
657 | * being rounded up to maxpacketsize), we may end up with more | ||
658 | * data then user space has space for. | ||
659 | */ | ||
660 | ret = min_t(int, ret, io_data->len); | ||
661 | |||
662 | use_mm(io_data->mm); | 651 | use_mm(io_data->mm); |
663 | for (i = 0; i < io_data->nr_segs; i++) { | 652 | ret = copy_to_iter(io_data->buf, ret, &io_data->data); |
664 | size_t len = min_t(size_t, ret - pos, | 653 | if (iov_iter_count(&io_data->data)) |
665 | io_data->iovec[i].iov_len); | 654 | ret = -EFAULT; |
666 | if (!len) | ||
667 | break; | ||
668 | if (unlikely(copy_to_user(io_data->iovec[i].iov_base, | ||
669 | &io_data->buf[pos], len))) { | ||
670 | ret = -EFAULT; | ||
671 | break; | ||
672 | } | ||
673 | pos += len; | ||
674 | } | ||
675 | unuse_mm(io_data->mm); | 655 | unuse_mm(io_data->mm); |
676 | } | 656 | } |
677 | 657 | ||
@@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work) | |||
684 | 664 | ||
685 | io_data->kiocb->private = NULL; | 665 | io_data->kiocb->private = NULL; |
686 | if (io_data->read) | 666 | if (io_data->read) |
687 | kfree(io_data->iovec); | 667 | kfree(io_data->to_free); |
688 | kfree(io_data->buf); | 668 | kfree(io_data->buf); |
689 | kfree(io_data); | 669 | kfree(io_data); |
690 | } | 670 | } |
@@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
743 | * before the waiting completes, so do not assign to 'gadget' earlier | 723 | * before the waiting completes, so do not assign to 'gadget' earlier |
744 | */ | 724 | */ |
745 | struct usb_gadget *gadget = epfile->ffs->gadget; | 725 | struct usb_gadget *gadget = epfile->ffs->gadget; |
726 | size_t copied; | ||
746 | 727 | ||
747 | spin_lock_irq(&epfile->ffs->eps_lock); | 728 | spin_lock_irq(&epfile->ffs->eps_lock); |
748 | /* In the meantime, endpoint got disabled or changed. */ | 729 | /* In the meantime, endpoint got disabled or changed. */ |
@@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
750 | spin_unlock_irq(&epfile->ffs->eps_lock); | 731 | spin_unlock_irq(&epfile->ffs->eps_lock); |
751 | return -ESHUTDOWN; | 732 | return -ESHUTDOWN; |
752 | } | 733 | } |
734 | data_len = iov_iter_count(&io_data->data); | ||
753 | /* | 735 | /* |
754 | * Controller may require buffer size to be aligned to | 736 | * Controller may require buffer size to be aligned to |
755 | * maxpacketsize of an out endpoint. | 737 | * maxpacketsize of an out endpoint. |
756 | */ | 738 | */ |
757 | data_len = io_data->read ? | 739 | if (io_data->read) |
758 | usb_ep_align_maybe(gadget, ep->ep, io_data->len) : | 740 | data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); |
759 | io_data->len; | ||
760 | spin_unlock_irq(&epfile->ffs->eps_lock); | 741 | spin_unlock_irq(&epfile->ffs->eps_lock); |
761 | 742 | ||
762 | data = kmalloc(data_len, GFP_KERNEL); | 743 | data = kmalloc(data_len, GFP_KERNEL); |
763 | if (unlikely(!data)) | 744 | if (unlikely(!data)) |
764 | return -ENOMEM; | 745 | return -ENOMEM; |
765 | if (io_data->aio && !io_data->read) { | 746 | if (!io_data->read) { |
766 | int i; | 747 | copied = copy_from_iter(data, data_len, &io_data->data); |
767 | size_t pos = 0; | 748 | if (copied != data_len) { |
768 | for (i = 0; i < io_data->nr_segs; i++) { | ||
769 | if (unlikely(copy_from_user(&data[pos], | ||
770 | io_data->iovec[i].iov_base, | ||
771 | io_data->iovec[i].iov_len))) { | ||
772 | ret = -EFAULT; | ||
773 | goto error; | ||
774 | } | ||
775 | pos += io_data->iovec[i].iov_len; | ||
776 | } | ||
777 | } else { | ||
778 | if (!io_data->read && | ||
779 | unlikely(__copy_from_user(data, io_data->buf, | ||
780 | io_data->len))) { | ||
781 | ret = -EFAULT; | 749 | ret = -EFAULT; |
782 | goto error; | 750 | goto error; |
783 | } | 751 | } |
@@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
876 | */ | 844 | */ |
877 | ret = ep->status; | 845 | ret = ep->status; |
878 | if (io_data->read && ret > 0) { | 846 | if (io_data->read && ret > 0) { |
879 | ret = min_t(size_t, ret, io_data->len); | 847 | ret = copy_to_iter(data, ret, &io_data->data); |
880 | 848 | if (unlikely(iov_iter_count(&io_data->data))) | |
881 | if (unlikely(copy_to_user(io_data->buf, | ||
882 | data, ret))) | ||
883 | ret = -EFAULT; | 849 | ret = -EFAULT; |
884 | } | 850 | } |
885 | } | 851 | } |
@@ -898,37 +864,6 @@ error: | |||
898 | return ret; | 864 | return ret; |
899 | } | 865 | } |
900 | 866 | ||
901 | static ssize_t | ||
902 | ffs_epfile_write(struct file *file, const char __user *buf, size_t len, | ||
903 | loff_t *ptr) | ||
904 | { | ||
905 | struct ffs_io_data io_data; | ||
906 | |||
907 | ENTER(); | ||
908 | |||
909 | io_data.aio = false; | ||
910 | io_data.read = false; | ||
911 | io_data.buf = (char * __user)buf; | ||
912 | io_data.len = len; | ||
913 | |||
914 | return ffs_epfile_io(file, &io_data); | ||
915 | } | ||
916 | |||
917 | static ssize_t | ||
918 | ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) | ||
919 | { | ||
920 | struct ffs_io_data io_data; | ||
921 | |||
922 | ENTER(); | ||
923 | |||
924 | io_data.aio = false; | ||
925 | io_data.read = true; | ||
926 | io_data.buf = buf; | ||
927 | io_data.len = len; | ||
928 | |||
929 | return ffs_epfile_io(file, &io_data); | ||
930 | } | ||
931 | |||
932 | static int | 867 | static int |
933 | ffs_epfile_open(struct inode *inode, struct file *file) | 868 | ffs_epfile_open(struct inode *inode, struct file *file) |
934 | { | 869 | { |
@@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb) | |||
965 | return value; | 900 | return value; |
966 | } | 901 | } |
967 | 902 | ||
968 | static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, | 903 | static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) |
969 | const struct iovec *iovec, | ||
970 | unsigned long nr_segs, loff_t loff) | ||
971 | { | 904 | { |
972 | struct ffs_io_data *io_data; | 905 | struct ffs_io_data io_data, *p = &io_data; |
906 | ssize_t res; | ||
973 | 907 | ||
974 | ENTER(); | 908 | ENTER(); |
975 | 909 | ||
976 | io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); | 910 | if (!is_sync_kiocb(kiocb)) { |
977 | if (unlikely(!io_data)) | 911 | p = kmalloc(sizeof(io_data), GFP_KERNEL); |
978 | return -ENOMEM; | 912 | if (unlikely(!p)) |
913 | return -ENOMEM; | ||
914 | p->aio = true; | ||
915 | } else { | ||
916 | p->aio = false; | ||
917 | } | ||
979 | 918 | ||
980 | io_data->aio = true; | 919 | p->read = false; |
981 | io_data->read = false; | 920 | p->kiocb = kiocb; |
982 | io_data->kiocb = kiocb; | 921 | p->data = *from; |
983 | io_data->iovec = iovec; | 922 | p->mm = current->mm; |
984 | io_data->nr_segs = nr_segs; | ||
985 | io_data->len = kiocb->ki_nbytes; | ||
986 | io_data->mm = current->mm; | ||
987 | 923 | ||
988 | kiocb->private = io_data; | 924 | kiocb->private = p; |
989 | 925 | ||
990 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); | 926 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); |
991 | 927 | ||
992 | return ffs_epfile_io(kiocb->ki_filp, io_data); | 928 | res = ffs_epfile_io(kiocb->ki_filp, p); |
929 | if (res == -EIOCBQUEUED) | ||
930 | return res; | ||
931 | if (p->aio) | ||
932 | kfree(p); | ||
933 | else | ||
934 | *from = p->data; | ||
935 | return res; | ||
993 | } | 936 | } |
994 | 937 | ||
995 | static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, | 938 | static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) |
996 | const struct iovec *iovec, | ||
997 | unsigned long nr_segs, loff_t loff) | ||
998 | { | 939 | { |
999 | struct ffs_io_data *io_data; | 940 | struct ffs_io_data io_data, *p = &io_data; |
1000 | struct iovec *iovec_copy; | 941 | ssize_t res; |
1001 | 942 | ||
1002 | ENTER(); | 943 | ENTER(); |
1003 | 944 | ||
1004 | iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); | 945 | if (!is_sync_kiocb(kiocb)) { |
1005 | if (unlikely(!iovec_copy)) | 946 | p = kmalloc(sizeof(io_data), GFP_KERNEL); |
1006 | return -ENOMEM; | 947 | if (unlikely(!p)) |
1007 | 948 | return -ENOMEM; | |
1008 | memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); | 949 | p->aio = true; |
1009 | 950 | } else { | |
1010 | io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); | 951 | p->aio = false; |
1011 | if (unlikely(!io_data)) { | ||
1012 | kfree(iovec_copy); | ||
1013 | return -ENOMEM; | ||
1014 | } | 952 | } |
1015 | 953 | ||
1016 | io_data->aio = true; | 954 | p->read = true; |
1017 | io_data->read = true; | 955 | p->kiocb = kiocb; |
1018 | io_data->kiocb = kiocb; | 956 | if (p->aio) { |
1019 | io_data->iovec = iovec_copy; | 957 | p->to_free = dup_iter(&p->data, to, GFP_KERNEL); |
1020 | io_data->nr_segs = nr_segs; | 958 | if (!p->to_free) { |
1021 | io_data->len = kiocb->ki_nbytes; | 959 | kfree(p); |
1022 | io_data->mm = current->mm; | 960 | return -ENOMEM; |
961 | } | ||
962 | } else { | ||
963 | p->data = *to; | ||
964 | p->to_free = NULL; | ||
965 | } | ||
966 | p->mm = current->mm; | ||
1023 | 967 | ||
1024 | kiocb->private = io_data; | 968 | kiocb->private = p; |
1025 | 969 | ||
1026 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); | 970 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); |
1027 | 971 | ||
1028 | return ffs_epfile_io(kiocb->ki_filp, io_data); | 972 | res = ffs_epfile_io(kiocb->ki_filp, p); |
973 | if (res == -EIOCBQUEUED) | ||
974 | return res; | ||
975 | |||
976 | if (p->aio) { | ||
977 | kfree(p->to_free); | ||
978 | kfree(p); | ||
979 | } else { | ||
980 | *to = p->data; | ||
981 | } | ||
982 | return res; | ||
1029 | } | 983 | } |
1030 | 984 | ||
1031 | static int | 985 | static int |
@@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = { | |||
1105 | .llseek = no_llseek, | 1059 | .llseek = no_llseek, |
1106 | 1060 | ||
1107 | .open = ffs_epfile_open, | 1061 | .open = ffs_epfile_open, |
1108 | .write = ffs_epfile_write, | 1062 | .write = new_sync_write, |
1109 | .read = ffs_epfile_read, | 1063 | .read = new_sync_read, |
1110 | .aio_write = ffs_epfile_aio_write, | 1064 | .write_iter = ffs_epfile_write_iter, |
1111 | .aio_read = ffs_epfile_aio_read, | 1065 | .read_iter = ffs_epfile_read_iter, |
1112 | .release = ffs_epfile_release, | 1066 | .release = ffs_epfile_release, |
1113 | .unlocked_ioctl = ffs_epfile_ioctl, | 1067 | .unlocked_ioctl = ffs_epfile_ioctl, |
1114 | }; | 1068 | }; |
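
Note: the f_fs.c hunks above replace the old aio_read/aio_write entry points (which took a struct iovec array and copied data with copy_to_user/copy_from_user) with iov_iter based read_iter/write_iter methods; new_sync_read/new_sync_write then provide the plain read(2)/write(2) paths on top of them, and dup_iter() preserves the iterator for asynchronous completion. The following is a minimal sketch of that pattern only, not the actual f_fs.c code; my_dev, my_read_iter, kbuf and klen are illustrative names.

    #include <linux/fs.h>
    #include <linux/uio.h>
    #include <linux/aio.h>
    #include <linux/slab.h>
    #include <linux/kernel.h>

    struct my_dev {                     /* hypothetical driver state */
        void *kbuf;                     /* kernel-side data to hand back */
        size_t klen;
        struct iov_iter aio_iter;       /* saved iterator for async completion */
        struct iovec *aio_iovec;        /* dup_iter() allocation, kfree()d later */
    };

    static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
    {
        struct my_dev *dev = iocb->ki_filp->private_data;
        size_t n, copied;

        if (!is_sync_kiocb(iocb)) {
            /* async: the caller's iterator will not survive this call,
             * so duplicate it (and its iovec) for the completion path */
            dev->aio_iovec = dup_iter(&dev->aio_iter, to, GFP_KERNEL);
            if (!dev->aio_iovec)
                return -ENOMEM;
            /* ... queue the transfer, complete it from the request
             *     callback later ... */
            return -EIOCBQUEUED;
        }

        /* sync: copy straight into the iterator */
        n = min(dev->klen, iov_iter_count(to));
        copied = copy_to_iter(dev->kbuf, n, to);
        return copied == n ? copied : -EFAULT;   /* short copy = fault */
    }

    static const struct file_operations my_fops = {
        .read       = new_sync_read,    /* plain read(2) path */
        .write      = new_sync_write,   /* plain write(2) path */
        .read_iter  = my_read_iter,
        /* .write_iter would mirror this with copy_from_iter() */
    };
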
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 426d69a9c018..a2612fb79eff 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -569,7 +569,7 @@ fail: | |||
569 | return status; | 569 | return status; |
570 | } | 570 | } |
571 | 571 | ||
572 | const struct file_operations f_hidg_fops = { | 572 | static const struct file_operations f_hidg_fops = { |
573 | .owner = THIS_MODULE, | 573 | .owner = THIS_MODULE, |
574 | .open = f_hidg_open, | 574 | .open = f_hidg_open, |
575 | .release = f_hidg_release, | 575 | .release = f_hidg_release, |
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 298b46112b1a..39f49f1ad22f 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -289,8 +289,7 @@ static void disable_loopback(struct f_loopback *loop) | |||
289 | struct usb_composite_dev *cdev; | 289 | struct usb_composite_dev *cdev; |
290 | 290 | ||
291 | cdev = loop->function.config->cdev; | 291 | cdev = loop->function.config->cdev; |
292 | disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL, NULL, | 292 | disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL); |
293 | NULL); | ||
294 | VDBG(cdev, "%s disabled\n", loop->function.name); | 293 | VDBG(cdev, "%s disabled\n", loop->function.name); |
295 | } | 294 | } |
296 | 295 | ||
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index c89e96cfa3e4..c0c3ef272714 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -417,7 +417,10 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt) | |||
417 | return -EINVAL; | 417 | return -EINVAL; |
418 | 418 | ||
419 | spin_lock(&port->lock); | 419 | spin_lock(&port->lock); |
420 | __pn_reset(f); | 420 | |
421 | if (fp->in_ep->driver_data) | ||
422 | __pn_reset(f); | ||
423 | |||
421 | if (alt == 1) { | 424 | if (alt == 1) { |
422 | int i; | 425 | int i; |
423 | 426 | ||
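
Note: the f_phonet.c hunk only calls __pn_reset() when fp->in_ep->driver_data is set. In this generation of the gadget stack, ep->driver_data doubles as the "endpoint is claimed/enabled" marker, so the guard avoids resetting endpoints that were never brought up for the previous alt setting. A minimal sketch of that idiom, with all names other than the usb_ep helpers being illustrative:

    if (ep->driver_data)            /* enabled for a previous alt setting? */
        usb_ep_disable(ep);         /* tear it down before reconfiguring */

    ret = usb_ep_enable(ep);        /* bring it up for the new alt setting */
    if (ret < 0)
        return ret;
    ep->driver_data = port;         /* mark as claimed/enabled */
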
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index e07c50ced64d..3a5ae9900b1e 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -23,15 +23,6 @@ | |||
23 | #include "gadget_chips.h" | 23 | #include "gadget_chips.h" |
24 | #include "u_f.h" | 24 | #include "u_f.h" |
25 | 25 | ||
26 | #define USB_MS_TO_SS_INTERVAL(x) USB_MS_TO_HS_INTERVAL(x) | ||
27 | |||
28 | enum eptype { | ||
29 | EP_CONTROL = 0, | ||
30 | EP_BULK, | ||
31 | EP_ISOC, | ||
32 | EP_INTERRUPT, | ||
33 | }; | ||
34 | |||
35 | /* | 26 | /* |
36 | * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral | 27 | * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral |
37 | * controller drivers. | 28 | * controller drivers. |
@@ -64,8 +55,6 @@ struct f_sourcesink { | |||
64 | struct usb_ep *out_ep; | 55 | struct usb_ep *out_ep; |
65 | struct usb_ep *iso_in_ep; | 56 | struct usb_ep *iso_in_ep; |
66 | struct usb_ep *iso_out_ep; | 57 | struct usb_ep *iso_out_ep; |
67 | struct usb_ep *int_in_ep; | ||
68 | struct usb_ep *int_out_ep; | ||
69 | int cur_alt; | 58 | int cur_alt; |
70 | }; | 59 | }; |
71 | 60 | ||
@@ -79,10 +68,6 @@ static unsigned isoc_interval; | |||
79 | static unsigned isoc_maxpacket; | 68 | static unsigned isoc_maxpacket; |
80 | static unsigned isoc_mult; | 69 | static unsigned isoc_mult; |
81 | static unsigned isoc_maxburst; | 70 | static unsigned isoc_maxburst; |
82 | static unsigned int_interval; /* In ms */ | ||
83 | static unsigned int_maxpacket; | ||
84 | static unsigned int_mult; | ||
85 | static unsigned int_maxburst; | ||
86 | static unsigned buflen; | 71 | static unsigned buflen; |
87 | 72 | ||
88 | /*-------------------------------------------------------------------------*/ | 73 | /*-------------------------------------------------------------------------*/ |
@@ -107,16 +92,6 @@ static struct usb_interface_descriptor source_sink_intf_alt1 = { | |||
107 | /* .iInterface = DYNAMIC */ | 92 | /* .iInterface = DYNAMIC */ |
108 | }; | 93 | }; |
109 | 94 | ||
110 | static struct usb_interface_descriptor source_sink_intf_alt2 = { | ||
111 | .bLength = USB_DT_INTERFACE_SIZE, | ||
112 | .bDescriptorType = USB_DT_INTERFACE, | ||
113 | |||
114 | .bAlternateSetting = 2, | ||
115 | .bNumEndpoints = 2, | ||
116 | .bInterfaceClass = USB_CLASS_VENDOR_SPEC, | ||
117 | /* .iInterface = DYNAMIC */ | ||
118 | }; | ||
119 | |||
120 | /* full speed support: */ | 95 | /* full speed support: */ |
121 | 96 | ||
122 | static struct usb_endpoint_descriptor fs_source_desc = { | 97 | static struct usb_endpoint_descriptor fs_source_desc = { |
@@ -155,26 +130,6 @@ static struct usb_endpoint_descriptor fs_iso_sink_desc = { | |||
155 | .bInterval = 4, | 130 | .bInterval = 4, |
156 | }; | 131 | }; |
157 | 132 | ||
158 | static struct usb_endpoint_descriptor fs_int_source_desc = { | ||
159 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
160 | .bDescriptorType = USB_DT_ENDPOINT, | ||
161 | |||
162 | .bEndpointAddress = USB_DIR_IN, | ||
163 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
164 | .wMaxPacketSize = cpu_to_le16(64), | ||
165 | .bInterval = GZERO_INT_INTERVAL, | ||
166 | }; | ||
167 | |||
168 | static struct usb_endpoint_descriptor fs_int_sink_desc = { | ||
169 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
170 | .bDescriptorType = USB_DT_ENDPOINT, | ||
171 | |||
172 | .bEndpointAddress = USB_DIR_OUT, | ||
173 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
174 | .wMaxPacketSize = cpu_to_le16(64), | ||
175 | .bInterval = GZERO_INT_INTERVAL, | ||
176 | }; | ||
177 | |||
178 | static struct usb_descriptor_header *fs_source_sink_descs[] = { | 133 | static struct usb_descriptor_header *fs_source_sink_descs[] = { |
179 | (struct usb_descriptor_header *) &source_sink_intf_alt0, | 134 | (struct usb_descriptor_header *) &source_sink_intf_alt0, |
180 | (struct usb_descriptor_header *) &fs_sink_desc, | 135 | (struct usb_descriptor_header *) &fs_sink_desc, |
@@ -185,10 +140,6 @@ static struct usb_descriptor_header *fs_source_sink_descs[] = { | |||
185 | (struct usb_descriptor_header *) &fs_source_desc, | 140 | (struct usb_descriptor_header *) &fs_source_desc, |
186 | (struct usb_descriptor_header *) &fs_iso_sink_desc, | 141 | (struct usb_descriptor_header *) &fs_iso_sink_desc, |
187 | (struct usb_descriptor_header *) &fs_iso_source_desc, | 142 | (struct usb_descriptor_header *) &fs_iso_source_desc, |
188 | (struct usb_descriptor_header *) &source_sink_intf_alt2, | ||
189 | #define FS_ALT_IFC_2_OFFSET 8 | ||
190 | (struct usb_descriptor_header *) &fs_int_sink_desc, | ||
191 | (struct usb_descriptor_header *) &fs_int_source_desc, | ||
192 | NULL, | 143 | NULL, |
193 | }; | 144 | }; |
194 | 145 | ||
@@ -228,24 +179,6 @@ static struct usb_endpoint_descriptor hs_iso_sink_desc = { | |||
228 | .bInterval = 4, | 179 | .bInterval = 4, |
229 | }; | 180 | }; |
230 | 181 | ||
231 | static struct usb_endpoint_descriptor hs_int_source_desc = { | ||
232 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
233 | .bDescriptorType = USB_DT_ENDPOINT, | ||
234 | |||
235 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
236 | .wMaxPacketSize = cpu_to_le16(1024), | ||
237 | .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL), | ||
238 | }; | ||
239 | |||
240 | static struct usb_endpoint_descriptor hs_int_sink_desc = { | ||
241 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
242 | .bDescriptorType = USB_DT_ENDPOINT, | ||
243 | |||
244 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
245 | .wMaxPacketSize = cpu_to_le16(1024), | ||
246 | .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL), | ||
247 | }; | ||
248 | |||
249 | static struct usb_descriptor_header *hs_source_sink_descs[] = { | 182 | static struct usb_descriptor_header *hs_source_sink_descs[] = { |
250 | (struct usb_descriptor_header *) &source_sink_intf_alt0, | 183 | (struct usb_descriptor_header *) &source_sink_intf_alt0, |
251 | (struct usb_descriptor_header *) &hs_source_desc, | 184 | (struct usb_descriptor_header *) &hs_source_desc, |
@@ -256,10 +189,6 @@ static struct usb_descriptor_header *hs_source_sink_descs[] = { | |||
256 | (struct usb_descriptor_header *) &hs_sink_desc, | 189 | (struct usb_descriptor_header *) &hs_sink_desc, |
257 | (struct usb_descriptor_header *) &hs_iso_source_desc, | 190 | (struct usb_descriptor_header *) &hs_iso_source_desc, |
258 | (struct usb_descriptor_header *) &hs_iso_sink_desc, | 191 | (struct usb_descriptor_header *) &hs_iso_sink_desc, |
259 | (struct usb_descriptor_header *) &source_sink_intf_alt2, | ||
260 | #define HS_ALT_IFC_2_OFFSET 8 | ||
261 | (struct usb_descriptor_header *) &hs_int_source_desc, | ||
262 | (struct usb_descriptor_header *) &hs_int_sink_desc, | ||
263 | NULL, | 192 | NULL, |
264 | }; | 193 | }; |
265 | 194 | ||
@@ -335,42 +264,6 @@ static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = { | |||
335 | .wBytesPerInterval = cpu_to_le16(1024), | 264 | .wBytesPerInterval = cpu_to_le16(1024), |
336 | }; | 265 | }; |
337 | 266 | ||
338 | static struct usb_endpoint_descriptor ss_int_source_desc = { | ||
339 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
340 | .bDescriptorType = USB_DT_ENDPOINT, | ||
341 | |||
342 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
343 | .wMaxPacketSize = cpu_to_le16(1024), | ||
344 | .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), | ||
345 | }; | ||
346 | |||
347 | struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = { | ||
348 | .bLength = USB_DT_SS_EP_COMP_SIZE, | ||
349 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, | ||
350 | |||
351 | .bMaxBurst = 0, | ||
352 | .bmAttributes = 0, | ||
353 | .wBytesPerInterval = cpu_to_le16(1024), | ||
354 | }; | ||
355 | |||
356 | static struct usb_endpoint_descriptor ss_int_sink_desc = { | ||
357 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
358 | .bDescriptorType = USB_DT_ENDPOINT, | ||
359 | |||
360 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
361 | .wMaxPacketSize = cpu_to_le16(1024), | ||
362 | .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), | ||
363 | }; | ||
364 | |||
365 | struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = { | ||
366 | .bLength = USB_DT_SS_EP_COMP_SIZE, | ||
367 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, | ||
368 | |||
369 | .bMaxBurst = 0, | ||
370 | .bmAttributes = 0, | ||
371 | .wBytesPerInterval = cpu_to_le16(1024), | ||
372 | }; | ||
373 | |||
374 | static struct usb_descriptor_header *ss_source_sink_descs[] = { | 267 | static struct usb_descriptor_header *ss_source_sink_descs[] = { |
375 | (struct usb_descriptor_header *) &source_sink_intf_alt0, | 268 | (struct usb_descriptor_header *) &source_sink_intf_alt0, |
376 | (struct usb_descriptor_header *) &ss_source_desc, | 269 | (struct usb_descriptor_header *) &ss_source_desc, |
@@ -387,12 +280,6 @@ static struct usb_descriptor_header *ss_source_sink_descs[] = { | |||
387 | (struct usb_descriptor_header *) &ss_iso_source_comp_desc, | 280 | (struct usb_descriptor_header *) &ss_iso_source_comp_desc, |
388 | (struct usb_descriptor_header *) &ss_iso_sink_desc, | 281 | (struct usb_descriptor_header *) &ss_iso_sink_desc, |
389 | (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, | 282 | (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, |
390 | (struct usb_descriptor_header *) &source_sink_intf_alt2, | ||
391 | #define SS_ALT_IFC_2_OFFSET 14 | ||
392 | (struct usb_descriptor_header *) &ss_int_source_desc, | ||
393 | (struct usb_descriptor_header *) &ss_int_source_comp_desc, | ||
394 | (struct usb_descriptor_header *) &ss_int_sink_desc, | ||
395 | (struct usb_descriptor_header *) &ss_int_sink_comp_desc, | ||
396 | NULL, | 283 | NULL, |
397 | }; | 284 | }; |
398 | 285 | ||
@@ -414,21 +301,6 @@ static struct usb_gadget_strings *sourcesink_strings[] = { | |||
414 | }; | 301 | }; |
415 | 302 | ||
416 | /*-------------------------------------------------------------------------*/ | 303 | /*-------------------------------------------------------------------------*/ |
417 | static const char *get_ep_string(enum eptype ep_type) | ||
418 | { | ||
419 | switch (ep_type) { | ||
420 | case EP_ISOC: | ||
421 | return "ISOC-"; | ||
422 | case EP_INTERRUPT: | ||
423 | return "INTERRUPT-"; | ||
424 | case EP_CONTROL: | ||
425 | return "CTRL-"; | ||
426 | case EP_BULK: | ||
427 | return "BULK-"; | ||
428 | default: | ||
429 | return "UNKNOWN-"; | ||
430 | } | ||
431 | } | ||
432 | 304 | ||
433 | static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) | 305 | static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) |
434 | { | 306 | { |
@@ -456,8 +328,7 @@ static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) | |||
456 | 328 | ||
457 | void disable_endpoints(struct usb_composite_dev *cdev, | 329 | void disable_endpoints(struct usb_composite_dev *cdev, |
458 | struct usb_ep *in, struct usb_ep *out, | 330 | struct usb_ep *in, struct usb_ep *out, |
459 | struct usb_ep *iso_in, struct usb_ep *iso_out, | 331 | struct usb_ep *iso_in, struct usb_ep *iso_out) |
460 | struct usb_ep *int_in, struct usb_ep *int_out) | ||
461 | { | 332 | { |
462 | disable_ep(cdev, in); | 333 | disable_ep(cdev, in); |
463 | disable_ep(cdev, out); | 334 | disable_ep(cdev, out); |
@@ -465,10 +336,6 @@ void disable_endpoints(struct usb_composite_dev *cdev, | |||
465 | disable_ep(cdev, iso_in); | 336 | disable_ep(cdev, iso_in); |
466 | if (iso_out) | 337 | if (iso_out) |
467 | disable_ep(cdev, iso_out); | 338 | disable_ep(cdev, iso_out); |
468 | if (int_in) | ||
469 | disable_ep(cdev, int_in); | ||
470 | if (int_out) | ||
471 | disable_ep(cdev, int_out); | ||
472 | } | 339 | } |
473 | 340 | ||
474 | static int | 341 | static int |
@@ -485,7 +352,6 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f) | |||
485 | return id; | 352 | return id; |
486 | source_sink_intf_alt0.bInterfaceNumber = id; | 353 | source_sink_intf_alt0.bInterfaceNumber = id; |
487 | source_sink_intf_alt1.bInterfaceNumber = id; | 354 | source_sink_intf_alt1.bInterfaceNumber = id; |
488 | source_sink_intf_alt2.bInterfaceNumber = id; | ||
489 | 355 | ||
490 | /* allocate bulk endpoints */ | 356 | /* allocate bulk endpoints */ |
491 | ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); | 357 | ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); |
@@ -546,55 +412,14 @@ no_iso: | |||
546 | if (isoc_maxpacket > 1024) | 412 | if (isoc_maxpacket > 1024) |
547 | isoc_maxpacket = 1024; | 413 | isoc_maxpacket = 1024; |
548 | 414 | ||
549 | /* sanity check the interrupt module parameters */ | ||
550 | if (int_interval < 1) | ||
551 | int_interval = 1; | ||
552 | if (int_interval > 4096) | ||
553 | int_interval = 4096; | ||
554 | if (int_mult > 2) | ||
555 | int_mult = 2; | ||
556 | if (int_maxburst > 15) | ||
557 | int_maxburst = 15; | ||
558 | |||
559 | /* fill in the FS interrupt descriptors from the module parameters */ | ||
560 | fs_int_source_desc.wMaxPacketSize = int_maxpacket > 64 ? | ||
561 | 64 : int_maxpacket; | ||
562 | fs_int_source_desc.bInterval = int_interval > 255 ? | ||
563 | 255 : int_interval; | ||
564 | fs_int_sink_desc.wMaxPacketSize = int_maxpacket > 64 ? | ||
565 | 64 : int_maxpacket; | ||
566 | fs_int_sink_desc.bInterval = int_interval > 255 ? | ||
567 | 255 : int_interval; | ||
568 | |||
569 | /* allocate int endpoints */ | ||
570 | ss->int_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_source_desc); | ||
571 | if (!ss->int_in_ep) | ||
572 | goto no_int; | ||
573 | ss->int_in_ep->driver_data = cdev; /* claim */ | ||
574 | |||
575 | ss->int_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_sink_desc); | ||
576 | if (ss->int_out_ep) { | ||
577 | ss->int_out_ep->driver_data = cdev; /* claim */ | ||
578 | } else { | ||
579 | ss->int_in_ep->driver_data = NULL; | ||
580 | ss->int_in_ep = NULL; | ||
581 | no_int: | ||
582 | fs_source_sink_descs[FS_ALT_IFC_2_OFFSET] = NULL; | ||
583 | hs_source_sink_descs[HS_ALT_IFC_2_OFFSET] = NULL; | ||
584 | ss_source_sink_descs[SS_ALT_IFC_2_OFFSET] = NULL; | ||
585 | } | ||
586 | |||
587 | if (int_maxpacket > 1024) | ||
588 | int_maxpacket = 1024; | ||
589 | |||
590 | /* support high speed hardware */ | 415 | /* support high speed hardware */ |
591 | hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; | 416 | hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; |
592 | hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; | 417 | hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; |
593 | 418 | ||
594 | /* | 419 | /* |
595 | * Fill in the HS isoc and interrupt descriptors from the module | 420 | * Fill in the HS isoc descriptors from the module parameters. |
596 | * parameters. We assume that the user knows what they are doing and | 421 | * We assume that the user knows what they are doing and won't |
597 | * won't give parameters that their UDC doesn't support. | 422 | * give parameters that their UDC doesn't support. |
598 | */ | 423 | */ |
599 | hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; | 424 | hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; |
600 | hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; | 425 | hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; |
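
Note: the `isoc_mult << 11` OR above follows the USB 2.0 wMaxPacketSize encoding for high-speed periodic endpoints — bits 10..0 carry the packet size and bits 12..11 the number of additional transactions per microframe. A worked example with illustrative values:

    /* Example with isoc_maxpacket = 1024 and isoc_mult = 2:
     *   wMaxPacketSize = 1024 | (2 << 11) = 0x0400 | 0x1000 = 0x1400
     * i.e. 1024-byte packets with 2 additional transactions per microframe,
     * for up to 1024 * (2 + 1) = 3072 bytes per microframe.  The SuperSpeed
     * hunk below folds (isoc_maxburst + 1) into the same product when it
     * computes wBytesPerInterval.
     */
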
@@ -607,17 +432,6 @@ no_int: | |||
607 | hs_iso_sink_desc.bInterval = isoc_interval; | 432 | hs_iso_sink_desc.bInterval = isoc_interval; |
608 | hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; | 433 | hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; |
609 | 434 | ||
610 | hs_int_source_desc.wMaxPacketSize = int_maxpacket; | ||
611 | hs_int_source_desc.wMaxPacketSize |= int_mult << 11; | ||
612 | hs_int_source_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval); | ||
613 | hs_int_source_desc.bEndpointAddress = | ||
614 | fs_int_source_desc.bEndpointAddress; | ||
615 | |||
616 | hs_int_sink_desc.wMaxPacketSize = int_maxpacket; | ||
617 | hs_int_sink_desc.wMaxPacketSize |= int_mult << 11; | ||
618 | hs_int_sink_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval); | ||
619 | hs_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress; | ||
620 | |||
621 | /* support super speed hardware */ | 435 | /* support super speed hardware */ |
622 | ss_source_desc.bEndpointAddress = | 436 | ss_source_desc.bEndpointAddress = |
623 | fs_source_desc.bEndpointAddress; | 437 | fs_source_desc.bEndpointAddress; |
@@ -625,9 +439,9 @@ no_int: | |||
625 | fs_sink_desc.bEndpointAddress; | 439 | fs_sink_desc.bEndpointAddress; |
626 | 440 | ||
627 | /* | 441 | /* |
628 | * Fill in the SS isoc and interrupt descriptors from the module | 442 | * Fill in the SS isoc descriptors from the module parameters. |
629 | * parameters. We assume that the user knows what they are doing and | 443 | * We assume that the user knows what they are doing and won't |
630 | * won't give parameters that their UDC doesn't support. | 444 | * give parameters that their UDC doesn't support. |
631 | */ | 445 | */ |
632 | ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; | 446 | ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; |
633 | ss_iso_source_desc.bInterval = isoc_interval; | 447 | ss_iso_source_desc.bInterval = isoc_interval; |
@@ -646,37 +460,17 @@ no_int: | |||
646 | isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); | 460 | isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); |
647 | ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; | 461 | ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; |
648 | 462 | ||
649 | ss_int_source_desc.wMaxPacketSize = int_maxpacket; | ||
650 | ss_int_source_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval); | ||
651 | ss_int_source_comp_desc.bmAttributes = int_mult; | ||
652 | ss_int_source_comp_desc.bMaxBurst = int_maxburst; | ||
653 | ss_int_source_comp_desc.wBytesPerInterval = | ||
654 | int_maxpacket * (int_mult + 1) * (int_maxburst + 1); | ||
655 | ss_int_source_desc.bEndpointAddress = | ||
656 | fs_int_source_desc.bEndpointAddress; | ||
657 | |||
658 | ss_int_sink_desc.wMaxPacketSize = int_maxpacket; | ||
659 | ss_int_sink_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval); | ||
660 | ss_int_sink_comp_desc.bmAttributes = int_mult; | ||
661 | ss_int_sink_comp_desc.bMaxBurst = int_maxburst; | ||
662 | ss_int_sink_comp_desc.wBytesPerInterval = | ||
663 | int_maxpacket * (int_mult + 1) * (int_maxburst + 1); | ||
664 | ss_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress; | ||
665 | |||
666 | ret = usb_assign_descriptors(f, fs_source_sink_descs, | 463 | ret = usb_assign_descriptors(f, fs_source_sink_descs, |
667 | hs_source_sink_descs, ss_source_sink_descs); | 464 | hs_source_sink_descs, ss_source_sink_descs); |
668 | if (ret) | 465 | if (ret) |
669 | return ret; | 466 | return ret; |
670 | 467 | ||
671 | DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s, " | 468 | DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n", |
672 | "INT-IN/%s, INT-OUT/%s\n", | ||
673 | (gadget_is_superspeed(c->cdev->gadget) ? "super" : | 469 | (gadget_is_superspeed(c->cdev->gadget) ? "super" : |
674 | (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), | 470 | (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), |
675 | f->name, ss->in_ep->name, ss->out_ep->name, | 471 | f->name, ss->in_ep->name, ss->out_ep->name, |
676 | ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", | 472 | ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", |
677 | ss->iso_out_ep ? ss->iso_out_ep->name : "<none>", | 473 | ss->iso_out_ep ? ss->iso_out_ep->name : "<none>"); |
678 | ss->int_in_ep ? ss->int_in_ep->name : "<none>", | ||
679 | ss->int_out_ep ? ss->int_out_ep->name : "<none>"); | ||
680 | return 0; | 474 | return 0; |
681 | } | 475 | } |
682 | 476 | ||
@@ -807,15 +601,14 @@ static void source_sink_complete(struct usb_ep *ep, struct usb_request *req) | |||
807 | } | 601 | } |
808 | 602 | ||
809 | static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, | 603 | static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, |
810 | enum eptype ep_type, int speed) | 604 | bool is_iso, int speed) |
811 | { | 605 | { |
812 | struct usb_ep *ep; | 606 | struct usb_ep *ep; |
813 | struct usb_request *req; | 607 | struct usb_request *req; |
814 | int i, size, status; | 608 | int i, size, status; |
815 | 609 | ||
816 | for (i = 0; i < 8; i++) { | 610 | for (i = 0; i < 8; i++) { |
817 | switch (ep_type) { | 611 | if (is_iso) { |
818 | case EP_ISOC: | ||
819 | switch (speed) { | 612 | switch (speed) { |
820 | case USB_SPEED_SUPER: | 613 | case USB_SPEED_SUPER: |
821 | size = isoc_maxpacket * (isoc_mult + 1) * | 614 | size = isoc_maxpacket * (isoc_mult + 1) * |
@@ -831,28 +624,9 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, | |||
831 | } | 624 | } |
832 | ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; | 625 | ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; |
833 | req = ss_alloc_ep_req(ep, size); | 626 | req = ss_alloc_ep_req(ep, size); |
834 | break; | 627 | } else { |
835 | case EP_INTERRUPT: | ||
836 | switch (speed) { | ||
837 | case USB_SPEED_SUPER: | ||
838 | size = int_maxpacket * (int_mult + 1) * | ||
839 | (int_maxburst + 1); | ||
840 | break; | ||
841 | case USB_SPEED_HIGH: | ||
842 | size = int_maxpacket * (int_mult + 1); | ||
843 | break; | ||
844 | default: | ||
845 | size = int_maxpacket > 1023 ? | ||
846 | 1023 : int_maxpacket; | ||
847 | break; | ||
848 | } | ||
849 | ep = is_in ? ss->int_in_ep : ss->int_out_ep; | ||
850 | req = ss_alloc_ep_req(ep, size); | ||
851 | break; | ||
852 | default: | ||
853 | ep = is_in ? ss->in_ep : ss->out_ep; | 628 | ep = is_in ? ss->in_ep : ss->out_ep; |
854 | req = ss_alloc_ep_req(ep, 0); | 629 | req = ss_alloc_ep_req(ep, 0); |
855 | break; | ||
856 | } | 630 | } |
857 | 631 | ||
858 | if (!req) | 632 | if (!req) |
@@ -870,12 +644,12 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, | |||
870 | 644 | ||
871 | cdev = ss->function.config->cdev; | 645 | cdev = ss->function.config->cdev; |
872 | ERROR(cdev, "start %s%s %s --> %d\n", | 646 | ERROR(cdev, "start %s%s %s --> %d\n", |
873 | get_ep_string(ep_type), is_in ? "IN" : "OUT", | 647 | is_iso ? "ISO-" : "", is_in ? "IN" : "OUT", |
874 | ep->name, status); | 648 | ep->name, status); |
875 | free_ep_req(ep, req); | 649 | free_ep_req(ep, req); |
876 | } | 650 | } |
877 | 651 | ||
878 | if (!(ep_type == EP_ISOC)) | 652 | if (!is_iso) |
879 | break; | 653 | break; |
880 | } | 654 | } |
881 | 655 | ||
@@ -888,7 +662,7 @@ static void disable_source_sink(struct f_sourcesink *ss) | |||
888 | 662 | ||
889 | cdev = ss->function.config->cdev; | 663 | cdev = ss->function.config->cdev; |
890 | disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, | 664 | disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, |
891 | ss->iso_out_ep, ss->int_in_ep, ss->int_out_ep); | 665 | ss->iso_out_ep); |
892 | VDBG(cdev, "%s disabled\n", ss->function.name); | 666 | VDBG(cdev, "%s disabled\n", ss->function.name); |
893 | } | 667 | } |
894 | 668 | ||
@@ -900,62 +674,6 @@ enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss, | |||
900 | int speed = cdev->gadget->speed; | 674 | int speed = cdev->gadget->speed; |
901 | struct usb_ep *ep; | 675 | struct usb_ep *ep; |
902 | 676 | ||
903 | if (alt == 2) { | ||
904 | /* Configure for periodic interrupt endpoint */ | ||
905 | ep = ss->int_in_ep; | ||
906 | if (ep) { | ||
907 | result = config_ep_by_speed(cdev->gadget, | ||
908 | &(ss->function), ep); | ||
909 | if (result) | ||
910 | return result; | ||
911 | |||
912 | result = usb_ep_enable(ep); | ||
913 | if (result < 0) | ||
914 | return result; | ||
915 | |||
916 | ep->driver_data = ss; | ||
917 | result = source_sink_start_ep(ss, true, EP_INTERRUPT, | ||
918 | speed); | ||
919 | if (result < 0) { | ||
920 | fail1: | ||
921 | ep = ss->int_in_ep; | ||
922 | if (ep) { | ||
923 | usb_ep_disable(ep); | ||
924 | ep->driver_data = NULL; | ||
925 | } | ||
926 | return result; | ||
927 | } | ||
928 | } | ||
929 | |||
930 | /* | ||
931 | * one interrupt endpoint reads (sinks) anything OUT (from the | ||
932 | * host) | ||
933 | */ | ||
934 | ep = ss->int_out_ep; | ||
935 | if (ep) { | ||
936 | result = config_ep_by_speed(cdev->gadget, | ||
937 | &(ss->function), ep); | ||
938 | if (result) | ||
939 | goto fail1; | ||
940 | |||
941 | result = usb_ep_enable(ep); | ||
942 | if (result < 0) | ||
943 | goto fail1; | ||
944 | |||
945 | ep->driver_data = ss; | ||
946 | result = source_sink_start_ep(ss, false, EP_INTERRUPT, | ||
947 | speed); | ||
948 | if (result < 0) { | ||
949 | ep = ss->int_out_ep; | ||
950 | usb_ep_disable(ep); | ||
951 | ep->driver_data = NULL; | ||
952 | goto fail1; | ||
953 | } | ||
954 | } | ||
955 | |||
956 | goto out; | ||
957 | } | ||
958 | |||
959 | /* one bulk endpoint writes (sources) zeroes IN (to the host) */ | 677 | /* one bulk endpoint writes (sources) zeroes IN (to the host) */ |
960 | ep = ss->in_ep; | 678 | ep = ss->in_ep; |
961 | result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); | 679 | result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); |
@@ -966,7 +684,7 @@ fail1: | |||
966 | return result; | 684 | return result; |
967 | ep->driver_data = ss; | 685 | ep->driver_data = ss; |
968 | 686 | ||
969 | result = source_sink_start_ep(ss, true, EP_BULK, speed); | 687 | result = source_sink_start_ep(ss, true, false, speed); |
970 | if (result < 0) { | 688 | if (result < 0) { |
971 | fail: | 689 | fail: |
972 | ep = ss->in_ep; | 690 | ep = ss->in_ep; |
@@ -985,7 +703,7 @@ fail: | |||
985 | goto fail; | 703 | goto fail; |
986 | ep->driver_data = ss; | 704 | ep->driver_data = ss; |
987 | 705 | ||
988 | result = source_sink_start_ep(ss, false, EP_BULK, speed); | 706 | result = source_sink_start_ep(ss, false, false, speed); |
989 | if (result < 0) { | 707 | if (result < 0) { |
990 | fail2: | 708 | fail2: |
991 | ep = ss->out_ep; | 709 | ep = ss->out_ep; |
@@ -1008,7 +726,7 @@ fail2: | |||
1008 | goto fail2; | 726 | goto fail2; |
1009 | ep->driver_data = ss; | 727 | ep->driver_data = ss; |
1010 | 728 | ||
1011 | result = source_sink_start_ep(ss, true, EP_ISOC, speed); | 729 | result = source_sink_start_ep(ss, true, true, speed); |
1012 | if (result < 0) { | 730 | if (result < 0) { |
1013 | fail3: | 731 | fail3: |
1014 | ep = ss->iso_in_ep; | 732 | ep = ss->iso_in_ep; |
@@ -1031,14 +749,13 @@ fail3: | |||
1031 | goto fail3; | 749 | goto fail3; |
1032 | ep->driver_data = ss; | 750 | ep->driver_data = ss; |
1033 | 751 | ||
1034 | result = source_sink_start_ep(ss, false, EP_ISOC, speed); | 752 | result = source_sink_start_ep(ss, false, true, speed); |
1035 | if (result < 0) { | 753 | if (result < 0) { |
1036 | usb_ep_disable(ep); | 754 | usb_ep_disable(ep); |
1037 | ep->driver_data = NULL; | 755 | ep->driver_data = NULL; |
1038 | goto fail3; | 756 | goto fail3; |
1039 | } | 757 | } |
1040 | } | 758 | } |
1041 | |||
1042 | out: | 759 | out: |
1043 | ss->cur_alt = alt; | 760 | ss->cur_alt = alt; |
1044 | 761 | ||
@@ -1054,8 +771,6 @@ static int sourcesink_set_alt(struct usb_function *f, | |||
1054 | 771 | ||
1055 | if (ss->in_ep->driver_data) | 772 | if (ss->in_ep->driver_data) |
1056 | disable_source_sink(ss); | 773 | disable_source_sink(ss); |
1057 | else if (alt == 2 && ss->int_in_ep->driver_data) | ||
1058 | disable_source_sink(ss); | ||
1059 | return enable_source_sink(cdev, ss, alt); | 774 | return enable_source_sink(cdev, ss, alt); |
1060 | } | 775 | } |
1061 | 776 | ||
@@ -1168,10 +883,6 @@ static struct usb_function *source_sink_alloc_func( | |||
1168 | isoc_maxpacket = ss_opts->isoc_maxpacket; | 883 | isoc_maxpacket = ss_opts->isoc_maxpacket; |
1169 | isoc_mult = ss_opts->isoc_mult; | 884 | isoc_mult = ss_opts->isoc_mult; |
1170 | isoc_maxburst = ss_opts->isoc_maxburst; | 885 | isoc_maxburst = ss_opts->isoc_maxburst; |
1171 | int_interval = ss_opts->int_interval; | ||
1172 | int_maxpacket = ss_opts->int_maxpacket; | ||
1173 | int_mult = ss_opts->int_mult; | ||
1174 | int_maxburst = ss_opts->int_maxburst; | ||
1175 | buflen = ss_opts->bulk_buflen; | 886 | buflen = ss_opts->bulk_buflen; |
1176 | 887 | ||
1177 | ss->function.name = "source/sink"; | 888 | ss->function.name = "source/sink"; |
@@ -1468,182 +1179,6 @@ static struct f_ss_opts_attribute f_ss_opts_bulk_buflen = | |||
1468 | f_ss_opts_bulk_buflen_show, | 1179 | f_ss_opts_bulk_buflen_show, |
1469 | f_ss_opts_bulk_buflen_store); | 1180 | f_ss_opts_bulk_buflen_store); |
1470 | 1181 | ||
1471 | static ssize_t f_ss_opts_int_interval_show(struct f_ss_opts *opts, char *page) | ||
1472 | { | ||
1473 | int result; | ||
1474 | |||
1475 | mutex_lock(&opts->lock); | ||
1476 | result = sprintf(page, "%u", opts->int_interval); | ||
1477 | mutex_unlock(&opts->lock); | ||
1478 | |||
1479 | return result; | ||
1480 | } | ||
1481 | |||
1482 | static ssize_t f_ss_opts_int_interval_store(struct f_ss_opts *opts, | ||
1483 | const char *page, size_t len) | ||
1484 | { | ||
1485 | int ret; | ||
1486 | u32 num; | ||
1487 | |||
1488 | mutex_lock(&opts->lock); | ||
1489 | if (opts->refcnt) { | ||
1490 | ret = -EBUSY; | ||
1491 | goto end; | ||
1492 | } | ||
1493 | |||
1494 | ret = kstrtou32(page, 0, &num); | ||
1495 | if (ret) | ||
1496 | goto end; | ||
1497 | |||
1498 | if (num > 4096) { | ||
1499 | ret = -EINVAL; | ||
1500 | goto end; | ||
1501 | } | ||
1502 | |||
1503 | opts->int_interval = num; | ||
1504 | ret = len; | ||
1505 | end: | ||
1506 | mutex_unlock(&opts->lock); | ||
1507 | return ret; | ||
1508 | } | ||
1509 | |||
1510 | static struct f_ss_opts_attribute f_ss_opts_int_interval = | ||
1511 | __CONFIGFS_ATTR(int_interval, S_IRUGO | S_IWUSR, | ||
1512 | f_ss_opts_int_interval_show, | ||
1513 | f_ss_opts_int_interval_store); | ||
1514 | |||
1515 | static ssize_t f_ss_opts_int_maxpacket_show(struct f_ss_opts *opts, char *page) | ||
1516 | { | ||
1517 | int result; | ||
1518 | |||
1519 | mutex_lock(&opts->lock); | ||
1520 | result = sprintf(page, "%u", opts->int_maxpacket); | ||
1521 | mutex_unlock(&opts->lock); | ||
1522 | |||
1523 | return result; | ||
1524 | } | ||
1525 | |||
1526 | static ssize_t f_ss_opts_int_maxpacket_store(struct f_ss_opts *opts, | ||
1527 | const char *page, size_t len) | ||
1528 | { | ||
1529 | int ret; | ||
1530 | u16 num; | ||
1531 | |||
1532 | mutex_lock(&opts->lock); | ||
1533 | if (opts->refcnt) { | ||
1534 | ret = -EBUSY; | ||
1535 | goto end; | ||
1536 | } | ||
1537 | |||
1538 | ret = kstrtou16(page, 0, &num); | ||
1539 | if (ret) | ||
1540 | goto end; | ||
1541 | |||
1542 | if (num > 1024) { | ||
1543 | ret = -EINVAL; | ||
1544 | goto end; | ||
1545 | } | ||
1546 | |||
1547 | opts->int_maxpacket = num; | ||
1548 | ret = len; | ||
1549 | end: | ||
1550 | mutex_unlock(&opts->lock); | ||
1551 | return ret; | ||
1552 | } | ||
1553 | |||
1554 | static struct f_ss_opts_attribute f_ss_opts_int_maxpacket = | ||
1555 | __CONFIGFS_ATTR(int_maxpacket, S_IRUGO | S_IWUSR, | ||
1556 | f_ss_opts_int_maxpacket_show, | ||
1557 | f_ss_opts_int_maxpacket_store); | ||
1558 | |||
1559 | static ssize_t f_ss_opts_int_mult_show(struct f_ss_opts *opts, char *page) | ||
1560 | { | ||
1561 | int result; | ||
1562 | |||
1563 | mutex_lock(&opts->lock); | ||
1564 | result = sprintf(page, "%u", opts->int_mult); | ||
1565 | mutex_unlock(&opts->lock); | ||
1566 | |||
1567 | return result; | ||
1568 | } | ||
1569 | |||
1570 | static ssize_t f_ss_opts_int_mult_store(struct f_ss_opts *opts, | ||
1571 | const char *page, size_t len) | ||
1572 | { | ||
1573 | int ret; | ||
1574 | u8 num; | ||
1575 | |||
1576 | mutex_lock(&opts->lock); | ||
1577 | if (opts->refcnt) { | ||
1578 | ret = -EBUSY; | ||
1579 | goto end; | ||
1580 | } | ||
1581 | |||
1582 | ret = kstrtou8(page, 0, &num); | ||
1583 | if (ret) | ||
1584 | goto end; | ||
1585 | |||
1586 | if (num > 2) { | ||
1587 | ret = -EINVAL; | ||
1588 | goto end; | ||
1589 | } | ||
1590 | |||
1591 | opts->int_mult = num; | ||
1592 | ret = len; | ||
1593 | end: | ||
1594 | mutex_unlock(&opts->lock); | ||
1595 | return ret; | ||
1596 | } | ||
1597 | |||
1598 | static struct f_ss_opts_attribute f_ss_opts_int_mult = | ||
1599 | __CONFIGFS_ATTR(int_mult, S_IRUGO | S_IWUSR, | ||
1600 | f_ss_opts_int_mult_show, | ||
1601 | f_ss_opts_int_mult_store); | ||
1602 | |||
1603 | static ssize_t f_ss_opts_int_maxburst_show(struct f_ss_opts *opts, char *page) | ||
1604 | { | ||
1605 | int result; | ||
1606 | |||
1607 | mutex_lock(&opts->lock); | ||
1608 | result = sprintf(page, "%u", opts->int_maxburst); | ||
1609 | mutex_unlock(&opts->lock); | ||
1610 | |||
1611 | return result; | ||
1612 | } | ||
1613 | |||
1614 | static ssize_t f_ss_opts_int_maxburst_store(struct f_ss_opts *opts, | ||
1615 | const char *page, size_t len) | ||
1616 | { | ||
1617 | int ret; | ||
1618 | u8 num; | ||
1619 | |||
1620 | mutex_lock(&opts->lock); | ||
1621 | if (opts->refcnt) { | ||
1622 | ret = -EBUSY; | ||
1623 | goto end; | ||
1624 | } | ||
1625 | |||
1626 | ret = kstrtou8(page, 0, &num); | ||
1627 | if (ret) | ||
1628 | goto end; | ||
1629 | |||
1630 | if (num > 15) { | ||
1631 | ret = -EINVAL; | ||
1632 | goto end; | ||
1633 | } | ||
1634 | |||
1635 | opts->int_maxburst = num; | ||
1636 | ret = len; | ||
1637 | end: | ||
1638 | mutex_unlock(&opts->lock); | ||
1639 | return ret; | ||
1640 | } | ||
1641 | |||
1642 | static struct f_ss_opts_attribute f_ss_opts_int_maxburst = | ||
1643 | __CONFIGFS_ATTR(int_maxburst, S_IRUGO | S_IWUSR, | ||
1644 | f_ss_opts_int_maxburst_show, | ||
1645 | f_ss_opts_int_maxburst_store); | ||
1646 | |||
1647 | static struct configfs_attribute *ss_attrs[] = { | 1182 | static struct configfs_attribute *ss_attrs[] = { |
1648 | &f_ss_opts_pattern.attr, | 1183 | &f_ss_opts_pattern.attr, |
1649 | &f_ss_opts_isoc_interval.attr, | 1184 | &f_ss_opts_isoc_interval.attr, |
@@ -1651,10 +1186,6 @@ static struct configfs_attribute *ss_attrs[] = { | |||
1651 | &f_ss_opts_isoc_mult.attr, | 1186 | &f_ss_opts_isoc_mult.attr, |
1652 | &f_ss_opts_isoc_maxburst.attr, | 1187 | &f_ss_opts_isoc_maxburst.attr, |
1653 | &f_ss_opts_bulk_buflen.attr, | 1188 | &f_ss_opts_bulk_buflen.attr, |
1654 | &f_ss_opts_int_interval.attr, | ||
1655 | &f_ss_opts_int_maxpacket.attr, | ||
1656 | &f_ss_opts_int_mult.attr, | ||
1657 | &f_ss_opts_int_maxburst.attr, | ||
1658 | NULL, | 1189 | NULL, |
1659 | }; | 1190 | }; |
1660 | 1191 | ||
@@ -1684,8 +1215,6 @@ static struct usb_function_instance *source_sink_alloc_inst(void) | |||
1684 | ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; | 1215 | ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; |
1685 | ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; | 1216 | ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; |
1686 | ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; | 1217 | ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; |
1687 | ss_opts->int_interval = GZERO_INT_INTERVAL; | ||
1688 | ss_opts->int_maxpacket = GZERO_INT_MAXPACKET; | ||
1689 | 1218 | ||
1690 | config_group_init_type_name(&ss_opts->func_inst.group, "", | 1219 | config_group_init_type_name(&ss_opts->func_inst.group, "", |
1691 | &ss_func_type); | 1220 | &ss_func_type); |
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 33e16658e5cf..6d3eb8b00a48 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -54,7 +54,7 @@ | |||
54 | #define UNFLW_CTRL 8 | 54 | #define UNFLW_CTRL 8 |
55 | #define OVFLW_CTRL 10 | 55 | #define OVFLW_CTRL 10 |
56 | 56 | ||
57 | const char *uac2_name = "snd_uac2"; | 57 | static const char *uac2_name = "snd_uac2"; |
58 | 58 | ||
59 | struct uac2_req { | 59 | struct uac2_req { |
60 | struct uac2_rtd_params *pp; /* parent param */ | 60 | struct uac2_rtd_params *pp; /* parent param */ |
@@ -634,7 +634,7 @@ static struct usb_interface_descriptor std_ac_if_desc = { | |||
634 | }; | 634 | }; |
635 | 635 | ||
636 | /* Clock source for IN traffic */ | 636 | /* Clock source for IN traffic */ |
637 | struct uac_clock_source_descriptor in_clk_src_desc = { | 637 | static struct uac_clock_source_descriptor in_clk_src_desc = { |
638 | .bLength = sizeof in_clk_src_desc, | 638 | .bLength = sizeof in_clk_src_desc, |
639 | .bDescriptorType = USB_DT_CS_INTERFACE, | 639 | .bDescriptorType = USB_DT_CS_INTERFACE, |
640 | 640 | ||
@@ -646,7 +646,7 @@ struct uac_clock_source_descriptor in_clk_src_desc = { | |||
646 | }; | 646 | }; |
647 | 647 | ||
648 | /* Clock source for OUT traffic */ | 648 | /* Clock source for OUT traffic */ |
649 | struct uac_clock_source_descriptor out_clk_src_desc = { | 649 | static struct uac_clock_source_descriptor out_clk_src_desc = { |
650 | .bLength = sizeof out_clk_src_desc, | 650 | .bLength = sizeof out_clk_src_desc, |
651 | .bDescriptorType = USB_DT_CS_INTERFACE, | 651 | .bDescriptorType = USB_DT_CS_INTERFACE, |
652 | 652 | ||
@@ -658,7 +658,7 @@ struct uac_clock_source_descriptor out_clk_src_desc = { | |||
658 | }; | 658 | }; |
659 | 659 | ||
660 | /* Input Terminal for USB_OUT */ | 660 | /* Input Terminal for USB_OUT */ |
661 | struct uac2_input_terminal_descriptor usb_out_it_desc = { | 661 | static struct uac2_input_terminal_descriptor usb_out_it_desc = { |
662 | .bLength = sizeof usb_out_it_desc, | 662 | .bLength = sizeof usb_out_it_desc, |
663 | .bDescriptorType = USB_DT_CS_INTERFACE, | 663 | .bDescriptorType = USB_DT_CS_INTERFACE, |
664 | 664 | ||
@@ -672,7 +672,7 @@ struct uac2_input_terminal_descriptor usb_out_it_desc = { | |||
672 | }; | 672 | }; |
673 | 673 | ||
674 | /* Input Terminal for I/O-In */ | 674 | /* Input Terminal for I/O-In */ |
675 | struct uac2_input_terminal_descriptor io_in_it_desc = { | 675 | static struct uac2_input_terminal_descriptor io_in_it_desc = { |
676 | .bLength = sizeof io_in_it_desc, | 676 | .bLength = sizeof io_in_it_desc, |
677 | .bDescriptorType = USB_DT_CS_INTERFACE, | 677 | .bDescriptorType = USB_DT_CS_INTERFACE, |
678 | 678 | ||
@@ -686,7 +686,7 @@ struct uac2_input_terminal_descriptor io_in_it_desc = { | |||
686 | }; | 686 | }; |
687 | 687 | ||
688 | /* Ouput Terminal for USB_IN */ | 688 | /* Ouput Terminal for USB_IN */ |
689 | struct uac2_output_terminal_descriptor usb_in_ot_desc = { | 689 | static struct uac2_output_terminal_descriptor usb_in_ot_desc = { |
690 | .bLength = sizeof usb_in_ot_desc, | 690 | .bLength = sizeof usb_in_ot_desc, |
691 | .bDescriptorType = USB_DT_CS_INTERFACE, | 691 | .bDescriptorType = USB_DT_CS_INTERFACE, |
692 | 692 | ||
@@ -700,7 +700,7 @@ struct uac2_output_terminal_descriptor usb_in_ot_desc = { | |||
700 | }; | 700 | }; |
701 | 701 | ||
702 | /* Ouput Terminal for I/O-Out */ | 702 | /* Ouput Terminal for I/O-Out */ |
703 | struct uac2_output_terminal_descriptor io_out_ot_desc = { | 703 | static struct uac2_output_terminal_descriptor io_out_ot_desc = { |
704 | .bLength = sizeof io_out_ot_desc, | 704 | .bLength = sizeof io_out_ot_desc, |
705 | .bDescriptorType = USB_DT_CS_INTERFACE, | 705 | .bDescriptorType = USB_DT_CS_INTERFACE, |
706 | 706 | ||
@@ -713,7 +713,7 @@ struct uac2_output_terminal_descriptor io_out_ot_desc = { | |||
713 | .bmControls = (CONTROL_RDWR << COPY_CTRL), | 713 | .bmControls = (CONTROL_RDWR << COPY_CTRL), |
714 | }; | 714 | }; |
715 | 715 | ||
716 | struct uac2_ac_header_descriptor ac_hdr_desc = { | 716 | static struct uac2_ac_header_descriptor ac_hdr_desc = { |
717 | .bLength = sizeof ac_hdr_desc, | 717 | .bLength = sizeof ac_hdr_desc, |
718 | .bDescriptorType = USB_DT_CS_INTERFACE, | 718 | .bDescriptorType = USB_DT_CS_INTERFACE, |
719 | 719 | ||
@@ -751,7 +751,7 @@ static struct usb_interface_descriptor std_as_out_if1_desc = { | |||
751 | }; | 751 | }; |
752 | 752 | ||
753 | /* Audio Stream OUT Intface Desc */ | 753 | /* Audio Stream OUT Intface Desc */ |
754 | struct uac2_as_header_descriptor as_out_hdr_desc = { | 754 | static struct uac2_as_header_descriptor as_out_hdr_desc = { |
755 | .bLength = sizeof as_out_hdr_desc, | 755 | .bLength = sizeof as_out_hdr_desc, |
756 | .bDescriptorType = USB_DT_CS_INTERFACE, | 756 | .bDescriptorType = USB_DT_CS_INTERFACE, |
757 | 757 | ||
@@ -764,7 +764,7 @@ struct uac2_as_header_descriptor as_out_hdr_desc = { | |||
764 | }; | 764 | }; |
765 | 765 | ||
766 | /* Audio USB_OUT Format */ | 766 | /* Audio USB_OUT Format */ |
767 | struct uac2_format_type_i_descriptor as_out_fmt1_desc = { | 767 | static struct uac2_format_type_i_descriptor as_out_fmt1_desc = { |
768 | .bLength = sizeof as_out_fmt1_desc, | 768 | .bLength = sizeof as_out_fmt1_desc, |
769 | .bDescriptorType = USB_DT_CS_INTERFACE, | 769 | .bDescriptorType = USB_DT_CS_INTERFACE, |
770 | .bDescriptorSubtype = UAC_FORMAT_TYPE, | 770 | .bDescriptorSubtype = UAC_FORMAT_TYPE, |
@@ -772,7 +772,7 @@ struct uac2_format_type_i_descriptor as_out_fmt1_desc = { | |||
772 | }; | 772 | }; |
773 | 773 | ||
774 | /* STD AS ISO OUT Endpoint */ | 774 | /* STD AS ISO OUT Endpoint */ |
775 | struct usb_endpoint_descriptor fs_epout_desc = { | 775 | static struct usb_endpoint_descriptor fs_epout_desc = { |
776 | .bLength = USB_DT_ENDPOINT_SIZE, | 776 | .bLength = USB_DT_ENDPOINT_SIZE, |
777 | .bDescriptorType = USB_DT_ENDPOINT, | 777 | .bDescriptorType = USB_DT_ENDPOINT, |
778 | 778 | ||
@@ -782,7 +782,7 @@ struct usb_endpoint_descriptor fs_epout_desc = { | |||
782 | .bInterval = 1, | 782 | .bInterval = 1, |
783 | }; | 783 | }; |
784 | 784 | ||
785 | struct usb_endpoint_descriptor hs_epout_desc = { | 785 | static struct usb_endpoint_descriptor hs_epout_desc = { |
786 | .bLength = USB_DT_ENDPOINT_SIZE, | 786 | .bLength = USB_DT_ENDPOINT_SIZE, |
787 | .bDescriptorType = USB_DT_ENDPOINT, | 787 | .bDescriptorType = USB_DT_ENDPOINT, |
788 | 788 | ||
@@ -828,7 +828,7 @@ static struct usb_interface_descriptor std_as_in_if1_desc = { | |||
828 | }; | 828 | }; |
829 | 829 | ||
830 | /* Audio Stream IN Intface Desc */ | 830 | /* Audio Stream IN Intface Desc */ |
831 | struct uac2_as_header_descriptor as_in_hdr_desc = { | 831 | static struct uac2_as_header_descriptor as_in_hdr_desc = { |
832 | .bLength = sizeof as_in_hdr_desc, | 832 | .bLength = sizeof as_in_hdr_desc, |
833 | .bDescriptorType = USB_DT_CS_INTERFACE, | 833 | .bDescriptorType = USB_DT_CS_INTERFACE, |
834 | 834 | ||
@@ -841,7 +841,7 @@ struct uac2_as_header_descriptor as_in_hdr_desc = { | |||
841 | }; | 841 | }; |
842 | 842 | ||
843 | /* Audio USB_IN Format */ | 843 | /* Audio USB_IN Format */ |
844 | struct uac2_format_type_i_descriptor as_in_fmt1_desc = { | 844 | static struct uac2_format_type_i_descriptor as_in_fmt1_desc = { |
845 | .bLength = sizeof as_in_fmt1_desc, | 845 | .bLength = sizeof as_in_fmt1_desc, |
846 | .bDescriptorType = USB_DT_CS_INTERFACE, | 846 | .bDescriptorType = USB_DT_CS_INTERFACE, |
847 | .bDescriptorSubtype = UAC_FORMAT_TYPE, | 847 | .bDescriptorSubtype = UAC_FORMAT_TYPE, |
@@ -849,7 +849,7 @@ struct uac2_format_type_i_descriptor as_in_fmt1_desc = { | |||
849 | }; | 849 | }; |
850 | 850 | ||
851 | /* STD AS ISO IN Endpoint */ | 851 | /* STD AS ISO IN Endpoint */ |
852 | struct usb_endpoint_descriptor fs_epin_desc = { | 852 | static struct usb_endpoint_descriptor fs_epin_desc = { |
853 | .bLength = USB_DT_ENDPOINT_SIZE, | 853 | .bLength = USB_DT_ENDPOINT_SIZE, |
854 | .bDescriptorType = USB_DT_ENDPOINT, | 854 | .bDescriptorType = USB_DT_ENDPOINT, |
855 | 855 | ||
@@ -859,7 +859,7 @@ struct usb_endpoint_descriptor fs_epin_desc = { | |||
859 | .bInterval = 1, | 859 | .bInterval = 1, |
860 | }; | 860 | }; |
861 | 861 | ||
862 | struct usb_endpoint_descriptor hs_epin_desc = { | 862 | static struct usb_endpoint_descriptor hs_epin_desc = { |
863 | .bLength = USB_DT_ENDPOINT_SIZE, | 863 | .bLength = USB_DT_ENDPOINT_SIZE, |
864 | .bDescriptorType = USB_DT_ENDPOINT, | 864 | .bDescriptorType = USB_DT_ENDPOINT, |
865 | 865 | ||
@@ -1563,7 +1563,7 @@ static void afunc_unbind(struct usb_configuration *c, struct usb_function *f) | |||
1563 | agdev->out_ep->driver_data = NULL; | 1563 | agdev->out_ep->driver_data = NULL; |
1564 | } | 1564 | } |
1565 | 1565 | ||
1566 | struct usb_function *afunc_alloc(struct usb_function_instance *fi) | 1566 | static struct usb_function *afunc_alloc(struct usb_function_instance *fi) |
1567 | { | 1567 | { |
1568 | struct audio_dev *agdev; | 1568 | struct audio_dev *agdev; |
1569 | struct f_uac2_opts *opts; | 1569 | struct f_uac2_opts *opts; |
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h
index 2ce28b9d97cc..15f180904f8a 100644
--- a/drivers/usb/gadget/function/g_zero.h
+++ b/drivers/usb/gadget/function/g_zero.h
@@ -10,8 +10,6 @@ | |||
10 | #define GZERO_QLEN 32 | 10 | #define GZERO_QLEN 32 |
11 | #define GZERO_ISOC_INTERVAL 4 | 11 | #define GZERO_ISOC_INTERVAL 4 |
12 | #define GZERO_ISOC_MAXPACKET 1024 | 12 | #define GZERO_ISOC_MAXPACKET 1024 |
13 | #define GZERO_INT_INTERVAL 1 /* Default interrupt interval = 1 ms */ | ||
14 | #define GZERO_INT_MAXPACKET 1024 | ||
15 | 13 | ||
16 | struct usb_zero_options { | 14 | struct usb_zero_options { |
17 | unsigned pattern; | 15 | unsigned pattern; |
@@ -19,10 +17,6 @@ struct usb_zero_options { | |||
19 | unsigned isoc_maxpacket; | 17 | unsigned isoc_maxpacket; |
20 | unsigned isoc_mult; | 18 | unsigned isoc_mult; |
21 | unsigned isoc_maxburst; | 19 | unsigned isoc_maxburst; |
22 | unsigned int_interval; /* In ms */ | ||
23 | unsigned int_maxpacket; | ||
24 | unsigned int_mult; | ||
25 | unsigned int_maxburst; | ||
26 | unsigned bulk_buflen; | 20 | unsigned bulk_buflen; |
27 | unsigned qlen; | 21 | unsigned qlen; |
28 | }; | 22 | }; |
@@ -34,10 +28,6 @@ struct f_ss_opts { | |||
34 | unsigned isoc_maxpacket; | 28 | unsigned isoc_maxpacket; |
35 | unsigned isoc_mult; | 29 | unsigned isoc_mult; |
36 | unsigned isoc_maxburst; | 30 | unsigned isoc_maxburst; |
37 | unsigned int_interval; /* In ms */ | ||
38 | unsigned int_maxpacket; | ||
39 | unsigned int_mult; | ||
40 | unsigned int_maxburst; | ||
41 | unsigned bulk_buflen; | 31 | unsigned bulk_buflen; |
42 | 32 | ||
43 | /* | 33 | /* |
@@ -72,7 +62,6 @@ int lb_modinit(void); | |||
72 | void free_ep_req(struct usb_ep *ep, struct usb_request *req); | 62 | void free_ep_req(struct usb_ep *ep, struct usb_request *req); |
73 | void disable_endpoints(struct usb_composite_dev *cdev, | 63 | void disable_endpoints(struct usb_composite_dev *cdev, |
74 | struct usb_ep *in, struct usb_ep *out, | 64 | struct usb_ep *in, struct usb_ep *out, |
75 | struct usb_ep *iso_in, struct usb_ep *iso_out, | 65 | struct usb_ep *iso_in, struct usb_ep *iso_out); |
76 | struct usb_ep *int_in, struct usb_ep *int_out); | ||
77 | 66 | ||
78 | #endif /* __G_ZERO_H */ | 67 | #endif /* __G_ZERO_H */ |
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 5aad7fededa5..8b818fd027b3 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -27,6 +27,7 @@ | |||
27 | #include "uvc.h" | 27 | #include "uvc.h" |
28 | #include "uvc_queue.h" | 28 | #include "uvc_queue.h" |
29 | #include "uvc_video.h" | 29 | #include "uvc_video.h" |
30 | #include "uvc_v4l2.h" | ||
30 | 31 | ||
31 | /* -------------------------------------------------------------------------- | 32 | /* -------------------------------------------------------------------------- |
32 | * Requests handling | 33 | * Requests handling |
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 9cb86bc1a9a5..50a5e637ca35 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include "uvc.h" | 22 | #include "uvc.h" |
23 | #include "uvc_queue.h" | 23 | #include "uvc_queue.h" |
24 | #include "uvc_video.h" | ||
24 | 25 | ||
25 | /* -------------------------------------------------------------------------- | 26 | /* -------------------------------------------------------------------------- |
26 | * Video codecs | 27 | * Video codecs |
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 06acfa55864a..b01b88e1b716 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -133,7 +133,9 @@ struct gfs_configuration { | |||
133 | struct usb_configuration c; | 133 | struct usb_configuration c; |
134 | int (*eth)(struct usb_configuration *c); | 134 | int (*eth)(struct usb_configuration *c); |
135 | int num; | 135 | int num; |
136 | } gfs_configurations[] = { | 136 | }; |
137 | |||
138 | static struct gfs_configuration gfs_configurations[] = { | ||
137 | #ifdef CONFIG_USB_FUNCTIONFS_RNDIS | 139 | #ifdef CONFIG_USB_FUNCTIONFS_RNDIS |
138 | { | 140 | { |
139 | .eth = bind_rndis_config, | 141 | .eth = bind_rndis_config, |
@@ -278,7 +280,7 @@ static void *functionfs_acquire_dev(struct ffs_dev *dev) | |||
278 | if (!try_module_get(THIS_MODULE)) | 280 | if (!try_module_get(THIS_MODULE)) |
279 | return ERR_PTR(-ENOENT); | 281 | return ERR_PTR(-ENOENT); |
280 | 282 | ||
281 | return 0; | 283 | return NULL; |
282 | } | 284 | } |
283 | 285 | ||
284 | static void functionfs_release_dev(struct ffs_dev *dev) | 286 | static void functionfs_release_dev(struct ffs_dev *dev) |
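
Note: functionfs_acquire_dev() returns a pointer, so its success path must return NULL rather than the integer 0, while failures are reported as an errno encoded with ERR_PTR() and distinguished by the caller with IS_ERR(). A hedged sketch of that <linux/err.h> convention; the function and caller names here are hypothetical, not the actual g_ffs.c/f_fs.c code:

    #include <linux/err.h>
    #include <linux/module.h>

    static void *my_acquire_dev(void)
    {
        if (!try_module_get(THIS_MODULE))
            return ERR_PTR(-ENOENT);    /* failure: encoded errno */
        return NULL;                    /* success: no private data */
    }

    static int my_use_dev(void)
    {
        void *priv = my_acquire_dev();

        if (IS_ERR(priv))
            return PTR_ERR(priv);       /* e.g. -ENOENT */
        /* NULL (or a real pointer) means success */
        return 0;
    }
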
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index db49ec4c748e..200f9a584064 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC); | |||
74 | MODULE_AUTHOR ("David Brownell"); | 74 | MODULE_AUTHOR ("David Brownell"); |
75 | MODULE_LICENSE ("GPL"); | 75 | MODULE_LICENSE ("GPL"); |
76 | 76 | ||
77 | static int ep_open(struct inode *, struct file *); | ||
78 | |||
77 | 79 | ||
78 | /*----------------------------------------------------------------------*/ | 80 | /*----------------------------------------------------------------------*/ |
79 | 81 | ||
@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req) | |||
283 | * still need dev->lock to use epdata->ep. | 285 | * still need dev->lock to use epdata->ep. |
284 | */ | 286 | */ |
285 | static int | 287 | static int |
286 | get_ready_ep (unsigned f_flags, struct ep_data *epdata) | 288 | get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write) |
287 | { | 289 | { |
288 | int val; | 290 | int val; |
289 | 291 | ||
290 | if (f_flags & O_NONBLOCK) { | 292 | if (f_flags & O_NONBLOCK) { |
291 | if (!mutex_trylock(&epdata->lock)) | 293 | if (!mutex_trylock(&epdata->lock)) |
292 | goto nonblock; | 294 | goto nonblock; |
293 | if (epdata->state != STATE_EP_ENABLED) { | 295 | if (epdata->state != STATE_EP_ENABLED && |
296 | (!is_write || epdata->state != STATE_EP_READY)) { | ||
294 | mutex_unlock(&epdata->lock); | 297 | mutex_unlock(&epdata->lock); |
295 | nonblock: | 298 | nonblock: |
296 | val = -EAGAIN; | 299 | val = -EAGAIN; |
@@ -305,18 +308,20 @@ nonblock: | |||
305 | 308 | ||
306 | switch (epdata->state) { | 309 | switch (epdata->state) { |
307 | case STATE_EP_ENABLED: | 310 | case STATE_EP_ENABLED: |
311 | return 0; | ||
312 | case STATE_EP_READY: /* not configured yet */ | ||
313 | if (is_write) | ||
314 | return 0; | ||
315 | // FALLTHRU | ||
316 | case STATE_EP_UNBOUND: /* clean disconnect */ | ||
308 | break; | 317 | break; |
309 | // case STATE_EP_DISABLED: /* "can't happen" */ | 318 | // case STATE_EP_DISABLED: /* "can't happen" */ |
310 | // case STATE_EP_READY: /* "can't happen" */ | ||
311 | default: /* error! */ | 319 | default: /* error! */ |
312 | pr_debug ("%s: ep %p not available, state %d\n", | 320 | pr_debug ("%s: ep %p not available, state %d\n", |
313 | shortname, epdata, epdata->state); | 321 | shortname, epdata, epdata->state); |
314 | // FALLTHROUGH | ||
315 | case STATE_EP_UNBOUND: /* clean disconnect */ | ||
316 | val = -ENODEV; | ||
317 | mutex_unlock(&epdata->lock); | ||
318 | } | 322 | } |
319 | return val; | 323 | mutex_unlock(&epdata->lock); |
324 | return -ENODEV; | ||
320 | } | 325 | } |
321 | 326 | ||
322 | static ssize_t | 327 | static ssize_t |
@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) | |||
363 | return value; | 368 | return value; |
364 | } | 369 | } |
365 | 370 | ||
366 | |||
367 | /* handle a synchronous OUT bulk/intr/iso transfer */ | ||
368 | static ssize_t | ||
369 | ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | ||
370 | { | ||
371 | struct ep_data *data = fd->private_data; | ||
372 | void *kbuf; | ||
373 | ssize_t value; | ||
374 | |||
375 | if ((value = get_ready_ep (fd->f_flags, data)) < 0) | ||
376 | return value; | ||
377 | |||
378 | /* halt any endpoint by doing a "wrong direction" i/o call */ | ||
379 | if (usb_endpoint_dir_in(&data->desc)) { | ||
380 | if (usb_endpoint_xfer_isoc(&data->desc)) { | ||
381 | mutex_unlock(&data->lock); | ||
382 | return -EINVAL; | ||
383 | } | ||
384 | DBG (data->dev, "%s halt\n", data->name); | ||
385 | spin_lock_irq (&data->dev->lock); | ||
386 | if (likely (data->ep != NULL)) | ||
387 | usb_ep_set_halt (data->ep); | ||
388 | spin_unlock_irq (&data->dev->lock); | ||
389 | mutex_unlock(&data->lock); | ||
390 | return -EBADMSG; | ||
391 | } | ||
392 | |||
393 | /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */ | ||
394 | |||
395 | value = -ENOMEM; | ||
396 | kbuf = kmalloc (len, GFP_KERNEL); | ||
397 | if (unlikely (!kbuf)) | ||
398 | goto free1; | ||
399 | |||
400 | value = ep_io (data, kbuf, len); | ||
401 | VDEBUG (data->dev, "%s read %zu OUT, status %d\n", | ||
402 | data->name, len, (int) value); | ||
403 | if (value >= 0 && copy_to_user (buf, kbuf, value)) | ||
404 | value = -EFAULT; | ||
405 | |||
406 | free1: | ||
407 | mutex_unlock(&data->lock); | ||
408 | kfree (kbuf); | ||
409 | return value; | ||
410 | } | ||
411 | |||
412 | /* handle a synchronous IN bulk/intr/iso transfer */ | ||
413 | static ssize_t | ||
414 | ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | ||
415 | { | ||
416 | struct ep_data *data = fd->private_data; | ||
417 | void *kbuf; | ||
418 | ssize_t value; | ||
419 | |||
420 | if ((value = get_ready_ep (fd->f_flags, data)) < 0) | ||
421 | return value; | ||
422 | |||
423 | /* halt any endpoint by doing a "wrong direction" i/o call */ | ||
424 | if (!usb_endpoint_dir_in(&data->desc)) { | ||
425 | if (usb_endpoint_xfer_isoc(&data->desc)) { | ||
426 | mutex_unlock(&data->lock); | ||
427 | return -EINVAL; | ||
428 | } | ||
429 | DBG (data->dev, "%s halt\n", data->name); | ||
430 | spin_lock_irq (&data->dev->lock); | ||
431 | if (likely (data->ep != NULL)) | ||
432 | usb_ep_set_halt (data->ep); | ||
433 | spin_unlock_irq (&data->dev->lock); | ||
434 | mutex_unlock(&data->lock); | ||
435 | return -EBADMSG; | ||
436 | } | ||
437 | |||
438 | /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */ | ||
439 | |||
440 | value = -ENOMEM; | ||
441 | kbuf = memdup_user(buf, len); | ||
442 | if (IS_ERR(kbuf)) { | ||
443 | value = PTR_ERR(kbuf); | ||
444 | kbuf = NULL; | ||
445 | goto free1; | ||
446 | } | ||
447 | |||
448 | value = ep_io (data, kbuf, len); | ||
449 | VDEBUG (data->dev, "%s write %zu IN, status %d\n", | ||
450 | data->name, len, (int) value); | ||
451 | free1: | ||
452 | mutex_unlock(&data->lock); | ||
453 | kfree (kbuf); | ||
454 | return value; | ||
455 | } | ||
456 | |||
457 | static int | 371 | static int |
458 | ep_release (struct inode *inode, struct file *fd) | 372 | ep_release (struct inode *inode, struct file *fd) |
459 | { | 373 | { |
@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value) | |||
481 | struct ep_data *data = fd->private_data; | 395 | struct ep_data *data = fd->private_data; |
482 | int status; | 396 | int status; |
483 | 397 | ||
484 | if ((status = get_ready_ep (fd->f_flags, data)) < 0) | 398 | if ((status = get_ready_ep (fd->f_flags, data, false)) < 0) |
485 | return status; | 399 | return status; |
486 | 400 | ||
487 | spin_lock_irq (&data->dev->lock); | 401 | spin_lock_irq (&data->dev->lock); |
@@ -517,8 +431,8 @@ struct kiocb_priv { | |||
517 | struct mm_struct *mm; | 431 | struct mm_struct *mm; |
518 | struct work_struct work; | 432 | struct work_struct work; |
519 | void *buf; | 433 | void *buf; |
520 | const struct iovec *iv; | 434 | struct iov_iter to; |
521 | unsigned long nr_segs; | 435 | const void *to_free; |
522 | unsigned actual; | 436 | unsigned actual; |
523 | }; | 437 | }; |
524 | 438 | ||
@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb) | |||
541 | return value; | 455 | return value; |
542 | } | 456 | } |
543 | 457 | ||
544 | static ssize_t ep_copy_to_user(struct kiocb_priv *priv) | ||
545 | { | ||
546 | ssize_t len, total; | ||
547 | void *to_copy; | ||
548 | int i; | ||
549 | |||
550 | /* copy stuff into user buffers */ | ||
551 | total = priv->actual; | ||
552 | len = 0; | ||
553 | to_copy = priv->buf; | ||
554 | for (i=0; i < priv->nr_segs; i++) { | ||
555 | ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total); | ||
556 | |||
557 | if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) { | ||
558 | if (len == 0) | ||
559 | len = -EFAULT; | ||
560 | break; | ||
561 | } | ||
562 | |||
563 | total -= this; | ||
564 | len += this; | ||
565 | to_copy += this; | ||
566 | if (total == 0) | ||
567 | break; | ||
568 | } | ||
569 | |||
570 | return len; | ||
571 | } | ||
572 | |||
573 | static void ep_user_copy_worker(struct work_struct *work) | 458 | static void ep_user_copy_worker(struct work_struct *work) |
574 | { | 459 | { |
575 | struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); | 460 | struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); |
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work) | |||
578 | size_t ret; | 463 | size_t ret; |
579 | 464 | ||
580 | use_mm(mm); | 465 | use_mm(mm); |
581 | ret = ep_copy_to_user(priv); | 466 | ret = copy_to_iter(priv->buf, priv->actual, &priv->to); |
582 | unuse_mm(mm); | 467 | unuse_mm(mm); |
468 | if (!ret) | ||
469 | ret = -EFAULT; | ||
583 | 470 | ||
584 | /* completing the iocb can drop the ctx and mm, don't touch mm after */ | 471 | /* completing the iocb can drop the ctx and mm, don't touch mm after */ |
585 | aio_complete(iocb, ret, ret); | 472 | aio_complete(iocb, ret, ret); |
586 | 473 | ||
587 | kfree(priv->buf); | 474 | kfree(priv->buf); |
475 | kfree(priv->to_free); | ||
588 | kfree(priv); | 476 | kfree(priv); |
589 | } | 477 | } |
590 | 478 | ||
@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
603 | * don't need to copy anything to userspace, so we can | 491 | * don't need to copy anything to userspace, so we can |
604 | * complete the aio request immediately. | 492 | * complete the aio request immediately. |
605 | */ | 493 | */ |
606 | if (priv->iv == NULL || unlikely(req->actual == 0)) { | 494 | if (priv->to_free == NULL || unlikely(req->actual == 0)) { |
607 | kfree(req->buf); | 495 | kfree(req->buf); |
496 | kfree(priv->to_free); | ||
608 | kfree(priv); | 497 | kfree(priv); |
609 | iocb->private = NULL; | 498 | iocb->private = NULL; |
610 | /* aio_complete() reports bytes-transferred _and_ faults */ | 499 | /* aio_complete() reports bytes-transferred _and_ faults */ |
@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
618 | 507 | ||
619 | priv->buf = req->buf; | 508 | priv->buf = req->buf; |
620 | priv->actual = req->actual; | 509 | priv->actual = req->actual; |
510 | INIT_WORK(&priv->work, ep_user_copy_worker); | ||
621 | schedule_work(&priv->work); | 511 | schedule_work(&priv->work); |
622 | } | 512 | } |
623 | spin_unlock(&epdata->dev->lock); | 513 | spin_unlock(&epdata->dev->lock); |
@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
626 | put_ep(epdata); | 516 | put_ep(epdata); |
627 | } | 517 | } |
628 | 518 | ||
629 | static ssize_t | 519 | static ssize_t ep_aio(struct kiocb *iocb, |
630 | ep_aio_rwtail( | 520 | struct kiocb_priv *priv, |
631 | struct kiocb *iocb, | 521 | struct ep_data *epdata, |
632 | char *buf, | 522 | char *buf, |
633 | size_t len, | 523 | size_t len) |
634 | struct ep_data *epdata, | ||
635 | const struct iovec *iv, | ||
636 | unsigned long nr_segs | ||
637 | ) | ||
638 | { | 524 | { |
639 | struct kiocb_priv *priv; | 525 | struct usb_request *req; |
640 | struct usb_request *req; | 526 | ssize_t value; |
641 | ssize_t value; | ||
642 | 527 | ||
643 | priv = kmalloc(sizeof *priv, GFP_KERNEL); | ||
644 | if (!priv) { | ||
645 | value = -ENOMEM; | ||
646 | fail: | ||
647 | kfree(buf); | ||
648 | return value; | ||
649 | } | ||
650 | iocb->private = priv; | 528 | iocb->private = priv; |
651 | priv->iocb = iocb; | 529 | priv->iocb = iocb; |
652 | priv->iv = iv; | ||
653 | priv->nr_segs = nr_segs; | ||
654 | INIT_WORK(&priv->work, ep_user_copy_worker); | ||
655 | |||
656 | value = get_ready_ep(iocb->ki_filp->f_flags, epdata); | ||
657 | if (unlikely(value < 0)) { | ||
658 | kfree(priv); | ||
659 | goto fail; | ||
660 | } | ||
661 | 530 | ||
662 | kiocb_set_cancel_fn(iocb, ep_aio_cancel); | 531 | kiocb_set_cancel_fn(iocb, ep_aio_cancel); |
663 | get_ep(epdata); | 532 | get_ep(epdata); |
@@ -669,75 +538,154 @@ fail: | |||
669 | * allocate or submit those if the host disconnected. | 538 | * allocate or submit those if the host disconnected. |
670 | */ | 539 | */ |
671 | spin_lock_irq(&epdata->dev->lock); | 540 | spin_lock_irq(&epdata->dev->lock); |
672 | if (likely(epdata->ep)) { | 541 | value = -ENODEV; |
673 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); | 542 | if (unlikely(epdata->ep)) |
674 | if (likely(req)) { | 543 | goto fail; |
675 | priv->req = req; | ||
676 | req->buf = buf; | ||
677 | req->length = len; | ||
678 | req->complete = ep_aio_complete; | ||
679 | req->context = iocb; | ||
680 | value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); | ||
681 | if (unlikely(0 != value)) | ||
682 | usb_ep_free_request(epdata->ep, req); | ||
683 | } else | ||
684 | value = -EAGAIN; | ||
685 | } else | ||
686 | value = -ENODEV; | ||
687 | spin_unlock_irq(&epdata->dev->lock); | ||
688 | 544 | ||
689 | mutex_unlock(&epdata->lock); | 545 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); |
546 | value = -ENOMEM; | ||
547 | if (unlikely(!req)) | ||
548 | goto fail; | ||
690 | 549 | ||
691 | if (unlikely(value)) { | 550 | priv->req = req; |
692 | kfree(priv); | 551 | req->buf = buf; |
693 | put_ep(epdata); | 552 | req->length = len; |
694 | } else | 553 | req->complete = ep_aio_complete; |
695 | value = -EIOCBQUEUED; | 554 | req->context = iocb; |
555 | value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); | ||
556 | if (unlikely(0 != value)) { | ||
557 | usb_ep_free_request(epdata->ep, req); | ||
558 | goto fail; | ||
559 | } | ||
560 | spin_unlock_irq(&epdata->dev->lock); | ||
561 | return -EIOCBQUEUED; | ||
562 | |||
563 | fail: | ||
564 | spin_unlock_irq(&epdata->dev->lock); | ||
565 | kfree(priv->to_free); | ||
566 | kfree(priv); | ||
567 | put_ep(epdata); | ||
696 | return value; | 568 | return value; |
697 | } | 569 | } |
698 | 570 | ||
699 | static ssize_t | 571 | static ssize_t |
700 | ep_aio_read(struct kiocb *iocb, const struct iovec *iov, | 572 | ep_read_iter(struct kiocb *iocb, struct iov_iter *to) |
701 | unsigned long nr_segs, loff_t o) | ||
702 | { | 573 | { |
703 | struct ep_data *epdata = iocb->ki_filp->private_data; | 574 | struct file *file = iocb->ki_filp; |
704 | char *buf; | 575 | struct ep_data *epdata = file->private_data; |
576 | size_t len = iov_iter_count(to); | ||
577 | ssize_t value; | ||
578 | char *buf; | ||
705 | 579 | ||
706 | if (unlikely(usb_endpoint_dir_in(&epdata->desc))) | 580 | if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0) |
707 | return -EINVAL; | 581 | return value; |
708 | 582 | ||
709 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); | 583 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
710 | if (unlikely(!buf)) | 584 | if (usb_endpoint_dir_in(&epdata->desc)) { |
711 | return -ENOMEM; | 585 | if (usb_endpoint_xfer_isoc(&epdata->desc) || |
586 | !is_sync_kiocb(iocb)) { | ||
587 | mutex_unlock(&epdata->lock); | ||
588 | return -EINVAL; | ||
589 | } | ||
590 | DBG (epdata->dev, "%s halt\n", epdata->name); | ||
591 | spin_lock_irq(&epdata->dev->lock); | ||
592 | if (likely(epdata->ep != NULL)) | ||
593 | usb_ep_set_halt(epdata->ep); | ||
594 | spin_unlock_irq(&epdata->dev->lock); | ||
595 | mutex_unlock(&epdata->lock); | ||
596 | return -EBADMSG; | ||
597 | } | ||
712 | 598 | ||
713 | return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); | 599 | buf = kmalloc(len, GFP_KERNEL); |
600 | if (unlikely(!buf)) { | ||
601 | mutex_unlock(&epdata->lock); | ||
602 | return -ENOMEM; | ||
603 | } | ||
604 | if (is_sync_kiocb(iocb)) { | ||
605 | value = ep_io(epdata, buf, len); | ||
606 | if (value >= 0 && copy_to_iter(buf, value, to)) | ||
607 | value = -EFAULT; | ||
608 | } else { | ||
609 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
610 | value = -ENOMEM; | ||
611 | if (!priv) | ||
612 | goto fail; | ||
613 | priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL); | ||
614 | if (!priv->to_free) { | ||
615 | kfree(priv); | ||
616 | goto fail; | ||
617 | } | ||
618 | value = ep_aio(iocb, priv, epdata, buf, len); | ||
619 | if (value == -EIOCBQUEUED) | ||
620 | buf = NULL; | ||
621 | } | ||
622 | fail: | ||
623 | kfree(buf); | ||
624 | mutex_unlock(&epdata->lock); | ||
625 | return value; | ||
714 | } | 626 | } |
715 | 627 | ||
628 | static ssize_t ep_config(struct ep_data *, const char *, size_t); | ||
629 | |||
716 | static ssize_t | 630 | static ssize_t |
717 | ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | 631 | ep_write_iter(struct kiocb *iocb, struct iov_iter *from) |
718 | unsigned long nr_segs, loff_t o) | ||
719 | { | 632 | { |
720 | struct ep_data *epdata = iocb->ki_filp->private_data; | 633 | struct file *file = iocb->ki_filp; |
721 | char *buf; | 634 | struct ep_data *epdata = file->private_data; |
722 | size_t len = 0; | 635 | size_t len = iov_iter_count(from); |
723 | int i = 0; | 636 | bool configured; |
637 | ssize_t value; | ||
638 | char *buf; | ||
639 | |||
640 | if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0) | ||
641 | return value; | ||
724 | 642 | ||
725 | if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) | 643 | configured = epdata->state == STATE_EP_ENABLED; |
726 | return -EINVAL; | ||
727 | 644 | ||
728 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); | 645 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
729 | if (unlikely(!buf)) | 646 | if (configured && !usb_endpoint_dir_in(&epdata->desc)) { |
647 | if (usb_endpoint_xfer_isoc(&epdata->desc) || | ||
648 | !is_sync_kiocb(iocb)) { | ||
649 | mutex_unlock(&epdata->lock); | ||
650 | return -EINVAL; | ||
651 | } | ||
652 | DBG (epdata->dev, "%s halt\n", epdata->name); | ||
653 | spin_lock_irq(&epdata->dev->lock); | ||
654 | if (likely(epdata->ep != NULL)) | ||
655 | usb_ep_set_halt(epdata->ep); | ||
656 | spin_unlock_irq(&epdata->dev->lock); | ||
657 | mutex_unlock(&epdata->lock); | ||
658 | return -EBADMSG; | ||
659 | } | ||
660 | |||
661 | buf = kmalloc(len, GFP_KERNEL); | ||
662 | if (unlikely(!buf)) { | ||
663 | mutex_unlock(&epdata->lock); | ||
730 | return -ENOMEM; | 664 | return -ENOMEM; |
665 | } | ||
731 | 666 | ||
732 | for (i=0; i < nr_segs; i++) { | 667 | if (unlikely(copy_from_iter(buf, len, from) != len)) { |
733 | if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, | 668 | value = -EFAULT; |
734 | iov[i].iov_len) != 0)) { | 669 | goto out; |
735 | kfree(buf); | 670 | } |
736 | return -EFAULT; | 671 | |
672 | if (unlikely(!configured)) { | ||
673 | value = ep_config(epdata, buf, len); | ||
674 | } else if (is_sync_kiocb(iocb)) { | ||
675 | value = ep_io(epdata, buf, len); | ||
676 | } else { | ||
677 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
678 | value = -ENOMEM; | ||
679 | if (priv) { | ||
680 | value = ep_aio(iocb, priv, epdata, buf, len); | ||
681 | if (value == -EIOCBQUEUED) | ||
682 | buf = NULL; | ||
737 | } | 683 | } |
738 | len += iov[i].iov_len; | ||
739 | } | 684 | } |
740 | return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); | 685 | out: |
686 | kfree(buf); | ||
687 | mutex_unlock(&epdata->lock); | ||
688 | return value; | ||
741 | } | 689 | } |
742 | 690 | ||
743 | /*----------------------------------------------------------------------*/ | 691 | /*----------------------------------------------------------------------*/ |
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
745 | /* used after endpoint configuration */ | 693 | /* used after endpoint configuration */ |
746 | static const struct file_operations ep_io_operations = { | 694 | static const struct file_operations ep_io_operations = { |
747 | .owner = THIS_MODULE, | 695 | .owner = THIS_MODULE, |
748 | .llseek = no_llseek, | ||
749 | 696 | ||
750 | .read = ep_read, | 697 | .open = ep_open, |
751 | .write = ep_write, | ||
752 | .unlocked_ioctl = ep_ioctl, | ||
753 | .release = ep_release, | 698 | .release = ep_release, |
754 | 699 | .llseek = no_llseek, | |
755 | .aio_read = ep_aio_read, | 700 | .read = new_sync_read, |
756 | .aio_write = ep_aio_write, | 701 | .write = new_sync_write, |
702 | .unlocked_ioctl = ep_ioctl, | ||
703 | .read_iter = ep_read_iter, | ||
704 | .write_iter = ep_write_iter, | ||
757 | }; | 705 | }; |
758 | 706 | ||
759 | /* ENDPOINT INITIALIZATION | 707 | /* ENDPOINT INITIALIZATION |
@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = { | |||
770 | * speed descriptor, then optional high speed descriptor. | 718 | * speed descriptor, then optional high speed descriptor. |
771 | */ | 719 | */ |
772 | static ssize_t | 720 | static ssize_t |
773 | ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | 721 | ep_config (struct ep_data *data, const char *buf, size_t len) |
774 | { | 722 | { |
775 | struct ep_data *data = fd->private_data; | ||
776 | struct usb_ep *ep; | 723 | struct usb_ep *ep; |
777 | u32 tag; | 724 | u32 tag; |
778 | int value, length = len; | 725 | int value, length = len; |
779 | 726 | ||
780 | value = mutex_lock_interruptible(&data->lock); | ||
781 | if (value < 0) | ||
782 | return value; | ||
783 | |||
784 | if (data->state != STATE_EP_READY) { | 727 | if (data->state != STATE_EP_READY) { |
785 | value = -EL2HLT; | 728 | value = -EL2HLT; |
786 | goto fail; | 729 | goto fail; |
@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
791 | goto fail0; | 734 | goto fail0; |
792 | 735 | ||
793 | /* we might need to change message format someday */ | 736 | /* we might need to change message format someday */ |
794 | if (copy_from_user (&tag, buf, 4)) { | 737 | memcpy(&tag, buf, 4); |
795 | goto fail1; | ||
796 | } | ||
797 | if (tag != 1) { | 738 | if (tag != 1) { |
798 | DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); | 739 | DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); |
799 | goto fail0; | 740 | goto fail0; |
@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
806 | */ | 747 | */ |
807 | 748 | ||
808 | /* full/low speed descriptor, then high speed */ | 749 | /* full/low speed descriptor, then high speed */ |
809 | if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { | 750 | memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE); |
810 | goto fail1; | ||
811 | } | ||
812 | if (data->desc.bLength != USB_DT_ENDPOINT_SIZE | 751 | if (data->desc.bLength != USB_DT_ENDPOINT_SIZE |
813 | || data->desc.bDescriptorType != USB_DT_ENDPOINT) | 752 | || data->desc.bDescriptorType != USB_DT_ENDPOINT) |
814 | goto fail0; | 753 | goto fail0; |
815 | if (len != USB_DT_ENDPOINT_SIZE) { | 754 | if (len != USB_DT_ENDPOINT_SIZE) { |
816 | if (len != 2 * USB_DT_ENDPOINT_SIZE) | 755 | if (len != 2 * USB_DT_ENDPOINT_SIZE) |
817 | goto fail0; | 756 | goto fail0; |
818 | if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, | 757 | memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, |
819 | USB_DT_ENDPOINT_SIZE)) { | 758 | USB_DT_ENDPOINT_SIZE); |
820 | goto fail1; | ||
821 | } | ||
822 | if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE | 759 | if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE |
823 | || data->hs_desc.bDescriptorType | 760 | || data->hs_desc.bDescriptorType |
824 | != USB_DT_ENDPOINT) { | 761 | != USB_DT_ENDPOINT) { |
@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
840 | case USB_SPEED_LOW: | 777 | case USB_SPEED_LOW: |
841 | case USB_SPEED_FULL: | 778 | case USB_SPEED_FULL: |
842 | ep->desc = &data->desc; | 779 | ep->desc = &data->desc; |
843 | value = usb_ep_enable(ep); | ||
844 | if (value == 0) | ||
845 | data->state = STATE_EP_ENABLED; | ||
846 | break; | 780 | break; |
847 | case USB_SPEED_HIGH: | 781 | case USB_SPEED_HIGH: |
848 | /* fails if caller didn't provide that descriptor... */ | 782 | /* fails if caller didn't provide that descriptor... */ |
849 | ep->desc = &data->hs_desc; | 783 | ep->desc = &data->hs_desc; |
850 | value = usb_ep_enable(ep); | ||
851 | if (value == 0) | ||
852 | data->state = STATE_EP_ENABLED; | ||
853 | break; | 784 | break; |
854 | default: | 785 | default: |
855 | DBG(data->dev, "unconnected, %s init abandoned\n", | 786 | DBG(data->dev, "unconnected, %s init abandoned\n", |
856 | data->name); | 787 | data->name); |
857 | value = -EINVAL; | 788 | value = -EINVAL; |
789 | goto gone; | ||
858 | } | 790 | } |
791 | value = usb_ep_enable(ep); | ||
859 | if (value == 0) { | 792 | if (value == 0) { |
860 | fd->f_op = &ep_io_operations; | 793 | data->state = STATE_EP_ENABLED; |
861 | value = length; | 794 | value = length; |
862 | } | 795 | } |
863 | gone: | 796 | gone: |
@@ -867,14 +800,10 @@ fail: | |||
867 | data->desc.bDescriptorType = 0; | 800 | data->desc.bDescriptorType = 0; |
868 | data->hs_desc.bDescriptorType = 0; | 801 | data->hs_desc.bDescriptorType = 0; |
869 | } | 802 | } |
870 | mutex_unlock(&data->lock); | ||
871 | return value; | 803 | return value; |
872 | fail0: | 804 | fail0: |
873 | value = -EINVAL; | 805 | value = -EINVAL; |
874 | goto fail; | 806 | goto fail; |
875 | fail1: | ||
876 | value = -EFAULT; | ||
877 | goto fail; | ||
878 | } | 807 | } |
879 | 808 | ||
880 | static int | 809 | static int |
@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd) | |||
902 | return value; | 831 | return value; |
903 | } | 832 | } |
904 | 833 | ||
905 | /* used before endpoint configuration */ | ||
906 | static const struct file_operations ep_config_operations = { | ||
907 | .llseek = no_llseek, | ||
908 | |||
909 | .open = ep_open, | ||
910 | .write = ep_config, | ||
911 | .release = ep_release, | ||
912 | }; | ||
913 | |||
914 | /*----------------------------------------------------------------------*/ | 834 | /*----------------------------------------------------------------------*/ |
915 | 835 | ||
916 | /* EP0 IMPLEMENTATION can be partly in userspace. | 836 | /* EP0 IMPLEMENTATION can be partly in userspace. |
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
989 | enum ep0_state state; | 909 | enum ep0_state state; |
990 | 910 | ||
991 | spin_lock_irq (&dev->lock); | 911 | spin_lock_irq (&dev->lock); |
912 | if (dev->state <= STATE_DEV_OPENED) { | ||
913 | retval = -EINVAL; | ||
914 | goto done; | ||
915 | } | ||
992 | 916 | ||
993 | /* report fd mode change before acting on it */ | 917 | /* report fd mode change before acting on it */ |
994 | if (dev->setup_abort) { | 918 | if (dev->setup_abort) { |
@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1187 | struct dev_data *dev = fd->private_data; | 1111 | struct dev_data *dev = fd->private_data; |
1188 | ssize_t retval = -ESRCH; | 1112 | ssize_t retval = -ESRCH; |
1189 | 1113 | ||
1190 | spin_lock_irq (&dev->lock); | ||
1191 | |||
1192 | /* report fd mode change before acting on it */ | 1114 | /* report fd mode change before acting on it */ |
1193 | if (dev->setup_abort) { | 1115 | if (dev->setup_abort) { |
1194 | dev->setup_abort = 0; | 1116 | dev->setup_abort = 0; |
@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1234 | } else | 1156 | } else |
1235 | DBG (dev, "fail %s, state %d\n", __func__, dev->state); | 1157 | DBG (dev, "fail %s, state %d\n", __func__, dev->state); |
1236 | 1158 | ||
1237 | spin_unlock_irq (&dev->lock); | ||
1238 | return retval; | 1159 | return retval; |
1239 | } | 1160 | } |
1240 | 1161 | ||
@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait) | |||
1281 | struct dev_data *dev = fd->private_data; | 1202 | struct dev_data *dev = fd->private_data; |
1282 | int mask = 0; | 1203 | int mask = 0; |
1283 | 1204 | ||
1205 | if (dev->state <= STATE_DEV_OPENED) | ||
1206 | return DEFAULT_POLLMASK; | ||
1207 | |||
1284 | poll_wait(fd, &dev->wait, wait); | 1208 | poll_wait(fd, &dev->wait, wait); |
1285 | 1209 | ||
1286 | spin_lock_irq (&dev->lock); | 1210 | spin_lock_irq (&dev->lock); |
@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value) | |||
1316 | return ret; | 1240 | return ret; |
1317 | } | 1241 | } |
1318 | 1242 | ||
1319 | /* used after device configuration */ | ||
1320 | static const struct file_operations ep0_io_operations = { | ||
1321 | .owner = THIS_MODULE, | ||
1322 | .llseek = no_llseek, | ||
1323 | |||
1324 | .read = ep0_read, | ||
1325 | .write = ep0_write, | ||
1326 | .fasync = ep0_fasync, | ||
1327 | .poll = ep0_poll, | ||
1328 | .unlocked_ioctl = dev_ioctl, | ||
1329 | .release = dev_release, | ||
1330 | }; | ||
1331 | |||
1332 | /*----------------------------------------------------------------------*/ | 1243 | /*----------------------------------------------------------------------*/ |
1333 | 1244 | ||
1334 | /* The in-kernel gadget driver handles most ep0 issues, in particular | 1245 | /* The in-kernel gadget driver handles most ep0 issues, in particular |
@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev) | |||
1650 | goto enomem1; | 1561 | goto enomem1; |
1651 | 1562 | ||
1652 | data->dentry = gadgetfs_create_file (dev->sb, data->name, | 1563 | data->dentry = gadgetfs_create_file (dev->sb, data->name, |
1653 | data, &ep_config_operations); | 1564 | data, &ep_io_operations); |
1654 | if (!data->dentry) | 1565 | if (!data->dentry) |
1655 | goto enomem2; | 1566 | goto enomem2; |
1656 | list_add_tail (&data->epfiles, &dev->epfiles); | 1567 | list_add_tail (&data->epfiles, &dev->epfiles); |
@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1852 | u32 tag; | 1763 | u32 tag; |
1853 | char *kbuf; | 1764 | char *kbuf; |
1854 | 1765 | ||
1766 | spin_lock_irq(&dev->lock); | ||
1767 | if (dev->state > STATE_DEV_OPENED) { | ||
1768 | value = ep0_write(fd, buf, len, ptr); | ||
1769 | spin_unlock_irq(&dev->lock); | ||
1770 | return value; | ||
1771 | } | ||
1772 | spin_unlock_irq(&dev->lock); | ||
1773 | |||
1855 | if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) | 1774 | if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) |
1856 | return -EINVAL; | 1775 | return -EINVAL; |
1857 | 1776 | ||
@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1925 | * on, they can work ... except in cleanup paths that | 1844 | * on, they can work ... except in cleanup paths that |
1926 | * kick in after the ep0 descriptor is closed. | 1845 | * kick in after the ep0 descriptor is closed. |
1927 | */ | 1846 | */ |
1928 | fd->f_op = &ep0_io_operations; | ||
1929 | value = len; | 1847 | value = len; |
1930 | } | 1848 | } |
1931 | return value; | 1849 | return value; |
@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd) | |||
1956 | return value; | 1874 | return value; |
1957 | } | 1875 | } |
1958 | 1876 | ||
1959 | static const struct file_operations dev_init_operations = { | 1877 | static const struct file_operations ep0_operations = { |
1960 | .llseek = no_llseek, | 1878 | .llseek = no_llseek, |
1961 | 1879 | ||
1962 | .open = dev_open, | 1880 | .open = dev_open, |
1881 | .read = ep0_read, | ||
1963 | .write = dev_config, | 1882 | .write = dev_config, |
1964 | .fasync = ep0_fasync, | 1883 | .fasync = ep0_fasync, |
1884 | .poll = ep0_poll, | ||
1965 | .unlocked_ioctl = dev_ioctl, | 1885 | .unlocked_ioctl = dev_ioctl, |
1966 | .release = dev_release, | 1886 | .release = dev_release, |
1967 | }; | 1887 | }; |
@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent) | |||
2077 | goto Enomem; | 1997 | goto Enomem; |
2078 | 1998 | ||
2079 | dev->sb = sb; | 1999 | dev->sb = sb; |
2080 | dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); | 2000 | dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations); |
2081 | if (!dev->dentry) { | 2001 | if (!dev->dentry) { |
2082 | put_dev(dev); | 2002 | put_dev(dev); |
2083 | goto Enomem; | 2003 | goto Enomem; |
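[Editor's note] The inode.c changes above move gadgetfs endpoint I/O from ->read/->write plus ->aio_read/->aio_write onto the ->read_iter/->write_iter interface: copy_to_iter()/copy_from_iter() replace the hand-rolled per-segment copy loops, and dup_iter() keeps a private copy of the iterator so the asynchronous completion worker can finish the copy later. A rough sketch of the synchronous half of such a conversion follows; my_dev and my_fill_buffer are hypothetical stand-ins, and the gadget-specific state machine, locking and AIO path are omitted.

  #include <linux/fs.h>
  #include <linux/slab.h>
  #include <linux/string.h>
  #include <linux/uio.h>

  struct my_dev { int dummy; };           /* hypothetical device state */

  /* Pretend the hardware produced len bytes of data. */
  static ssize_t my_fill_buffer(struct my_dev *dev, void *buf, size_t len)
  {
          memset(buf, 0, len);
          return len;
  }

  static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
  {
          struct my_dev *dev = iocb->ki_filp->private_data;
          size_t len = iov_iter_count(to);
          ssize_t ret;
          void *buf;

          buf = kmalloc(len, GFP_KERNEL);
          if (!buf)
                  return -ENOMEM;

          ret = my_fill_buffer(dev, buf, len);
          /* copy_to_iter() walks every user segment for us */
          if (ret > 0 && copy_to_iter(buf, ret, to) != ret)
                  ret = -EFAULT;

          kfree(buf);
          return ret;
  }

With a kernel of this era the plain read()/write() system calls are then routed through new_sync_read()/new_sync_write(), which is exactly what the reshuffled ep_io_operations table above does.
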
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index 3a494168661e..6e0a019aad54 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c | |||
@@ -1740,10 +1740,9 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name) | |||
1740 | goto err_session; | 1740 | goto err_session; |
1741 | } | 1741 | } |
1742 | /* | 1742 | /* |
1743 | * Now register the TCM vHost virtual I_T Nexus as active with the | 1743 | * Now register the TCM vHost virtual I_T Nexus as active. |
1744 | * call to __transport_register_session() | ||
1745 | */ | 1744 | */ |
1746 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, | 1745 | transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, |
1747 | tv_nexus->tvn_se_sess, tv_nexus); | 1746 | tv_nexus->tvn_se_sess, tv_nexus); |
1748 | tpg->tpg_nexus = tv_nexus; | 1747 | tpg->tpg_nexus = tv_nexus; |
1749 | mutex_unlock(&tpg->tpg_mutex); | 1748 | mutex_unlock(&tpg->tpg_mutex); |
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c index ff97ac93ac03..5ee95152493c 100644 --- a/drivers/usb/gadget/legacy/zero.c +++ b/drivers/usb/gadget/legacy/zero.c | |||
@@ -68,8 +68,6 @@ static struct usb_zero_options gzero_options = { | |||
68 | .isoc_maxpacket = GZERO_ISOC_MAXPACKET, | 68 | .isoc_maxpacket = GZERO_ISOC_MAXPACKET, |
69 | .bulk_buflen = GZERO_BULK_BUFLEN, | 69 | .bulk_buflen = GZERO_BULK_BUFLEN, |
70 | .qlen = GZERO_QLEN, | 70 | .qlen = GZERO_QLEN, |
71 | .int_interval = GZERO_INT_INTERVAL, | ||
72 | .int_maxpacket = GZERO_INT_MAXPACKET, | ||
73 | }; | 71 | }; |
74 | 72 | ||
75 | /*-------------------------------------------------------------------------*/ | 73 | /*-------------------------------------------------------------------------*/ |
@@ -268,21 +266,6 @@ module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint, | |||
268 | S_IRUGO|S_IWUSR); | 266 | S_IRUGO|S_IWUSR); |
269 | MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); | 267 | MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); |
270 | 268 | ||
271 | module_param_named(int_interval, gzero_options.int_interval, uint, | ||
272 | S_IRUGO|S_IWUSR); | ||
273 | MODULE_PARM_DESC(int_interval, "1 - 16"); | ||
274 | |||
275 | module_param_named(int_maxpacket, gzero_options.int_maxpacket, uint, | ||
276 | S_IRUGO|S_IWUSR); | ||
277 | MODULE_PARM_DESC(int_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)"); | ||
278 | |||
279 | module_param_named(int_mult, gzero_options.int_mult, uint, S_IRUGO|S_IWUSR); | ||
280 | MODULE_PARM_DESC(int_mult, "0 - 2 (hs/ss only)"); | ||
281 | |||
282 | module_param_named(int_maxburst, gzero_options.int_maxburst, uint, | ||
283 | S_IRUGO|S_IWUSR); | ||
284 | MODULE_PARM_DESC(int_maxburst, "0 - 15 (ss only)"); | ||
285 | |||
286 | static struct usb_function *func_lb; | 269 | static struct usb_function *func_lb; |
287 | static struct usb_function_instance *func_inst_lb; | 270 | static struct usb_function_instance *func_inst_lb; |
288 | 271 | ||
@@ -318,10 +301,6 @@ static int __init zero_bind(struct usb_composite_dev *cdev) | |||
318 | ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; | 301 | ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; |
319 | ss_opts->isoc_mult = gzero_options.isoc_mult; | 302 | ss_opts->isoc_mult = gzero_options.isoc_mult; |
320 | ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; | 303 | ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; |
321 | ss_opts->int_interval = gzero_options.int_interval; | ||
322 | ss_opts->int_maxpacket = gzero_options.int_maxpacket; | ||
323 | ss_opts->int_mult = gzero_options.int_mult; | ||
324 | ss_opts->int_maxburst = gzero_options.int_maxburst; | ||
325 | ss_opts->bulk_buflen = gzero_options.bulk_buflen; | 304 | ss_opts->bulk_buflen = gzero_options.bulk_buflen; |
326 | 305 | ||
327 | func_ss = usb_get_function(func_inst_ss); | 306 | func_ss = usb_get_function(func_inst_ss); |
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c index 663f7908b15c..be0964a801e8 100644 --- a/drivers/usb/host/ehci-atmel.c +++ b/drivers/usb/host/ehci-atmel.c | |||
@@ -34,7 +34,6 @@ static const char hcd_name[] = "ehci-atmel"; | |||
34 | 34 | ||
35 | struct atmel_ehci_priv { | 35 | struct atmel_ehci_priv { |
36 | struct clk *iclk; | 36 | struct clk *iclk; |
37 | struct clk *fclk; | ||
38 | struct clk *uclk; | 37 | struct clk *uclk; |
39 | bool clocked; | 38 | bool clocked; |
40 | }; | 39 | }; |
@@ -51,12 +50,9 @@ static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci) | |||
51 | { | 50 | { |
52 | if (atmel_ehci->clocked) | 51 | if (atmel_ehci->clocked) |
53 | return; | 52 | return; |
54 | if (IS_ENABLED(CONFIG_COMMON_CLK)) { | 53 | |
55 | clk_set_rate(atmel_ehci->uclk, 48000000); | 54 | clk_prepare_enable(atmel_ehci->uclk); |
56 | clk_prepare_enable(atmel_ehci->uclk); | ||
57 | } | ||
58 | clk_prepare_enable(atmel_ehci->iclk); | 55 | clk_prepare_enable(atmel_ehci->iclk); |
59 | clk_prepare_enable(atmel_ehci->fclk); | ||
60 | atmel_ehci->clocked = true; | 56 | atmel_ehci->clocked = true; |
61 | } | 57 | } |
62 | 58 | ||
@@ -64,10 +60,9 @@ static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci) | |||
64 | { | 60 | { |
65 | if (!atmel_ehci->clocked) | 61 | if (!atmel_ehci->clocked) |
66 | return; | 62 | return; |
67 | clk_disable_unprepare(atmel_ehci->fclk); | 63 | |
68 | clk_disable_unprepare(atmel_ehci->iclk); | 64 | clk_disable_unprepare(atmel_ehci->iclk); |
69 | if (IS_ENABLED(CONFIG_COMMON_CLK)) | 65 | clk_disable_unprepare(atmel_ehci->uclk); |
70 | clk_disable_unprepare(atmel_ehci->uclk); | ||
71 | atmel_ehci->clocked = false; | 66 | atmel_ehci->clocked = false; |
72 | } | 67 | } |
73 | 68 | ||
@@ -146,20 +141,13 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev) | |||
146 | retval = -ENOENT; | 141 | retval = -ENOENT; |
147 | goto fail_request_resource; | 142 | goto fail_request_resource; |
148 | } | 143 | } |
149 | atmel_ehci->fclk = devm_clk_get(&pdev->dev, "uhpck"); | 144 | |
150 | if (IS_ERR(atmel_ehci->fclk)) { | 145 | atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk"); |
151 | dev_err(&pdev->dev, "Error getting function clock\n"); | 146 | if (IS_ERR(atmel_ehci->uclk)) { |
152 | retval = -ENOENT; | 147 | dev_err(&pdev->dev, "failed to get uclk\n"); |
148 | retval = PTR_ERR(atmel_ehci->uclk); | ||
153 | goto fail_request_resource; | 149 | goto fail_request_resource; |
154 | } | 150 | } |
155 | if (IS_ENABLED(CONFIG_COMMON_CLK)) { | ||
156 | atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk"); | ||
157 | if (IS_ERR(atmel_ehci->uclk)) { | ||
158 | dev_err(&pdev->dev, "failed to get uclk\n"); | ||
159 | retval = PTR_ERR(atmel_ehci->uclk); | ||
160 | goto fail_request_resource; | ||
161 | } | ||
162 | } | ||
163 | 151 | ||
164 | ehci = hcd_to_ehci(hcd); | 152 | ehci = hcd_to_ehci(hcd); |
165 | /* registers start at offset 0x0 */ | 153 | /* registers start at offset 0x0 */ |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index a7865c4b0498..0827d7c96527 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, | |||
387 | status = PORT_PLC; | 387 | status = PORT_PLC; |
388 | port_change_bit = "link state"; | 388 | port_change_bit = "link state"; |
389 | break; | 389 | break; |
390 | case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: | ||
391 | status = PORT_CEC; | ||
392 | port_change_bit = "config error"; | ||
393 | break; | ||
390 | default: | 394 | default: |
391 | /* Should never happen */ | 395 | /* Should never happen */ |
392 | return; | 396 | return; |
@@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, | |||
588 | status |= USB_PORT_STAT_C_LINK_STATE << 16; | 592 | status |= USB_PORT_STAT_C_LINK_STATE << 16; |
589 | if ((raw_port_status & PORT_WRC)) | 593 | if ((raw_port_status & PORT_WRC)) |
590 | status |= USB_PORT_STAT_C_BH_RESET << 16; | 594 | status |= USB_PORT_STAT_C_BH_RESET << 16; |
595 | if ((raw_port_status & PORT_CEC)) | ||
596 | status |= USB_PORT_STAT_C_CONFIG_ERROR << 16; | ||
591 | } | 597 | } |
592 | 598 | ||
593 | if (hcd->speed != HCD_USB3) { | 599 | if (hcd->speed != HCD_USB3) { |
@@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
1005 | case USB_PORT_FEAT_C_OVER_CURRENT: | 1011 | case USB_PORT_FEAT_C_OVER_CURRENT: |
1006 | case USB_PORT_FEAT_C_ENABLE: | 1012 | case USB_PORT_FEAT_C_ENABLE: |
1007 | case USB_PORT_FEAT_C_PORT_LINK_STATE: | 1013 | case USB_PORT_FEAT_C_PORT_LINK_STATE: |
1014 | case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: | ||
1008 | xhci_clear_port_change_bit(xhci, wValue, wIndex, | 1015 | xhci_clear_port_change_bit(xhci, wValue, wIndex, |
1009 | port_array[wIndex], temp); | 1016 | port_array[wIndex], temp); |
1010 | break; | 1017 | break; |
@@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) | |||
1069 | */ | 1076 | */ |
1070 | status = bus_state->resuming_ports; | 1077 | status = bus_state->resuming_ports; |
1071 | 1078 | ||
1072 | mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC; | 1079 | mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; |
1073 | 1080 | ||
1074 | spin_lock_irqsave(&xhci->lock, flags); | 1081 | spin_lock_irqsave(&xhci->lock, flags); |
1075 | /* For each port, did anything change? If so, set that bit in buf. */ | 1082 | /* For each port, did anything change? If so, set that bit in buf. */ |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 7f76c8a12f89..2af32e26fafc 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -37,6 +37,9 @@ | |||
37 | 37 | ||
38 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 | 38 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 |
39 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 | 39 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 |
40 | #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 | ||
41 | #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f | ||
42 | #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f | ||
40 | 43 | ||
41 | static const char hcd_name[] = "xhci_hcd"; | 44 | static const char hcd_name[] = "xhci_hcd"; |
42 | 45 | ||
@@ -112,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
112 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { | 115 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { |
113 | xhci->quirks |= XHCI_LPM_SUPPORT; | 116 | xhci->quirks |= XHCI_LPM_SUPPORT; |
114 | xhci->quirks |= XHCI_INTEL_HOST; | 117 | xhci->quirks |= XHCI_INTEL_HOST; |
118 | xhci->quirks |= XHCI_AVOID_BEI; | ||
115 | } | 119 | } |
116 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 120 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
117 | pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { | 121 | pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { |
@@ -127,12 +131,17 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
127 | * PPT chipsets. | 131 | * PPT chipsets. |
128 | */ | 132 | */ |
129 | xhci->quirks |= XHCI_SPURIOUS_REBOOT; | 133 | xhci->quirks |= XHCI_SPURIOUS_REBOOT; |
130 | xhci->quirks |= XHCI_AVOID_BEI; | ||
131 | } | 134 | } |
132 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 135 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
133 | pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { | 136 | pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { |
134 | xhci->quirks |= XHCI_SPURIOUS_REBOOT; | 137 | xhci->quirks |= XHCI_SPURIOUS_REBOOT; |
135 | } | 138 | } |
139 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | ||
140 | (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || | ||
141 | pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || | ||
142 | pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) { | ||
143 | xhci->quirks |= XHCI_PME_STUCK_QUIRK; | ||
144 | } | ||
136 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && | 145 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && |
137 | pdev->device == PCI_DEVICE_ID_EJ168) { | 146 | pdev->device == PCI_DEVICE_ID_EJ168) { |
138 | xhci->quirks |= XHCI_RESET_ON_RESUME; | 147 | xhci->quirks |= XHCI_RESET_ON_RESUME; |
@@ -159,6 +168,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
159 | "QUIRK: Resetting on resume"); | 168 | "QUIRK: Resetting on resume"); |
160 | } | 169 | } |
161 | 170 | ||
171 | /* | ||
172 | * Make sure PME works on some Intel xHCI controllers by writing 1 to clear | ||
173 | * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 | ||
174 | */ | ||
175 | static void xhci_pme_quirk(struct xhci_hcd *xhci) | ||
176 | { | ||
177 | u32 val; | ||
178 | void __iomem *reg; | ||
179 | |||
180 | reg = (void __iomem *) xhci->cap_regs + 0x80a4; | ||
181 | val = readl(reg); | ||
182 | writel(val | BIT(28), reg); | ||
183 | readl(reg); | ||
184 | } | ||
185 | |||
162 | /* called during probe() after chip reset completes */ | 186 | /* called during probe() after chip reset completes */ |
163 | static int xhci_pci_setup(struct usb_hcd *hcd) | 187 | static int xhci_pci_setup(struct usb_hcd *hcd) |
164 | { | 188 | { |
@@ -283,6 +307,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) | |||
283 | if (xhci->quirks & XHCI_COMP_MODE_QUIRK) | 307 | if (xhci->quirks & XHCI_COMP_MODE_QUIRK) |
284 | pdev->no_d3cold = true; | 308 | pdev->no_d3cold = true; |
285 | 309 | ||
310 | if (xhci->quirks & XHCI_PME_STUCK_QUIRK) | ||
311 | xhci_pme_quirk(xhci); | ||
312 | |||
286 | return xhci_suspend(xhci, do_wakeup); | 313 | return xhci_suspend(xhci, do_wakeup); |
287 | } | 314 | } |
288 | 315 | ||
@@ -313,6 +340,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) | |||
313 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) | 340 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) |
314 | usb_enable_intel_xhci_ports(pdev); | 341 | usb_enable_intel_xhci_ports(pdev); |
315 | 342 | ||
343 | if (xhci->quirks & XHCI_PME_STUCK_QUIRK) | ||
344 | xhci_pme_quirk(xhci); | ||
345 | |||
316 | retval = xhci_resume(xhci, hibernated); | 346 | retval = xhci_resume(xhci, hibernated); |
317 | return retval; | 347 | return retval; |
318 | } | 348 | } |
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 08d402b15482..0e11d61408ff 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
@@ -83,16 +83,6 @@ static int xhci_plat_probe(struct platform_device *pdev) | |||
83 | if (irq < 0) | 83 | if (irq < 0) |
84 | return -ENODEV; | 84 | return -ENODEV; |
85 | 85 | ||
86 | |||
87 | if (of_device_is_compatible(pdev->dev.of_node, | ||
88 | "marvell,armada-375-xhci") || | ||
89 | of_device_is_compatible(pdev->dev.of_node, | ||
90 | "marvell,armada-380-xhci")) { | ||
91 | ret = xhci_mvebu_mbus_init_quirk(pdev); | ||
92 | if (ret) | ||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | /* Initialize dma_mask and coherent_dma_mask to 32-bits */ | 86 | /* Initialize dma_mask and coherent_dma_mask to 32-bits */ |
97 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | 87 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
98 | if (ret) | 88 | if (ret) |
@@ -127,6 +117,15 @@ static int xhci_plat_probe(struct platform_device *pdev) | |||
127 | goto put_hcd; | 117 | goto put_hcd; |
128 | } | 118 | } |
129 | 119 | ||
120 | if (of_device_is_compatible(pdev->dev.of_node, | ||
121 | "marvell,armada-375-xhci") || | ||
122 | of_device_is_compatible(pdev->dev.of_node, | ||
123 | "marvell,armada-380-xhci")) { | ||
124 | ret = xhci_mvebu_mbus_init_quirk(pdev); | ||
125 | if (ret) | ||
126 | goto disable_clk; | ||
127 | } | ||
128 | |||
130 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); | 129 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); |
131 | if (ret) | 130 | if (ret) |
132 | goto disable_clk; | 131 | goto disable_clk; |
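[Editor's note] The xhci-plat.c hunks move the Marvell mbus quirk from before usb_create_hcd() to after the clock has been enabled, so that a failing quirk can unwind through the existing disable_clk label instead of returning before there is anything to undo. A compressed sketch of that acquire-in-order / unwind-in-reverse probe pattern, with illustrative names:

  #include <linux/clk.h>
  #include <linux/err.h>
  #include <linux/platform_device.h>

  static int my_board_quirk(struct platform_device *pdev)
  {
          return 0;               /* stand-in for a quirk that may fail */
  }

  static int my_probe(struct platform_device *pdev)
  {
          struct clk *clk;
          int ret;

          clk = devm_clk_get(&pdev->dev, NULL);
          if (IS_ERR(clk))
                  return PTR_ERR(clk);

          ret = clk_prepare_enable(clk);
          if (ret)
                  return ret;

          /* runs after the clock is on, so failure unwinds via disable_clk */
          ret = my_board_quirk(pdev);
          if (ret)
                  goto disable_clk;

          platform_set_drvdata(pdev, clk);
          return 0;

  disable_clk:
          clk_disable_unprepare(clk);
          return ret;
  }
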
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 88da8d629820..73485fa4372f 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -1946,7 +1946,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1946 | if (event_trb != ep_ring->dequeue) { | 1946 | if (event_trb != ep_ring->dequeue) { |
1947 | /* The event was for the status stage */ | 1947 | /* The event was for the status stage */ |
1948 | if (event_trb == td->last_trb) { | 1948 | if (event_trb == td->last_trb) { |
1949 | if (td->urb->actual_length != 0) { | 1949 | if (td->urb_length_set) { |
1950 | /* Don't overwrite a previously set error code | 1950 | /* Don't overwrite a previously set error code |
1951 | */ | 1951 | */ |
1952 | if ((*status == -EINPROGRESS || *status == 0) && | 1952 | if ((*status == -EINPROGRESS || *status == 0) && |
@@ -1960,7 +1960,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
1960 | td->urb->transfer_buffer_length; | 1960 | td->urb->transfer_buffer_length; |
1961 | } | 1961 | } |
1962 | } else { | 1962 | } else { |
1963 | /* Maybe the event was for the data stage? */ | 1963 | /* |
1964 | * Maybe the event was for the data stage? If so, update | ||
1965 | * already the actual_length of the URB and flag it as | ||
1966 | * set, so that it is not overwritten in the event for | ||
1967 | * the last TRB. | ||
1968 | */ | ||
1969 | td->urb_length_set = true; | ||
1964 | td->urb->actual_length = | 1970 | td->urb->actual_length = |
1965 | td->urb->transfer_buffer_length - | 1971 | td->urb->transfer_buffer_length - |
1966 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); | 1972 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
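[Editor's note] The xhci-ring.c change stops treating `actual_length != 0` as "the data stage already set the length" and instead records that fact in an explicit per-TD flag (td->urb_length_set, added in the xhci.h diff below), so a data stage that legitimately transfers zero bytes is no longer clobbered when the status-stage event completes the URB. A stand-alone illustration of the flag-instead-of-sentinel idea, with made-up names and values:

  #include <stdbool.h>
  #include <stdio.h>

  /* Track "value already recorded" with an explicit flag instead of
   * treating 0 as "not set", so a genuine zero-length result survives. */
  struct transfer {
          unsigned int actual_length;
          bool length_set;                /* mirrors td->urb_length_set */
  };

  static void data_stage_event(struct transfer *t, unsigned int len)
  {
          t->actual_length = len;         /* may legitimately be 0 */
          t->length_set = true;
  }

  static void status_stage_event(struct transfer *t, unsigned int full_len)
  {
          if (!t->length_set)             /* fallback only */
                  t->actual_length = full_len;
  }

  int main(void)
  {
          struct transfer t = { 0 };

          data_stage_event(&t, 0);        /* zero-length data stage */
          status_stage_event(&t, 512);
          printf("actual_length = %u\n", t.actual_length);  /* prints 0 */
          return 0;
  }
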
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 974514762a14..8e421b89632d 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -1,3 +1,4 @@ | |||
1 | |||
1 | /* | 2 | /* |
2 | * xHCI host controller driver | 3 | * xHCI host controller driver |
3 | * | 4 | * |
@@ -88,9 +89,10 @@ struct xhci_cap_regs { | |||
88 | #define HCS_IST(p) (((p) >> 0) & 0xf) | 89 | #define HCS_IST(p) (((p) >> 0) & 0xf) |
89 | /* bits 4:7, max number of Event Ring segments */ | 90 | /* bits 4:7, max number of Event Ring segments */ |
90 | #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) | 91 | #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) |
92 | /* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */ | ||
91 | /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ | 93 | /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ |
92 | /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ | 94 | /* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */ |
93 | #define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) | 95 | #define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f)) |
94 | 96 | ||
95 | /* HCSPARAMS3 - hcs_params3 - bitmasks */ | 97 | /* HCSPARAMS3 - hcs_params3 - bitmasks */ |
96 | /* bits 0:7, Max U1 to U0 latency for the roothub ports */ | 98 | /* bits 0:7, Max U1 to U0 latency for the roothub ports */ |
@@ -1288,6 +1290,8 @@ struct xhci_td { | |||
1288 | struct xhci_segment *start_seg; | 1290 | struct xhci_segment *start_seg; |
1289 | union xhci_trb *first_trb; | 1291 | union xhci_trb *first_trb; |
1290 | union xhci_trb *last_trb; | 1292 | union xhci_trb *last_trb; |
1293 | /* actual_length of the URB has already been set */ | ||
1294 | bool urb_length_set; | ||
1291 | }; | 1295 | }; |
1292 | 1296 | ||
1293 | /* xHCI command default timeout value */ | 1297 | /* xHCI command default timeout value */ |
@@ -1560,6 +1564,7 @@ struct xhci_hcd { | |||
1560 | #define XHCI_SPURIOUS_WAKEUP (1 << 18) | 1564 | #define XHCI_SPURIOUS_WAKEUP (1 << 18) |
1561 | /* For controllers with a broken beyond repair streams implementation */ | 1565 | /* For controllers with a broken beyond repair streams implementation */ |
1562 | #define XHCI_BROKEN_STREAMS (1 << 19) | 1566 | #define XHCI_BROKEN_STREAMS (1 << 19) |
1567 | #define XHCI_PME_STUCK_QUIRK (1 << 20) | ||
1563 | unsigned int num_active_eps; | 1568 | unsigned int num_active_eps; |
1564 | unsigned int limit_active_eps; | 1569 | unsigned int limit_active_eps; |
1565 | /* There are two roothubs to keep track of bus suspend info for */ | 1570 | /* There are two roothubs to keep track of bus suspend info for */ |
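[Editor's note] The HCSPARAMS2 hunk in xhci.h above widens HCS_MAX_SCRATCHPAD() so the scratchpad-buffer count combines the high 5 bits of the register (bits 21:25) with the low 5 bits (bits 27:31) into one 10-bit value. A stand-alone check of that bit arithmetic; the register value below is invented purely for illustration:

  #include <stdint.h>
  #include <stdio.h>

  /* bits 21:25 = high 5 bits, bits 27:31 = low 5 bits of the count */
  #define HCS_MAX_SCRATCHPAD(p)  ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))

  int main(void)
  {
          uint32_t hcs_params2 = (3u << 21) | (7u << 27);   /* hi = 3, lo = 7 */

          /* expected: (3 << 5) | 7 = 103 */
          printf("max scratchpad buffers: %u\n",
                 (unsigned)HCS_MAX_SCRATCHPAD(hcs_params2));
          return 0;
  }
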
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c index b9827556455f..bfa402cf3a27 100644 --- a/drivers/usb/isp1760/isp1760-core.c +++ b/drivers/usb/isp1760/isp1760-core.c | |||
@@ -151,8 +151,7 @@ int isp1760_register(struct resource *mem, int irq, unsigned long irqflags, | |||
151 | } | 151 | } |
152 | 152 | ||
153 | if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { | 153 | if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { |
154 | ret = isp1760_udc_register(isp, irq, irqflags | IRQF_SHARED | | 154 | ret = isp1760_udc_register(isp, irq, irqflags); |
155 | IRQF_DISABLED); | ||
156 | if (ret < 0) { | 155 | if (ret < 0) { |
157 | isp1760_hcd_unregister(&isp->hcd); | 156 | isp1760_hcd_unregister(&isp->hcd); |
158 | return ret; | 157 | return ret; |
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c index eba9b82e2d70..3cb98b1d5d29 100644 --- a/drivers/usb/isp1760/isp1760-hcd.c +++ b/drivers/usb/isp1760/isp1760-hcd.c | |||
@@ -1274,7 +1274,7 @@ static void errata2_function(unsigned long data) | |||
1274 | for (slot = 0; slot < 32; slot++) | 1274 | for (slot = 0; slot < 32; slot++) |
1275 | if (priv->atl_slots[slot].qh && time_after(jiffies, | 1275 | if (priv->atl_slots[slot].qh && time_after(jiffies, |
1276 | priv->atl_slots[slot].timestamp + | 1276 | priv->atl_slots[slot].timestamp + |
1277 | SLOT_TIMEOUT * HZ / 1000)) { | 1277 | msecs_to_jiffies(SLOT_TIMEOUT))) { |
1278 | ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); | 1278 | ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); |
1279 | if (!FROM_DW0_VALID(ptd.dw0) && | 1279 | if (!FROM_DW0_VALID(ptd.dw0) && |
1280 | !FROM_DW3_ACTIVE(ptd.dw3)) | 1280 | !FROM_DW3_ACTIVE(ptd.dw3)) |
@@ -1286,7 +1286,7 @@ static void errata2_function(unsigned long data) | |||
1286 | 1286 | ||
1287 | spin_unlock_irqrestore(&priv->lock, spinflags); | 1287 | spin_unlock_irqrestore(&priv->lock, spinflags); |
1288 | 1288 | ||
1289 | errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000; | 1289 | errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD); |
1290 | add_timer(&errata2_timer); | 1290 | add_timer(&errata2_timer); |
1291 | } | 1291 | } |
1292 | 1292 | ||
@@ -1336,7 +1336,7 @@ static int isp1760_run(struct usb_hcd *hcd) | |||
1336 | return retval; | 1336 | return retval; |
1337 | 1337 | ||
1338 | setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd); | 1338 | setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd); |
1339 | errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000; | 1339 | errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD); |
1340 | add_timer(&errata2_timer); | 1340 | add_timer(&errata2_timer); |
1341 | 1341 | ||
1342 | chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG); | 1342 | chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG); |
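[Editor's note] The isp1760-hcd.c hunks replace the open-coded `SLOT_TIMEOUT * HZ / 1000` and `SLOT_CHECK_PERIOD * HZ / 1000` with msecs_to_jiffies(), which rounds up and copes with HZ values that do not divide 1000 evenly, where the open-coded form silently truncates. A small user-space illustration with invented values (the real helper additionally handles overflow and very large timeouts):

  #include <stdio.h>

  #define HZ 100                          /* illustrative config value */

  static unsigned long naive_jiffies(unsigned int msec)
  {
          return msec * HZ / 1000;        /* truncates: 5 ms -> 0 jiffies */
  }

  static unsigned long rounded_jiffies(unsigned int msec)
  {
          return (msec * HZ + 999) / 1000;  /* rounds up, like msecs_to_jiffies() */
  }

  int main(void)
  {
          printf("5 ms: naive=%lu rounded=%lu\n",
                 naive_jiffies(5), rounded_jiffies(5));
          return 0;
  }
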
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c index 9612d7990565..3fc4fe770253 100644 --- a/drivers/usb/isp1760/isp1760-udc.c +++ b/drivers/usb/isp1760/isp1760-udc.c | |||
@@ -1191,6 +1191,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1191 | struct usb_gadget_driver *driver) | 1191 | struct usb_gadget_driver *driver) |
1192 | { | 1192 | { |
1193 | struct isp1760_udc *udc = gadget_to_udc(gadget); | 1193 | struct isp1760_udc *udc = gadget_to_udc(gadget); |
1194 | unsigned long flags; | ||
1194 | 1195 | ||
1195 | /* The hardware doesn't support low speed. */ | 1196 | /* The hardware doesn't support low speed. */ |
1196 | if (driver->max_speed < USB_SPEED_FULL) { | 1197 | if (driver->max_speed < USB_SPEED_FULL) { |
@@ -1198,17 +1199,17 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1198 | return -EINVAL; | 1199 | return -EINVAL; |
1199 | } | 1200 | } |
1200 | 1201 | ||
1201 | spin_lock(&udc->lock); | 1202 | spin_lock_irqsave(&udc->lock, flags); |
1202 | 1203 | ||
1203 | if (udc->driver) { | 1204 | if (udc->driver) { |
1204 | dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); | 1205 | dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); |
1205 | spin_unlock(&udc->lock); | 1206 | spin_unlock_irqrestore(&udc->lock, flags); |
1206 | return -EBUSY; | 1207 | return -EBUSY; |
1207 | } | 1208 | } |
1208 | 1209 | ||
1209 | udc->driver = driver; | 1210 | udc->driver = driver; |
1210 | 1211 | ||
1211 | spin_unlock(&udc->lock); | 1212 | spin_unlock_irqrestore(&udc->lock, flags); |
1212 | 1213 | ||
1213 | dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", | 1214 | dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", |
1214 | driver->function); | 1215 | driver->function); |
@@ -1232,6 +1233,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1232 | static int isp1760_udc_stop(struct usb_gadget *gadget) | 1233 | static int isp1760_udc_stop(struct usb_gadget *gadget) |
1233 | { | 1234 | { |
1234 | struct isp1760_udc *udc = gadget_to_udc(gadget); | 1235 | struct isp1760_udc *udc = gadget_to_udc(gadget); |
1236 | unsigned long flags; | ||
1235 | 1237 | ||
1236 | dev_dbg(udc->isp->dev, "%s\n", __func__); | 1238 | dev_dbg(udc->isp->dev, "%s\n", __func__); |
1237 | 1239 | ||
@@ -1239,9 +1241,9 @@ static int isp1760_udc_stop(struct usb_gadget *gadget) | |||
1239 | 1241 | ||
1240 | isp1760_udc_write(udc, DC_MODE, 0); | 1242 | isp1760_udc_write(udc, DC_MODE, 0); |
1241 | 1243 | ||
1242 | spin_lock(&udc->lock); | 1244 | spin_lock_irqsave(&udc->lock, flags); |
1243 | udc->driver = NULL; | 1245 | udc->driver = NULL; |
1244 | spin_unlock(&udc->lock); | 1246 | spin_unlock_irqrestore(&udc->lock, flags); |
1245 | 1247 | ||
1246 | return 0; | 1248 | return 0; |
1247 | } | 1249 | } |
@@ -1411,7 +1413,7 @@ static int isp1760_udc_init(struct isp1760_udc *udc) | |||
1411 | return -ENODEV; | 1413 | return -ENODEV; |
1412 | } | 1414 | } |
1413 | 1415 | ||
1414 | if (chipid != 0x00011582) { | 1416 | if (chipid != 0x00011582 && chipid != 0x00158210) { |
1415 | dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); | 1417 | dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); |
1416 | return -ENODEV; | 1418 | return -ENODEV; |
1417 | } | 1419 | } |
@@ -1451,8 +1453,8 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq, | |||
1451 | 1453 | ||
1452 | sprintf(udc->irqname, "%s (udc)", devname); | 1454 | sprintf(udc->irqname, "%s (udc)", devname); |
1453 | 1455 | ||
1454 | ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | IRQF_DISABLED | | 1456 | ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | irqflags, |
1455 | irqflags, udc->irqname, udc); | 1457 | udc->irqname, udc); |
1456 | if (ret < 0) | 1458 | if (ret < 0) |
1457 | goto error; | 1459 | goto error; |
1458 | 1460 | ||
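[Editor's note] The isp1760-udc.c hunks drop the obsolete IRQF_DISABLED flag and convert udc->lock from plain spin_lock()/spin_unlock() to the irqsave variants, since the same lock is also taken from the interrupt handler. A minimal sketch of that pattern with a hypothetical device structure:

  #include <linux/spinlock.h>

  struct my_udc {
          spinlock_t lock;        /* also taken from the IRQ handler */
          void *driver;
  };

  static void my_udc_set_driver(struct my_udc *udc, void *driver)
  {
          unsigned long flags;

          /* Disable local interrupts while holding the lock, otherwise the
           * IRQ handler could spin on it forever on the same CPU. */
          spin_lock_irqsave(&udc->lock, flags);
          udc->driver = driver;
          spin_unlock_irqrestore(&udc->lock, flags);
  }
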
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 14e1628483d9..39db8b603627 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig | |||
@@ -79,7 +79,8 @@ config USB_MUSB_TUSB6010 | |||
79 | 79 | ||
80 | config USB_MUSB_OMAP2PLUS | 80 | config USB_MUSB_OMAP2PLUS |
81 | tristate "OMAP2430 and onwards" | 81 | tristate "OMAP2430 and onwards" |
82 | depends on ARCH_OMAP2PLUS && USB && OMAP_CONTROL_PHY | 82 | depends on ARCH_OMAP2PLUS && USB |
83 | depends on OMAP_CONTROL_PHY || !OMAP_CONTROL_PHY | ||
83 | select GENERIC_PHY | 84 | select GENERIC_PHY |
84 | 85 | ||
85 | config USB_MUSB_AM35X | 86 | config USB_MUSB_AM35X |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index e6f4cbfeed97..067920f2d570 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1969,10 +1969,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
1969 | goto fail0; | 1969 | goto fail0; |
1970 | } | 1970 | } |
1971 | 1971 | ||
1972 | pm_runtime_use_autosuspend(musb->controller); | ||
1973 | pm_runtime_set_autosuspend_delay(musb->controller, 200); | ||
1974 | pm_runtime_enable(musb->controller); | ||
1975 | |||
1976 | spin_lock_init(&musb->lock); | 1972 | spin_lock_init(&musb->lock); |
1977 | musb->board_set_power = plat->set_power; | 1973 | musb->board_set_power = plat->set_power; |
1978 | musb->min_power = plat->min_power; | 1974 | musb->min_power = plat->min_power; |
@@ -1991,6 +1987,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
1991 | musb_readl = musb_default_readl; | 1987 | musb_readl = musb_default_readl; |
1992 | musb_writel = musb_default_writel; | 1988 | musb_writel = musb_default_writel; |
1993 | 1989 | ||
1990 | /* We need musb_read/write functions initialized for PM */ | ||
1991 | pm_runtime_use_autosuspend(musb->controller); | ||
1992 | pm_runtime_set_autosuspend_delay(musb->controller, 200); | ||
1993 | pm_runtime_irq_safe(musb->controller); | ||
1994 | pm_runtime_enable(musb->controller); | ||
1995 | |||
1994 | /* The musb_platform_init() call: | 1996 | /* The musb_platform_init() call: |
1995 | * - adjusts musb->mregs | 1997 | * - adjusts musb->mregs |
1996 | * - sets the musb->isr | 1998 | * - sets the musb->isr |
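The musb_core change moves runtime-PM setup until after the musb_readl/musb_writel accessors have been assigned, so the PM callbacks never run with uninitialized register helpers, and additionally marks the device IRQ-safe. A rough sketch of this kind of probe-time runtime-PM enabling, assuming the 200 ms autosuspend delay used in the hunk:

```c
#include <linux/pm_runtime.h>
#include <linux/device.h>

/*
 * Sketch: enable runtime PM only after everything the suspend/resume
 * callbacks depend on (here, the register accessors) has been set up.
 */
static void demo_enable_runtime_pm(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);		/* prefer delayed suspends */
	pm_runtime_set_autosuspend_delay(dev, 200);	/* 200 ms idle window */
	pm_runtime_irq_safe(dev);	/* callbacks may run in atomic context */
	pm_runtime_enable(dev);
}

/*
 * Typical use at a register-access site:
 *
 *	pm_runtime_get_sync(dev);
 *	... touch hardware ...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */
```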
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 53bd0e71d19f..a900c9877195 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c | |||
@@ -457,12 +457,27 @@ static int dsps_musb_init(struct musb *musb) | |||
457 | if (IS_ERR(musb->xceiv)) | 457 | if (IS_ERR(musb->xceiv)) |
458 | return PTR_ERR(musb->xceiv); | 458 | return PTR_ERR(musb->xceiv); |
459 | 459 | ||
460 | musb->phy = devm_phy_get(dev->parent, "usb2-phy"); | ||
461 | |||
460 | /* Returns zero if e.g. not clocked */ | 462 | /* Returns zero if e.g. not clocked */ |
461 | rev = dsps_readl(reg_base, wrp->revision); | 463 | rev = dsps_readl(reg_base, wrp->revision); |
462 | if (!rev) | 464 | if (!rev) |
463 | return -ENODEV; | 465 | return -ENODEV; |
464 | 466 | ||
465 | usb_phy_init(musb->xceiv); | 467 | usb_phy_init(musb->xceiv); |
468 | if (IS_ERR(musb->phy)) { | ||
469 | musb->phy = NULL; | ||
470 | } else { | ||
471 | ret = phy_init(musb->phy); | ||
472 | if (ret < 0) | ||
473 | return ret; | ||
474 | ret = phy_power_on(musb->phy); | ||
475 | if (ret) { | ||
476 | phy_exit(musb->phy); | ||
477 | return ret; | ||
478 | } | ||
479 | } | ||
480 | |||
466 | setup_timer(&glue->timer, otg_timer, (unsigned long) musb); | 481 | setup_timer(&glue->timer, otg_timer, (unsigned long) musb); |
467 | 482 | ||
468 | /* Reset the musb */ | 483 | /* Reset the musb */ |
@@ -502,6 +517,8 @@ static int dsps_musb_exit(struct musb *musb) | |||
502 | 517 | ||
503 | del_timer_sync(&glue->timer); | 518 | del_timer_sync(&glue->timer); |
504 | usb_phy_shutdown(musb->xceiv); | 519 | usb_phy_shutdown(musb->xceiv); |
520 | phy_power_off(musb->phy); | ||
521 | phy_exit(musb->phy); | ||
505 | debugfs_remove_recursive(glue->dbgfs_root); | 522 | debugfs_remove_recursive(glue->dbgfs_root); |
506 | 523 | ||
507 | return 0; | 524 | return 0; |
@@ -610,7 +627,7 @@ static int dsps_musb_reset(struct musb *musb) | |||
610 | struct device *dev = musb->controller; | 627 | struct device *dev = musb->controller; |
611 | struct dsps_glue *glue = dev_get_drvdata(dev->parent); | 628 | struct dsps_glue *glue = dev_get_drvdata(dev->parent); |
612 | const struct dsps_musb_wrapper *wrp = glue->wrp; | 629 | const struct dsps_musb_wrapper *wrp = glue->wrp; |
613 | int session_restart = 0; | 630 | int session_restart = 0, error; |
614 | 631 | ||
615 | if (glue->sw_babble_enabled) | 632 | if (glue->sw_babble_enabled) |
616 | session_restart = sw_babble_control(musb); | 633 | session_restart = sw_babble_control(musb); |
@@ -624,8 +641,14 @@ static int dsps_musb_reset(struct musb *musb) | |||
624 | dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset)); | 641 | dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset)); |
625 | usleep_range(100, 200); | 642 | usleep_range(100, 200); |
626 | usb_phy_shutdown(musb->xceiv); | 643 | usb_phy_shutdown(musb->xceiv); |
644 | error = phy_power_off(musb->phy); | ||
645 | if (error) | ||
646 | dev_err(dev, "phy shutdown failed: %i\n", error); | ||
627 | usleep_range(100, 200); | 647 | usleep_range(100, 200); |
628 | usb_phy_init(musb->xceiv); | 648 | usb_phy_init(musb->xceiv); |
649 | error = phy_power_on(musb->phy); | ||
650 | if (error) | ||
651 | dev_err(dev, "phy powerup failed: %i\n", error); | ||
629 | session_restart = 1; | 652 | session_restart = 1; |
630 | } | 653 | } |
631 | 654 | ||
@@ -687,7 +710,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, | |||
687 | struct musb_hdrc_config *config; | 710 | struct musb_hdrc_config *config; |
688 | struct platform_device *musb; | 711 | struct platform_device *musb; |
689 | struct device_node *dn = parent->dev.of_node; | 712 | struct device_node *dn = parent->dev.of_node; |
690 | int ret; | 713 | int ret, val; |
691 | 714 | ||
692 | memset(resources, 0, sizeof(resources)); | 715 | memset(resources, 0, sizeof(resources)); |
693 | res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc"); | 716 | res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc"); |
@@ -739,7 +762,10 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, | |||
739 | pdata.mode = get_musb_port_mode(dev); | 762 | pdata.mode = get_musb_port_mode(dev); |
740 | /* DT keeps this entry in mA, musb expects it as per USB spec */ | 763 | /* DT keeps this entry in mA, musb expects it as per USB spec */ |
741 | pdata.power = get_int_prop(dn, "mentor,power") / 2; | 764 | pdata.power = get_int_prop(dn, "mentor,power") / 2; |
742 | config->multipoint = of_property_read_bool(dn, "mentor,multipoint"); | 765 | |
766 | ret = of_property_read_u32(dn, "mentor,multipoint", &val); | ||
767 | if (!ret && val) | ||
768 | config->multipoint = true; | ||
743 | 769 | ||
744 | ret = platform_device_add_data(musb, &pdata, sizeof(pdata)); | 770 | ret = platform_device_add_data(musb, &pdata, sizeof(pdata)); |
745 | if (ret) { | 771 | if (ret) { |
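The dsps glue now also looks up the generic "usb2-phy", powers it on during init, and cycles it around babble recovery, while tolerating its absence. A sketch of the optional-PHY setup pattern, with a hypothetical function name:

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

/*
 * Sketch: treat the generic PHY as optional; if one is described, it must
 * be initialized and powered on, with phy_exit() on the error path.
 */
static int demo_phy_setup(struct device *dev, struct phy **out)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(phy)) {
		*out = NULL;		/* no usable PHY, carry on without it */
		return 0;
	}

	ret = phy_init(phy);
	if (ret < 0)
		return ret;

	ret = phy_power_on(phy);
	if (ret) {
		phy_exit(phy);
		return ret;
	}

	*out = phy;
	return 0;
}
```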
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 883a9adfdfff..c3d5fc9dfb5b 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -2613,7 +2613,7 @@ static const struct hc_driver musb_hc_driver = { | |||
2613 | .description = "musb-hcd", | 2613 | .description = "musb-hcd", |
2614 | .product_desc = "MUSB HDRC host driver", | 2614 | .product_desc = "MUSB HDRC host driver", |
2615 | .hcd_priv_size = sizeof(struct musb *), | 2615 | .hcd_priv_size = sizeof(struct musb *), |
2616 | .flags = HCD_USB2 | HCD_MEMORY, | 2616 | .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, |
2617 | 2617 | ||
2618 | /* not using irq handler or reset hooks from usbcore, since | 2618 | /* not using irq handler or reset hooks from usbcore, since |
2619 | * those must be shared with peripheral code for OTG configs | 2619 | * those must be shared with peripheral code for OTG configs |
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 763649eb4987..cc752d8c7773 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c | |||
@@ -516,7 +516,7 @@ static int omap2430_probe(struct platform_device *pdev) | |||
516 | struct omap2430_glue *glue; | 516 | struct omap2430_glue *glue; |
517 | struct device_node *np = pdev->dev.of_node; | 517 | struct device_node *np = pdev->dev.of_node; |
518 | struct musb_hdrc_config *config; | 518 | struct musb_hdrc_config *config; |
519 | int ret = -ENOMEM; | 519 | int ret = -ENOMEM, val; |
520 | 520 | ||
521 | glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); | 521 | glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); |
522 | if (!glue) | 522 | if (!glue) |
@@ -559,7 +559,10 @@ static int omap2430_probe(struct platform_device *pdev) | |||
559 | of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps); | 559 | of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps); |
560 | of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits); | 560 | of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits); |
561 | of_property_read_u32(np, "power", (u32 *)&pdata->power); | 561 | of_property_read_u32(np, "power", (u32 *)&pdata->power); |
562 | config->multipoint = of_property_read_bool(np, "multipoint"); | 562 | |
563 | ret = of_property_read_u32(np, "multipoint", &val); | ||
564 | if (!ret && val) | ||
565 | config->multipoint = true; | ||
563 | 566 | ||
564 | pdata->board_data = data; | 567 | pdata->board_data = data; |
565 | pdata->config = config; | 568 | pdata->config = config; |
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c index 403fab772724..7b3035ff9434 100644 --- a/drivers/usb/phy/phy-am335x-control.c +++ b/drivers/usb/phy/phy-am335x-control.c | |||
@@ -126,6 +126,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev) | |||
126 | return NULL; | 126 | return NULL; |
127 | 127 | ||
128 | dev = bus_find_device(&platform_bus_type, NULL, node, match); | 128 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
129 | if (!dev) | ||
130 | return NULL; | ||
131 | |||
129 | ctrl_usb = dev_get_drvdata(dev); | 132 | ctrl_usb = dev_get_drvdata(dev); |
130 | if (!ctrl_usb) | 133 | if (!ctrl_usb) |
131 | return NULL; | 134 | return NULL; |
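The am335x control fix only adds a NULL check on the bus_find_device() result before its driver data is dereferenced. Sketched out with a hypothetical helper, using the older bus_find_device() signature that the hunk itself relies on:

```c
#include <linux/device.h>
#include <linux/platform_device.h>

/*
 * Sketch: bus_find_device() returns NULL when nothing matches, so the
 * result has to be checked before calling dev_get_drvdata() on it.
 */
static void *demo_find_ctrl(void *data,
			    int (*match)(struct device *dev, void *data))
{
	struct device *dev;
	void *ctrl;

	dev = bus_find_device(&platform_bus_type, NULL, data, match);
	if (!dev)
		return NULL;

	ctrl = dev_get_drvdata(dev);
	if (!ctrl)
		return NULL;

	return ctrl;
}
```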
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig index de83b9d0cd5c..ebc99ee076ce 100644 --- a/drivers/usb/renesas_usbhs/Kconfig +++ b/drivers/usb/renesas_usbhs/Kconfig | |||
@@ -6,6 +6,7 @@ config USB_RENESAS_USBHS | |||
6 | tristate 'Renesas USBHS controller' | 6 | tristate 'Renesas USBHS controller' |
7 | depends on USB_GADGET | 7 | depends on USB_GADGET |
8 | depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST | 8 | depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST |
9 | depends on EXTCON || !EXTCON # if EXTCON=m, USBHS cannot be built-in | ||
9 | default n | 10 | default n |
10 | help | 11 | help |
11 | Renesas USBHS is a discrete USB host and peripheral controller chip | 12 | Renesas USBHS is a discrete USB host and peripheral controller chip |
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c index 9374bd2aba20..8936a83c96cd 100644 --- a/drivers/usb/serial/bus.c +++ b/drivers/usb/serial/bus.c | |||
@@ -38,56 +38,51 @@ static int usb_serial_device_match(struct device *dev, | |||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | static ssize_t port_number_show(struct device *dev, | ||
42 | struct device_attribute *attr, char *buf) | ||
43 | { | ||
44 | struct usb_serial_port *port = to_usb_serial_port(dev); | ||
45 | |||
46 | return sprintf(buf, "%d\n", port->port_number); | ||
47 | } | ||
48 | static DEVICE_ATTR_RO(port_number); | ||
49 | |||
50 | static int usb_serial_device_probe(struct device *dev) | 41 | static int usb_serial_device_probe(struct device *dev) |
51 | { | 42 | { |
52 | struct usb_serial_driver *driver; | 43 | struct usb_serial_driver *driver; |
53 | struct usb_serial_port *port; | 44 | struct usb_serial_port *port; |
45 | struct device *tty_dev; | ||
54 | int retval = 0; | 46 | int retval = 0; |
55 | int minor; | 47 | int minor; |
56 | 48 | ||
57 | port = to_usb_serial_port(dev); | 49 | port = to_usb_serial_port(dev); |
58 | if (!port) { | 50 | if (!port) |
59 | retval = -ENODEV; | 51 | return -ENODEV; |
60 | goto exit; | ||
61 | } | ||
62 | 52 | ||
63 | /* make sure suspend/resume doesn't race against port_probe */ | 53 | /* make sure suspend/resume doesn't race against port_probe */ |
64 | retval = usb_autopm_get_interface(port->serial->interface); | 54 | retval = usb_autopm_get_interface(port->serial->interface); |
65 | if (retval) | 55 | if (retval) |
66 | goto exit; | 56 | return retval; |
67 | 57 | ||
68 | driver = port->serial->type; | 58 | driver = port->serial->type; |
69 | if (driver->port_probe) { | 59 | if (driver->port_probe) { |
70 | retval = driver->port_probe(port); | 60 | retval = driver->port_probe(port); |
71 | if (retval) | 61 | if (retval) |
72 | goto exit_with_autopm; | 62 | goto err_autopm_put; |
73 | } | 63 | } |
74 | 64 | ||
75 | retval = device_create_file(dev, &dev_attr_port_number); | 65 | minor = port->minor; |
76 | if (retval) { | 66 | tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev); |
77 | if (driver->port_remove) | 67 | if (IS_ERR(tty_dev)) { |
78 | retval = driver->port_remove(port); | 68 | retval = PTR_ERR(tty_dev); |
79 | goto exit_with_autopm; | 69 | goto err_port_remove; |
80 | } | 70 | } |
81 | 71 | ||
82 | minor = port->minor; | 72 | usb_autopm_put_interface(port->serial->interface); |
83 | tty_register_device(usb_serial_tty_driver, minor, dev); | 73 | |
84 | dev_info(&port->serial->dev->dev, | 74 | dev_info(&port->serial->dev->dev, |
85 | "%s converter now attached to ttyUSB%d\n", | 75 | "%s converter now attached to ttyUSB%d\n", |
86 | driver->description, minor); | 76 | driver->description, minor); |
87 | 77 | ||
88 | exit_with_autopm: | 78 | return 0; |
79 | |||
80 | err_port_remove: | ||
81 | if (driver->port_remove) | ||
82 | driver->port_remove(port); | ||
83 | err_autopm_put: | ||
89 | usb_autopm_put_interface(port->serial->interface); | 84 | usb_autopm_put_interface(port->serial->interface); |
90 | exit: | 85 | |
91 | return retval; | 86 | return retval; |
92 | } | 87 | } |
93 | 88 | ||
@@ -114,8 +109,6 @@ static int usb_serial_device_remove(struct device *dev) | |||
114 | minor = port->minor; | 109 | minor = port->minor; |
115 | tty_unregister_device(usb_serial_tty_driver, minor); | 110 | tty_unregister_device(usb_serial_tty_driver, minor); |
116 | 111 | ||
117 | device_remove_file(&port->dev, &dev_attr_port_number); | ||
118 | |||
119 | driver = port->serial->type; | 112 | driver = port->serial->type; |
120 | if (driver->port_remove) | 113 | if (driver->port_remove) |
121 | retval = driver->port_remove(port); | 114 | retval = driver->port_remove(port); |
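The usb-serial probe rework drops the port_number attribute handling from the bus code (it reappears further down as a default attribute group in usb-serial.c), checks the tty_register_device() return value, and unwinds through named error labels. A condensed sketch of that unwind structure, with hypothetical types and callbacks:

```c
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical types and callbacks for illustration only. */
struct demo_port;

struct demo_port_ops {
	int (*pm_get)(struct demo_port *port);
	void (*pm_put)(struct demo_port *port);
	int (*port_probe)(struct demo_port *port);
	int (*port_remove)(struct demo_port *port);
	struct device *(*register_tty)(struct demo_port *port);
};

static int demo_device_probe(struct demo_port *port,
			     const struct demo_port_ops *ops)
{
	struct device *tty_dev;
	int retval;

	retval = ops->pm_get(port);	/* keep the device resumed during probe */
	if (retval)
		return retval;

	if (ops->port_probe) {
		retval = ops->port_probe(port);
		if (retval)
			goto err_autopm_put;
	}

	tty_dev = ops->register_tty(port);	/* may fail, e.g. -ENOMEM */
	if (IS_ERR(tty_dev)) {
		retval = PTR_ERR(tty_dev);
		goto err_port_remove;
	}

	ops->pm_put(port);
	return 0;

err_port_remove:
	if (ops->port_remove)
		ops->port_remove(port);
err_autopm_put:
	ops->pm_put(port);
	return retval;
}
```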
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 2d72aa3564a3..ede4f5fcfadd 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c | |||
@@ -84,6 +84,10 @@ struct ch341_private { | |||
84 | u8 line_status; /* active status of modem control inputs */ | 84 | u8 line_status; /* active status of modem control inputs */ |
85 | }; | 85 | }; |
86 | 86 | ||
87 | static void ch341_set_termios(struct tty_struct *tty, | ||
88 | struct usb_serial_port *port, | ||
89 | struct ktermios *old_termios); | ||
90 | |||
87 | static int ch341_control_out(struct usb_device *dev, u8 request, | 91 | static int ch341_control_out(struct usb_device *dev, u8 request, |
88 | u16 value, u16 index) | 92 | u16 value, u16 index) |
89 | { | 93 | { |
@@ -309,19 +313,12 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
309 | struct ch341_private *priv = usb_get_serial_port_data(port); | 313 | struct ch341_private *priv = usb_get_serial_port_data(port); |
310 | int r; | 314 | int r; |
311 | 315 | ||
312 | priv->baud_rate = DEFAULT_BAUD_RATE; | ||
313 | |||
314 | r = ch341_configure(serial->dev, priv); | 316 | r = ch341_configure(serial->dev, priv); |
315 | if (r) | 317 | if (r) |
316 | goto out; | 318 | goto out; |
317 | 319 | ||
318 | r = ch341_set_handshake(serial->dev, priv->line_control); | 320 | if (tty) |
319 | if (r) | 321 | ch341_set_termios(tty, port, NULL); |
320 | goto out; | ||
321 | |||
322 | r = ch341_set_baudrate(serial->dev, priv); | ||
323 | if (r) | ||
324 | goto out; | ||
325 | 322 | ||
326 | dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__); | 323 | dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__); |
327 | r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); | 324 | r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); |
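The ch341 open path stops programming a fixed default line configuration and instead applies the current termios when a tty is attached; the tty pointer can be NULL here, so it is checked first (the mxuport hunk further down adds the same guard). A sketch of the idea, using hypothetical helpers:

```c
#include <linux/tty.h>

/* Hypothetical helpers for illustration only. */
struct demo_port;
int demo_configure_hw(struct demo_port *port);
void demo_set_termios(struct tty_struct *tty, struct demo_port *port,
		      struct ktermios *old);
int demo_submit_interrupt_urb(struct demo_port *port);

static int demo_open(struct tty_struct *tty, struct demo_port *port)
{
	int r;

	r = demo_configure_hw(port);
	if (r)
		return r;

	/* tty may be NULL (e.g. console setup); only apply termios if present */
	if (tty)
		demo_set_termios(tty, port, NULL);

	return demo_submit_interrupt_urb(port);
}
```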
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 29fa1c3d0089..3806e7014199 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
15 | 15 | ||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/module.h> | ||
17 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
18 | #include <linux/tty.h> | 19 | #include <linux/tty.h> |
19 | #include <linux/console.h> | 20 | #include <linux/console.h> |
@@ -144,6 +145,7 @@ static int usb_console_setup(struct console *co, char *options) | |||
144 | init_ldsem(&tty->ldisc_sem); | 145 | init_ldsem(&tty->ldisc_sem); |
145 | INIT_LIST_HEAD(&tty->tty_files); | 146 | INIT_LIST_HEAD(&tty->tty_files); |
146 | kref_get(&tty->driver->kref); | 147 | kref_get(&tty->driver->kref); |
148 | __module_get(tty->driver->owner); | ||
147 | tty->ops = &usb_console_fake_tty_ops; | 149 | tty->ops = &usb_console_fake_tty_ops; |
148 | if (tty_init_termios(tty)) { | 150 | if (tty_init_termios(tty)) { |
149 | retval = -ENOMEM; | 151 | retval = -ENOMEM; |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f40c856ff758..84ce2d74894c 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -147,6 +147,8 @@ static const struct usb_device_id id_table[] = { | |||
147 | { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ | 147 | { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ |
148 | { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ | 148 | { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ |
149 | { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ | 149 | { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ |
150 | { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */ | ||
151 | { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */ | ||
150 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ | 152 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ |
151 | { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ | 153 | { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ |
152 | { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ | 154 | { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 1ebb351b9e9a..8eb68a31cab6 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
604 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 604 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
605 | { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), | 605 | { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), |
606 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 606 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
607 | { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, | ||
607 | /* | 608 | /* |
608 | * ELV devices: | 609 | * ELV devices: |
609 | */ | 610 | */ |
@@ -799,6 +800,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
799 | { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, | 800 | { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, |
800 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, | 801 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, |
801 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, | 802 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, |
803 | { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), | ||
804 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
802 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), | 805 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), |
803 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 806 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
804 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), | 807 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), |
@@ -978,6 +981,23 @@ static const struct usb_device_id id_table_combined[] = { | |||
978 | { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, | 981 | { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, |
979 | /* GE Healthcare devices */ | 982 | /* GE Healthcare devices */ |
980 | { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, | 983 | { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, |
984 | /* Active Research (Actisense) devices */ | ||
985 | { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) }, | ||
986 | { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) }, | ||
987 | { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) }, | ||
988 | { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) }, | ||
989 | { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) }, | ||
990 | { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) }, | ||
991 | { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) }, | ||
992 | { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) }, | ||
993 | { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) }, | ||
994 | { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) }, | ||
995 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) }, | ||
996 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) }, | ||
997 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) }, | ||
998 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, | ||
999 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, | ||
1000 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, | ||
981 | { } /* Terminating entry */ | 1001 | { } /* Terminating entry */ |
982 | }; | 1002 | }; |
983 | 1003 | ||
@@ -1864,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) | |||
1864 | { | 1884 | { |
1865 | struct usb_device *udev = serial->dev; | 1885 | struct usb_device *udev = serial->dev; |
1866 | 1886 | ||
1867 | if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || | 1887 | if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) |
1868 | (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2"))) | 1888 | return ftdi_jtag_probe(serial); |
1889 | |||
1890 | if (udev->product && | ||
1891 | (!strcmp(udev->product, "BeagleBone/XDS100V2") || | ||
1892 | !strcmp(udev->product, "SNAP Connect E10"))) | ||
1869 | return ftdi_jtag_probe(serial); | 1893 | return ftdi_jtag_probe(serial); |
1870 | 1894 | ||
1871 | return 0; | 1895 | return 0; |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index e52409c9be99..4e4f46f3c89c 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -38,6 +38,9 @@ | |||
38 | 38 | ||
39 | #define FTDI_LUMEL_PD12_PID 0x6002 | 39 | #define FTDI_LUMEL_PD12_PID 0x6002 |
40 | 40 | ||
41 | /* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */ | ||
42 | #define CYBER_CORTEX_AV_PID 0x8698 | ||
43 | |||
41 | /* | 44 | /* |
42 | * Marvell OpenRD Base, Client | 45 | * Marvell OpenRD Base, Client |
43 | * http://www.open-rd.org | 46 | * http://www.open-rd.org |
@@ -558,6 +561,12 @@ | |||
558 | */ | 561 | */ |
559 | #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ | 562 | #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ |
560 | 563 | ||
564 | /* | ||
565 | * Synapse Wireless product ids (FTDI_VID) | ||
566 | * http://www.synapse-wireless.com | ||
567 | */ | ||
568 | #define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */ | ||
569 | |||
561 | 570 | ||
562 | /********************************/ | 571 | /********************************/ |
563 | /** third-party VID/PID combos **/ | 572 | /** third-party VID/PID combos **/ |
@@ -1438,3 +1447,23 @@ | |||
1438 | */ | 1447 | */ |
1439 | #define GE_HEALTHCARE_VID 0x1901 | 1448 | #define GE_HEALTHCARE_VID 0x1901 |
1440 | #define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 | 1449 | #define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 |
1450 | |||
1451 | /* | ||
1452 | * Active Research (Actisense) devices | ||
1453 | */ | ||
1454 | #define ACTISENSE_NDC_PID 0xD9A8 /* NDC USB Serial Adapter */ | ||
1455 | #define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */ | ||
1456 | #define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */ | ||
1457 | #define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */ | ||
1458 | #define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */ | ||
1459 | #define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */ | ||
1460 | #define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */ | ||
1461 | #define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */ | ||
1462 | #define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */ | ||
1463 | #define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */ | ||
1464 | #define CHETCO_SEASMART_NMEA2000_PID 0xA54A /* SeaSmart NMEA2000 Gateway */ | ||
1465 | #define CHETCO_SEASMART_ETHERNET_PID 0xA54B /* SeaSmart Ethernet Gateway */ | ||
1466 | #define CHETCO_SEASMART_WIFI_PID 0xA5AC /* SeaSmart Wifi Gateway */ | ||
1467 | #define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */ | ||
1468 | #define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */ | ||
1469 | #define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */ | ||
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index ccf1df7c4b80..54e170dd3dad 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c | |||
@@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) | |||
258 | * character or at least one jiffy. | 258 | * character or at least one jiffy. |
259 | */ | 259 | */ |
260 | period = max_t(unsigned long, (10 * HZ / bps), 1); | 260 | period = max_t(unsigned long, (10 * HZ / bps), 1); |
261 | period = min_t(unsigned long, period, timeout); | 261 | if (timeout) |
262 | period = min_t(unsigned long, period, timeout); | ||
262 | 263 | ||
263 | dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", | 264 | dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", |
264 | __func__, jiffies_to_msecs(timeout), | 265 | __func__, jiffies_to_msecs(timeout), |
@@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) | |||
268 | schedule_timeout_interruptible(period); | 269 | schedule_timeout_interruptible(period); |
269 | if (signal_pending(current)) | 270 | if (signal_pending(current)) |
270 | break; | 271 | break; |
271 | if (time_after(jiffies, expire)) | 272 | if (timeout && time_after(jiffies, expire)) |
272 | break; | 273 | break; |
273 | } | 274 | } |
274 | } | 275 | } |
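The generic wait_until_sent change treats a timeout of zero as "no deadline": the poll period is only clamped when a timeout was given, and the expiry test is skipped otherwise. A sketch of the resulting loop, with a hypothetical hardware poll callback:

```c
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>

/*
 * Sketch: poll until the transmitter drains; timeout == 0 means wait
 * indefinitely, so it must not clamp the period or expire the loop.
 */
static void demo_wait_until_sent(bool (*tx_empty)(void), long timeout)
{
	unsigned long expire = jiffies + timeout;
	unsigned long period = max_t(unsigned long, HZ / 100, 1);	/* ~10 ms */

	if (timeout)
		period = min_t(unsigned long, period, timeout);

	while (!tx_empty()) {
		schedule_timeout_interruptible(period);
		if (signal_pending(current))
			break;
		if (timeout && time_after(jiffies, expire))
			break;
	}
}
```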
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index dd97d8b572c3..4f7e072e4e00 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c | |||
@@ -61,6 +61,7 @@ struct keyspan_pda_private { | |||
61 | /* For Xircom PGSDB9 and older Entrega version of the same device */ | 61 | /* For Xircom PGSDB9 and older Entrega version of the same device */ |
62 | #define XIRCOM_VENDOR_ID 0x085a | 62 | #define XIRCOM_VENDOR_ID 0x085a |
63 | #define XIRCOM_FAKE_ID 0x8027 | 63 | #define XIRCOM_FAKE_ID 0x8027 |
64 | #define XIRCOM_FAKE_ID_2 0x8025 /* "PGMFHUB" serial */ | ||
64 | #define ENTREGA_VENDOR_ID 0x1645 | 65 | #define ENTREGA_VENDOR_ID 0x1645 |
65 | #define ENTREGA_FAKE_ID 0x8093 | 66 | #define ENTREGA_FAKE_ID 0x8093 |
66 | 67 | ||
@@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
70 | #endif | 71 | #endif |
71 | #ifdef XIRCOM | 72 | #ifdef XIRCOM |
72 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, | 73 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, |
74 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) }, | ||
73 | { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, | 75 | { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, |
74 | #endif | 76 | #endif |
75 | { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) }, | 77 | { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) }, |
@@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = { | |||
93 | #ifdef XIRCOM | 95 | #ifdef XIRCOM |
94 | static const struct usb_device_id id_table_fake_xircom[] = { | 96 | static const struct usb_device_id id_table_fake_xircom[] = { |
95 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, | 97 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, |
98 | { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) }, | ||
96 | { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, | 99 | { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, |
97 | { } | 100 | { } |
98 | }; | 101 | }; |
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c index ab1d690274ae..460a40669967 100644 --- a/drivers/usb/serial/mxuport.c +++ b/drivers/usb/serial/mxuport.c | |||
@@ -1284,7 +1284,8 @@ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
1284 | } | 1284 | } |
1285 | 1285 | ||
1286 | /* Initial port termios */ | 1286 | /* Initial port termios */ |
1287 | mxuport_set_termios(tty, port, NULL); | 1287 | if (tty) |
1288 | mxuport_set_termios(tty, port, NULL); | ||
1288 | 1289 | ||
1289 | /* | 1290 | /* |
1290 | * TODO: use RQ_VENDOR_GET_MSR, once we know what it | 1291 | * TODO: use RQ_VENDOR_GET_MSR, once we know what it |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 0f872e6b2c87..829604d11f3f 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -132,6 +132,7 @@ MODULE_DEVICE_TABLE(usb, id_table); | |||
132 | #define UART_OVERRUN_ERROR 0x40 | 132 | #define UART_OVERRUN_ERROR 0x40 |
133 | #define UART_CTS 0x80 | 133 | #define UART_CTS 0x80 |
134 | 134 | ||
135 | static void pl2303_set_break(struct usb_serial_port *port, bool enable); | ||
135 | 136 | ||
136 | enum pl2303_type { | 137 | enum pl2303_type { |
137 | TYPE_01, /* Type 0 and 1 (difference unknown) */ | 138 | TYPE_01, /* Type 0 and 1 (difference unknown) */ |
@@ -615,6 +616,7 @@ static void pl2303_close(struct usb_serial_port *port) | |||
615 | { | 616 | { |
616 | usb_serial_generic_close(port); | 617 | usb_serial_generic_close(port); |
617 | usb_kill_urb(port->interrupt_in_urb); | 618 | usb_kill_urb(port->interrupt_in_urb); |
619 | pl2303_set_break(port, false); | ||
618 | } | 620 | } |
619 | 621 | ||
620 | static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) | 622 | static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) |
@@ -741,17 +743,16 @@ static int pl2303_ioctl(struct tty_struct *tty, | |||
741 | return -ENOIOCTLCMD; | 743 | return -ENOIOCTLCMD; |
742 | } | 744 | } |
743 | 745 | ||
744 | static void pl2303_break_ctl(struct tty_struct *tty, int break_state) | 746 | static void pl2303_set_break(struct usb_serial_port *port, bool enable) |
745 | { | 747 | { |
746 | struct usb_serial_port *port = tty->driver_data; | ||
747 | struct usb_serial *serial = port->serial; | 748 | struct usb_serial *serial = port->serial; |
748 | u16 state; | 749 | u16 state; |
749 | int result; | 750 | int result; |
750 | 751 | ||
751 | if (break_state == 0) | 752 | if (enable) |
752 | state = BREAK_OFF; | ||
753 | else | ||
754 | state = BREAK_ON; | 753 | state = BREAK_ON; |
754 | else | ||
755 | state = BREAK_OFF; | ||
755 | 756 | ||
756 | dev_dbg(&port->dev, "%s - turning break %s\n", __func__, | 757 | dev_dbg(&port->dev, "%s - turning break %s\n", __func__, |
757 | state == BREAK_OFF ? "off" : "on"); | 758 | state == BREAK_OFF ? "off" : "on"); |
@@ -763,6 +764,13 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state) | |||
763 | dev_err(&port->dev, "error sending break = %d\n", result); | 764 | dev_err(&port->dev, "error sending break = %d\n", result); |
764 | } | 765 | } |
765 | 766 | ||
767 | static void pl2303_break_ctl(struct tty_struct *tty, int state) | ||
768 | { | ||
769 | struct usb_serial_port *port = tty->driver_data; | ||
770 | |||
771 | pl2303_set_break(port, state); | ||
772 | } | ||
773 | |||
766 | static void pl2303_update_line_status(struct usb_serial_port *port, | 774 | static void pl2303_update_line_status(struct usb_serial_port *port, |
767 | unsigned char *data, | 775 | unsigned char *data, |
768 | unsigned int actual_length) | 776 | unsigned int actual_length) |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 475723c006f9..529066bbc7e8 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -687,6 +687,21 @@ static void serial_port_dtr_rts(struct tty_port *port, int on) | |||
687 | drv->dtr_rts(p, on); | 687 | drv->dtr_rts(p, on); |
688 | } | 688 | } |
689 | 689 | ||
690 | static ssize_t port_number_show(struct device *dev, | ||
691 | struct device_attribute *attr, char *buf) | ||
692 | { | ||
693 | struct usb_serial_port *port = to_usb_serial_port(dev); | ||
694 | |||
695 | return sprintf(buf, "%u\n", port->port_number); | ||
696 | } | ||
697 | static DEVICE_ATTR_RO(port_number); | ||
698 | |||
699 | static struct attribute *usb_serial_port_attrs[] = { | ||
700 | &dev_attr_port_number.attr, | ||
701 | NULL | ||
702 | }; | ||
703 | ATTRIBUTE_GROUPS(usb_serial_port); | ||
704 | |||
690 | static const struct tty_port_operations serial_port_ops = { | 705 | static const struct tty_port_operations serial_port_ops = { |
691 | .carrier_raised = serial_port_carrier_raised, | 706 | .carrier_raised = serial_port_carrier_raised, |
692 | .dtr_rts = serial_port_dtr_rts, | 707 | .dtr_rts = serial_port_dtr_rts, |
@@ -902,6 +917,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
902 | port->dev.driver = NULL; | 917 | port->dev.driver = NULL; |
903 | port->dev.bus = &usb_serial_bus_type; | 918 | port->dev.bus = &usb_serial_bus_type; |
904 | port->dev.release = &usb_serial_port_release; | 919 | port->dev.release = &usb_serial_port_release; |
920 | port->dev.groups = usb_serial_port_groups; | ||
905 | device_initialize(&port->dev); | 921 | device_initialize(&port->dev); |
906 | } | 922 | } |
907 | 923 | ||
@@ -940,8 +956,9 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
940 | port = serial->port[i]; | 956 | port = serial->port[i]; |
941 | if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL)) | 957 | if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL)) |
942 | goto probe_error; | 958 | goto probe_error; |
943 | buffer_size = max_t(int, serial->type->bulk_out_size, | 959 | buffer_size = serial->type->bulk_out_size; |
944 | usb_endpoint_maxp(endpoint)); | 960 | if (!buffer_size) |
961 | buffer_size = usb_endpoint_maxp(endpoint); | ||
945 | port->bulk_out_size = buffer_size; | 962 | port->bulk_out_size = buffer_size; |
946 | port->bulk_out_endpointAddress = endpoint->bEndpointAddress; | 963 | port->bulk_out_endpointAddress = endpoint->bEndpointAddress; |
947 | 964 | ||
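Here the port_number attribute removed from bus.c above is re-added as a default attribute group assigned before the port device is registered, so it exists by the time udev sees the add uevent. A sketch of the pattern, with a hypothetical port type:

```c
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

/* Hypothetical port type for illustration only. */
struct demo_port {
	struct device dev;
	unsigned int port_number;
};
#define to_demo_port(d) container_of(d, struct demo_port, dev)

static ssize_t port_number_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct demo_port *port = to_demo_port(dev);

	return sprintf(buf, "%u\n", port->port_number);
}
static DEVICE_ATTR_RO(port_number);

static struct attribute *demo_port_attrs[] = {
	&dev_attr_port_number.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo_port);

/*
 * Before device_register()/device_add() in probe:
 *
 *	port->dev.groups = demo_port_groups;
 *
 * so the attribute is created together with the device itself.
 */
```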
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index dbc00e56c7f5..c85ea530085f 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
@@ -113,6 +113,20 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, | |||
113 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 113 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
114 | US_FL_NO_ATA_1X), | 114 | US_FL_NO_ATA_1X), |
115 | 115 | ||
116 | /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ | ||
117 | UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, | ||
118 | "Initio Corporation", | ||
119 | "", | ||
120 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
121 | US_FL_NO_ATA_1X), | ||
122 | |||
123 | /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ | ||
124 | UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, | ||
125 | "JMicron", | ||
126 | "JMS539", | ||
127 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
128 | US_FL_NO_REPORT_OPCODES), | ||
129 | |||
116 | /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ | 130 | /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ |
117 | UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, | 131 | UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, |
118 | "JMicron", | 132 | "JMicron", |
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index d468d02179f4..5600c33fcadb 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
@@ -889,6 +889,12 @@ static void usb_stor_scan_dwork(struct work_struct *work) | |||
889 | !(us->fflags & US_FL_SCM_MULT_TARG)) { | 889 | !(us->fflags & US_FL_SCM_MULT_TARG)) { |
890 | mutex_lock(&us->dev_mutex); | 890 | mutex_lock(&us->dev_mutex); |
891 | us->max_lun = usb_stor_Bulk_max_lun(us); | 891 | us->max_lun = usb_stor_Bulk_max_lun(us); |
892 | /* | ||
893 | * Allow proper scanning of devices that present more than 8 LUNs | ||
894 | * While not affecting other devices that may need the previous behavior | ||
895 | */ | ||
896 | if (us->max_lun >= 8) | ||
897 | us_to_host(us)->max_lun = us->max_lun+1; | ||
892 | mutex_unlock(&us->dev_mutex); | 898 | mutex_unlock(&us->dev_mutex); |
893 | } | 899 | } |
894 | scsi_scan_host(us_to_host(us)); | 900 | scsi_scan_host(us_to_host(us)); |
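The storage change raises the SCSI host's max_lun when the Bulk-only GET MAX LUN response reports 8 or more LUNs; smaller values keep the historic default so other devices are unaffected. Roughly, with a hypothetical helper:

```c
#include <linux/types.h>
#include <scsi/scsi_host.h>

/*
 * Sketch: GET MAX LUN returns the highest LUN index; the SCSI host field
 * is a count, so devices with 8 or more LUNs need it raised explicitly.
 */
static void demo_update_max_lun(struct Scsi_Host *host, u8 reported_max_lun)
{
	if (reported_max_lun >= 8)
		host->max_lun = reported_max_lun + 1;
}
```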
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index f88bfdf5b6a0..2027a27546ef 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
@@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, | |||
868 | func = vfio_pci_set_err_trigger; | 868 | func = vfio_pci_set_err_trigger; |
869 | break; | 869 | break; |
870 | } | 870 | } |
871 | break; | ||
871 | case VFIO_PCI_REQ_IRQ_INDEX: | 872 | case VFIO_PCI_REQ_IRQ_INDEX: |
872 | switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { | 873 | switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { |
873 | case VFIO_IRQ_SET_ACTION_TRIGGER: | 874 | case VFIO_IRQ_SET_ACTION_TRIGGER: |
874 | func = vfio_pci_set_req_trigger; | 875 | func = vfio_pci_set_req_trigger; |
875 | break; | 876 | break; |
876 | } | 877 | } |
878 | break; | ||
877 | } | 879 | } |
878 | 880 | ||
879 | if (!func) | 881 | if (!func) |
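The vfio fix adds the break statements missing after each inner switch, so handling of one IRQ index can no longer fall through into the next. A minimal illustration of the bug class, with hypothetical names:

```c
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical trigger handlers for illustration only. */
static int demo_err_trigger(void) { return 0; }
static int demo_req_trigger(void) { return 0; }

enum demo_index { DEMO_ERR_IRQ_INDEX, DEMO_REQ_IRQ_INDEX };

static int demo_pick_handler(enum demo_index index, bool trigger,
			     int (**func)(void))
{
	*func = NULL;

	switch (index) {
	case DEMO_ERR_IRQ_INDEX:
		if (trigger)
			*func = demo_err_trigger;
		break;	/* without this, execution falls into the next case */
	case DEMO_REQ_IRQ_INDEX:
		if (trigger)
			*func = demo_req_trigger;
		break;
	}

	return *func ? 0 : -EINVAL;
}
```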
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index afa06d28725d..2bbfc25e582c 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net) | |||
591 | * TODO: support TSO. | 591 | * TODO: support TSO. |
592 | */ | 592 | */ |
593 | iov_iter_advance(&msg.msg_iter, vhost_hlen); | 593 | iov_iter_advance(&msg.msg_iter, vhost_hlen); |
594 | } else { | ||
595 | /* It'll come from socket; we'll need to patch | ||
596 | * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF | ||
597 | */ | ||
598 | iov_iter_advance(&fixup, sizeof(hdr)); | ||
599 | } | 594 | } |
600 | err = sock->ops->recvmsg(NULL, sock, &msg, | 595 | err = sock->ops->recvmsg(NULL, sock, &msg, |
601 | sock_len, MSG_DONTWAIT | MSG_TRUNC); | 596 | sock_len, MSG_DONTWAIT | MSG_TRUNC); |
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net) | |||
609 | continue; | 604 | continue; |
610 | } | 605 | } |
611 | /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ | 606 | /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ |
612 | if (unlikely(vhost_hlen) && | 607 | if (unlikely(vhost_hlen)) { |
613 | copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) { | 608 | if (copy_to_iter(&hdr, sizeof(hdr), |
614 | vq_err(vq, "Unable to write vnet_hdr at addr %p\n", | 609 | &fixup) != sizeof(hdr)) { |
615 | vq->iov->iov_base); | 610 | vq_err(vq, "Unable to write vnet_hdr " |
616 | break; | 611 | "at addr %p\n", vq->iov->iov_base); |
612 | break; | ||
613 | } | ||
614 | } else { | ||
615 | /* Header came from socket; we'll need to patch | ||
616 | * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF | ||
617 | */ | ||
618 | iov_iter_advance(&fixup, sizeof(hdr)); | ||
617 | } | 619 | } |
618 | /* TODO: Should check and handle checksum. */ | 620 | /* TODO: Should check and handle checksum. */ |
619 | 621 | ||
620 | num_buffers = cpu_to_vhost16(vq, headcount); | 622 | num_buffers = cpu_to_vhost16(vq, headcount); |
621 | if (likely(mergeable) && | 623 | if (likely(mergeable) && |
622 | copy_to_iter(&num_buffers, 2, &fixup) != 2) { | 624 | copy_to_iter(&num_buffers, sizeof num_buffers, |
625 | &fixup) != sizeof num_buffers) { | ||
623 | vq_err(vq, "Failed num_buffers write"); | 626 | vq_err(vq, "Failed num_buffers write"); |
624 | vhost_discard_vq_desc(vq, headcount); | 627 | vhost_discard_vq_desc(vq, headcount); |
625 | break; | 628 | break; |
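In handle_rx() the fixup iterator is now only advanced past the header when that header came from the socket, and num_buffers is copied with sizeof() instead of a bare 2. A small sketch of the sizeof-based copy, not the driver's full header-layout handling:

```c
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>

/*
 * Sketch: write the mergeable-buffer count into the guest-visible header
 * through a dedicated iterator, sized with sizeof() rather than a literal.
 */
static int demo_store_num_buffers(struct iov_iter *fixup, u16 num_buffers)
{
	if (copy_to_iter(&num_buffers, sizeof(num_buffers), fixup) !=
	    sizeof(num_buffers))
		return -EFAULT;

	return 0;
}
```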
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 8d4f3f1ff799..71df240a467a 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -1956,10 +1956,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, | |||
1956 | goto out; | 1956 | goto out; |
1957 | } | 1957 | } |
1958 | /* | 1958 | /* |
1959 | * Now register the TCM vhost virtual I_T Nexus as active with the | 1959 | * Now register the TCM vhost virtual I_T Nexus as active. |
1960 | * call to __transport_register_session() | ||
1961 | */ | 1960 | */ |
1962 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, | 1961 | transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, |
1963 | tv_nexus->tvn_se_sess, tv_nexus); | 1962 | tv_nexus->tvn_se_sess, tv_nexus); |
1964 | tpg->tpg_nexus = tv_nexus; | 1963 | tpg->tpg_nexus = tv_nexus; |
1965 | 1964 | ||
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 32c0b6b28097..9362424c2340 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c | |||
@@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, | |||
599 | 599 | ||
600 | len = clcdfb_snprintf_mode(NULL, 0, mode); | 600 | len = clcdfb_snprintf_mode(NULL, 0, mode); |
601 | name = devm_kzalloc(dev, len + 1, GFP_KERNEL); | 601 | name = devm_kzalloc(dev, len + 1, GFP_KERNEL); |
602 | if (!name) | ||
603 | return -ENOMEM; | ||
604 | |||
602 | clcdfb_snprintf_mode(name, len + 1, mode); | 605 | clcdfb_snprintf_mode(name, len + 1, mode); |
603 | mode->name = name; | 606 | mode->name = name; |
604 | 607 | ||
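The amba-clcd fix simply checks the devm_kzalloc() result used for the mode name, which is sized with the snprintf(NULL, 0, ...) idiom. Sketched with a hypothetical format string:

```c
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/*
 * Sketch: size the string with snprintf(NULL, 0, ...), then check the
 * devm allocation before filling it in.
 */
static char *demo_format_mode_name(struct device *dev, int xres, int yres)
{
	char *name;
	int len;

	len = snprintf(NULL, 0, "%dx%d", xres, yres);
	name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
	if (!name)
		return NULL;	/* caller turns this into -ENOMEM */

	snprintf(name, len + 1, "%dx%d", xres, yres);
	return name;
}
```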
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 95338593ebf4..868facdec638 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c | |||
@@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, | |||
624 | int num = 0, i, first = 1; | 624 | int num = 0, i, first = 1; |
625 | int ver, rev; | 625 | int ver, rev; |
626 | 626 | ||
627 | ver = edid[EDID_STRUCT_VERSION]; | ||
628 | rev = edid[EDID_STRUCT_REVISION]; | ||
629 | |||
630 | mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); | 627 | mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); |
631 | if (mode == NULL) | 628 | if (mode == NULL) |
632 | return NULL; | 629 | return NULL; |
@@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, | |||
637 | return NULL; | 634 | return NULL; |
638 | } | 635 | } |
639 | 636 | ||
637 | ver = edid[EDID_STRUCT_VERSION]; | ||
638 | rev = edid[EDID_STRUCT_REVISION]; | ||
639 | |||
640 | *dbsize = 0; | 640 | *dbsize = 0; |
641 | 641 | ||
642 | DPRINTK(" Detailed Timings\n"); | 642 | DPRINTK(" Detailed Timings\n"); |
diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c index 5a2095a98ed8..12186557a9d4 100644 --- a/drivers/video/fbdev/omap2/dss/display-sysfs.c +++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c | |||
@@ -28,44 +28,22 @@ | |||
28 | #include <video/omapdss.h> | 28 | #include <video/omapdss.h> |
29 | #include "dss.h" | 29 | #include "dss.h" |
30 | 30 | ||
31 | static struct omap_dss_device *to_dss_device_sysfs(struct device *dev) | 31 | static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) |
32 | { | 32 | { |
33 | struct omap_dss_device *dssdev = NULL; | ||
34 | |||
35 | for_each_dss_dev(dssdev) { | ||
36 | if (dssdev->dev == dev) { | ||
37 | omap_dss_put_device(dssdev); | ||
38 | return dssdev; | ||
39 | } | ||
40 | } | ||
41 | |||
42 | return NULL; | ||
43 | } | ||
44 | |||
45 | static ssize_t display_name_show(struct device *dev, | ||
46 | struct device_attribute *attr, char *buf) | ||
47 | { | ||
48 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
49 | |||
50 | return snprintf(buf, PAGE_SIZE, "%s\n", | 33 | return snprintf(buf, PAGE_SIZE, "%s\n", |
51 | dssdev->name ? | 34 | dssdev->name ? |
52 | dssdev->name : ""); | 35 | dssdev->name : ""); |
53 | } | 36 | } |
54 | 37 | ||
55 | static ssize_t display_enabled_show(struct device *dev, | 38 | static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf) |
56 | struct device_attribute *attr, char *buf) | ||
57 | { | 39 | { |
58 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
59 | |||
60 | return snprintf(buf, PAGE_SIZE, "%d\n", | 40 | return snprintf(buf, PAGE_SIZE, "%d\n", |
61 | omapdss_device_is_enabled(dssdev)); | 41 | omapdss_device_is_enabled(dssdev)); |
62 | } | 42 | } |
63 | 43 | ||
64 | static ssize_t display_enabled_store(struct device *dev, | 44 | static ssize_t display_enabled_store(struct omap_dss_device *dssdev, |
65 | struct device_attribute *attr, | ||
66 | const char *buf, size_t size) | 45 | const char *buf, size_t size) |
67 | { | 46 | { |
68 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
69 | int r; | 47 | int r; |
70 | bool enable; | 48 | bool enable; |
71 | 49 | ||
@@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev, | |||
90 | return size; | 68 | return size; |
91 | } | 69 | } |
92 | 70 | ||
93 | static ssize_t display_tear_show(struct device *dev, | 71 | static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf) |
94 | struct device_attribute *attr, char *buf) | ||
95 | { | 72 | { |
96 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
97 | return snprintf(buf, PAGE_SIZE, "%d\n", | 73 | return snprintf(buf, PAGE_SIZE, "%d\n", |
98 | dssdev->driver->get_te ? | 74 | dssdev->driver->get_te ? |
99 | dssdev->driver->get_te(dssdev) : 0); | 75 | dssdev->driver->get_te(dssdev) : 0); |
100 | } | 76 | } |
101 | 77 | ||
102 | static ssize_t display_tear_store(struct device *dev, | 78 | static ssize_t display_tear_store(struct omap_dss_device *dssdev, |
103 | struct device_attribute *attr, const char *buf, size_t size) | 79 | const char *buf, size_t size) |
104 | { | 80 | { |
105 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
106 | int r; | 81 | int r; |
107 | bool te; | 82 | bool te; |
108 | 83 | ||
@@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev, | |||
120 | return size; | 95 | return size; |
121 | } | 96 | } |
122 | 97 | ||
123 | static ssize_t display_timings_show(struct device *dev, | 98 | static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf) |
124 | struct device_attribute *attr, char *buf) | ||
125 | { | 99 | { |
126 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
127 | struct omap_video_timings t; | 100 | struct omap_video_timings t; |
128 | 101 | ||
129 | if (!dssdev->driver->get_timings) | 102 | if (!dssdev->driver->get_timings) |
@@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev, | |||
137 | t.y_res, t.vfp, t.vbp, t.vsw); | 110 | t.y_res, t.vfp, t.vbp, t.vsw); |
138 | } | 111 | } |
139 | 112 | ||
140 | static ssize_t display_timings_store(struct device *dev, | 113 | static ssize_t display_timings_store(struct omap_dss_device *dssdev, |
141 | struct device_attribute *attr, const char *buf, size_t size) | 114 | const char *buf, size_t size) |
142 | { | 115 | { |
143 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
144 | struct omap_video_timings t = dssdev->panel.timings; | 116 | struct omap_video_timings t = dssdev->panel.timings; |
145 | int r, found; | 117 | int r, found; |
146 | 118 | ||
@@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev, | |||
176 | return size; | 148 | return size; |
177 | } | 149 | } |
178 | 150 | ||
179 | static ssize_t display_rotate_show(struct device *dev, | 151 | static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf) |
180 | struct device_attribute *attr, char *buf) | ||
181 | { | 152 | { |
182 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
183 | int rotate; | 153 | int rotate; |
184 | if (!dssdev->driver->get_rotate) | 154 | if (!dssdev->driver->get_rotate) |
185 | return -ENOENT; | 155 | return -ENOENT; |
@@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev, | |||
187 | return snprintf(buf, PAGE_SIZE, "%u\n", rotate); | 157 | return snprintf(buf, PAGE_SIZE, "%u\n", rotate); |
188 | } | 158 | } |
189 | 159 | ||
190 | static ssize_t display_rotate_store(struct device *dev, | 160 | static ssize_t display_rotate_store(struct omap_dss_device *dssdev, |
191 | struct device_attribute *attr, const char *buf, size_t size) | 161 | const char *buf, size_t size) |
192 | { | 162 | { |
193 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
194 | int rot, r; | 163 | int rot, r; |
195 | 164 | ||
196 | if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) | 165 | if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) |
@@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev, | |||
207 | return size; | 176 | return size; |
208 | } | 177 | } |
209 | 178 | ||
210 | static ssize_t display_mirror_show(struct device *dev, | 179 | static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf) |
211 | struct device_attribute *attr, char *buf) | ||
212 | { | 180 | { |
213 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
214 | int mirror; | 181 | int mirror; |
215 | if (!dssdev->driver->get_mirror) | 182 | if (!dssdev->driver->get_mirror) |
216 | return -ENOENT; | 183 | return -ENOENT; |
@@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct device *dev, | |||
218 | return snprintf(buf, PAGE_SIZE, "%u\n", mirror); | 185 | return snprintf(buf, PAGE_SIZE, "%u\n", mirror); |
219 | } | 186 | } |
220 | 187 | ||
221 | static ssize_t display_mirror_store(struct device *dev, | 188 | static ssize_t display_mirror_store(struct omap_dss_device *dssdev, |
222 | struct device_attribute *attr, const char *buf, size_t size) | 189 | const char *buf, size_t size) |
223 | { | 190 | { |
224 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
225 | int r; | 191 | int r; |
226 | bool mirror; | 192 | bool mirror; |
227 | 193 | ||
@@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev, | |||
239 | return size; | 205 | return size; |
240 | } | 206 | } |
241 | 207 | ||
242 | static ssize_t display_wss_show(struct device *dev, | 208 | static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf) |
243 | struct device_attribute *attr, char *buf) | ||
244 | { | 209 | { |
245 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
246 | unsigned int wss; | 210 | unsigned int wss; |
247 | 211 | ||
248 | if (!dssdev->driver->get_wss) | 212 | if (!dssdev->driver->get_wss) |
@@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev, | |||
253 | return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); | 217 | return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); |
254 | } | 218 | } |
255 | 219 | ||
256 | static ssize_t display_wss_store(struct device *dev, | 220 | static ssize_t display_wss_store(struct omap_dss_device *dssdev, |
257 | struct device_attribute *attr, const char *buf, size_t size) | 221 | const char *buf, size_t size) |
258 | { | 222 | { |
259 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
260 | u32 wss; | 223 | u32 wss; |
261 | int r; | 224 | int r; |
262 | 225 | ||
@@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev, | |||
277 | return size; | 240 | return size; |
278 | } | 241 | } |
279 | 242 | ||
280 | static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL); | 243 | struct display_attribute { |
281 | static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, | 244 | struct attribute attr; |
245 | ssize_t (*show)(struct omap_dss_device *, char *); | ||
246 | ssize_t (*store)(struct omap_dss_device *, const char *, size_t); | ||
247 | }; | ||
248 | |||
249 | #define DISPLAY_ATTR(_name, _mode, _show, _store) \ | ||
250 | struct display_attribute display_attr_##_name = \ | ||
251 | __ATTR(_name, _mode, _show, _store) | ||
252 | |||
253 | static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL); | ||
254 | static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL); | ||
255 | static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR, | ||
282 | display_enabled_show, display_enabled_store); | 256 | display_enabled_show, display_enabled_store); |
283 | static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, | 257 | static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR, |
284 | display_tear_show, display_tear_store); | 258 | display_tear_show, display_tear_store); |
285 | static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, | 259 | static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR, |
286 | display_timings_show, display_timings_store); | 260 | display_timings_show, display_timings_store); |
287 | static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR, | 261 | static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR, |
288 | display_rotate_show, display_rotate_store); | 262 | display_rotate_show, display_rotate_store); |
289 | static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR, | 263 | static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR, |
290 | display_mirror_show, display_mirror_store); | 264 | display_mirror_show, display_mirror_store); |
291 | static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, | 265 | static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR, |
292 | display_wss_show, display_wss_store); | 266 | display_wss_show, display_wss_store); |
293 | 267 | ||
294 | static const struct attribute *display_sysfs_attrs[] = { | 268 | static struct attribute *display_sysfs_attrs[] = { |
295 | &dev_attr_display_name.attr, | 269 | &display_attr_name.attr, |
296 | &dev_attr_enabled.attr, | 270 | &display_attr_display_name.attr, |
297 | &dev_attr_tear_elim.attr, | 271 | &display_attr_enabled.attr, |
298 | &dev_attr_timings.attr, | 272 | &display_attr_tear_elim.attr, |
299 | &dev_attr_rotate.attr, | 273 | &display_attr_timings.attr, |
300 | &dev_attr_mirror.attr, | 274 | &display_attr_rotate.attr, |
301 | &dev_attr_wss.attr, | 275 | &display_attr_mirror.attr, |
276 | &display_attr_wss.attr, | ||
302 | NULL | 277 | NULL |
303 | }; | 278 | }; |
304 | 279 | ||
280 | static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr, | ||
281 | char *buf) | ||
282 | { | ||
283 | struct omap_dss_device *dssdev; | ||
284 | struct display_attribute *display_attr; | ||
285 | |||
286 | dssdev = container_of(kobj, struct omap_dss_device, kobj); | ||
287 | display_attr = container_of(attr, struct display_attribute, attr); | ||
288 | |||
289 | if (!display_attr->show) | ||
290 | return -ENOENT; | ||
291 | |||
292 | return display_attr->show(dssdev, buf); | ||
293 | } | ||
294 | |||
295 | static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr, | ||
296 | const char *buf, size_t size) | ||
297 | { | ||
298 | struct omap_dss_device *dssdev; | ||
299 | struct display_attribute *display_attr; | ||
300 | |||
301 | dssdev = container_of(kobj, struct omap_dss_device, kobj); | ||
302 | display_attr = container_of(attr, struct display_attribute, attr); | ||
303 | |||
304 | if (!display_attr->store) | ||
305 | return -ENOENT; | ||
306 | |||
307 | return display_attr->store(dssdev, buf, size); | ||
308 | } | ||
309 | |||
310 | static const struct sysfs_ops display_sysfs_ops = { | ||
311 | .show = display_attr_show, | ||
312 | .store = display_attr_store, | ||
313 | }; | ||
314 | |||
315 | static struct kobj_type display_ktype = { | ||
316 | .sysfs_ops = &display_sysfs_ops, | ||
317 | .default_attrs = display_sysfs_attrs, | ||
318 | }; | ||
319 | |||
305 | int display_init_sysfs(struct platform_device *pdev) | 320 | int display_init_sysfs(struct platform_device *pdev) |
306 | { | 321 | { |
307 | struct omap_dss_device *dssdev = NULL; | 322 | struct omap_dss_device *dssdev = NULL; |
308 | int r; | 323 | int r; |
309 | 324 | ||
310 | for_each_dss_dev(dssdev) { | 325 | for_each_dss_dev(dssdev) { |
311 | struct kobject *kobj = &dssdev->dev->kobj; | 326 | r = kobject_init_and_add(&dssdev->kobj, &display_ktype, |
312 | 327 | &pdev->dev.kobj, dssdev->alias); | |
313 | r = sysfs_create_files(kobj, display_sysfs_attrs); | ||
314 | if (r) { | 328 | if (r) { |
315 | DSSERR("failed to create sysfs files\n"); | 329 | DSSERR("failed to create sysfs files\n"); |
316 | goto err; | 330 | omap_dss_put_device(dssdev); |
317 | } | ||
318 | |||
319 | r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias); | ||
320 | if (r) { | ||
321 | sysfs_remove_files(kobj, display_sysfs_attrs); | ||
322 | |||
323 | DSSERR("failed to create sysfs display link\n"); | ||
324 | goto err; | 331 | goto err; |
325 | } | 332 | } |
326 | } | 333 | } |
@@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev) | |||
338 | struct omap_dss_device *dssdev = NULL; | 345 | struct omap_dss_device *dssdev = NULL; |
339 | 346 | ||
340 | for_each_dss_dev(dssdev) { | 347 | for_each_dss_dev(dssdev) { |
341 | sysfs_remove_link(&pdev->dev.kobj, dssdev->alias); | 348 | if (kobject_name(&dssdev->kobj) == NULL) |
342 | sysfs_remove_files(&dssdev->dev->kobj, | 349 | continue; |
343 | display_sysfs_attrs); | 350 | |
351 | kobject_del(&dssdev->kobj); | ||
352 | kobject_put(&dssdev->kobj); | ||
353 | |||
354 | memset(&dssdev->kobj, 0, sizeof(dssdev->kobj)); | ||
344 | } | 355 | } |
345 | } | 356 | } |
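
The omapdss hunk above stops using per-device DEVICE_ATTR files and instead embeds a private kobject in struct omap_dss_device, pairing it with a display_attribute type and sysfs_ops that recover both objects via container_of(). The minimal sketch below shows that dispatch pattern in isolation; struct demo_device, demo_attribute and the function names are invented for illustration and are not part of the driver.

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/errno.h>

struct demo_device {
	struct kobject kobj;		/* embedded, registered with a kobj_type */
};

struct demo_attribute {
	struct attribute attr;
	ssize_t (*show)(struct demo_device *, char *);
};

static ssize_t demo_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	/* Map the generic kobject/attribute back to the wrapping types. */
	struct demo_device *ddev = container_of(kobj, struct demo_device, kobj);
	struct demo_attribute *dattr =
		container_of(attr, struct demo_attribute, attr);

	if (!dattr->show)
		return -ENOENT;
	return dattr->show(ddev, buf);
}

static const struct sysfs_ops demo_sysfs_ops = {
	.show = demo_attr_show,
};

A kobj_type would then carry demo_sysfs_ops and a default attribute list, exactly as display_ktype does in the patch above.
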
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 0413157f3b49..6a356e344f82 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/balloon_compaction.h> | 30 | #include <linux/balloon_compaction.h> |
31 | #include <linux/oom.h> | 31 | #include <linux/oom.h> |
32 | #include <linux/wait.h> | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * Balloon device works in 4K page units. So each page is pointed to by | 35 | * Balloon device works in 4K page units. So each page is pointed to by |
@@ -334,17 +335,25 @@ static int virtballoon_oom_notify(struct notifier_block *self, | |||
334 | static int balloon(void *_vballoon) | 335 | static int balloon(void *_vballoon) |
335 | { | 336 | { |
336 | struct virtio_balloon *vb = _vballoon; | 337 | struct virtio_balloon *vb = _vballoon; |
338 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | ||
337 | 339 | ||
338 | set_freezable(); | 340 | set_freezable(); |
339 | while (!kthread_should_stop()) { | 341 | while (!kthread_should_stop()) { |
340 | s64 diff; | 342 | s64 diff; |
341 | 343 | ||
342 | try_to_freeze(); | 344 | try_to_freeze(); |
343 | wait_event_interruptible(vb->config_change, | 345 | |
344 | (diff = towards_target(vb)) != 0 | 346 | add_wait_queue(&vb->config_change, &wait); |
345 | || vb->need_stats_update | 347 | for (;;) { |
346 | || kthread_should_stop() | 348 | if ((diff = towards_target(vb)) != 0 || |
347 | || freezing(current)); | 349 | vb->need_stats_update || |
350 | kthread_should_stop() || | ||
351 | freezing(current)) | ||
352 | break; | ||
353 | wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | ||
354 | } | ||
355 | remove_wait_queue(&vb->config_change, &wait); | ||
356 | |||
348 | if (vb->need_stats_update) | 357 | if (vb->need_stats_update) |
349 | stats_handle_request(vb); | 358 | stats_handle_request(vb); |
350 | if (diff > 0) | 359 | if (diff > 0) |
@@ -499,6 +508,8 @@ static int virtballoon_probe(struct virtio_device *vdev) | |||
499 | if (err < 0) | 508 | if (err < 0) |
500 | goto out_oom_notify; | 509 | goto out_oom_notify; |
501 | 510 | ||
511 | virtio_device_ready(vdev); | ||
512 | |||
502 | vb->thread = kthread_run(balloon, vb, "vballoon"); | 513 | vb->thread = kthread_run(balloon, vb, "vballoon"); |
503 | if (IS_ERR(vb->thread)) { | 514 | if (IS_ERR(vb->thread)) { |
504 | err = PTR_ERR(vb->thread); | 515 | err = PTR_ERR(vb->thread); |
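
The balloon() change above replaces wait_event_interruptible() with an explicit wait-queue entry and wait_woken(), so a wakeup that races with the condition check is recorded in the entry instead of being lost. A generic sketch of that loop follows; the queue and condition callback are placeholders, not part of the driver.

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);

static void demo_wait_for(bool (*condition)(void))
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&demo_waitq, &wait);
	for (;;) {
		if (condition())
			break;
		/* Sleeps unless a wakeup already hit this entry, closing the
		 * check-then-sleep race that the wait_event macros normally
		 * handle internally. */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&demo_waitq, &wait);
}
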
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index cad569890908..6010d7ec0a0f 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c | |||
@@ -156,22 +156,95 @@ static void vm_get(struct virtio_device *vdev, unsigned offset, | |||
156 | void *buf, unsigned len) | 156 | void *buf, unsigned len) |
157 | { | 157 | { |
158 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); | 158 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); |
159 | u8 *ptr = buf; | 159 | void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG; |
160 | int i; | 160 | u8 b; |
161 | __le16 w; | ||
162 | __le32 l; | ||
161 | 163 | ||
162 | for (i = 0; i < len; i++) | 164 | if (vm_dev->version == 1) { |
163 | ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); | 165 | u8 *ptr = buf; |
166 | int i; | ||
167 | |||
168 | for (i = 0; i < len; i++) | ||
169 | ptr[i] = readb(base + offset + i); | ||
170 | return; | ||
171 | } | ||
172 | |||
173 | switch (len) { | ||
174 | case 1: | ||
175 | b = readb(base + offset); | ||
176 | memcpy(buf, &b, sizeof b); | ||
177 | break; | ||
178 | case 2: | ||
179 | w = cpu_to_le16(readw(base + offset)); | ||
180 | memcpy(buf, &w, sizeof w); | ||
181 | break; | ||
182 | case 4: | ||
183 | l = cpu_to_le32(readl(base + offset)); | ||
184 | memcpy(buf, &l, sizeof l); | ||
185 | break; | ||
186 | case 8: | ||
187 | l = cpu_to_le32(readl(base + offset)); | ||
188 | memcpy(buf, &l, sizeof l); | ||
189 | l = cpu_to_le32(ioread32(base + offset + sizeof l)); | ||
190 | memcpy(buf + sizeof l, &l, sizeof l); | ||
191 | break; | ||
192 | default: | ||
193 | BUG(); | ||
194 | } | ||
164 | } | 195 | } |
165 | 196 | ||
166 | static void vm_set(struct virtio_device *vdev, unsigned offset, | 197 | static void vm_set(struct virtio_device *vdev, unsigned offset, |
167 | const void *buf, unsigned len) | 198 | const void *buf, unsigned len) |
168 | { | 199 | { |
169 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); | 200 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); |
170 | const u8 *ptr = buf; | 201 | void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG; |
171 | int i; | 202 | u8 b; |
203 | __le16 w; | ||
204 | __le32 l; | ||
172 | 205 | ||
173 | for (i = 0; i < len; i++) | 206 | if (vm_dev->version == 1) { |
174 | writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); | 207 | const u8 *ptr = buf; |
208 | int i; | ||
209 | |||
210 | for (i = 0; i < len; i++) | ||
211 | writeb(ptr[i], base + offset + i); | ||
212 | |||
213 | return; | ||
214 | } | ||
215 | |||
216 | switch (len) { | ||
217 | case 1: | ||
218 | memcpy(&b, buf, sizeof b); | ||
219 | writeb(b, base + offset); | ||
220 | break; | ||
221 | case 2: | ||
222 | memcpy(&w, buf, sizeof w); | ||
223 | writew(le16_to_cpu(w), base + offset); | ||
224 | break; | ||
225 | case 4: | ||
226 | memcpy(&l, buf, sizeof l); | ||
227 | writel(le32_to_cpu(l), base + offset); | ||
228 | break; | ||
229 | case 8: | ||
230 | memcpy(&l, buf, sizeof l); | ||
231 | writel(le32_to_cpu(l), base + offset); | ||
232 | memcpy(&l, buf + sizeof l, sizeof l); | ||
233 | writel(le32_to_cpu(l), base + offset + sizeof l); | ||
234 | break; | ||
235 | default: | ||
236 | BUG(); | ||
237 | } | ||
238 | } | ||
239 | |||
240 | static u32 vm_generation(struct virtio_device *vdev) | ||
241 | { | ||
242 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); | ||
243 | |||
244 | if (vm_dev->version == 1) | ||
245 | return 0; | ||
246 | else | ||
247 | return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION); | ||
175 | } | 248 | } |
176 | 249 | ||
177 | static u8 vm_get_status(struct virtio_device *vdev) | 250 | static u8 vm_get_status(struct virtio_device *vdev) |
@@ -440,6 +513,7 @@ static const char *vm_bus_name(struct virtio_device *vdev) | |||
440 | static const struct virtio_config_ops virtio_mmio_config_ops = { | 513 | static const struct virtio_config_ops virtio_mmio_config_ops = { |
441 | .get = vm_get, | 514 | .get = vm_get, |
442 | .set = vm_set, | 515 | .set = vm_set, |
516 | .generation = vm_generation, | ||
443 | .get_status = vm_get_status, | 517 | .get_status = vm_get_status, |
444 | .set_status = vm_set_status, | 518 | .set_status = vm_set_status, |
445 | .reset = vm_reset, | 519 | .reset = vm_reset, |
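
With the version-2 path added above, vm_get()/vm_set() use width-matched accessors and the new vm_generation() hook exposes the config generation counter, letting callers detect a configuration change in the middle of a multi-field read. The sketch below shows such a generation-stable 64-bit read against a config-ops-like vtable; the types and names are illustrative, not the virtio core's.

#include <linux/types.h>

struct demo_cfg_ops {
	void (*get)(void *dev, unsigned int offset, void *buf, unsigned int len);
	u32 (*generation)(void *dev);
};

static u64 demo_read_u64_stable(const struct demo_cfg_ops *ops, void *dev,
				unsigned int offset)
{
	u32 before, after, lo, hi;

	do {
		before = ops->generation(dev);
		ops->get(dev, offset, &lo, sizeof(lo));
		ops->get(dev, offset + sizeof(lo), &hi, sizeof(hi));
		after = ops->generation(dev);
	} while (after != before);	/* retry if the config changed mid-read */

	return ((u64)hi << 32) | lo;
}
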
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c index 6df940528fd2..1443b3c391de 100644 --- a/drivers/watchdog/at91sam9_wdt.c +++ b/drivers/watchdog/at91sam9_wdt.c | |||
@@ -208,7 +208,8 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt) | |||
208 | 208 | ||
209 | if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) { | 209 | if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) { |
210 | err = request_irq(wdt->irq, wdt_interrupt, | 210 | err = request_irq(wdt->irq, wdt_interrupt, |
211 | IRQF_SHARED | IRQF_IRQPOLL, | 211 | IRQF_SHARED | IRQF_IRQPOLL | |
212 | IRQF_NO_SUSPEND, | ||
212 | pdev->name, wdt); | 213 | pdev->name, wdt); |
213 | if (err) | 214 | if (err) |
214 | return err; | 215 | return err; |
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c index c8def68d9e4c..0deaa4f971f5 100644 --- a/drivers/watchdog/imgpdc_wdt.c +++ b/drivers/watchdog/imgpdc_wdt.c | |||
@@ -42,10 +42,10 @@ | |||
42 | #define PDC_WDT_MIN_TIMEOUT 1 | 42 | #define PDC_WDT_MIN_TIMEOUT 1 |
43 | #define PDC_WDT_DEF_TIMEOUT 64 | 43 | #define PDC_WDT_DEF_TIMEOUT 64 |
44 | 44 | ||
45 | static int heartbeat; | 45 | static int heartbeat = PDC_WDT_DEF_TIMEOUT; |
46 | module_param(heartbeat, int, 0); | 46 | module_param(heartbeat, int, 0); |
47 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. " | 47 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds " |
48 | "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); | 48 | "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); |
49 | 49 | ||
50 | static bool nowayout = WATCHDOG_NOWAYOUT; | 50 | static bool nowayout = WATCHDOG_NOWAYOUT; |
51 | module_param(nowayout, bool, 0); | 51 | module_param(nowayout, bool, 0); |
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev) | |||
191 | pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; | 191 | pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; |
192 | pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; | 192 | pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; |
193 | pdc_wdt->wdt_dev.parent = &pdev->dev; | 193 | pdc_wdt->wdt_dev.parent = &pdev->dev; |
194 | watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); | ||
194 | 195 | ||
195 | ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); | 196 | ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); |
196 | if (ret < 0) { | 197 | if (ret < 0) { |
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev) | |||
232 | watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); | 233 | watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); |
233 | 234 | ||
234 | platform_set_drvdata(pdev, pdc_wdt); | 235 | platform_set_drvdata(pdev, pdc_wdt); |
235 | watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); | ||
236 | 236 | ||
237 | ret = watchdog_register_device(&pdc_wdt->wdt_dev); | 237 | ret = watchdog_register_device(&pdc_wdt->wdt_dev); |
238 | if (ret) | 238 | if (ret) |
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index a87f6df6e85f..938b987de551 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c | |||
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev) | |||
133 | u32 reg; | 133 | u32 reg; |
134 | struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); | 134 | struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); |
135 | void __iomem *wdt_base = mtk_wdt->wdt_base; | 135 | void __iomem *wdt_base = mtk_wdt->wdt_base; |
136 | u32 ret; | 136 | int ret; |
137 | 137 | ||
138 | ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); | 138 | ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); |
139 | if (ret < 0) | 139 | if (ret < 0) |
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index b812462083fc..94d96809e686 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
@@ -55,6 +55,23 @@ config XEN_BALLOON_MEMORY_HOTPLUG | |||
55 | 55 | ||
56 | In that case step 3 should be omitted. | 56 | In that case step 3 should be omitted. |
57 | 57 | ||
58 | config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT | ||
59 | int "Hotplugged memory limit (in GiB) for a PV guest" | ||
60 | default 512 if X86_64 | ||
61 | default 4 if X86_32 | ||
62 | range 0 64 if X86_32 | ||
63 | depends on XEN_HAVE_PVMMU | ||
64 | depends on XEN_BALLOON_MEMORY_HOTPLUG | ||
65 | help | ||
66 | Maximum amount of memory (in GiB) that a PV guest can be | ||
67 | expanded to when using memory hotplug. | ||
68 | |||
69 | A PV guest can have more memory than this limit if it is | ||
70 | started with a larger maximum. | ||
71 | |||
72 | This value is used to allocate enough space in internal | ||
73 | tables needed for physical memory administration. | ||
74 | |||
58 | config XEN_SCRUB_PAGES | 75 | config XEN_SCRUB_PAGES |
59 | bool "Scrub pages before returning them to system" | 76 | bool "Scrub pages before returning them to system" |
60 | depends on XEN_BALLOON | 77 | depends on XEN_BALLOON |
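
The new XEN_BALLOON_MEMORY_HOTPLUG_LIMIT symbol above only sizes the internal p2m bookkeeping; it does not by itself grant the guest more memory. Purely as an illustration, a 64-bit PV guest configuration keeping the default limit would carry something like:

CONFIG_XEN_BALLOON=y
CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y
CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT=512
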
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 2140398a2a8c..2ccd3592d41f 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -2,7 +2,7 @@ ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),) | |||
2 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o | 2 | obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o |
3 | endif | 3 | endif |
4 | obj-$(CONFIG_X86) += fallback.o | 4 | obj-$(CONFIG_X86) += fallback.o |
5 | obj-y += grant-table.o features.o balloon.o manage.o | 5 | obj-y += grant-table.o features.o balloon.o manage.o preempt.o |
6 | obj-y += events/ | 6 | obj-y += events/ |
7 | obj-y += xenbus/ | 7 | obj-y += xenbus/ |
8 | 8 | ||
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 0b52d92cb2e5..fd933695f232 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -229,6 +229,29 @@ static enum bp_state reserve_additional_memory(long credit) | |||
229 | balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION); | 229 | balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION); |
230 | nid = memory_add_physaddr_to_nid(hotplug_start_paddr); | 230 | nid = memory_add_physaddr_to_nid(hotplug_start_paddr); |
231 | 231 | ||
232 | #ifdef CONFIG_XEN_HAVE_PVMMU | ||
233 | /* | ||
234 | * add_memory() will build page tables for the new memory so | ||
235 | * the p2m must contain invalid entries so the correct | ||
236 | * non-present PTEs will be written. | ||
237 | * | ||
238 | * If a failure occurs, the original (identity) p2m entries | ||
239 | * are not restored since this region is now known not to | ||
240 | * conflict with any devices. | ||
241 | */ | ||
242 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | ||
243 | unsigned long pfn, i; | ||
244 | |||
245 | pfn = PFN_DOWN(hotplug_start_paddr); | ||
246 | for (i = 0; i < balloon_hotplug; i++) { | ||
247 | if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { | ||
248 | pr_warn("set_phys_to_machine() failed, no memory added\n"); | ||
249 | return BP_ECANCELED; | ||
250 | } | ||
251 | } | ||
252 | } | ||
253 | #endif | ||
254 | |||
232 | rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); | 255 | rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); |
233 | 256 | ||
234 | if (rc) { | 257 | if (rc) { |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index b4bca2d4a7e5..70fba973a107 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq) | |||
526 | pirq_query_unmask(irq); | 526 | pirq_query_unmask(irq); |
527 | 527 | ||
528 | rc = set_evtchn_to_irq(evtchn, irq); | 528 | rc = set_evtchn_to_irq(evtchn, irq); |
529 | if (rc != 0) { | 529 | if (rc) |
530 | pr_err("irq%d: Failed to set port to irq mapping (%d)\n", | 530 | goto err; |
531 | irq, rc); | 531 | |
532 | xen_evtchn_close(evtchn); | ||
533 | return 0; | ||
534 | } | ||
535 | bind_evtchn_to_cpu(evtchn, 0); | 532 | bind_evtchn_to_cpu(evtchn, 0); |
536 | info->evtchn = evtchn; | 533 | info->evtchn = evtchn; |
537 | 534 | ||
535 | rc = xen_evtchn_port_setup(info); | ||
536 | if (rc) | ||
537 | goto err; | ||
538 | |||
538 | out: | 539 | out: |
539 | unmask_evtchn(evtchn); | 540 | unmask_evtchn(evtchn); |
540 | eoi_pirq(irq_get_irq_data(irq)); | 541 | eoi_pirq(irq_get_irq_data(irq)); |
541 | 542 | ||
542 | return 0; | 543 | return 0; |
544 | |||
545 | err: | ||
546 | pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); | ||
547 | xen_evtchn_close(evtchn); | ||
548 | return 0; | ||
543 | } | 549 | } |
544 | 550 | ||
545 | static unsigned int startup_pirq(struct irq_data *data) | 551 | static unsigned int startup_pirq(struct irq_data *data) |
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c new file mode 100644 index 000000000000..a1800c150839 --- /dev/null +++ b/drivers/xen/preempt.c | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * Preemptible hypercalls | ||
3 | * | ||
4 | * Copyright (C) 2014 Citrix Systems R&D ltd. | ||
5 | * | ||
6 | * This source code is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation; either version 2 of the | ||
9 | * License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <xen/xen-ops.h> | ||
14 | |||
15 | #ifndef CONFIG_PREEMPT | ||
16 | |||
17 | /* | ||
18 | * Some hypercalls issued by the toolstack can take many 10s of | ||
19 | * seconds. Allow tasks running hypercalls via the privcmd driver to | ||
20 | * be voluntarily preempted even if full kernel preemption is | ||
21 | * disabled. | ||
22 | * | ||
23 | * Such preemptible hypercalls are bracketed by | ||
24 | * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end() | ||
25 | * calls. | ||
26 | */ | ||
27 | |||
28 | DEFINE_PER_CPU(bool, xen_in_preemptible_hcall); | ||
29 | EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall); | ||
30 | |||
31 | asmlinkage __visible void xen_maybe_preempt_hcall(void) | ||
32 | { | ||
33 | if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) | ||
34 | && should_resched())) { | ||
35 | /* | ||
36 | * Clear flag as we may be rescheduled on a different | ||
37 | * cpu. | ||
38 | */ | ||
39 | __this_cpu_write(xen_in_preemptible_hcall, false); | ||
40 | _cond_resched(); | ||
41 | __this_cpu_write(xen_in_preemptible_hcall, true); | ||
42 | } | ||
43 | } | ||
44 | #endif /* CONFIG_PREEMPT */ | ||
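
The comment block in the new preempt.c explains that long-running toolstack hypercalls are bracketed by xen_preemptible_hcall_begin()/xen_preemptible_hcall_end(). Those helpers live in xen-ops.h and are not shown in this hunk; a plausible non-CONFIG_PREEMPT form, assuming they simply toggle the per-CPU flag tested by xen_maybe_preempt_hcall(), would be:

#include <linux/percpu.h>
#include <linux/types.h>

DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);

static inline void xen_preemptible_hcall_begin(void)
{
	/* Mark this CPU as running a preemptible hypercall. */
	__this_cpu_write(xen_in_preemptible_hcall, true);
}

static inline void xen_preemptible_hcall_end(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, false);
}

The privcmd.c hunk that follows shows this bracketing in use around privcmd_call().
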
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 569a13b9e856..59ac71c4a043 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
@@ -56,10 +56,12 @@ static long privcmd_ioctl_hypercall(void __user *udata) | |||
56 | if (copy_from_user(&hypercall, udata, sizeof(hypercall))) | 56 | if (copy_from_user(&hypercall, udata, sizeof(hypercall))) |
57 | return -EFAULT; | 57 | return -EFAULT; |
58 | 58 | ||
59 | xen_preemptible_hcall_begin(); | ||
59 | ret = privcmd_call(hypercall.op, | 60 | ret = privcmd_call(hypercall.op, |
60 | hypercall.arg[0], hypercall.arg[1], | 61 | hypercall.arg[0], hypercall.arg[1], |
61 | hypercall.arg[2], hypercall.arg[3], | 62 | hypercall.arg[2], hypercall.arg[3], |
62 | hypercall.arg[4]); | 63 | hypercall.arg[4]); |
64 | xen_preemptible_hcall_end(); | ||
63 | 65 | ||
64 | return ret; | 66 | return ret; |
65 | } | 67 | } |
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 46ae0f9f02ad..75fe3d466515 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include "conf_space.h" | 16 | #include "conf_space.h" |
17 | #include "conf_space_quirks.h" | 17 | #include "conf_space_quirks.h" |
18 | 18 | ||
19 | static bool permissive; | 19 | bool permissive; |
20 | module_param(permissive, bool, 0644); | 20 | module_param(permissive, bool, 0644); |
21 | 21 | ||
22 | /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, | 22 | /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, |
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h index e56c934ad137..2e1d73d1d5d0 100644 --- a/drivers/xen/xen-pciback/conf_space.h +++ b/drivers/xen/xen-pciback/conf_space.h | |||
@@ -64,6 +64,8 @@ struct config_field_entry { | |||
64 | void *data; | 64 | void *data; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | extern bool permissive; | ||
68 | |||
67 | #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) | 69 | #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) |
68 | 70 | ||
69 | /* Add fields to a device - the add_fields macro expects to get a pointer to | 71 | /* Add fields to a device - the add_fields macro expects to get a pointer to |
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index c5ee82587e8c..2d7369391472 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c | |||
@@ -11,6 +11,10 @@ | |||
11 | #include "pciback.h" | 11 | #include "pciback.h" |
12 | #include "conf_space.h" | 12 | #include "conf_space.h" |
13 | 13 | ||
14 | struct pci_cmd_info { | ||
15 | u16 val; | ||
16 | }; | ||
17 | |||
14 | struct pci_bar_info { | 18 | struct pci_bar_info { |
15 | u32 val; | 19 | u32 val; |
16 | u32 len_val; | 20 | u32 len_val; |
@@ -20,22 +24,36 @@ struct pci_bar_info { | |||
20 | #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) | 24 | #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) |
21 | #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) | 25 | #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) |
22 | 26 | ||
23 | static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) | 27 | /* Bits guests are allowed to control in permissive mode. */ |
28 | #define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \ | ||
29 | PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \ | ||
30 | PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK) | ||
31 | |||
32 | static void *command_init(struct pci_dev *dev, int offset) | ||
24 | { | 33 | { |
25 | int i; | 34 | struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); |
26 | int ret; | 35 | int err; |
27 | 36 | ||
28 | ret = xen_pcibk_read_config_word(dev, offset, value, data); | 37 | if (!cmd) |
29 | if (!pci_is_enabled(dev)) | 38 | return ERR_PTR(-ENOMEM); |
30 | return ret; | 39 | |
31 | 40 | err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val); | |
32 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | 41 | if (err) { |
33 | if (dev->resource[i].flags & IORESOURCE_IO) | 42 | kfree(cmd); |
34 | *value |= PCI_COMMAND_IO; | 43 | return ERR_PTR(err); |
35 | if (dev->resource[i].flags & IORESOURCE_MEM) | ||
36 | *value |= PCI_COMMAND_MEMORY; | ||
37 | } | 44 | } |
38 | 45 | ||
46 | return cmd; | ||
47 | } | ||
48 | |||
49 | static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) | ||
50 | { | ||
51 | int ret = pci_read_config_word(dev, offset, value); | ||
52 | const struct pci_cmd_info *cmd = data; | ||
53 | |||
54 | *value &= PCI_COMMAND_GUEST; | ||
55 | *value |= cmd->val & ~PCI_COMMAND_GUEST; | ||
56 | |||
39 | return ret; | 57 | return ret; |
40 | } | 58 | } |
41 | 59 | ||
@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) | |||
43 | { | 61 | { |
44 | struct xen_pcibk_dev_data *dev_data; | 62 | struct xen_pcibk_dev_data *dev_data; |
45 | int err; | 63 | int err; |
64 | u16 val; | ||
65 | struct pci_cmd_info *cmd = data; | ||
46 | 66 | ||
47 | dev_data = pci_get_drvdata(dev); | 67 | dev_data = pci_get_drvdata(dev); |
48 | if (!pci_is_enabled(dev) && is_enable_cmd(value)) { | 68 | if (!pci_is_enabled(dev) && is_enable_cmd(value)) { |
@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) | |||
83 | } | 103 | } |
84 | } | 104 | } |
85 | 105 | ||
106 | cmd->val = value; | ||
107 | |||
108 | if (!permissive && (!dev_data || !dev_data->permissive)) | ||
109 | return 0; | ||
110 | |||
111 | /* Only allow the guest to control certain bits. */ | ||
112 | err = pci_read_config_word(dev, offset, &val); | ||
113 | if (err || val == value) | ||
114 | return err; | ||
115 | |||
116 | value &= PCI_COMMAND_GUEST; | ||
117 | value |= val & ~PCI_COMMAND_GUEST; | ||
118 | |||
86 | return pci_write_config_word(dev, offset, value); | 119 | return pci_write_config_word(dev, offset, value); |
87 | } | 120 | } |
88 | 121 | ||
@@ -282,6 +315,8 @@ static const struct config_field header_common[] = { | |||
282 | { | 315 | { |
283 | .offset = PCI_COMMAND, | 316 | .offset = PCI_COMMAND, |
284 | .size = 2, | 317 | .size = 2, |
318 | .init = command_init, | ||
319 | .release = bar_release, | ||
285 | .u.w.read = command_read, | 320 | .u.w.read = command_read, |
286 | .u.w.write = command_write, | 321 | .u.w.write = command_write, |
287 | }, | 322 | }, |
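
The command_read()/command_write() changes above stop passing the guest's COMMAND register value through verbatim: only the bits in PCI_COMMAND_GUEST may be driven by the guest, and the remaining bits are preserved from the value the backend controls. A small helper showing just that merge, with invented names and assuming the same bit mask as the patch:

#include <linux/types.h>
#include <linux/pci.h>

#define DEMO_COMMAND_GUEST (PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL | \
			    PCI_COMMAND_INVALIDATE | PCI_COMMAND_VGA_PALETTE | \
			    PCI_COMMAND_WAIT | PCI_COMMAND_FAST_BACK)

/* Keep only guest-controllable bits from the guest's write and preserve
 * the remaining bits from the current hardware value. */
static u16 demo_merge_command(u16 guest_val, u16 host_val)
{
	return (guest_val & DEMO_COMMAND_GUEST) | (host_val & ~DEMO_COMMAND_GUEST);
}
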
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 61653a03a8f5..42bd55a6c237 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
@@ -709,12 +709,11 @@ static int prepare_pending_reqs(struct vscsibk_info *info, | |||
709 | static int scsiback_do_cmd_fn(struct vscsibk_info *info) | 709 | static int scsiback_do_cmd_fn(struct vscsibk_info *info) |
710 | { | 710 | { |
711 | struct vscsiif_back_ring *ring = &info->ring; | 711 | struct vscsiif_back_ring *ring = &info->ring; |
712 | struct vscsiif_request *ring_req; | 712 | struct vscsiif_request ring_req; |
713 | struct vscsibk_pend *pending_req; | 713 | struct vscsibk_pend *pending_req; |
714 | RING_IDX rc, rp; | 714 | RING_IDX rc, rp; |
715 | int err, more_to_do; | 715 | int err, more_to_do; |
716 | uint32_t result; | 716 | uint32_t result; |
717 | uint8_t act; | ||
718 | 717 | ||
719 | rc = ring->req_cons; | 718 | rc = ring->req_cons; |
720 | rp = ring->sring->req_prod; | 719 | rp = ring->sring->req_prod; |
@@ -735,11 +734,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) | |||
735 | if (!pending_req) | 734 | if (!pending_req) |
736 | return 1; | 735 | return 1; |
737 | 736 | ||
738 | ring_req = RING_GET_REQUEST(ring, rc); | 737 | ring_req = *RING_GET_REQUEST(ring, rc); |
739 | ring->req_cons = ++rc; | 738 | ring->req_cons = ++rc; |
740 | 739 | ||
741 | act = ring_req->act; | 740 | err = prepare_pending_reqs(info, &ring_req, pending_req); |
742 | err = prepare_pending_reqs(info, ring_req, pending_req); | ||
743 | if (err) { | 741 | if (err) { |
744 | switch (err) { | 742 | switch (err) { |
745 | case -ENODEV: | 743 | case -ENODEV: |
@@ -755,9 +753,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) | |||
755 | return 1; | 753 | return 1; |
756 | } | 754 | } |
757 | 755 | ||
758 | switch (act) { | 756 | switch (ring_req.act) { |
759 | case VSCSIIF_ACT_SCSI_CDB: | 757 | case VSCSIIF_ACT_SCSI_CDB: |
760 | if (scsiback_gnttab_data_map(ring_req, pending_req)) { | 758 | if (scsiback_gnttab_data_map(&ring_req, pending_req)) { |
761 | scsiback_fast_flush_area(pending_req); | 759 | scsiback_fast_flush_area(pending_req); |
762 | scsiback_do_resp_with_sense(NULL, | 760 | scsiback_do_resp_with_sense(NULL, |
763 | DRIVER_ERROR << 24, 0, pending_req); | 761 | DRIVER_ERROR << 24, 0, pending_req); |
@@ -768,7 +766,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) | |||
768 | break; | 766 | break; |
769 | case VSCSIIF_ACT_SCSI_ABORT: | 767 | case VSCSIIF_ACT_SCSI_ABORT: |
770 | scsiback_device_action(pending_req, TMR_ABORT_TASK, | 768 | scsiback_device_action(pending_req, TMR_ABORT_TASK, |
771 | ring_req->ref_rqid); | 769 | ring_req.ref_rqid); |
772 | break; | 770 | break; |
773 | case VSCSIIF_ACT_SCSI_RESET: | 771 | case VSCSIIF_ACT_SCSI_RESET: |
774 | scsiback_device_action(pending_req, TMR_LUN_RESET, 0); | 772 | scsiback_device_action(pending_req, TMR_LUN_RESET, 0); |
@@ -1661,11 +1659,8 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg, | |||
1661 | name); | 1659 | name); |
1662 | goto out; | 1660 | goto out; |
1663 | } | 1661 | } |
1664 | /* | 1662 | /* Now register the TCM pvscsi virtual I_T Nexus as active. */ |
1665 | * Now register the TCM pvscsi virtual I_T Nexus as active with the | 1663 | transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, |
1666 | * call to __transport_register_session() | ||
1667 | */ | ||
1668 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, | ||
1669 | tv_nexus->tvn_se_sess, tv_nexus); | 1664 | tv_nexus->tvn_se_sess, tv_nexus); |
1670 | tpg->tpg_nexus = tv_nexus; | 1665 | tpg->tpg_nexus = tv_nexus; |
1671 | 1666 | ||
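
The scsiback change above copies the whole vscsiif_request off the shared ring into a local struct before it is validated or dispatched, so the frontend cannot rewrite fields (such as the action code) between the check and the use. Stripped of the driver specifics, the pattern is simply the following, with invented names:

struct demo_req {
	unsigned int act;
	unsigned int rqid;
};

static void demo_handle_slot(const struct demo_req *shared_slot)
{
	struct demo_req req = *shared_slot;	/* snapshot the shared memory once */

	/* Only the private copy is inspected from here on, so a concurrent
	 * writer on the other end of the ring cannot change the outcome. */
	switch (req.act) {
	default:
		break;
	}
}
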
diff --git a/fs/affs/file.c b/fs/affs/file.c index d2468bf95669..a91795e01a7f 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c | |||
@@ -699,8 +699,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
699 | boff = tmp % bsize; | 699 | boff = tmp % bsize; |
700 | if (boff) { | 700 | if (boff) { |
701 | bh = affs_bread_ino(inode, bidx, 0); | 701 | bh = affs_bread_ino(inode, bidx, 0); |
702 | if (IS_ERR(bh)) | 702 | if (IS_ERR(bh)) { |
703 | return PTR_ERR(bh); | 703 | written = PTR_ERR(bh); |
704 | goto err_first_bh; | ||
705 | } | ||
704 | tmp = min(bsize - boff, to - from); | 706 | tmp = min(bsize - boff, to - from); |
705 | BUG_ON(boff + tmp > bsize || tmp > bsize); | 707 | BUG_ON(boff + tmp > bsize || tmp > bsize); |
706 | memcpy(AFFS_DATA(bh) + boff, data + from, tmp); | 708 | memcpy(AFFS_DATA(bh) + boff, data + from, tmp); |
@@ -712,14 +714,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
712 | bidx++; | 714 | bidx++; |
713 | } else if (bidx) { | 715 | } else if (bidx) { |
714 | bh = affs_bread_ino(inode, bidx - 1, 0); | 716 | bh = affs_bread_ino(inode, bidx - 1, 0); |
715 | if (IS_ERR(bh)) | 717 | if (IS_ERR(bh)) { |
716 | return PTR_ERR(bh); | 718 | written = PTR_ERR(bh); |
719 | goto err_first_bh; | ||
720 | } | ||
717 | } | 721 | } |
718 | while (from + bsize <= to) { | 722 | while (from + bsize <= to) { |
719 | prev_bh = bh; | 723 | prev_bh = bh; |
720 | bh = affs_getemptyblk_ino(inode, bidx); | 724 | bh = affs_getemptyblk_ino(inode, bidx); |
721 | if (IS_ERR(bh)) | 725 | if (IS_ERR(bh)) |
722 | goto out; | 726 | goto err_bh; |
723 | memcpy(AFFS_DATA(bh), data + from, bsize); | 727 | memcpy(AFFS_DATA(bh), data + from, bsize); |
724 | if (buffer_new(bh)) { | 728 | if (buffer_new(bh)) { |
725 | AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); | 729 | AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); |
@@ -751,7 +755,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
751 | prev_bh = bh; | 755 | prev_bh = bh; |
752 | bh = affs_bread_ino(inode, bidx, 1); | 756 | bh = affs_bread_ino(inode, bidx, 1); |
753 | if (IS_ERR(bh)) | 757 | if (IS_ERR(bh)) |
754 | goto out; | 758 | goto err_bh; |
755 | tmp = min(bsize, to - from); | 759 | tmp = min(bsize, to - from); |
756 | BUG_ON(tmp > bsize); | 760 | BUG_ON(tmp > bsize); |
757 | memcpy(AFFS_DATA(bh), data + from, tmp); | 761 | memcpy(AFFS_DATA(bh), data + from, tmp); |
@@ -790,12 +794,13 @@ done: | |||
790 | if (tmp > inode->i_size) | 794 | if (tmp > inode->i_size) |
791 | inode->i_size = AFFS_I(inode)->mmu_private = tmp; | 795 | inode->i_size = AFFS_I(inode)->mmu_private = tmp; |
792 | 796 | ||
797 | err_first_bh: | ||
793 | unlock_page(page); | 798 | unlock_page(page); |
794 | page_cache_release(page); | 799 | page_cache_release(page); |
795 | 800 | ||
796 | return written; | 801 | return written; |
797 | 802 | ||
798 | out: | 803 | err_bh: |
799 | bh = prev_bh; | 804 | bh = prev_bh; |
800 | if (!written) | 805 | if (!written) |
801 | written = PTR_ERR(bh); | 806 | written = PTR_ERR(bh); |
diff --git a/fs/aio.c b/fs/aio.c | |||
@@ -278,11 +278,11 @@ static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) | |||
278 | return 0; | 278 | return 0; |
279 | } | 279 | } |
280 | 280 | ||
281 | static void aio_ring_remap(struct file *file, struct vm_area_struct *vma) | 281 | static int aio_ring_remap(struct file *file, struct vm_area_struct *vma) |
282 | { | 282 | { |
283 | struct mm_struct *mm = vma->vm_mm; | 283 | struct mm_struct *mm = vma->vm_mm; |
284 | struct kioctx_table *table; | 284 | struct kioctx_table *table; |
285 | int i; | 285 | int i, res = -EINVAL; |
286 | 286 | ||
287 | spin_lock(&mm->ioctx_lock); | 287 | spin_lock(&mm->ioctx_lock); |
288 | rcu_read_lock(); | 288 | rcu_read_lock(); |
@@ -292,13 +292,17 @@ static void aio_ring_remap(struct file *file, struct vm_area_struct *vma) | |||
292 | 292 | ||
293 | ctx = table->table[i]; | 293 | ctx = table->table[i]; |
294 | if (ctx && ctx->aio_ring_file == file) { | 294 | if (ctx && ctx->aio_ring_file == file) { |
295 | ctx->user_id = ctx->mmap_base = vma->vm_start; | 295 | if (!atomic_read(&ctx->dead)) { |
296 | ctx->user_id = ctx->mmap_base = vma->vm_start; | ||
297 | res = 0; | ||
298 | } | ||
296 | break; | 299 | break; |
297 | } | 300 | } |
298 | } | 301 | } |
299 | 302 | ||
300 | rcu_read_unlock(); | 303 | rcu_read_unlock(); |
301 | spin_unlock(&mm->ioctx_lock); | 304 | spin_unlock(&mm->ioctx_lock); |
305 | return res; | ||
302 | } | 306 | } |
303 | 307 | ||
304 | static const struct file_operations aio_ring_fops = { | 308 | static const struct file_operations aio_ring_fops = { |
@@ -727,6 +731,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
727 | err_cleanup: | 731 | err_cleanup: |
728 | aio_nr_sub(ctx->max_reqs); | 732 | aio_nr_sub(ctx->max_reqs); |
729 | err_ctx: | 733 | err_ctx: |
734 | atomic_set(&ctx->dead, 1); | ||
735 | if (ctx->mmap_size) | ||
736 | vm_munmap(ctx->mmap_base, ctx->mmap_size); | ||
730 | aio_free_ring(ctx); | 737 | aio_free_ring(ctx); |
731 | err: | 738 | err: |
732 | mutex_unlock(&ctx->ring_lock); | 739 | mutex_unlock(&ctx->ring_lock); |
@@ -748,11 +755,12 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, | |||
748 | { | 755 | { |
749 | struct kioctx_table *table; | 756 | struct kioctx_table *table; |
750 | 757 | ||
751 | if (atomic_xchg(&ctx->dead, 1)) | 758 | spin_lock(&mm->ioctx_lock); |
759 | if (atomic_xchg(&ctx->dead, 1)) { | ||
760 | spin_unlock(&mm->ioctx_lock); | ||
752 | return -EINVAL; | 761 | return -EINVAL; |
762 | } | ||
753 | 763 | ||
754 | |||
755 | spin_lock(&mm->ioctx_lock); | ||
756 | table = rcu_dereference_raw(mm->ioctx_table); | 764 | table = rcu_dereference_raw(mm->ioctx_table); |
757 | WARN_ON(ctx != table->table[ctx->id]); | 765 | WARN_ON(ctx != table->table[ctx->id]); |
758 | table->table[ctx->id] = NULL; | 766 | table->table[ctx->id] = NULL; |
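
kill_ioctx() now takes mm->ioctx_lock before the atomic_xchg() on ctx->dead, and aio_ring_remap() refuses to record a new mapping for a context already marked dead, closing the race between teardown and a concurrent remap of the ring. The core "mark dead exactly once, under the table lock" shape, with placeholder names, looks like:

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

static int demo_teardown_once(spinlock_t *table_lock, atomic_t *dead)
{
	spin_lock(table_lock);
	if (atomic_xchg(dead, 1)) {	/* already being torn down elsewhere */
		spin_unlock(table_lock);
		return -EINVAL;
	}
	/* ... unlink from the lookup table while still holding the lock ... */
	spin_unlock(table_lock);
	return 0;
}
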
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 993642199326..6d67f32e648d 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -1645,14 +1645,14 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, | |||
1645 | 1645 | ||
1646 | parent_nritems = btrfs_header_nritems(parent); | 1646 | parent_nritems = btrfs_header_nritems(parent); |
1647 | blocksize = root->nodesize; | 1647 | blocksize = root->nodesize; |
1648 | end_slot = parent_nritems; | 1648 | end_slot = parent_nritems - 1; |
1649 | 1649 | ||
1650 | if (parent_nritems == 1) | 1650 | if (parent_nritems <= 1) |
1651 | return 0; | 1651 | return 0; |
1652 | 1652 | ||
1653 | btrfs_set_lock_blocking(parent); | 1653 | btrfs_set_lock_blocking(parent); |
1654 | 1654 | ||
1655 | for (i = start_slot; i < end_slot; i++) { | 1655 | for (i = start_slot; i <= end_slot; i++) { |
1656 | int close = 1; | 1656 | int close = 1; |
1657 | 1657 | ||
1658 | btrfs_node_key(parent, &disk_key, i); | 1658 | btrfs_node_key(parent, &disk_key, i); |
@@ -1669,7 +1669,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, | |||
1669 | other = btrfs_node_blockptr(parent, i - 1); | 1669 | other = btrfs_node_blockptr(parent, i - 1); |
1670 | close = close_blocks(blocknr, other, blocksize); | 1670 | close = close_blocks(blocknr, other, blocksize); |
1671 | } | 1671 | } |
1672 | if (!close && i < end_slot - 2) { | 1672 | if (!close && i < end_slot) { |
1673 | other = btrfs_node_blockptr(parent, i + 1); | 1673 | other = btrfs_node_blockptr(parent, i + 1); |
1674 | close = close_blocks(blocknr, other, blocksize); | 1674 | close = close_blocks(blocknr, other, blocksize); |
1675 | } | 1675 | } |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 84c3b00f3de8..f9c89cae39ee 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -3387,6 +3387,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, | |||
3387 | 3387 | ||
3388 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | 3388 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, |
3389 | struct btrfs_root *root); | 3389 | struct btrfs_root *root); |
3390 | int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, | ||
3391 | struct btrfs_root *root); | ||
3390 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); | 3392 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); |
3391 | int btrfs_free_block_groups(struct btrfs_fs_info *info); | 3393 | int btrfs_free_block_groups(struct btrfs_fs_info *info); |
3392 | int btrfs_read_block_groups(struct btrfs_root *root); | 3394 | int btrfs_read_block_groups(struct btrfs_root *root); |
@@ -3909,6 +3911,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode, | |||
3909 | loff_t actual_len, u64 *alloc_hint); | 3911 | loff_t actual_len, u64 *alloc_hint); |
3910 | int btrfs_inode_check_errors(struct inode *inode); | 3912 | int btrfs_inode_check_errors(struct inode *inode); |
3911 | extern const struct dentry_operations btrfs_dentry_operations; | 3913 | extern const struct dentry_operations btrfs_dentry_operations; |
3914 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | ||
3915 | void btrfs_test_inode_set_ops(struct inode *inode); | ||
3916 | #endif | ||
3912 | 3917 | ||
3913 | /* ioctl.c */ | 3918 | /* ioctl.c */ |
3914 | long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | 3919 | long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f79f38542a73..639f2663ed3f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -3921,7 +3921,7 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, | |||
3921 | } | 3921 | } |
3922 | if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) | 3922 | if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) |
3923 | + sizeof(struct btrfs_chunk)) { | 3923 | + sizeof(struct btrfs_chunk)) { |
3924 | printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n", | 3924 | printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n", |
3925 | btrfs_super_sys_array_size(sb), | 3925 | btrfs_super_sys_array_size(sb), |
3926 | sizeof(struct btrfs_disk_key) | 3926 | sizeof(struct btrfs_disk_key) |
3927 | + sizeof(struct btrfs_chunk)); | 3927 | + sizeof(struct btrfs_chunk)); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 571f402d3fc4..8b353ad02f03 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -3208,6 +3208,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, | |||
3208 | return 0; | 3208 | return 0; |
3209 | } | 3209 | } |
3210 | 3210 | ||
3211 | if (trans->aborted) | ||
3212 | return 0; | ||
3211 | again: | 3213 | again: |
3212 | inode = lookup_free_space_inode(root, block_group, path); | 3214 | inode = lookup_free_space_inode(root, block_group, path); |
3213 | if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { | 3215 | if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { |
@@ -3243,6 +3245,20 @@ again: | |||
3243 | */ | 3245 | */ |
3244 | BTRFS_I(inode)->generation = 0; | 3246 | BTRFS_I(inode)->generation = 0; |
3245 | ret = btrfs_update_inode(trans, root, inode); | 3247 | ret = btrfs_update_inode(trans, root, inode); |
3248 | if (ret) { | ||
3249 | /* | ||
3250 | * So theoretically we could recover from this, simply set the | ||
3251 | * super cache generation to 0 so we know to invalidate the | ||
3252 | * cache, but then we'd have to keep track of the block groups | ||
3253 | * that fail this way so we know we _have_ to reset this cache | ||
3254 | * before the next commit or risk reading stale cache. So to | ||
3255 | * limit our exposure to horrible edge cases lets just abort the | ||
3256 | * transaction, this only happens in really bad situations | ||
3257 | * anyway. | ||
3258 | */ | ||
3259 | btrfs_abort_transaction(trans, root, ret); | ||
3260 | goto out_put; | ||
3261 | } | ||
3246 | WARN_ON(ret); | 3262 | WARN_ON(ret); |
3247 | 3263 | ||
3248 | if (i_size_read(inode) > 0) { | 3264 | if (i_size_read(inode) > 0) { |
@@ -3309,6 +3325,32 @@ out: | |||
3309 | return ret; | 3325 | return ret; |
3310 | } | 3326 | } |
3311 | 3327 | ||
3328 | int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, | ||
3329 | struct btrfs_root *root) | ||
3330 | { | ||
3331 | struct btrfs_block_group_cache *cache, *tmp; | ||
3332 | struct btrfs_transaction *cur_trans = trans->transaction; | ||
3333 | struct btrfs_path *path; | ||
3334 | |||
3335 | if (list_empty(&cur_trans->dirty_bgs) || | ||
3336 | !btrfs_test_opt(root, SPACE_CACHE)) | ||
3337 | return 0; | ||
3338 | |||
3339 | path = btrfs_alloc_path(); | ||
3340 | if (!path) | ||
3341 | return -ENOMEM; | ||
3342 | |||
3343 | /* Could add new block groups, use _safe just in case */ | ||
3344 | list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, | ||
3345 | dirty_list) { | ||
3346 | if (cache->disk_cache_state == BTRFS_DC_CLEAR) | ||
3347 | cache_save_setup(cache, trans, path); | ||
3348 | } | ||
3349 | |||
3350 | btrfs_free_path(path); | ||
3351 | return 0; | ||
3352 | } | ||
3353 | |||
3312 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | 3354 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, |
3313 | struct btrfs_root *root) | 3355 | struct btrfs_root *root) |
3314 | { | 3356 | { |
@@ -5094,7 +5136,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |||
5094 | num_bytes = ALIGN(num_bytes, root->sectorsize); | 5136 | num_bytes = ALIGN(num_bytes, root->sectorsize); |
5095 | 5137 | ||
5096 | spin_lock(&BTRFS_I(inode)->lock); | 5138 | spin_lock(&BTRFS_I(inode)->lock); |
5097 | BTRFS_I(inode)->outstanding_extents++; | 5139 | nr_extents = (unsigned)div64_u64(num_bytes + |
5140 | BTRFS_MAX_EXTENT_SIZE - 1, | ||
5141 | BTRFS_MAX_EXTENT_SIZE); | ||
5142 | BTRFS_I(inode)->outstanding_extents += nr_extents; | ||
5143 | nr_extents = 0; | ||
5098 | 5144 | ||
5099 | if (BTRFS_I(inode)->outstanding_extents > | 5145 | if (BTRFS_I(inode)->outstanding_extents > |
5100 | BTRFS_I(inode)->reserved_extents) | 5146 | BTRFS_I(inode)->reserved_extents) |
@@ -5239,6 +5285,9 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) | |||
5239 | if (dropped > 0) | 5285 | if (dropped > 0) |
5240 | to_free += btrfs_calc_trans_metadata_size(root, dropped); | 5286 | to_free += btrfs_calc_trans_metadata_size(root, dropped); |
5241 | 5287 | ||
5288 | if (btrfs_test_is_dummy_root(root)) | ||
5289 | return; | ||
5290 | |||
5242 | trace_btrfs_space_reservation(root->fs_info, "delalloc", | 5291 | trace_btrfs_space_reservation(root->fs_info, "delalloc", |
5243 | btrfs_ino(inode), to_free, 0); | 5292 | btrfs_ino(inode), to_free, 0); |
5244 | if (root->fs_info->quota_enabled) { | 5293 | if (root->fs_info->quota_enabled) { |
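
btrfs_delalloc_reserve_metadata() above now charges one outstanding extent per BTRFS_MAX_EXTENT_SIZE worth of bytes rather than a flat one per reservation. The rounding is a plain ceiling division; a sketch with an assumed 128 MiB maximum extent size (the constant is illustrative and not taken from this hunk):

#include <linux/types.h>
#include <linux/math64.h>

#define DEMO_MAX_EXTENT_SIZE	(128ULL * 1024 * 1024)

/* 1 byte -> 1 extent, 128 MiB -> 1, 128 MiB + 1 byte -> 2, and so on. */
static u64 demo_outstanding_extents(u64 num_bytes)
{
	return div64_u64(num_bytes + DEMO_MAX_EXTENT_SIZE - 1,
			 DEMO_MAX_EXTENT_SIZE);
}
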
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c7233ff1d533..d688cfe5d496 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -4968,6 +4968,12 @@ static int release_extent_buffer(struct extent_buffer *eb) | |||
4968 | 4968 | ||
4969 | /* Should be safe to release our pages at this point */ | 4969 | /* Should be safe to release our pages at this point */ |
4970 | btrfs_release_extent_buffer_page(eb); | 4970 | btrfs_release_extent_buffer_page(eb); |
4971 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | ||
4972 | if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) { | ||
4973 | __free_extent_buffer(eb); | ||
4974 | return 1; | ||
4975 | } | ||
4976 | #endif | ||
4971 | call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); | 4977 | call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); |
4972 | return 1; | 4978 | return 1; |
4973 | } | 4979 | } |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b78bbbac900d..30982bbd31c3 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -1811,22 +1811,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, | |||
1811 | mutex_unlock(&inode->i_mutex); | 1811 | mutex_unlock(&inode->i_mutex); |
1812 | 1812 | ||
1813 | /* | 1813 | /* |
1814 | * we want to make sure fsync finds this change | ||
1815 | * but we haven't joined a transaction running right now. | ||
1816 | * | ||
1817 | * Later on, someone is sure to update the inode and get the | ||
1818 | * real transid recorded. | ||
1819 | * | ||
1820 | * We set last_trans now to the fs_info generation + 1, | ||
1821 | * this will either be one more than the running transaction | ||
1822 | * or the generation used for the next transaction if there isn't | ||
1823 | * one running right now. | ||
1824 | * | ||
1825 | * We also have to set last_sub_trans to the current log transid, | 1814 | * We also have to set last_sub_trans to the current log transid, |
1826 | * otherwise subsequent syncs to a file that's been synced in this | 1815 | * otherwise subsequent syncs to a file that's been synced in this |
1827 | * transaction will appear to have already occurred. | 1816 | * transaction will appear to have already occurred. |
1828 | */ | 1817 | */ |
1829 | BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; | ||
1830 | BTRFS_I(inode)->last_sub_trans = root->log_transid; | 1818 | BTRFS_I(inode)->last_sub_trans = root->log_transid; |
1831 | if (num_written > 0) { | 1819 | if (num_written > 0) { |
1832 | err = generic_write_sync(file, pos, num_written); | 1820 | err = generic_write_sync(file, pos, num_written); |
@@ -1959,25 +1947,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
1959 | atomic_inc(&root->log_batch); | 1947 | atomic_inc(&root->log_batch); |
1960 | 1948 | ||
1961 | /* | 1949 | /* |
1962 | * check the transaction that last modified this inode | 1950 | * If the last transaction that changed this file was before the current |
1963 | * and see if its already been committed | 1951 | * transaction and we have the full sync flag set in our inode, we can |
1964 | */ | 1952 | * bail out now without any syncing. |
1965 | if (!BTRFS_I(inode)->last_trans) { | 1953 | * |
1966 | mutex_unlock(&inode->i_mutex); | 1954 | * Note that we can't bail out if the full sync flag isn't set. This is |
1967 | goto out; | 1955 | * because when the full sync flag is set we start all ordered extents |
1968 | } | 1956 | * and wait for them to fully complete - when they complete they update |
1969 | 1957 | * the inode's last_trans field through: | |
1970 | /* | 1958 | * |
1971 | * if the last transaction that changed this file was before | 1959 | * btrfs_finish_ordered_io() -> |
1972 | * the current transaction, we can bail out now without any | 1960 | * btrfs_update_inode_fallback() -> |
1973 | * syncing | 1961 | * btrfs_update_inode() -> |
1962 | * btrfs_set_inode_last_trans() | ||
1963 | * | ||
1964 | * So we are sure that last_trans is up to date and can do this check to | ||
1965 | * bail out safely. For the fast path, when the full sync flag is not | ||
1966 | * set in our inode, we can not do it because we start only our ordered | ||
1967 | * extents and don't wait for them to complete (that is when | ||
1968 | * btrfs_finish_ordered_io runs), so here at this point their last_trans | ||
1969 | * value might be less than or equal to fs_info->last_trans_committed, | ||
1970 | * and setting a speculative last_trans for an inode when a buffered | ||
1971 | * write is made (such as fs_info->generation + 1 for example) would not | ||
1972 | * be reliable since after setting the value and before fsync is called | ||
1973 | * any number of transactions can start and commit (transaction kthread | ||
1974 | * commits the current transaction periodically), and a transaction | ||
1975 | * commit does not start nor waits for ordered extents to complete. | ||
1974 | */ | 1976 | */ |
1975 | smp_mb(); | 1977 | smp_mb(); |
1976 | if (btrfs_inode_in_log(inode, root->fs_info->generation) || | 1978 | if (btrfs_inode_in_log(inode, root->fs_info->generation) || |
1977 | BTRFS_I(inode)->last_trans <= | 1979 | (full_sync && BTRFS_I(inode)->last_trans <= |
1978 | root->fs_info->last_trans_committed) { | 1980 | root->fs_info->last_trans_committed)) { |
1979 | BTRFS_I(inode)->last_trans = 0; | ||
1980 | |||
1981 | /* | 1981 | /* |
1982 | * We've had everything committed since the last time we were | 1982 | * We've had everything committed since the last time we were |
1983 | * modified so clear this flag in case it was set for whatever | 1983 | * modified so clear this flag in case it was set for whatever |
@@ -2275,6 +2275,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) | |||
2275 | bool same_page; | 2275 | bool same_page; |
2276 | bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES); | 2276 | bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES); |
2277 | u64 ino_size; | 2277 | u64 ino_size; |
2278 | bool truncated_page = false; | ||
2279 | bool updated_inode = false; | ||
2278 | 2280 | ||
2279 | ret = btrfs_wait_ordered_range(inode, offset, len); | 2281 | ret = btrfs_wait_ordered_range(inode, offset, len); |
2280 | if (ret) | 2282 | if (ret) |
@@ -2306,13 +2308,18 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) | |||
2306 | * entire page. | 2308 | * entire page. |
2307 | */ | 2309 | */ |
2308 | if (same_page && len < PAGE_CACHE_SIZE) { | 2310 | if (same_page && len < PAGE_CACHE_SIZE) { |
2309 | if (offset < ino_size) | 2311 | if (offset < ino_size) { |
2312 | truncated_page = true; | ||
2310 | ret = btrfs_truncate_page(inode, offset, len, 0); | 2313 | ret = btrfs_truncate_page(inode, offset, len, 0); |
2314 | } else { | ||
2315 | ret = 0; | ||
2316 | } | ||
2311 | goto out_only_mutex; | 2317 | goto out_only_mutex; |
2312 | } | 2318 | } |
2313 | 2319 | ||
2314 | /* zero back part of the first page */ | 2320 | /* zero back part of the first page */ |
2315 | if (offset < ino_size) { | 2321 | if (offset < ino_size) { |
2322 | truncated_page = true; | ||
2316 | ret = btrfs_truncate_page(inode, offset, 0, 0); | 2323 | ret = btrfs_truncate_page(inode, offset, 0, 0); |
2317 | if (ret) { | 2324 | if (ret) { |
2318 | mutex_unlock(&inode->i_mutex); | 2325 | mutex_unlock(&inode->i_mutex); |
@@ -2348,6 +2355,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) | |||
2348 | if (!ret) { | 2355 | if (!ret) { |
2349 | /* zero the front end of the last page */ | 2356 | /* zero the front end of the last page */ |
2350 | if (tail_start + tail_len < ino_size) { | 2357 | if (tail_start + tail_len < ino_size) { |
2358 | truncated_page = true; | ||
2351 | ret = btrfs_truncate_page(inode, | 2359 | ret = btrfs_truncate_page(inode, |
2352 | tail_start + tail_len, 0, 1); | 2360 | tail_start + tail_len, 0, 1); |
2353 | if (ret) | 2361 | if (ret) |
@@ -2357,8 +2365,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) | |||
2357 | } | 2365 | } |
2358 | 2366 | ||
2359 | if (lockend < lockstart) { | 2367 | if (lockend < lockstart) { |
2360 | mutex_unlock(&inode->i_mutex); | 2368 | ret = 0; |
2361 | return 0; | 2369 | goto out_only_mutex; |
2362 | } | 2370 | } |
2363 | 2371 | ||
2364 | while (1) { | 2372 | while (1) { |
@@ -2506,6 +2514,7 @@ out_trans: | |||
2506 | 2514 | ||
2507 | trans->block_rsv = &root->fs_info->trans_block_rsv; | 2515 | trans->block_rsv = &root->fs_info->trans_block_rsv; |
2508 | ret = btrfs_update_inode(trans, root, inode); | 2516 | ret = btrfs_update_inode(trans, root, inode); |
2517 | updated_inode = true; | ||
2509 | btrfs_end_transaction(trans, root); | 2518 | btrfs_end_transaction(trans, root); |
2510 | btrfs_btree_balance_dirty(root); | 2519 | btrfs_btree_balance_dirty(root); |
2511 | out_free: | 2520 | out_free: |
@@ -2515,6 +2524,22 @@ out: | |||
2515 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, | 2524 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, |
2516 | &cached_state, GFP_NOFS); | 2525 | &cached_state, GFP_NOFS); |
2517 | out_only_mutex: | 2526 | out_only_mutex: |
2527 | if (!updated_inode && truncated_page && !ret && !err) { | ||
2528 | /* | ||
2529 | * If we only end up zeroing part of a page, we still need to | ||
2530 | * update the inode item, so that all the time fields are | ||
2531 | * updated as well as the necessary btrfs inode in memory fields | ||
2532 | * for detecting, at fsync time, if the inode isn't yet in the | ||
2533 | * log tree or it's there but not up to date. | ||
2534 | */ | ||
2535 | trans = btrfs_start_transaction(root, 1); | ||
2536 | if (IS_ERR(trans)) { | ||
2537 | err = PTR_ERR(trans); | ||
2538 | } else { | ||
2539 | err = btrfs_update_inode(trans, root, inode); | ||
2540 | ret = btrfs_end_transaction(trans, root); | ||
2541 | } | ||
2542 | } | ||
2518 | mutex_unlock(&inode->i_mutex); | 2543 | mutex_unlock(&inode->i_mutex); |
2519 | if (ret && !err) | 2544 | if (ret && !err) |
2520 | err = ret; | 2545 | err = ret; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a85c23dfcddb..d2e732d7af52 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -108,6 +108,13 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start, | |||
108 | 108 | ||
109 | static int btrfs_dirty_inode(struct inode *inode); | 109 | static int btrfs_dirty_inode(struct inode *inode); |
110 | 110 | ||
111 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | ||
112 | void btrfs_test_inode_set_ops(struct inode *inode) | ||
113 | { | ||
114 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | ||
115 | } | ||
116 | #endif | ||
117 | |||
111 | static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, | 118 | static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, |
112 | struct inode *inode, struct inode *dir, | 119 | struct inode *inode, struct inode *dir, |
113 | const struct qstr *qstr) | 120 | const struct qstr *qstr) |
@@ -1542,30 +1549,17 @@ static void btrfs_split_extent_hook(struct inode *inode, | |||
1542 | u64 new_size; | 1549 | u64 new_size; |
1543 | 1550 | ||
1544 | /* | 1551 | /* |
1545 | * We need the largest size of the remaining extent to see if we | 1552 | * See the explanation in btrfs_merge_extent_hook, the same |
1546 | * need to add a new outstanding extent. Think of the following | 1553 | * applies here, just in reverse. |
1547 | * case | ||
1548 | * | ||
1549 | * [MEAX_EXTENT_SIZEx2 - 4k][4k] | ||
1550 | * | ||
1551 | * The new_size would just be 4k and we'd think we had enough | ||
1552 | * outstanding extents for this if we only took one side of the | ||
1553 | * split, same goes for the other direction. We need to see if | ||
1554 | * the larger size still is the same amount of extents as the | ||
1555 | * original size, because if it is we need to add a new | ||
1556 | * outstanding extent. But if we split up and the larger size | ||
1557 | * is less than the original then we are good to go since we've | ||
1558 | * already accounted for the extra extent in our original | ||
1559 | * accounting. | ||
1560 | */ | 1554 | */ |
1561 | new_size = orig->end - split + 1; | 1555 | new_size = orig->end - split + 1; |
1562 | if ((split - orig->start) > new_size) | 1556 | num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1563 | new_size = split - orig->start; | ||
1564 | |||
1565 | num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, | ||
1566 | BTRFS_MAX_EXTENT_SIZE); | 1557 | BTRFS_MAX_EXTENT_SIZE); |
1567 | if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, | 1558 | new_size = split - orig->start; |
1568 | BTRFS_MAX_EXTENT_SIZE) < num_extents) | 1559 | num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1560 | BTRFS_MAX_EXTENT_SIZE); | ||
1561 | if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, | ||
1562 | BTRFS_MAX_EXTENT_SIZE) >= num_extents) | ||
1569 | return; | 1563 | return; |
1570 | } | 1564 | } |
1571 | 1565 | ||
@@ -1591,8 +1585,10 @@ static void btrfs_merge_extent_hook(struct inode *inode, | |||
1591 | if (!(other->state & EXTENT_DELALLOC)) | 1585 | if (!(other->state & EXTENT_DELALLOC)) |
1592 | return; | 1586 | return; |
1593 | 1587 | ||
1594 | old_size = other->end - other->start + 1; | 1588 | if (new->start > other->start) |
1595 | new_size = old_size + (new->end - new->start + 1); | 1589 | new_size = new->end - other->start + 1; |
1590 | else | ||
1591 | new_size = other->end - new->start + 1; | ||
1596 | 1592 | ||
1597 | /* we're not bigger than the max, unreserve the space and go */ | 1593 | /* we're not bigger than the max, unreserve the space and go */ |
1598 | if (new_size <= BTRFS_MAX_EXTENT_SIZE) { | 1594 | if (new_size <= BTRFS_MAX_EXTENT_SIZE) { |
@@ -1603,13 +1599,32 @@ static void btrfs_merge_extent_hook(struct inode *inode, | |||
1603 | } | 1599 | } |
1604 | 1600 | ||
1605 | /* | 1601 | /* |
1606 | * If we grew by another max_extent, just return, we want to keep that | 1602 | * We have to add up either side to figure out how many extents were |
1607 | * reserved amount. | 1603 | * accounted for before we merged into one big extent. If the number of |
1604 | * extents we accounted for is <= the amount we need for the new range | ||
1605 | * then we can return, otherwise drop. Think of it like this | ||
1606 | * | ||
1607 | * [ 4k][MAX_SIZE] | ||
1608 | * | ||
1609 | * So we've grown the extent by a MAX_SIZE extent, this would mean we | ||
1610 | * need 2 outstanding extents, on one side we have 1 and the other side | ||
1611 | * we have 1 so they are == and we can return. But in this case | ||
1612 | * | ||
1613 | * [MAX_SIZE+4k][MAX_SIZE+4k] | ||
1614 | * | ||
1615 | * Each range on their own accounts for 2 extents, but merged together | ||
1616 | * they are only 3 extents worth of accounting, so we need to drop in | ||
1617 | * this case. | ||
1608 | */ | 1618 | */ |
1619 | old_size = other->end - other->start + 1; | ||
1609 | num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, | 1620 | num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1610 | BTRFS_MAX_EXTENT_SIZE); | 1621 | BTRFS_MAX_EXTENT_SIZE); |
1622 | old_size = new->end - new->start + 1; | ||
1623 | num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, | ||
1624 | BTRFS_MAX_EXTENT_SIZE); | ||
1625 | |||
1611 | if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, | 1626 | if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1612 | BTRFS_MAX_EXTENT_SIZE) > num_extents) | 1627 | BTRFS_MAX_EXTENT_SIZE) >= num_extents) |
1613 | return; | 1628 | return; |
1614 | 1629 | ||
1615 | spin_lock(&BTRFS_I(inode)->lock); | 1630 | spin_lock(&BTRFS_I(inode)->lock); |
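Both the split and merge hooks above rely on the same ceiling division to turn a byte length into a number of outstanding extents. A minimal hedged sketch of that arithmetic (the helper name is illustrative, not a kernel symbol):

	/*
	 * Each outstanding extent can cover at most BTRFS_MAX_EXTENT_SIZE
	 * bytes, so a delalloc range of len bytes needs
	 * ceil(len / BTRFS_MAX_EXTENT_SIZE) extents worth of reservation.
	 */
	static inline u64 max_extents_for_len(u64 len)
	{
		return div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
				 BTRFS_MAX_EXTENT_SIZE);
	}

The split hook sums this over the two halves and compares against the count for the original range; the merge hook sums it over the two pre-merge ranges and only drops a reservation when the merged range needs strictly fewer extents.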
@@ -1686,6 +1701,10 @@ static void btrfs_set_bit_hook(struct inode *inode, | |||
1686 | spin_unlock(&BTRFS_I(inode)->lock); | 1701 | spin_unlock(&BTRFS_I(inode)->lock); |
1687 | } | 1702 | } |
1688 | 1703 | ||
1704 | /* For sanity tests */ | ||
1705 | if (btrfs_test_is_dummy_root(root)) | ||
1706 | return; | ||
1707 | |||
1689 | __percpu_counter_add(&root->fs_info->delalloc_bytes, len, | 1708 | __percpu_counter_add(&root->fs_info->delalloc_bytes, len, |
1690 | root->fs_info->delalloc_batch); | 1709 | root->fs_info->delalloc_batch); |
1691 | spin_lock(&BTRFS_I(inode)->lock); | 1710 | spin_lock(&BTRFS_I(inode)->lock); |
@@ -1741,6 +1760,10 @@ static void btrfs_clear_bit_hook(struct inode *inode, | |||
1741 | root != root->fs_info->tree_root) | 1760 | root != root->fs_info->tree_root) |
1742 | btrfs_delalloc_release_metadata(inode, len); | 1761 | btrfs_delalloc_release_metadata(inode, len); |
1743 | 1762 | ||
1763 | /* For sanity tests. */ | ||
1764 | if (btrfs_test_is_dummy_root(root)) | ||
1765 | return; | ||
1766 | |||
1744 | if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID | 1767 | if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID |
1745 | && do_list && !(state->state & EXTENT_NORESERVE)) | 1768 | && do_list && !(state->state & EXTENT_NORESERVE)) |
1746 | btrfs_free_reserved_data_space(inode, len); | 1769 | btrfs_free_reserved_data_space(inode, len); |
@@ -7213,7 +7236,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7213 | u64 start = iblock << inode->i_blkbits; | 7236 | u64 start = iblock << inode->i_blkbits; |
7214 | u64 lockstart, lockend; | 7237 | u64 lockstart, lockend; |
7215 | u64 len = bh_result->b_size; | 7238 | u64 len = bh_result->b_size; |
7216 | u64 orig_len = len; | 7239 | u64 *outstanding_extents = NULL; |
7217 | int unlock_bits = EXTENT_LOCKED; | 7240 | int unlock_bits = EXTENT_LOCKED; |
7218 | int ret = 0; | 7241 | int ret = 0; |
7219 | 7242 | ||
@@ -7225,6 +7248,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7225 | lockstart = start; | 7248 | lockstart = start; |
7226 | lockend = start + len - 1; | 7249 | lockend = start + len - 1; |
7227 | 7250 | ||
7251 | if (current->journal_info) { | ||
7252 | /* | ||
7253 | * Need to pull our outstanding extents and set journal_info to NULL so | ||
7252 | 7254 | * that anything that needs to check if there's a transaction doesn't get | ||
7255 | * confused. | ||
7256 | */ | ||
7257 | outstanding_extents = current->journal_info; | ||
7258 | current->journal_info = NULL; | ||
7259 | } | ||
7260 | |||
7228 | /* | 7261 | /* |
7229 | * If this errors out it's because we couldn't invalidate pagecache for | 7262 | * If this errors out it's because we couldn't invalidate pagecache for |
7230 | * this range and we need to fallback to buffered. | 7263 | * this range and we need to fallback to buffered. |
@@ -7285,7 +7318,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7285 | ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && | 7318 | ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && |
7286 | em->block_start != EXTENT_MAP_HOLE)) { | 7319 | em->block_start != EXTENT_MAP_HOLE)) { |
7287 | int type; | 7320 | int type; |
7288 | int ret; | ||
7289 | u64 block_start, orig_start, orig_block_len, ram_bytes; | 7321 | u64 block_start, orig_start, orig_block_len, ram_bytes; |
7290 | 7322 | ||
7291 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | 7323 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) |
@@ -7349,11 +7381,20 @@ unlock: | |||
7349 | if (start + len > i_size_read(inode)) | 7381 | if (start + len > i_size_read(inode)) |
7350 | i_size_write(inode, start + len); | 7382 | i_size_write(inode, start + len); |
7351 | 7383 | ||
7352 | if (len < orig_len) { | 7384 | /* |
7385 | * If we have an outstanding_extents count still set then we're | ||
7386 | * within our reservation, otherwise we need to adjust our inode | ||
7387 | * counter appropriately. | ||
7388 | */ | ||
7389 | if (*outstanding_extents) { | ||
7390 | (*outstanding_extents)--; | ||
7391 | } else { | ||
7353 | spin_lock(&BTRFS_I(inode)->lock); | 7392 | spin_lock(&BTRFS_I(inode)->lock); |
7354 | BTRFS_I(inode)->outstanding_extents++; | 7393 | BTRFS_I(inode)->outstanding_extents++; |
7355 | spin_unlock(&BTRFS_I(inode)->lock); | 7394 | spin_unlock(&BTRFS_I(inode)->lock); |
7356 | } | 7395 | } |
7396 | |||
7397 | current->journal_info = outstanding_extents; | ||
7357 | btrfs_free_reserved_data_space(inode, len); | 7398 | btrfs_free_reserved_data_space(inode, len); |
7358 | } | 7399 | } |
7359 | 7400 | ||
@@ -7377,6 +7418,8 @@ unlock: | |||
7377 | unlock_err: | 7418 | unlock_err: |
7378 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, | 7419 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, |
7379 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); | 7420 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); |
7421 | if (outstanding_extents) | ||
7422 | current->journal_info = outstanding_extents; | ||
7380 | return ret; | 7423 | return ret; |
7381 | } | 7424 | } |
7382 | 7425 | ||
@@ -8076,6 +8119,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |||
8076 | { | 8119 | { |
8077 | struct file *file = iocb->ki_filp; | 8120 | struct file *file = iocb->ki_filp; |
8078 | struct inode *inode = file->f_mapping->host; | 8121 | struct inode *inode = file->f_mapping->host; |
8122 | u64 outstanding_extents = 0; | ||
8079 | size_t count = 0; | 8123 | size_t count = 0; |
8080 | int flags = 0; | 8124 | int flags = 0; |
8081 | bool wakeup = true; | 8125 | bool wakeup = true; |
@@ -8113,6 +8157,16 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |||
8113 | ret = btrfs_delalloc_reserve_space(inode, count); | 8157 | ret = btrfs_delalloc_reserve_space(inode, count); |
8114 | if (ret) | 8158 | if (ret) |
8115 | goto out; | 8159 | goto out; |
8160 | outstanding_extents = div64_u64(count + | ||
8161 | BTRFS_MAX_EXTENT_SIZE - 1, | ||
8162 | BTRFS_MAX_EXTENT_SIZE); | ||
8163 | |||
8164 | /* | ||
8165 | * We need to know how many extents we reserved so that we can | ||
8166 | * do the accounting properly if we go over the number we | ||
8167 | * originally calculated. Abuse current->journal_info for this. | ||
8168 | */ | ||
8169 | current->journal_info = &outstanding_extents; | ||
8116 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, | 8170 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, |
8117 | &BTRFS_I(inode)->runtime_flags)) { | 8171 | &BTRFS_I(inode)->runtime_flags)) { |
8118 | inode_dio_done(inode); | 8172 | inode_dio_done(inode); |
@@ -8125,6 +8179,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |||
8125 | iter, offset, btrfs_get_blocks_direct, NULL, | 8179 | iter, offset, btrfs_get_blocks_direct, NULL, |
8126 | btrfs_submit_direct, flags); | 8180 | btrfs_submit_direct, flags); |
8127 | if (rw & WRITE) { | 8181 | if (rw & WRITE) { |
8182 | current->journal_info = NULL; | ||
8128 | if (ret < 0 && ret != -EIOCBQUEUED) | 8183 | if (ret < 0 && ret != -EIOCBQUEUED) |
8129 | btrfs_delalloc_release_space(inode, count); | 8184 | btrfs_delalloc_release_space(inode, count); |
8130 | else if (ret >= 0 && (size_t)ret < count) | 8185 | else if (ret >= 0 && (size_t)ret < count) |
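The direct IO hunks above thread a reservation estimate through current->journal_info. A hedged outline of the hand-off, using only names that appear in the diff (the surrounding scaffolding is illustrative, error paths omitted):

	/* btrfs_direct_IO(), write path: estimate the reservation */
	u64 outstanding_extents = div64_u64(count + BTRFS_MAX_EXTENT_SIZE - 1,
					    BTRFS_MAX_EXTENT_SIZE);
	current->journal_info = &outstanding_extents;	/* no transaction here */

	/* btrfs_get_blocks_direct(), per mapped extent */
	u64 *left = current->journal_info;
	current->journal_info = NULL;		/* don't look like a transaction */
	/* ... map one extent ... */
	if (*left)
		(*left)--;			/* still within the reservation */
	else
		BTRFS_I(inode)->outstanding_extents++;	/* beyond the estimate */
	current->journal_info = left;		/* restore for the next call */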
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 534544e08f76..157cc54fc634 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
@@ -452,9 +452,7 @@ void btrfs_get_logged_extents(struct inode *inode, | |||
452 | continue; | 452 | continue; |
453 | if (entry_end(ordered) <= start) | 453 | if (entry_end(ordered) <= start) |
454 | break; | 454 | break; |
455 | if (!list_empty(&ordered->log_list)) | 455 | if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) |
456 | continue; | ||
457 | if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) | ||
458 | continue; | 456 | continue; |
459 | list_add(&ordered->log_list, logged_list); | 457 | list_add(&ordered->log_list, logged_list); |
460 | atomic_inc(&ordered->refs); | 458 | atomic_inc(&ordered->refs); |
@@ -511,8 +509,7 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans, | |||
511 | wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, | 509 | wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, |
512 | &ordered->flags)); | 510 | &ordered->flags)); |
513 | 511 | ||
514 | if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) | 512 | list_add_tail(&ordered->trans_list, &trans->ordered); |
515 | list_add_tail(&ordered->trans_list, &trans->ordered); | ||
516 | spin_lock_irq(&log->log_extents_lock[index]); | 513 | spin_lock_irq(&log->log_extents_lock[index]); |
517 | } | 514 | } |
518 | spin_unlock_irq(&log->log_extents_lock[index]); | 515 | spin_unlock_irq(&log->log_extents_lock[index]); |
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 97159a8e91d4..058c79eecbfb 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
@@ -1259,7 +1259,7 @@ static int comp_oper(struct btrfs_qgroup_operation *oper1, | |||
1259 | if (oper1->seq < oper2->seq) | 1259 | if (oper1->seq < oper2->seq) |
1260 | return -1; | 1260 | return -1; |
1261 | if (oper1->seq > oper2->seq) | 1261 | if (oper1->seq > oper2->seq) |
1262 | return -1; | 1262 | return 1; |
1263 | if (oper1->ref_root < oper2->ref_root) | 1263 | if (oper1->ref_root < oper2->ref_root) |
1264 | return -1; | 1264 | return -1; |
1265 | if (oper1->ref_root > oper2->ref_root) | 1265 | if (oper1->ref_root > oper2->ref_root) |
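The one-character change above makes the comparator antisymmetric in the seq field; with both branches returning -1, swapping the operands did not flip the sign, which breaks rb-tree ordering for qgroup operations. A generic sketch of a well-formed three-way compare:

	static int cmp_u64(u64 a, u64 b)
	{
		if (a < b)
			return -1;
		if (a > b)
			return 1;	/* sign must flip when operands swap */
		return 0;
	}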
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index fe5857223515..d6033f540cc7 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c | |||
@@ -230,6 +230,7 @@ struct pending_dir_move { | |||
230 | u64 parent_ino; | 230 | u64 parent_ino; |
231 | u64 ino; | 231 | u64 ino; |
232 | u64 gen; | 232 | u64 gen; |
233 | bool is_orphan; | ||
233 | struct list_head update_refs; | 234 | struct list_head update_refs; |
234 | }; | 235 | }; |
235 | 236 | ||
@@ -2984,7 +2985,8 @@ static int add_pending_dir_move(struct send_ctx *sctx, | |||
2984 | u64 ino_gen, | 2985 | u64 ino_gen, |
2985 | u64 parent_ino, | 2986 | u64 parent_ino, |
2986 | struct list_head *new_refs, | 2987 | struct list_head *new_refs, |
2987 | struct list_head *deleted_refs) | 2988 | struct list_head *deleted_refs, |
2989 | const bool is_orphan) | ||
2988 | { | 2990 | { |
2989 | struct rb_node **p = &sctx->pending_dir_moves.rb_node; | 2991 | struct rb_node **p = &sctx->pending_dir_moves.rb_node; |
2990 | struct rb_node *parent = NULL; | 2992 | struct rb_node *parent = NULL; |
@@ -2999,6 +3001,7 @@ static int add_pending_dir_move(struct send_ctx *sctx, | |||
2999 | pm->parent_ino = parent_ino; | 3001 | pm->parent_ino = parent_ino; |
3000 | pm->ino = ino; | 3002 | pm->ino = ino; |
3001 | pm->gen = ino_gen; | 3003 | pm->gen = ino_gen; |
3004 | pm->is_orphan = is_orphan; | ||
3002 | INIT_LIST_HEAD(&pm->list); | 3005 | INIT_LIST_HEAD(&pm->list); |
3003 | INIT_LIST_HEAD(&pm->update_refs); | 3006 | INIT_LIST_HEAD(&pm->update_refs); |
3004 | RB_CLEAR_NODE(&pm->node); | 3007 | RB_CLEAR_NODE(&pm->node); |
@@ -3131,16 +3134,20 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
3131 | rmdir_ino = dm->rmdir_ino; | 3134 | rmdir_ino = dm->rmdir_ino; |
3132 | free_waiting_dir_move(sctx, dm); | 3135 | free_waiting_dir_move(sctx, dm); |
3133 | 3136 | ||
3134 | ret = get_first_ref(sctx->parent_root, pm->ino, | 3137 | if (pm->is_orphan) { |
3135 | &parent_ino, &parent_gen, name); | 3138 | ret = gen_unique_name(sctx, pm->ino, |
3136 | if (ret < 0) | 3139 | pm->gen, from_path); |
3137 | goto out; | 3140 | } else { |
3138 | 3141 | ret = get_first_ref(sctx->parent_root, pm->ino, | |
3139 | ret = get_cur_path(sctx, parent_ino, parent_gen, | 3142 | &parent_ino, &parent_gen, name); |
3140 | from_path); | 3143 | if (ret < 0) |
3141 | if (ret < 0) | 3144 | goto out; |
3142 | goto out; | 3145 | ret = get_cur_path(sctx, parent_ino, parent_gen, |
3143 | ret = fs_path_add_path(from_path, name); | 3146 | from_path); |
3147 | if (ret < 0) | ||
3148 | goto out; | ||
3149 | ret = fs_path_add_path(from_path, name); | ||
3150 | } | ||
3144 | if (ret < 0) | 3151 | if (ret < 0) |
3145 | goto out; | 3152 | goto out; |
3146 | 3153 | ||
@@ -3150,7 +3157,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) | |||
3150 | LIST_HEAD(deleted_refs); | 3157 | LIST_HEAD(deleted_refs); |
3151 | ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); | 3158 | ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); |
3152 | ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, | 3159 | ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, |
3153 | &pm->update_refs, &deleted_refs); | 3160 | &pm->update_refs, &deleted_refs, |
3161 | pm->is_orphan); | ||
3154 | if (ret < 0) | 3162 | if (ret < 0) |
3155 | goto out; | 3163 | goto out; |
3156 | if (rmdir_ino) { | 3164 | if (rmdir_ino) { |
@@ -3283,6 +3291,127 @@ out: | |||
3283 | return ret; | 3291 | return ret; |
3284 | } | 3292 | } |
3285 | 3293 | ||
3294 | /* | ||
3295 | * We might need to delay a directory rename even when no ancestor directory | ||
3296 | * (in the send root) with a higher inode number than ours (sctx->cur_ino) was | ||
3297 | * renamed. This happens when we rename a directory to the old name (the name | ||
3298 | * in the parent root) of some other unrelated directory that got its rename | ||
3299 | * delayed due to some ancestor with higher number that got renamed. | ||
3300 | * | ||
3301 | * Example: | ||
3302 | * | ||
3303 | * Parent snapshot: | ||
3304 | * . (ino 256) | ||
3305 | * |---- a/ (ino 257) | ||
3306 | * | |---- file (ino 260) | ||
3307 | * | | ||
3308 | * |---- b/ (ino 258) | ||
3309 | * |---- c/ (ino 259) | ||
3310 | * | ||
3311 | * Send snapshot: | ||
3312 | * . (ino 256) | ||
3313 | * |---- a/ (ino 258) | ||
3314 | * |---- x/ (ino 259) | ||
3315 | * |---- y/ (ino 257) | ||
3316 | * |----- file (ino 260) | ||
3317 | * | ||
3318 | * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257 | ||
3319 | * from 'a' to 'x/y' happening first, which in turn depends on the rename of | ||
3320 | * inode 259 from 'c' to 'x'. So the order of rename commands the send stream | ||
3321 | * must issue is: | ||
3322 | * | ||
3323 | * 1 - rename 259 from 'c' to 'x' | ||
3324 | * 2 - rename 257 from 'a' to 'x/y' | ||
3325 | * 3 - rename 258 from 'b' to 'a' | ||
3326 | * | ||
3327 | * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can | ||
3328 | * be done right away and < 0 on error. | ||
3329 | */ | ||
3330 | static int wait_for_dest_dir_move(struct send_ctx *sctx, | ||
3331 | struct recorded_ref *parent_ref, | ||
3332 | const bool is_orphan) | ||
3333 | { | ||
3334 | struct btrfs_path *path; | ||
3335 | struct btrfs_key key; | ||
3336 | struct btrfs_key di_key; | ||
3337 | struct btrfs_dir_item *di; | ||
3338 | u64 left_gen; | ||
3339 | u64 right_gen; | ||
3340 | int ret = 0; | ||
3341 | |||
3342 | if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) | ||
3343 | return 0; | ||
3344 | |||
3345 | path = alloc_path_for_send(); | ||
3346 | if (!path) | ||
3347 | return -ENOMEM; | ||
3348 | |||
3349 | key.objectid = parent_ref->dir; | ||
3350 | key.type = BTRFS_DIR_ITEM_KEY; | ||
3351 | key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len); | ||
3352 | |||
3353 | ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); | ||
3354 | if (ret < 0) { | ||
3355 | goto out; | ||
3356 | } else if (ret > 0) { | ||
3357 | ret = 0; | ||
3358 | goto out; | ||
3359 | } | ||
3360 | |||
3361 | di = btrfs_match_dir_item_name(sctx->parent_root, path, | ||
3362 | parent_ref->name, parent_ref->name_len); | ||
3363 | if (!di) { | ||
3364 | ret = 0; | ||
3365 | goto out; | ||
3366 | } | ||
3367 | /* | ||
3368 | * di_key.objectid has the number of the inode that has a dentry in the | ||
3369 | * parent directory with the same name that sctx->cur_ino is being | ||
3370 | * renamed to. We need to check if that inode is in the send root as | ||
3371 | * well and if it is currently marked as an inode with a pending rename, | ||
3372 | * if it is, we need to delay the rename of sctx->cur_ino as well, so | ||
3373 | * that it happens after that other inode is renamed. | ||
3374 | */ | ||
3375 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key); | ||
3376 | if (di_key.type != BTRFS_INODE_ITEM_KEY) { | ||
3377 | ret = 0; | ||
3378 | goto out; | ||
3379 | } | ||
3380 | |||
3381 | ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL, | ||
3382 | &left_gen, NULL, NULL, NULL, NULL); | ||
3383 | if (ret < 0) | ||
3384 | goto out; | ||
3385 | ret = get_inode_info(sctx->send_root, di_key.objectid, NULL, | ||
3386 | &right_gen, NULL, NULL, NULL, NULL); | ||
3387 | if (ret < 0) { | ||
3388 | if (ret == -ENOENT) | ||
3389 | ret = 0; | ||
3390 | goto out; | ||
3391 | } | ||
3392 | |||
3393 | /* Different inode, no need to delay the rename of sctx->cur_ino */ | ||
3394 | if (right_gen != left_gen) { | ||
3395 | ret = 0; | ||
3396 | goto out; | ||
3397 | } | ||
3398 | |||
3399 | if (is_waiting_for_move(sctx, di_key.objectid)) { | ||
3400 | ret = add_pending_dir_move(sctx, | ||
3401 | sctx->cur_ino, | ||
3402 | sctx->cur_inode_gen, | ||
3403 | di_key.objectid, | ||
3404 | &sctx->new_refs, | ||
3405 | &sctx->deleted_refs, | ||
3406 | is_orphan); | ||
3407 | if (!ret) | ||
3408 | ret = 1; | ||
3409 | } | ||
3410 | out: | ||
3411 | btrfs_free_path(path); | ||
3412 | return ret; | ||
3413 | } | ||
3414 | |||
3286 | static int wait_for_parent_move(struct send_ctx *sctx, | 3415 | static int wait_for_parent_move(struct send_ctx *sctx, |
3287 | struct recorded_ref *parent_ref) | 3416 | struct recorded_ref *parent_ref) |
3288 | { | 3417 | { |
@@ -3349,7 +3478,8 @@ out: | |||
3349 | sctx->cur_inode_gen, | 3478 | sctx->cur_inode_gen, |
3350 | ino, | 3479 | ino, |
3351 | &sctx->new_refs, | 3480 | &sctx->new_refs, |
3352 | &sctx->deleted_refs); | 3481 | &sctx->deleted_refs, |
3482 | false); | ||
3353 | if (!ret) | 3483 | if (!ret) |
3354 | ret = 1; | 3484 | ret = 1; |
3355 | } | 3485 | } |
@@ -3372,6 +3502,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) | |||
3372 | int did_overwrite = 0; | 3502 | int did_overwrite = 0; |
3373 | int is_orphan = 0; | 3503 | int is_orphan = 0; |
3374 | u64 last_dir_ino_rm = 0; | 3504 | u64 last_dir_ino_rm = 0; |
3505 | bool can_rename = true; | ||
3375 | 3506 | ||
3376 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | 3507 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); |
3377 | 3508 | ||
@@ -3490,12 +3621,22 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3490 | } | 3621 | } |
3491 | } | 3622 | } |
3492 | 3623 | ||
3624 | if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { | ||
3625 | ret = wait_for_dest_dir_move(sctx, cur, is_orphan); | ||
3626 | if (ret < 0) | ||
3627 | goto out; | ||
3628 | if (ret == 1) { | ||
3629 | can_rename = false; | ||
3630 | *pending_move = 1; | ||
3631 | } | ||
3632 | } | ||
3633 | |||
3493 | /* | 3634 | /* |
3494 | * link/move the ref to the new place. If we have an orphan | 3635 | * link/move the ref to the new place. If we have an orphan |
3495 | * inode, move it and update valid_path. If not, link or move | 3636 | * inode, move it and update valid_path. If not, link or move |
3496 | * it depending on the inode mode. | 3637 | * it depending on the inode mode. |
3497 | */ | 3638 | */ |
3498 | if (is_orphan) { | 3639 | if (is_orphan && can_rename) { |
3499 | ret = send_rename(sctx, valid_path, cur->full_path); | 3640 | ret = send_rename(sctx, valid_path, cur->full_path); |
3500 | if (ret < 0) | 3641 | if (ret < 0) |
3501 | goto out; | 3642 | goto out; |
@@ -3503,7 +3644,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | |||
3503 | ret = fs_path_copy(valid_path, cur->full_path); | 3644 | ret = fs_path_copy(valid_path, cur->full_path); |
3504 | if (ret < 0) | 3645 | if (ret < 0) |
3505 | goto out; | 3646 | goto out; |
3506 | } else { | 3647 | } else if (can_rename) { |
3507 | if (S_ISDIR(sctx->cur_inode_mode)) { | 3648 | if (S_ISDIR(sctx->cur_inode_mode)) { |
3508 | /* | 3649 | /* |
3509 | * Dirs can't be linked, so move it. For moved | 3650 | * Dirs can't be linked, so move it. For moved |
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index a116b55ce788..054fc0d97131 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c | |||
@@ -911,6 +911,197 @@ out: | |||
911 | return ret; | 911 | return ret; |
912 | } | 912 | } |
913 | 913 | ||
914 | static int test_extent_accounting(void) | ||
915 | { | ||
916 | struct inode *inode = NULL; | ||
917 | struct btrfs_root *root = NULL; | ||
918 | int ret = -ENOMEM; | ||
919 | |||
920 | inode = btrfs_new_test_inode(); | ||
921 | if (!inode) { | ||
922 | test_msg("Couldn't allocate inode\n"); | ||
923 | return ret; | ||
924 | } | ||
925 | |||
926 | root = btrfs_alloc_dummy_root(); | ||
927 | if (IS_ERR(root)) { | ||
928 | test_msg("Couldn't allocate root\n"); | ||
929 | goto out; | ||
930 | } | ||
931 | |||
932 | root->fs_info = btrfs_alloc_dummy_fs_info(); | ||
933 | if (!root->fs_info) { | ||
934 | test_msg("Couldn't allocate dummy fs info\n"); | ||
935 | goto out; | ||
936 | } | ||
937 | |||
938 | BTRFS_I(inode)->root = root; | ||
939 | btrfs_test_inode_set_ops(inode); | ||
940 | |||
941 | /* [BTRFS_MAX_EXTENT_SIZE] */ | ||
942 | BTRFS_I(inode)->outstanding_extents++; | ||
943 | ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, | ||
944 | NULL); | ||
945 | if (ret) { | ||
946 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
947 | goto out; | ||
948 | } | ||
949 | if (BTRFS_I(inode)->outstanding_extents != 1) { | ||
950 | ret = -EINVAL; | ||
951 | test_msg("Miscount, wanted 1, got %u\n", | ||
952 | BTRFS_I(inode)->outstanding_extents); | ||
953 | goto out; | ||
954 | } | ||
955 | |||
956 | /* [BTRFS_MAX_EXTENT_SIZE][4k] */ | ||
957 | BTRFS_I(inode)->outstanding_extents++; | ||
958 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, | ||
959 | BTRFS_MAX_EXTENT_SIZE + 4095, NULL); | ||
960 | if (ret) { | ||
961 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
962 | goto out; | ||
963 | } | ||
964 | if (BTRFS_I(inode)->outstanding_extents != 2) { | ||
965 | ret = -EINVAL; | ||
966 | test_msg("Miscount, wanted 2, got %u\n", | ||
967 | BTRFS_I(inode)->outstanding_extents); | ||
968 | goto out; | ||
969 | } | ||
970 | |||
971 | /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */ | ||
972 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, | ||
973 | BTRFS_MAX_EXTENT_SIZE >> 1, | ||
974 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | ||
975 | EXTENT_DELALLOC | EXTENT_DIRTY | | ||
976 | EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, | ||
977 | NULL, GFP_NOFS); | ||
978 | if (ret) { | ||
979 | test_msg("clear_extent_bit returned %d\n", ret); | ||
980 | goto out; | ||
981 | } | ||
982 | if (BTRFS_I(inode)->outstanding_extents != 2) { | ||
983 | ret = -EINVAL; | ||
984 | test_msg("Miscount, wanted 2, got %u\n", | ||
985 | BTRFS_I(inode)->outstanding_extents); | ||
986 | goto out; | ||
987 | } | ||
988 | |||
989 | /* [BTRFS_MAX_EXTENT_SIZE][4K] */ | ||
990 | BTRFS_I(inode)->outstanding_extents++; | ||
991 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, | ||
992 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | ||
993 | NULL); | ||
994 | if (ret) { | ||
995 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
996 | goto out; | ||
997 | } | ||
998 | if (BTRFS_I(inode)->outstanding_extents != 2) { | ||
999 | ret = -EINVAL; | ||
1000 | test_msg("Miscount, wanted 2, got %u\n", | ||
1001 | BTRFS_I(inode)->outstanding_extents); | ||
1002 | goto out; | ||
1003 | } | ||
1004 | |||
1005 | /* | ||
1006 | * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K] | ||
1007 | * | ||
1008 | * I'm artificially adding 2 to outstanding_extents because in the | ||
1009 | * buffered IO case we'd add things up as we go, but I don't feel like | ||
1010 | * doing that here, this isn't the interesting case we want to test. | ||
1011 | */ | ||
1012 | BTRFS_I(inode)->outstanding_extents += 2; | ||
1013 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192, | ||
1014 | (BTRFS_MAX_EXTENT_SIZE << 1) + 12287, | ||
1015 | NULL); | ||
1016 | if (ret) { | ||
1017 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
1018 | goto out; | ||
1019 | } | ||
1020 | if (BTRFS_I(inode)->outstanding_extents != 4) { | ||
1021 | ret = -EINVAL; | ||
1022 | test_msg("Miscount, wanted 4, got %u\n", | ||
1023 | BTRFS_I(inode)->outstanding_extents); | ||
1024 | goto out; | ||
1025 | } | ||
1026 | |||
1027 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */ | ||
1028 | BTRFS_I(inode)->outstanding_extents++; | ||
1029 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, | ||
1030 | BTRFS_MAX_EXTENT_SIZE+8191, NULL); | ||
1031 | if (ret) { | ||
1032 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
1033 | goto out; | ||
1034 | } | ||
1035 | if (BTRFS_I(inode)->outstanding_extents != 3) { | ||
1036 | ret = -EINVAL; | ||
1037 | test_msg("Miscount, wanted 3, got %u\n", | ||
1038 | BTRFS_I(inode)->outstanding_extents); | ||
1039 | goto out; | ||
1040 | } | ||
1041 | |||
1042 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ | ||
1043 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, | ||
1044 | BTRFS_MAX_EXTENT_SIZE+4096, | ||
1045 | BTRFS_MAX_EXTENT_SIZE+8191, | ||
1046 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
1047 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | ||
1048 | NULL, GFP_NOFS); | ||
1049 | if (ret) { | ||
1050 | test_msg("clear_extent_bit returned %d\n", ret); | ||
1051 | goto out; | ||
1052 | } | ||
1053 | if (BTRFS_I(inode)->outstanding_extents != 4) { | ||
1054 | ret = -EINVAL; | ||
1055 | test_msg("Miscount, wanted 4, got %u\n", | ||
1056 | BTRFS_I(inode)->outstanding_extents); | ||
1057 | goto out; | ||
1058 | } | ||
1059 | |||
1060 | /* | ||
1061 | * Refill the hole again just for good measure, because I thought it | ||
1062 | * might fail and I'd rather satisfy my paranoia at this point. | ||
1063 | */ | ||
1064 | BTRFS_I(inode)->outstanding_extents++; | ||
1065 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, | ||
1066 | BTRFS_MAX_EXTENT_SIZE+8191, NULL); | ||
1067 | if (ret) { | ||
1068 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
1069 | goto out; | ||
1070 | } | ||
1071 | if (BTRFS_I(inode)->outstanding_extents != 3) { | ||
1072 | ret = -EINVAL; | ||
1073 | test_msg("Miscount, wanted 3, got %u\n", | ||
1074 | BTRFS_I(inode)->outstanding_extents); | ||
1075 | goto out; | ||
1076 | } | ||
1077 | |||
1078 | /* Empty */ | ||
1079 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, | ||
1080 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
1081 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | ||
1082 | NULL, GFP_NOFS); | ||
1083 | if (ret) { | ||
1084 | test_msg("clear_extent_bit returned %d\n", ret); | ||
1085 | goto out; | ||
1086 | } | ||
1087 | if (BTRFS_I(inode)->outstanding_extents) { | ||
1088 | ret = -EINVAL; | ||
1089 | test_msg("Miscount, wanted 0, got %u\n", | ||
1090 | BTRFS_I(inode)->outstanding_extents); | ||
1091 | goto out; | ||
1092 | } | ||
1093 | ret = 0; | ||
1094 | out: | ||
1095 | if (ret) | ||
1096 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, | ||
1097 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
1098 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | ||
1099 | NULL, GFP_NOFS); | ||
1100 | iput(inode); | ||
1101 | btrfs_free_dummy_root(root); | ||
1102 | return ret; | ||
1103 | } | ||
1104 | |||
914 | int btrfs_test_inodes(void) | 1105 | int btrfs_test_inodes(void) |
915 | { | 1106 | { |
916 | int ret; | 1107 | int ret; |
@@ -924,5 +1115,9 @@ int btrfs_test_inodes(void) | |||
924 | if (ret) | 1115 | if (ret) |
925 | return ret; | 1116 | return ret; |
926 | test_msg("Running hole first btrfs_get_extent test\n"); | 1117 | test_msg("Running hole first btrfs_get_extent test\n"); |
927 | return test_hole_first(); | 1118 | ret = test_hole_first(); |
1119 | if (ret) | ||
1120 | return ret; | ||
1121 | test_msg("Running outstanding_extents tests\n"); | ||
1122 | return test_extent_accounting(); | ||
928 | } | 1123 | } |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 7e80f32550a6..8be4278e25e8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -1023,17 +1023,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
1023 | u64 old_root_bytenr; | 1023 | u64 old_root_bytenr; |
1024 | u64 old_root_used; | 1024 | u64 old_root_used; |
1025 | struct btrfs_root *tree_root = root->fs_info->tree_root; | 1025 | struct btrfs_root *tree_root = root->fs_info->tree_root; |
1026 | bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID); | ||
1027 | 1026 | ||
1028 | old_root_used = btrfs_root_used(&root->root_item); | 1027 | old_root_used = btrfs_root_used(&root->root_item); |
1029 | btrfs_write_dirty_block_groups(trans, root); | ||
1030 | 1028 | ||
1031 | while (1) { | 1029 | while (1) { |
1032 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); | 1030 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); |
1033 | if (old_root_bytenr == root->node->start && | 1031 | if (old_root_bytenr == root->node->start && |
1034 | old_root_used == btrfs_root_used(&root->root_item) && | 1032 | old_root_used == btrfs_root_used(&root->root_item)) |
1035 | (!extent_root || | ||
1036 | list_empty(&trans->transaction->dirty_bgs))) | ||
1037 | break; | 1033 | break; |
1038 | 1034 | ||
1039 | btrfs_set_root_node(&root->root_item, root->node); | 1035 | btrfs_set_root_node(&root->root_item, root->node); |
@@ -1044,17 +1040,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
1044 | return ret; | 1040 | return ret; |
1045 | 1041 | ||
1046 | old_root_used = btrfs_root_used(&root->root_item); | 1042 | old_root_used = btrfs_root_used(&root->root_item); |
1047 | if (extent_root) { | ||
1048 | ret = btrfs_write_dirty_block_groups(trans, root); | ||
1049 | if (ret) | ||
1050 | return ret; | ||
1051 | } | ||
1052 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
1053 | if (ret) | ||
1054 | return ret; | ||
1055 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
1056 | if (ret) | ||
1057 | return ret; | ||
1058 | } | 1043 | } |
1059 | 1044 | ||
1060 | return 0; | 1045 | return 0; |
@@ -1071,6 +1056,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
1071 | struct btrfs_root *root) | 1056 | struct btrfs_root *root) |
1072 | { | 1057 | { |
1073 | struct btrfs_fs_info *fs_info = root->fs_info; | 1058 | struct btrfs_fs_info *fs_info = root->fs_info; |
1059 | struct list_head *dirty_bgs = &trans->transaction->dirty_bgs; | ||
1074 | struct list_head *next; | 1060 | struct list_head *next; |
1075 | struct extent_buffer *eb; | 1061 | struct extent_buffer *eb; |
1076 | int ret; | 1062 | int ret; |
@@ -1098,11 +1084,15 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
1098 | if (ret) | 1084 | if (ret) |
1099 | return ret; | 1085 | return ret; |
1100 | 1086 | ||
1087 | ret = btrfs_setup_space_cache(trans, root); | ||
1088 | if (ret) | ||
1089 | return ret; | ||
1090 | |||
1101 | /* run_qgroups might have added some more refs */ | 1091 | /* run_qgroups might have added some more refs */ |
1102 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | 1092 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); |
1103 | if (ret) | 1093 | if (ret) |
1104 | return ret; | 1094 | return ret; |
1105 | 1095 | again: | |
1106 | while (!list_empty(&fs_info->dirty_cowonly_roots)) { | 1096 | while (!list_empty(&fs_info->dirty_cowonly_roots)) { |
1107 | next = fs_info->dirty_cowonly_roots.next; | 1097 | next = fs_info->dirty_cowonly_roots.next; |
1108 | list_del_init(next); | 1098 | list_del_init(next); |
@@ -1115,8 +1105,23 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
1115 | ret = update_cowonly_root(trans, root); | 1105 | ret = update_cowonly_root(trans, root); |
1116 | if (ret) | 1106 | if (ret) |
1117 | return ret; | 1107 | return ret; |
1108 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
1109 | if (ret) | ||
1110 | return ret; | ||
1118 | } | 1111 | } |
1119 | 1112 | ||
1113 | while (!list_empty(dirty_bgs)) { | ||
1114 | ret = btrfs_write_dirty_block_groups(trans, root); | ||
1115 | if (ret) | ||
1116 | return ret; | ||
1117 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
1118 | if (ret) | ||
1119 | return ret; | ||
1120 | } | ||
1121 | |||
1122 | if (!list_empty(&fs_info->dirty_cowonly_roots)) | ||
1123 | goto again; | ||
1124 | |||
1120 | list_add_tail(&fs_info->extent_root->dirty_list, | 1125 | list_add_tail(&fs_info->extent_root->dirty_list, |
1121 | &trans->transaction->switch_commits); | 1126 | &trans->transaction->switch_commits); |
1122 | btrfs_after_dev_replace_commit(fs_info); | 1127 | btrfs_after_dev_replace_commit(fs_info); |
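The reworked commit path above keeps flushing until nothing re-dirties itself: writing out cow-only roots can dirty block groups, and writing dirty block groups can dirty the extent root again. A hedged outline of the resulting loop (pop_dirty_root() is a placeholder, error handling omitted):

	do {
		while (!list_empty(&fs_info->dirty_cowonly_roots))
			update_cowonly_root(trans, pop_dirty_root());	/* may dirty bgs */
		while (!list_empty(&trans->transaction->dirty_bgs)) {
			btrfs_write_dirty_block_groups(trans, root);	/* may dirty roots */
			btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
		}
	} while (!list_empty(&fs_info->dirty_cowonly_roots));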
@@ -1814,6 +1819,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1814 | 1819 | ||
1815 | wait_for_commit(root, cur_trans); | 1820 | wait_for_commit(root, cur_trans); |
1816 | 1821 | ||
1822 | if (unlikely(cur_trans->aborted)) | ||
1823 | ret = cur_trans->aborted; | ||
1824 | |||
1817 | btrfs_put_transaction(cur_trans); | 1825 | btrfs_put_transaction(cur_trans); |
1818 | 1826 | ||
1819 | return ret; | 1827 | return ret; |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9a37f8b39bae..c5b8ba37f88e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -1012,7 +1012,7 @@ again: | |||
1012 | base = btrfs_item_ptr_offset(leaf, path->slots[0]); | 1012 | base = btrfs_item_ptr_offset(leaf, path->slots[0]); |
1013 | 1013 | ||
1014 | while (cur_offset < item_size) { | 1014 | while (cur_offset < item_size) { |
1015 | extref = (struct btrfs_inode_extref *)base + cur_offset; | 1015 | extref = (struct btrfs_inode_extref *)(base + cur_offset); |
1016 | 1016 | ||
1017 | victim_name_len = btrfs_inode_extref_name_len(leaf, extref); | 1017 | victim_name_len = btrfs_inode_extref_name_len(leaf, extref); |
1018 | 1018 | ||
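The tree-log fix above is a cast-precedence bug: the cast binds tighter than the addition, so the byte offset was being scaled by the size of the extref structure. A hedged illustration of the difference:

	unsigned long base;	/* byte address of the item data */
	u64 cur_offset;		/* byte offset within the item */
	struct btrfs_inode_extref *extref;

	/* wrong: advances by cur_offset * sizeof(*extref) bytes */
	extref = (struct btrfs_inode_extref *)base + cur_offset;

	/* right: advance in bytes first, then reinterpret the address */
	extref = (struct btrfs_inode_extref *)(base + cur_offset);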
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index cd4d1315aaa9..8222f6f74147 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -4903,10 +4903,17 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) | |||
4903 | static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) | 4903 | static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) |
4904 | { | 4904 | { |
4905 | struct btrfs_bio *bbio = kzalloc( | 4905 | struct btrfs_bio *bbio = kzalloc( |
4906 | /* the size of the btrfs_bio */ | ||
4906 | sizeof(struct btrfs_bio) + | 4907 | sizeof(struct btrfs_bio) + |
4908 | /* plus the variable array for the stripes */ | ||
4907 | sizeof(struct btrfs_bio_stripe) * (total_stripes) + | 4909 | sizeof(struct btrfs_bio_stripe) * (total_stripes) + |
4910 | /* plus the variable array for the tgt dev */ | ||
4908 | sizeof(int) * (real_stripes) + | 4911 | sizeof(int) * (real_stripes) + |
4909 | sizeof(u64) * (real_stripes), | 4912 | /* |
4913 | * plus the raid_map, which includes both the tgt dev | ||
4914 | * and the stripes | ||
4915 | */ | ||
4916 | sizeof(u64) * (total_stripes), | ||
4910 | GFP_NOFS); | 4917 | GFP_NOFS); |
4911 | if (!bbio) | 4918 | if (!bbio) |
4912 | return NULL; | 4919 | return NULL; |
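The comments added to alloc_btrfs_bio() describe a single allocation that packs three trailing arrays behind the struct; the functional change sizes the trailing u64 raid_map with total_stripes instead of real_stripes. A hedged restatement of the layout:

	/*
	 * [ struct btrfs_bio                          ]  fixed header
	 * [ struct btrfs_bio_stripe x total_stripes   ]  stripe array
	 * [ int                     x real_stripes    ]  tgtdev map
	 * [ u64                     x total_stripes   ]  raid_map (was real_stripes)
	 */
	size_t len = sizeof(struct btrfs_bio) +
		     sizeof(struct btrfs_bio_stripe) * total_stripes +
		     sizeof(int) * real_stripes +
		     sizeof(u64) * total_stripes;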
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 47b19465f0dc..883b93623bc5 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c | |||
@@ -111,6 +111,8 @@ static int do_setxattr(struct btrfs_trans_handle *trans, | |||
111 | name, name_len, -1); | 111 | name, name_len, -1); |
112 | if (!di && (flags & XATTR_REPLACE)) | 112 | if (!di && (flags & XATTR_REPLACE)) |
113 | ret = -ENODATA; | 113 | ret = -ENODATA; |
114 | else if (IS_ERR(di)) | ||
115 | ret = PTR_ERR(di); | ||
114 | else if (di) | 116 | else if (di) |
115 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | 117 | ret = btrfs_delete_one_dir_name(trans, root, path, di); |
116 | goto out; | 118 | goto out; |
@@ -127,10 +129,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans, | |||
127 | ASSERT(mutex_is_locked(&inode->i_mutex)); | 129 | ASSERT(mutex_is_locked(&inode->i_mutex)); |
128 | di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), | 130 | di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), |
129 | name, name_len, 0); | 131 | name, name_len, 0); |
130 | if (!di) { | 132 | if (!di) |
131 | ret = -ENODATA; | 133 | ret = -ENODATA; |
134 | else if (IS_ERR(di)) | ||
135 | ret = PTR_ERR(di); | ||
136 | if (ret) | ||
132 | goto out; | 137 | goto out; |
133 | } | ||
134 | btrfs_release_path(path); | 138 | btrfs_release_path(path); |
135 | di = NULL; | 139 | di = NULL; |
136 | } | 140 | } |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 4ac7445e6ec7..aa0dc2573374 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -1,6 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * fs/cifs/cifsencrypt.c | 2 | * fs/cifs/cifsencrypt.c |
3 | * | 3 | * |
4 | * Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP | ||
5 | * for more detailed information | ||
6 | * | ||
4 | * Copyright (C) International Business Machines Corp., 2005,2013 | 7 | * Copyright (C) International Business Machines Corp., 2005,2013 |
5 | * Author(s): Steve French (sfrench@us.ibm.com) | 8 | * Author(s): Steve French (sfrench@us.ibm.com) |
6 | * | 9 | * |
@@ -515,7 +518,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, | |||
515 | __func__); | 518 | __func__); |
516 | return rc; | 519 | return rc; |
517 | } | 520 | } |
518 | } else if (ses->serverName) { | 521 | } else { |
522 | /* We use ses->serverName if no domain name available */ | ||
519 | len = strlen(ses->serverName); | 523 | len = strlen(ses->serverName); |
520 | 524 | ||
521 | server = kmalloc(2 + (len * 2), GFP_KERNEL); | 525 | server = kmalloc(2 + (len * 2), GFP_KERNEL); |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d3aa999ab785..480cf9c81d50 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1599,6 +1599,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1599 | pr_warn("CIFS: username too long\n"); | 1599 | pr_warn("CIFS: username too long\n"); |
1600 | goto cifs_parse_mount_err; | 1600 | goto cifs_parse_mount_err; |
1601 | } | 1601 | } |
1602 | |||
1603 | kfree(vol->username); | ||
1602 | vol->username = kstrdup(string, GFP_KERNEL); | 1604 | vol->username = kstrdup(string, GFP_KERNEL); |
1603 | if (!vol->username) | 1605 | if (!vol->username) |
1604 | goto cifs_parse_mount_err; | 1606 | goto cifs_parse_mount_err; |
@@ -1700,6 +1702,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1700 | goto cifs_parse_mount_err; | 1702 | goto cifs_parse_mount_err; |
1701 | } | 1703 | } |
1702 | 1704 | ||
1705 | kfree(vol->domainname); | ||
1703 | vol->domainname = kstrdup(string, GFP_KERNEL); | 1706 | vol->domainname = kstrdup(string, GFP_KERNEL); |
1704 | if (!vol->domainname) { | 1707 | if (!vol->domainname) { |
1705 | pr_warn("CIFS: no memory for domainname\n"); | 1708 | pr_warn("CIFS: no memory for domainname\n"); |
@@ -1731,6 +1734,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1731 | } | 1734 | } |
1732 | 1735 | ||
1733 | if (strncasecmp(string, "default", 7) != 0) { | 1736 | if (strncasecmp(string, "default", 7) != 0) { |
1737 | kfree(vol->iocharset); | ||
1734 | vol->iocharset = kstrdup(string, | 1738 | vol->iocharset = kstrdup(string, |
1735 | GFP_KERNEL); | 1739 | GFP_KERNEL); |
1736 | if (!vol->iocharset) { | 1740 | if (!vol->iocharset) { |
@@ -2913,8 +2917,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) | |||
2913 | * calling name ends in null (byte 16) from old smb | 2917 | * calling name ends in null (byte 16) from old smb |
2914 | * convention. | 2918 | * convention. |
2915 | */ | 2919 | */ |
2916 | if (server->workstation_RFC1001_name && | 2920 | if (server->workstation_RFC1001_name[0] != 0) |
2917 | server->workstation_RFC1001_name[0] != 0) | ||
2918 | rfc1002mangle(ses_init_buf->trailer. | 2921 | rfc1002mangle(ses_init_buf->trailer. |
2919 | session_req.calling_name, | 2922 | session_req.calling_name, |
2920 | server->workstation_RFC1001_name, | 2923 | server->workstation_RFC1001_name, |
@@ -3692,6 +3695,12 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, | |||
3692 | #endif /* CIFS_WEAK_PW_HASH */ | 3695 | #endif /* CIFS_WEAK_PW_HASH */ |
3693 | rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, | 3696 | rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, |
3694 | bcc_ptr, nls_codepage); | 3697 | bcc_ptr, nls_codepage); |
3698 | if (rc) { | ||
3699 | cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n", | ||
3700 | __func__, rc); | ||
3701 | cifs_buf_release(smb_buffer); | ||
3702 | return rc; | ||
3703 | } | ||
3695 | 3704 | ||
3696 | bcc_ptr += CIFS_AUTH_RESP_SIZE; | 3705 | bcc_ptr += CIFS_AUTH_RESP_SIZE; |
3697 | if (ses->capabilities & CAP_UNICODE) { | 3706 | if (ses->capabilities & CAP_UNICODE) { |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index a94b3e673182..ca30c391a894 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -1823,6 +1823,7 @@ refind_writable: | |||
1823 | cifsFileInfo_put(inv_file); | 1823 | cifsFileInfo_put(inv_file); |
1824 | spin_lock(&cifs_file_list_lock); | 1824 | spin_lock(&cifs_file_list_lock); |
1825 | ++refind; | 1825 | ++refind; |
1826 | inv_file = NULL; | ||
1826 | goto refind_writable; | 1827 | goto refind_writable; |
1827 | } | 1828 | } |
1828 | } | 1829 | } |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 2d4f37235ed0..3e126d7bb2ea 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -771,6 +771,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, | |||
771 | cifs_buf_release(srchinf->ntwrk_buf_start); | 771 | cifs_buf_release(srchinf->ntwrk_buf_start); |
772 | } | 772 | } |
773 | kfree(srchinf); | 773 | kfree(srchinf); |
774 | if (rc) | ||
775 | goto cgii_exit; | ||
774 | } else | 776 | } else |
775 | goto cgii_exit; | 777 | goto cgii_exit; |
776 | 778 | ||
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 689f035915cf..22dfdf17d065 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c | |||
@@ -322,7 +322,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr) | |||
322 | 322 | ||
323 | /* return pointer to beginning of data area, ie offset from SMB start */ | 323 | /* return pointer to beginning of data area, ie offset from SMB start */ |
324 | if ((*off != 0) && (*len != 0)) | 324 | if ((*off != 0) && (*len != 0)) |
325 | return hdr->ProtocolId + *off; | 325 | return (char *)(&hdr->ProtocolId[0]) + *off; |
326 | else | 326 | else |
327 | return NULL; | 327 | return NULL; |
328 | } | 328 | } |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 96b5d40a2ece..eab05e1aa587 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -684,7 +684,8 @@ smb2_clone_range(const unsigned int xid, | |||
684 | 684 | ||
685 | /* No need to change MaxChunks since already set to 1 */ | 685 | /* No need to change MaxChunks since already set to 1 */ |
686 | chunk_sizes_updated = true; | 686 | chunk_sizes_updated = true; |
687 | } | 687 | } else |
688 | goto cchunk_out; | ||
688 | } | 689 | } |
689 | 690 | ||
690 | cchunk_out: | 691 | cchunk_out: |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 3417340bf89e..65cd7a84c8bc 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -1218,7 +1218,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
1218 | struct smb2_ioctl_req *req; | 1218 | struct smb2_ioctl_req *req; |
1219 | struct smb2_ioctl_rsp *rsp; | 1219 | struct smb2_ioctl_rsp *rsp; |
1220 | struct TCP_Server_Info *server; | 1220 | struct TCP_Server_Info *server; |
1221 | struct cifs_ses *ses = tcon->ses; | 1221 | struct cifs_ses *ses; |
1222 | struct kvec iov[2]; | 1222 | struct kvec iov[2]; |
1223 | int resp_buftype; | 1223 | int resp_buftype; |
1224 | int num_iovecs; | 1224 | int num_iovecs; |
@@ -1233,6 +1233,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
1233 | if (plen) | 1233 | if (plen) |
1234 | *plen = 0; | 1234 | *plen = 0; |
1235 | 1235 | ||
1236 | if (tcon) | ||
1237 | ses = tcon->ses; | ||
1238 | else | ||
1239 | return -EIO; | ||
1240 | |||
1236 | if (ses && (ses->server)) | 1241 | if (ses && (ses->server)) |
1237 | server = ses->server; | 1242 | server = ses->server; |
1238 | else | 1243 | else |
@@ -1296,14 +1301,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
1296 | rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; | 1301 | rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; |
1297 | 1302 | ||
1298 | if ((rc != 0) && (rc != -EINVAL)) { | 1303 | if ((rc != 0) && (rc != -EINVAL)) { |
1299 | if (tcon) | 1304 | cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); |
1300 | cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); | ||
1301 | goto ioctl_exit; | 1305 | goto ioctl_exit; |
1302 | } else if (rc == -EINVAL) { | 1306 | } else if (rc == -EINVAL) { |
1303 | if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && | 1307 | if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && |
1304 | (opcode != FSCTL_SRV_COPYCHUNK)) { | 1308 | (opcode != FSCTL_SRV_COPYCHUNK)) { |
1305 | if (tcon) | 1309 | cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); |
1306 | cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE); | ||
1307 | goto ioctl_exit; | 1310 | goto ioctl_exit; |
1308 | } | 1311 | } |
1309 | } | 1312 | } |
@@ -1629,7 +1632,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
1629 | 1632 | ||
1630 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); | 1633 | rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); |
1631 | 1634 | ||
1632 | if ((rc != 0) && tcon) | 1635 | if (rc != 0) |
1633 | cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); | 1636 | cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); |
1634 | 1637 | ||
1635 | free_rsp_buf(resp_buftype, iov[0].iov_base); | 1638 | free_rsp_buf(resp_buftype, iov[0].iov_base); |
@@ -2114,7 +2117,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, | |||
2114 | struct kvec iov[2]; | 2117 | struct kvec iov[2]; |
2115 | int rc = 0; | 2118 | int rc = 0; |
2116 | int len; | 2119 | int len; |
2117 | int resp_buftype; | 2120 | int resp_buftype = CIFS_NO_BUFFER; |
2118 | unsigned char *bufptr; | 2121 | unsigned char *bufptr; |
2119 | struct TCP_Server_Info *server; | 2122 | struct TCP_Server_Info *server; |
2120 | struct cifs_ses *ses = tcon->ses; | 2123 | struct cifs_ses *ses = tcon->ses; |
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 90d1882b306f..5ba029e627cc 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -124,7 +124,7 @@ ecryptfs_get_key_payload_data(struct key *key) | |||
124 | } | 124 | } |
125 | 125 | ||
126 | #define ECRYPTFS_MAX_KEYSET_SIZE 1024 | 126 | #define ECRYPTFS_MAX_KEYSET_SIZE 1024 |
127 | #define ECRYPTFS_MAX_CIPHER_NAME_SIZE 32 | 127 | #define ECRYPTFS_MAX_CIPHER_NAME_SIZE 31 |
128 | #define ECRYPTFS_MAX_NUM_ENC_KEYS 64 | 128 | #define ECRYPTFS_MAX_NUM_ENC_KEYS 64 |
129 | #define ECRYPTFS_MAX_IV_BYTES 16 /* 128 bits */ | 129 | #define ECRYPTFS_MAX_IV_BYTES 16 /* 128 bits */ |
130 | #define ECRYPTFS_SALT_BYTES 2 | 130 | #define ECRYPTFS_SALT_BYTES 2 |
@@ -237,7 +237,7 @@ struct ecryptfs_crypt_stat { | |||
237 | struct crypto_ablkcipher *tfm; | 237 | struct crypto_ablkcipher *tfm; |
238 | struct crypto_hash *hash_tfm; /* Crypto context for generating | 238 | struct crypto_hash *hash_tfm; /* Crypto context for generating |
239 | * the initialization vectors */ | 239 | * the initialization vectors */ |
240 | unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE]; | 240 | unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1]; |
241 | unsigned char key[ECRYPTFS_MAX_KEY_BYTES]; | 241 | unsigned char key[ECRYPTFS_MAX_KEY_BYTES]; |
242 | unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES]; | 242 | unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES]; |
243 | struct list_head keysig_list; | 243 | struct list_head keysig_list; |
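The size change above (32 to 31, with "+ 1" added to the buffers) reserves a byte for the terminating NUL, so a maximum-length cipher name still fits as a C string and the BUG_ON in ecryptfs_parse_options() can use ">" instead of ">=". A hedged illustration (MAX_NAME stands in for ECRYPTFS_MAX_CIPHER_NAME_SIZE):

	#define MAX_NAME 31
	char cipher[MAX_NAME + 1];		/* 31 characters + '\0' */

	BUG_ON(strlen(name) > MAX_NAME);	/* a 31-char name is now legal */
	strcpy(cipher, name);			/* copies at most 32 bytes incl. '\0' */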
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index b07731e68c0b..fd39bad6f1bd 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c | |||
@@ -303,9 +303,22 @@ ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
303 | struct file *lower_file = ecryptfs_file_to_lower(file); | 303 | struct file *lower_file = ecryptfs_file_to_lower(file); |
304 | long rc = -ENOTTY; | 304 | long rc = -ENOTTY; |
305 | 305 | ||
306 | if (lower_file->f_op->unlocked_ioctl) | 306 | if (!lower_file->f_op->unlocked_ioctl) |
307 | return rc; | ||
308 | |||
309 | switch (cmd) { | ||
310 | case FITRIM: | ||
311 | case FS_IOC_GETFLAGS: | ||
312 | case FS_IOC_SETFLAGS: | ||
313 | case FS_IOC_GETVERSION: | ||
314 | case FS_IOC_SETVERSION: | ||
307 | rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg); | 315 | rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg); |
308 | return rc; | 316 | fsstack_copy_attr_all(file_inode(file), file_inode(lower_file)); |
317 | |||
318 | return rc; | ||
319 | default: | ||
320 | return rc; | ||
321 | } | ||
309 | } | 322 | } |
310 | 323 | ||
311 | #ifdef CONFIG_COMPAT | 324 | #ifdef CONFIG_COMPAT |
@@ -315,9 +328,22 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
315 | struct file *lower_file = ecryptfs_file_to_lower(file); | 328 | struct file *lower_file = ecryptfs_file_to_lower(file); |
316 | long rc = -ENOIOCTLCMD; | 329 | long rc = -ENOIOCTLCMD; |
317 | 330 | ||
318 | if (lower_file->f_op->compat_ioctl) | 331 | if (!lower_file->f_op->compat_ioctl) |
332 | return rc; | ||
333 | |||
334 | switch (cmd) { | ||
335 | case FITRIM: | ||
336 | case FS_IOC32_GETFLAGS: | ||
337 | case FS_IOC32_SETFLAGS: | ||
338 | case FS_IOC32_GETVERSION: | ||
339 | case FS_IOC32_SETVERSION: | ||
319 | rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg); | 340 | rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg); |
320 | return rc; | 341 | fsstack_copy_attr_all(file_inode(file), file_inode(lower_file)); |
342 | |||
343 | return rc; | ||
344 | default: | ||
345 | return rc; | ||
346 | } | ||
321 | } | 347 | } |
322 | #endif | 348 | #endif |
323 | 349 | ||
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 917bd5c9776a..6bd67e2011f0 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -891,7 +891,7 @@ struct ecryptfs_parse_tag_70_packet_silly_stack { | |||
891 | struct blkcipher_desc desc; | 891 | struct blkcipher_desc desc; |
892 | char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1]; | 892 | char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1]; |
893 | char iv[ECRYPTFS_MAX_IV_BYTES]; | 893 | char iv[ECRYPTFS_MAX_IV_BYTES]; |
894 | char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE]; | 894 | char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1]; |
895 | }; | 895 | }; |
896 | 896 | ||
897 | /** | 897 | /** |
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 1895d60f4122..c095d3264259 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c | |||
@@ -407,7 +407,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, | |||
407 | if (!cipher_name_set) { | 407 | if (!cipher_name_set) { |
408 | int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); | 408 | int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); |
409 | 409 | ||
410 | BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE); | 410 | BUG_ON(cipher_name_len > ECRYPTFS_MAX_CIPHER_NAME_SIZE); |
411 | strcpy(mount_crypt_stat->global_default_cipher_name, | 411 | strcpy(mount_crypt_stat->global_default_cipher_name, |
412 | ECRYPTFS_DEFAULT_CIPHER); | 412 | ECRYPTFS_DEFAULT_CIPHER); |
413 | } | 413 | } |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index e907052eeadb..32a8bbd7a9ad 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -53,6 +53,18 @@ struct wb_writeback_work { | |||
53 | struct completion *done; /* set if the caller waits */ | 53 | struct completion *done; /* set if the caller waits */ |
54 | }; | 54 | }; |
55 | 55 | ||
56 | /* | ||
57 | * If an inode is constantly having its pages dirtied, but then the | ||
58 | * updates stop dirtytime_expire_interval seconds in the past, it's | ||
59 | * possible for the worst case time between when an inode has its | ||
60 | * timestamps updated and when they finally get written out to be two | ||
61 | * dirtytime_expire_intervals. We set the default to 12 hours (in | ||
62 | * seconds), which means most of the time inodes will have their | ||
63 | * timestamps written to disk after 12 hours, but in the worst case a | ||
64 | * few inodes might not have their timestamps updated for 24 hours. | ||
65 | */ | ||
66 | unsigned int dirtytime_expire_interval = 12 * 60 * 60; | ||
67 | |||
56 | /** | 68 | /** |
57 | * writeback_in_progress - determine whether there is writeback in progress | 69 | * writeback_in_progress - determine whether there is writeback in progress |
58 | * @bdi: the device's backing_dev_info structure. | 70 | * @bdi: the device's backing_dev_info structure. |
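The two-interval worst case noted in the comment above comes from combining the expiry test used further down in __writeback_single_inode() with a periodic sweep that itself only runs once per interval: an update landing just after a sweep waits just under two intervals, about 24 hours with the 12 hour default. A hedged sketch of that check, using names from the hunks below:

	if (time_after(jiffies,
		       inode->dirtied_time_when +
		       dirtytime_expire_interval * HZ))
		dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;	/* force writeback */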
@@ -275,8 +287,8 @@ static int move_expired_inodes(struct list_head *delaying_queue, | |||
275 | 287 | ||
276 | if ((flags & EXPIRE_DIRTY_ATIME) == 0) | 288 | if ((flags & EXPIRE_DIRTY_ATIME) == 0) |
277 | older_than_this = work->older_than_this; | 289 | older_than_this = work->older_than_this; |
278 | else if ((work->reason == WB_REASON_SYNC) == 0) { | 290 | else if (!work->for_sync) { |
279 | expire_time = jiffies - (HZ * 86400); | 291 | expire_time = jiffies - (dirtytime_expire_interval * HZ); |
280 | older_than_this = &expire_time; | 292 | older_than_this = &expire_time; |
281 | } | 293 | } |
282 | while (!list_empty(delaying_queue)) { | 294 | while (!list_empty(delaying_queue)) { |
@@ -458,6 +470,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, | |||
458 | */ | 470 | */ |
459 | redirty_tail(inode, wb); | 471 | redirty_tail(inode, wb); |
460 | } else if (inode->i_state & I_DIRTY_TIME) { | 472 | } else if (inode->i_state & I_DIRTY_TIME) { |
473 | inode->dirtied_when = jiffies; | ||
461 | list_move(&inode->i_wb_list, &wb->b_dirty_time); | 474 | list_move(&inode->i_wb_list, &wb->b_dirty_time); |
462 | } else { | 475 | } else { |
463 | /* The inode is clean. Remove from writeback lists. */ | 476 | /* The inode is clean. Remove from writeback lists. */ |
@@ -505,12 +518,17 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | |||
505 | spin_lock(&inode->i_lock); | 518 | spin_lock(&inode->i_lock); |
506 | 519 | ||
507 | dirty = inode->i_state & I_DIRTY; | 520 | dirty = inode->i_state & I_DIRTY; |
508 | if (((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) && | 521 | if (inode->i_state & I_DIRTY_TIME) { |
509 | (inode->i_state & I_DIRTY_TIME)) || | 522 | if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) || |
510 | (inode->i_state & I_DIRTY_TIME_EXPIRED)) { | 523 | unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) || |
511 | dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; | 524 | unlikely(time_after(jiffies, |
512 | trace_writeback_lazytime(inode); | 525 | (inode->dirtied_time_when + |
513 | } | 526 | dirtytime_expire_interval * HZ)))) { |
527 | dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; | ||
528 | trace_writeback_lazytime(inode); | ||
529 | } | ||
530 | } else | ||
531 | inode->i_state &= ~I_DIRTY_TIME_EXPIRED; | ||
514 | inode->i_state &= ~dirty; | 532 | inode->i_state &= ~dirty; |
515 | 533 | ||
516 | /* | 534 | /* |
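The new expiry test above relies on the kernel's wrap-safe time_after() comparison between jiffies and dirtied_time_when + interval. A compact restatement of that idiom in plain C; the function names here are local stand-ins, not the kernel macros:

#include <stdbool.h>
#include <stdio.h>

/* time_after(a, b) in the kernel is this signed-difference trick, which
 * stays correct when the tick counter wraps around. */
static bool ticks_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

static bool dirtytime_expired(unsigned long now, unsigned long dirtied_time_when,
			      unsigned long interval_ticks)
{
	return ticks_after(now, dirtied_time_when + interval_ticks);
}

int main(void)
{
	/* Example near the wrap point: "now" has already wrapped past ULONG_MAX. */
	unsigned long dirtied = (unsigned long)-100;   /* shortly before the wrap */
	unsigned long now = 50;                        /* shortly after the wrap  */

	printf("expired after 10 ticks?   %d\n", dirtytime_expired(now, dirtied, 10));
	printf("expired after 1000 ticks? %d\n", dirtytime_expired(now, dirtied, 1000));
	return 0;
}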
@@ -1131,6 +1149,56 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) | |||
1131 | rcu_read_unlock(); | 1149 | rcu_read_unlock(); |
1132 | } | 1150 | } |
1133 | 1151 | ||
1152 | /* | ||
1153 | * Wake up bdi's periodically to make sure dirtytime inodes get | ||
1154 | * written back periodically. We deliberately do *not* check the | ||
1155 | * b_dirtytime list in wb_has_dirty_io(), since this would cause the | ||
1156 | * kernel to be constantly waking up once there are any dirtytime | ||
1157 | * inodes on the system. So instead we define a separate delayed work | ||
1158 | * function which gets called much more rarely. (By default, only | ||
1159 | * once every 12 hours.) | ||
1160 | * | ||
1161 | * If there is any other write activity going on in the file system, | ||
1162 | * this function won't be necessary. But if the only thing that has | ||
1163 | * happened on the file system is a dirtytime inode caused by an atime | ||
1164 | * update, we need this infrastructure below to make sure that inode | ||
1165 | * eventually gets pushed out to disk. | ||
1166 | */ | ||
1167 | static void wakeup_dirtytime_writeback(struct work_struct *w); | ||
1168 | static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback); | ||
1169 | |||
1170 | static void wakeup_dirtytime_writeback(struct work_struct *w) | ||
1171 | { | ||
1172 | struct backing_dev_info *bdi; | ||
1173 | |||
1174 | rcu_read_lock(); | ||
1175 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { | ||
1176 | if (list_empty(&bdi->wb.b_dirty_time)) | ||
1177 | continue; | ||
1178 | bdi_wakeup_thread(bdi); | ||
1179 | } | ||
1180 | rcu_read_unlock(); | ||
1181 | schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); | ||
1182 | } | ||
1183 | |||
1184 | static int __init start_dirtytime_writeback(void) | ||
1185 | { | ||
1186 | schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); | ||
1187 | return 0; | ||
1188 | } | ||
1189 | __initcall(start_dirtytime_writeback); | ||
1190 | |||
1191 | int dirtytime_interval_handler(struct ctl_table *table, int write, | ||
1192 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
1193 | { | ||
1194 | int ret; | ||
1195 | |||
1196 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | ||
1197 | if (ret == 0 && write) | ||
1198 | mod_delayed_work(system_wq, &dirtytime_work, 0); | ||
1199 | return ret; | ||
1200 | } | ||
1201 | |||
1134 | static noinline void block_dump___mark_inode_dirty(struct inode *inode) | 1202 | static noinline void block_dump___mark_inode_dirty(struct inode *inode) |
1135 | { | 1203 | { |
1136 | if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { | 1204 | if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { |
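dirtytime_interval_handler() above pairs the tunable with mod_delayed_work(..., 0), so a write to the sysctl takes effect immediately instead of waiting for the previously armed (up to 12-hour) timer. A userspace sketch of that "kick the self-rearming work when the interval changes" idea; schedule_sweep() is a stand-in for the delayed-work API, not a kernel function:

#include <stdio.h>

static unsigned int dirtytime_expire_interval = 12 * 60 * 60;

/* Stand-in for schedule_delayed_work()/mod_delayed_work(): the real sweep
 * wakes the flusher threads and then re-arms itself with the current value. */
static void schedule_sweep(unsigned int delay_seconds)
{
	printf("next sweep scheduled in %u seconds\n", delay_seconds);
}

static void sysctl_write_interval(unsigned int new_interval)
{
	dirtytime_expire_interval = new_interval;
	schedule_sweep(0);              /* run now; the sweep re-arms with the new value */
}

int main(void)
{
	schedule_sweep(dirtytime_expire_interval);   /* boot-time arming */
	sysctl_write_interval(60 * 60);              /* admin shrinks it to one hour */
	return 0;
}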
@@ -1269,8 +1337,13 @@ void __mark_inode_dirty(struct inode *inode, int flags) | |||
1269 | } | 1337 | } |
1270 | 1338 | ||
1271 | inode->dirtied_when = jiffies; | 1339 | inode->dirtied_when = jiffies; |
1272 | list_move(&inode->i_wb_list, dirtytime ? | 1340 | if (dirtytime) |
1273 | &bdi->wb.b_dirty_time : &bdi->wb.b_dirty); | 1341 | inode->dirtied_time_when = jiffies; |
1342 | if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES)) | ||
1343 | list_move(&inode->i_wb_list, &bdi->wb.b_dirty); | ||
1344 | else | ||
1345 | list_move(&inode->i_wb_list, | ||
1346 | &bdi->wb.b_dirty_time); | ||
1274 | spin_unlock(&bdi->wb.list_lock); | 1347 | spin_unlock(&bdi->wb.list_lock); |
1275 | trace_writeback_dirty_inode_enqueue(inode); | 1348 | trace_writeback_dirty_inode_enqueue(inode); |
1276 | 1349 | ||
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ed19a7d622fa..39706c57ad3c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -890,8 +890,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) | |||
890 | 890 | ||
891 | newpage = buf->page; | 891 | newpage = buf->page; |
892 | 892 | ||
893 | if (WARN_ON(!PageUptodate(newpage))) | 893 | if (!PageUptodate(newpage)) |
894 | return -EIO; | 894 | SetPageUptodate(newpage); |
895 | 895 | ||
896 | ClearPageMappedToDisk(newpage); | 896 | ClearPageMappedToDisk(newpage); |
897 | 897 | ||
@@ -1353,6 +1353,17 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, | |||
1353 | return err; | 1353 | return err; |
1354 | } | 1354 | } |
1355 | 1355 | ||
1356 | static int fuse_dev_open(struct inode *inode, struct file *file) | ||
1357 | { | ||
1358 | /* | ||
1359 | * The fuse device's file's private_data is used to hold | ||
1360 | * the fuse_conn(ection) when it is mounted, and is used to | ||
1361 | * keep track of whether the file has been mounted already. | ||
1362 | */ | ||
1363 | file->private_data = NULL; | ||
1364 | return 0; | ||
1365 | } | ||
1366 | |||
1356 | static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, | 1367 | static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, |
1357 | unsigned long nr_segs, loff_t pos) | 1368 | unsigned long nr_segs, loff_t pos) |
1358 | { | 1369 | { |
@@ -1797,6 +1808,9 @@ copy_finish: | |||
1797 | static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, | 1808 | static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, |
1798 | unsigned int size, struct fuse_copy_state *cs) | 1809 | unsigned int size, struct fuse_copy_state *cs) |
1799 | { | 1810 | { |
1811 | /* Don't try to move pages (yet) */ | ||
1812 | cs->move_pages = 0; | ||
1813 | |||
1800 | switch (code) { | 1814 | switch (code) { |
1801 | case FUSE_NOTIFY_POLL: | 1815 | case FUSE_NOTIFY_POLL: |
1802 | return fuse_notify_poll(fc, size, cs); | 1816 | return fuse_notify_poll(fc, size, cs); |
@@ -2217,6 +2231,7 @@ static int fuse_dev_fasync(int fd, struct file *file, int on) | |||
2217 | 2231 | ||
2218 | const struct file_operations fuse_dev_operations = { | 2232 | const struct file_operations fuse_dev_operations = { |
2219 | .owner = THIS_MODULE, | 2233 | .owner = THIS_MODULE, |
2234 | .open = fuse_dev_open, | ||
2220 | .llseek = no_llseek, | 2235 | .llseek = no_llseek, |
2221 | .read = do_sync_read, | 2236 | .read = do_sync_read, |
2222 | .aio_read = fuse_dev_read, | 2237 | .aio_read = fuse_dev_read, |
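fuse_dev_open() above just clears file->private_data; that pointer later holds the fuse_conn once the device fd is used in a mount, so a NULL check is enough to tell whether this fd has already been mounted. A minimal model of the pattern; the types and return values are illustrative only:

#include <stdio.h>
#include <stddef.h>

struct conn { int id; };
struct dev_file { struct conn *private_data; };

static int dev_open(struct dev_file *f)
{
	f->private_data = NULL;          /* nothing attached until mount time */
	return 0;
}

static int dev_mount(struct dev_file *f, struct conn *c)
{
	if (f->private_data)
		return -1;               /* this fd already backs a mount */
	f->private_data = c;
	return 0;
}

int main(void)
{
	struct dev_file f;
	struct conn c = { 1 };

	dev_open(&f);
	printf("first mount:  %d\n", dev_mount(&f, &c));   /* 0  */
	printf("second mount: %d\n", dev_mount(&f, &c));   /* -1 */
	return 0;
}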
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c index 6e560d56094b..754fdf8c6356 100644 --- a/fs/hfsplus/brec.c +++ b/fs/hfsplus/brec.c | |||
@@ -131,13 +131,16 @@ skip: | |||
131 | hfs_bnode_write(node, entry, data_off + key_len, entry_len); | 131 | hfs_bnode_write(node, entry, data_off + key_len, entry_len); |
132 | hfs_bnode_dump(node); | 132 | hfs_bnode_dump(node); |
133 | 133 | ||
134 | if (new_node) { | 134 | /* |
135 | /* update parent key if we inserted a key | 135 | * update parent key if we inserted a key |
136 | * at the start of the first node | 136 | * at the start of the node and it is not the new node |
137 | */ | 137 | */ |
138 | if (!rec && new_node != node) | 138 | if (!rec && new_node != node) { |
139 | hfs_brec_update_parent(fd); | 139 | hfs_bnode_read_key(node, fd->search_key, data_off + size); |
140 | hfs_brec_update_parent(fd); | ||
141 | } | ||
140 | 142 | ||
143 | if (new_node) { | ||
141 | hfs_bnode_put(fd->bnode); | 144 | hfs_bnode_put(fd->bnode); |
142 | if (!new_node->parent) { | 145 | if (!new_node->parent) { |
143 | hfs_btree_inc_height(tree); | 146 | hfs_btree_inc_height(tree); |
@@ -168,9 +171,6 @@ skip: | |||
168 | goto again; | 171 | goto again; |
169 | } | 172 | } |
170 | 173 | ||
171 | if (!rec) | ||
172 | hfs_brec_update_parent(fd); | ||
173 | |||
174 | return 0; | 174 | return 0; |
175 | } | 175 | } |
176 | 176 | ||
@@ -370,6 +370,8 @@ again: | |||
370 | if (IS_ERR(parent)) | 370 | if (IS_ERR(parent)) |
371 | return PTR_ERR(parent); | 371 | return PTR_ERR(parent); |
372 | __hfs_brec_find(parent, fd, hfs_find_rec_by_key); | 372 | __hfs_brec_find(parent, fd, hfs_find_rec_by_key); |
373 | if (fd->record < 0) | ||
374 | return -ENOENT; | ||
373 | hfs_bnode_dump(parent); | 375 | hfs_bnode_dump(parent); |
374 | rec = fd->record; | 376 | rec = fd->record; |
375 | 377 | ||
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index b684e8a132e6..2bacb9988566 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c | |||
@@ -207,6 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of, | |||
207 | goto out_free; | 207 | goto out_free; |
208 | } | 208 | } |
209 | 209 | ||
210 | of->event = atomic_read(&of->kn->attr.open->event); | ||
210 | ops = kernfs_ops(of->kn); | 211 | ops = kernfs_ops(of->kn); |
211 | if (ops->read) | 212 | if (ops->read) |
212 | len = ops->read(of, buf, len, *ppos); | 213 | len = ops->read(of, buf, len, *ppos); |
diff --git a/fs/locks.c b/fs/locks.c index 365c82e1b3a9..40bc384728c0 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -1388,9 +1388,8 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker) | |||
1388 | int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) | 1388 | int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) |
1389 | { | 1389 | { |
1390 | int error = 0; | 1390 | int error = 0; |
1391 | struct file_lock *new_fl; | ||
1392 | struct file_lock_context *ctx = inode->i_flctx; | 1391 | struct file_lock_context *ctx = inode->i_flctx; |
1393 | struct file_lock *fl; | 1392 | struct file_lock *new_fl, *fl, *tmp; |
1394 | unsigned long break_time; | 1393 | unsigned long break_time; |
1395 | int want_write = (mode & O_ACCMODE) != O_RDONLY; | 1394 | int want_write = (mode & O_ACCMODE) != O_RDONLY; |
1396 | LIST_HEAD(dispose); | 1395 | LIST_HEAD(dispose); |
@@ -1420,7 +1419,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) | |||
1420 | break_time++; /* so that 0 means no break time */ | 1419 | break_time++; /* so that 0 means no break time */ |
1421 | } | 1420 | } |
1422 | 1421 | ||
1423 | list_for_each_entry(fl, &ctx->flc_lease, fl_list) { | 1422 | list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { |
1424 | if (!leases_conflict(fl, new_fl)) | 1423 | if (!leases_conflict(fl, new_fl)) |
1425 | continue; | 1424 | continue; |
1426 | if (want_write) { | 1425 | if (want_write) { |
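Switching __break_lease() to list_for_each_entry_safe() matters because the loop body may unlink the current lease from flc_lease; the _safe variant samples the next element before the body runs. The same idiom in plain C over a simple singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

/* Delete every matching node while walking the list: "tmp" plays the role
 * of the extra cursor that list_for_each_entry_safe() keeps. */
static void remove_matching(struct node **head, int val)
{
	struct node **pp = head;
	struct node *cur, *tmp;

	for (cur = *head; cur; cur = tmp) {
		tmp = cur->next;                 /* sample next before freeing cur */
		if (cur->val == val) {
			*pp = tmp;
			free(cur);
		} else {
			pp = &cur->next;
		}
	}
}

static struct node *push(struct node *next, int val)
{
	struct node *n = malloc(sizeof(*n));
	n->val = val;
	n->next = next;
	return n;
}

int main(void)
{
	struct node *head = push(push(push(NULL, 3), 2), 3);

	remove_matching(&head, 3);
	for (struct node *n = head; n; n = n->next)
		printf("%d\n", n->val);          /* prints only 2 */
	return 0;
}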
@@ -1665,7 +1664,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr | |||
1665 | } | 1664 | } |
1666 | 1665 | ||
1667 | if (my_fl != NULL) { | 1666 | if (my_fl != NULL) { |
1668 | error = lease->fl_lmops->lm_change(my_fl, arg, &dispose); | 1667 | lease = my_fl; |
1668 | error = lease->fl_lmops->lm_change(lease, arg, &dispose); | ||
1669 | if (error) | 1669 | if (error) |
1670 | goto out; | 1670 | goto out; |
1671 | goto out_setup; | 1671 | goto out_setup; |
@@ -1727,7 +1727,7 @@ static int generic_delete_lease(struct file *filp, void *owner) | |||
1727 | break; | 1727 | break; |
1728 | } | 1728 | } |
1729 | } | 1729 | } |
1730 | trace_generic_delete_lease(inode, fl); | 1730 | trace_generic_delete_lease(inode, victim); |
1731 | if (victim) | 1731 | if (victim) |
1732 | error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); | 1732 | error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); |
1733 | spin_unlock(&ctx->flc_lock); | 1733 | spin_unlock(&ctx->flc_lock); |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index f9f4845db989..19874151e95c 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -433,7 +433,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat | |||
433 | 433 | ||
434 | static bool nfs_client_init_is_complete(const struct nfs_client *clp) | 434 | static bool nfs_client_init_is_complete(const struct nfs_client *clp) |
435 | { | 435 | { |
436 | return clp->cl_cons_state != NFS_CS_INITING; | 436 | return clp->cl_cons_state <= NFS_CS_READY; |
437 | } | 437 | } |
438 | 438 | ||
439 | int nfs_wait_client_init_complete(const struct nfs_client *clp) | 439 | int nfs_wait_client_init_complete(const struct nfs_client *clp) |
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index a1f0685b42ff..a6ad68865880 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c | |||
@@ -181,8 +181,8 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, | |||
181 | clear_bit(NFS_DELEGATION_NEED_RECLAIM, | 181 | clear_bit(NFS_DELEGATION_NEED_RECLAIM, |
182 | &delegation->flags); | 182 | &delegation->flags); |
183 | spin_unlock(&delegation->lock); | 183 | spin_unlock(&delegation->lock); |
184 | put_rpccred(oldcred); | ||
185 | rcu_read_unlock(); | 184 | rcu_read_unlock(); |
185 | put_rpccred(oldcred); | ||
186 | trace_nfs4_reclaim_delegation(inode, res->delegation_type); | 186 | trace_nfs4_reclaim_delegation(inode, res->delegation_type); |
187 | } else { | 187 | } else { |
188 | /* We appear to have raced with a delegation return. */ | 188 | /* We appear to have raced with a delegation return. */ |
@@ -370,7 +370,10 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct | |||
370 | delegation = NULL; | 370 | delegation = NULL; |
371 | goto out; | 371 | goto out; |
372 | } | 372 | } |
373 | freeme = nfs_detach_delegation_locked(nfsi, | 373 | if (test_and_set_bit(NFS_DELEGATION_RETURNING, |
374 | &old_delegation->flags)) | ||
375 | goto out; | ||
376 | freeme = nfs_detach_delegation_locked(nfsi, | ||
374 | old_delegation, clp); | 377 | old_delegation, clp); |
375 | if (freeme == NULL) | 378 | if (freeme == NULL) |
376 | goto out; | 379 | goto out; |
@@ -433,6 +436,8 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation) | |||
433 | { | 436 | { |
434 | bool ret = false; | 437 | bool ret = false; |
435 | 438 | ||
439 | if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) | ||
440 | goto out; | ||
436 | if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) | 441 | if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) |
437 | ret = true; | 442 | ret = true; |
438 | if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) { | 443 | if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) { |
@@ -444,6 +449,7 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation) | |||
444 | ret = true; | 449 | ret = true; |
445 | spin_unlock(&delegation->lock); | 450 | spin_unlock(&delegation->lock); |
446 | } | 451 | } |
452 | out: | ||
447 | return ret; | 453 | return ret; |
448 | } | 454 | } |
449 | 455 | ||
@@ -471,14 +477,20 @@ restart: | |||
471 | super_list) { | 477 | super_list) { |
472 | if (!nfs_delegation_need_return(delegation)) | 478 | if (!nfs_delegation_need_return(delegation)) |
473 | continue; | 479 | continue; |
474 | inode = nfs_delegation_grab_inode(delegation); | 480 | if (!nfs_sb_active(server->super)) |
475 | if (inode == NULL) | ||
476 | continue; | 481 | continue; |
482 | inode = nfs_delegation_grab_inode(delegation); | ||
483 | if (inode == NULL) { | ||
484 | rcu_read_unlock(); | ||
485 | nfs_sb_deactive(server->super); | ||
486 | goto restart; | ||
487 | } | ||
477 | delegation = nfs_start_delegation_return_locked(NFS_I(inode)); | 488 | delegation = nfs_start_delegation_return_locked(NFS_I(inode)); |
478 | rcu_read_unlock(); | 489 | rcu_read_unlock(); |
479 | 490 | ||
480 | err = nfs_end_delegation_return(inode, delegation, 0); | 491 | err = nfs_end_delegation_return(inode, delegation, 0); |
481 | iput(inode); | 492 | iput(inode); |
493 | nfs_sb_deactive(server->super); | ||
482 | if (!err) | 494 | if (!err) |
483 | goto restart; | 495 | goto restart; |
484 | set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); | 496 | set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); |
@@ -809,19 +821,30 @@ restart: | |||
809 | list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { | 821 | list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { |
810 | list_for_each_entry_rcu(delegation, &server->delegations, | 822 | list_for_each_entry_rcu(delegation, &server->delegations, |
811 | super_list) { | 823 | super_list) { |
824 | if (test_bit(NFS_DELEGATION_RETURNING, | ||
825 | &delegation->flags)) | ||
826 | continue; | ||
812 | if (test_bit(NFS_DELEGATION_NEED_RECLAIM, | 827 | if (test_bit(NFS_DELEGATION_NEED_RECLAIM, |
813 | &delegation->flags) == 0) | 828 | &delegation->flags) == 0) |
814 | continue; | 829 | continue; |
815 | inode = nfs_delegation_grab_inode(delegation); | 830 | if (!nfs_sb_active(server->super)) |
816 | if (inode == NULL) | ||
817 | continue; | 831 | continue; |
818 | delegation = nfs_detach_delegation(NFS_I(inode), | 832 | inode = nfs_delegation_grab_inode(delegation); |
819 | delegation, server); | 833 | if (inode == NULL) { |
834 | rcu_read_unlock(); | ||
835 | nfs_sb_deactive(server->super); | ||
836 | goto restart; | ||
837 | } | ||
838 | delegation = nfs_start_delegation_return_locked(NFS_I(inode)); | ||
820 | rcu_read_unlock(); | 839 | rcu_read_unlock(); |
821 | 840 | if (delegation != NULL) { | |
822 | if (delegation != NULL) | 841 | delegation = nfs_detach_delegation(NFS_I(inode), |
823 | nfs_free_delegation(delegation); | 842 | delegation, server); |
843 | if (delegation != NULL) | ||
844 | nfs_free_delegation(delegation); | ||
845 | } | ||
824 | iput(inode); | 846 | iput(inode); |
847 | nfs_sb_deactive(server->super); | ||
825 | goto restart; | 848 | goto restart; |
826 | } | 849 | } |
827 | } | 850 | } |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 9b0c55cb2a2e..c19e16f0b2d0 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -408,14 +408,22 @@ static int xdr_decode(nfs_readdir_descriptor_t *desc, | |||
408 | return 0; | 408 | return 0; |
409 | } | 409 | } |
410 | 410 | ||
411 | /* Match file and dirent using either filehandle or fileid | ||
412 | * Note: caller is responsible for checking the fsid | ||
413 | */ | ||
411 | static | 414 | static |
412 | int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) | 415 | int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) |
413 | { | 416 | { |
417 | struct nfs_inode *nfsi; | ||
418 | |||
414 | if (dentry->d_inode == NULL) | 419 | if (dentry->d_inode == NULL) |
415 | goto different; | 420 | goto different; |
416 | if (nfs_compare_fh(entry->fh, NFS_FH(dentry->d_inode)) != 0) | 421 | |
417 | goto different; | 422 | nfsi = NFS_I(dentry->d_inode); |
418 | return 1; | 423 | if (entry->fattr->fileid == nfsi->fileid) |
424 | return 1; | ||
425 | if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0) | ||
426 | return 1; | ||
419 | different: | 427 | different: |
420 | return 0; | 428 | return 0; |
421 | } | 429 | } |
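The relaxed nfs_same_file() above accepts a dentry when either the fileid or the filehandle from the READDIR entry matches the cached inode, with the fsid check left to the caller. A small sketch of that ordering; the types below are stand-ins rather than NFS client structures:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fh { unsigned int size; unsigned char data[64]; };

static bool fh_equal(const struct fh *a, const struct fh *b)
{
	return a->size == b->size && memcmp(a->data, b->data, a->size) == 0;
}

/* Cheap fileid comparison first, filehandle comparison as the fallback. */
static bool same_file(uint64_t cached_fileid, const struct fh *cached_fh,
		      uint64_t entry_fileid, const struct fh *entry_fh)
{
	if (entry_fileid == cached_fileid)
		return true;
	return fh_equal(entry_fh, cached_fh);
}

int main(void)
{
	struct fh a = { 4, { 1, 2, 3, 4 } }, b = { 4, { 1, 2, 3, 5 } };

	printf("%d\n", same_file(10, &a, 10, &b));   /* 1: fileid matches */
	printf("%d\n", same_file(10, &a, 11, &b));   /* 0: neither matches */
	return 0;
}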
@@ -469,6 +477,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) | |||
469 | struct inode *inode; | 477 | struct inode *inode; |
470 | int status; | 478 | int status; |
471 | 479 | ||
480 | if (!(entry->fattr->valid & NFS_ATTR_FATTR_FILEID)) | ||
481 | return; | ||
482 | if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID)) | ||
483 | return; | ||
472 | if (filename.name[0] == '.') { | 484 | if (filename.name[0] == '.') { |
473 | if (filename.len == 1) | 485 | if (filename.len == 1) |
474 | return; | 486 | return; |
@@ -479,6 +491,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) | |||
479 | 491 | ||
480 | dentry = d_lookup(parent, &filename); | 492 | dentry = d_lookup(parent, &filename); |
481 | if (dentry != NULL) { | 493 | if (dentry != NULL) { |
494 | /* Is there a mountpoint here? If so, just exit */ | ||
495 | if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid, | ||
496 | &entry->fattr->fsid)) | ||
497 | goto out; | ||
482 | if (nfs_same_file(dentry, entry)) { | 498 | if (nfs_same_file(dentry, entry)) { |
483 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); | 499 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); |
484 | status = nfs_refresh_inode(dentry->d_inode, entry->fattr); | 500 | status = nfs_refresh_inode(dentry->d_inode, entry->fattr); |
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 94712fc781fa..e679d24c39d3 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -178,7 +178,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to) | |||
178 | iocb->ki_filp, | 178 | iocb->ki_filp, |
179 | iov_iter_count(to), (unsigned long) iocb->ki_pos); | 179 | iov_iter_count(to), (unsigned long) iocb->ki_pos); |
180 | 180 | ||
181 | result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping); | 181 | result = nfs_revalidate_mapping_protected(inode, iocb->ki_filp->f_mapping); |
182 | if (!result) { | 182 | if (!result) { |
183 | result = generic_file_read_iter(iocb, to); | 183 | result = generic_file_read_iter(iocb, to); |
184 | if (result > 0) | 184 | if (result > 0) |
@@ -199,7 +199,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos, | |||
199 | dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n", | 199 | dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n", |
200 | filp, (unsigned long) count, (unsigned long long) *ppos); | 200 | filp, (unsigned long) count, (unsigned long long) *ppos); |
201 | 201 | ||
202 | res = nfs_revalidate_mapping(inode, filp->f_mapping); | 202 | res = nfs_revalidate_mapping_protected(inode, filp->f_mapping); |
203 | if (!res) { | 203 | if (!res) { |
204 | res = generic_file_splice_read(filp, ppos, pipe, count, flags); | 204 | res = generic_file_splice_read(filp, ppos, pipe, count, flags); |
205 | if (res > 0) | 205 | if (res > 0) |
@@ -372,6 +372,10 @@ start: | |||
372 | nfs_wait_bit_killable, TASK_KILLABLE); | 372 | nfs_wait_bit_killable, TASK_KILLABLE); |
373 | if (ret) | 373 | if (ret) |
374 | return ret; | 374 | return ret; |
375 | /* | ||
376 | * Wait for O_DIRECT to complete | ||
377 | */ | ||
378 | nfs_inode_dio_wait(mapping->host); | ||
375 | 379 | ||
376 | page = grab_cache_page_write_begin(mapping, index, flags); | 380 | page = grab_cache_page_write_begin(mapping, index, flags); |
377 | if (!page) | 381 | if (!page) |
@@ -619,6 +623,9 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
619 | /* make sure the cache has finished storing the page */ | 623 | /* make sure the cache has finished storing the page */ |
620 | nfs_fscache_wait_on_page_write(NFS_I(inode), page); | 624 | nfs_fscache_wait_on_page_write(NFS_I(inode), page); |
621 | 625 | ||
626 | wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING, | ||
627 | nfs_wait_bit_killable, TASK_KILLABLE); | ||
628 | |||
622 | lock_page(page); | 629 | lock_page(page); |
623 | mapping = page_file_mapping(page); | 630 | mapping = page_file_mapping(page); |
624 | if (mapping != inode->i_mapping) | 631 | if (mapping != inode->i_mapping) |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 83107be3dd01..d42dff6d5e98 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -556,6 +556,7 @@ EXPORT_SYMBOL_GPL(nfs_setattr); | |||
556 | * This is a copy of the common vmtruncate, but with the locking | 556 | * This is a copy of the common vmtruncate, but with the locking |
557 | * corrected to take into account the fact that NFS requires | 557 | * corrected to take into account the fact that NFS requires |
558 | * inode->i_size to be updated under the inode->i_lock. | 558 | * inode->i_size to be updated under the inode->i_lock. |
559 | * Note: must be called with inode->i_lock held! | ||
559 | */ | 560 | */ |
560 | static int nfs_vmtruncate(struct inode * inode, loff_t offset) | 561 | static int nfs_vmtruncate(struct inode * inode, loff_t offset) |
561 | { | 562 | { |
@@ -565,14 +566,14 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset) | |||
565 | if (err) | 566 | if (err) |
566 | goto out; | 567 | goto out; |
567 | 568 | ||
568 | spin_lock(&inode->i_lock); | ||
569 | i_size_write(inode, offset); | 569 | i_size_write(inode, offset); |
570 | /* Optimisation */ | 570 | /* Optimisation */ |
571 | if (offset == 0) | 571 | if (offset == 0) |
572 | NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA; | 572 | NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA; |
573 | spin_unlock(&inode->i_lock); | ||
574 | 573 | ||
574 | spin_unlock(&inode->i_lock); | ||
575 | truncate_pagecache(inode, offset); | 575 | truncate_pagecache(inode, offset); |
576 | spin_lock(&inode->i_lock); | ||
576 | out: | 577 | out: |
577 | return err; | 578 | return err; |
578 | } | 579 | } |
@@ -585,10 +586,15 @@ out: | |||
585 | * Note: we do this in the *proc.c in order to ensure that | 586 | * Note: we do this in the *proc.c in order to ensure that |
586 | * it works for things like exclusive creates too. | 587 | * it works for things like exclusive creates too. |
587 | */ | 588 | */ |
588 | void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr) | 589 | void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, |
590 | struct nfs_fattr *fattr) | ||
589 | { | 591 | { |
592 | /* Barrier: bump the attribute generation count. */ | ||
593 | nfs_fattr_set_barrier(fattr); | ||
594 | |||
595 | spin_lock(&inode->i_lock); | ||
596 | NFS_I(inode)->attr_gencount = fattr->gencount; | ||
590 | if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) { | 597 | if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) { |
591 | spin_lock(&inode->i_lock); | ||
592 | if ((attr->ia_valid & ATTR_MODE) != 0) { | 598 | if ((attr->ia_valid & ATTR_MODE) != 0) { |
593 | int mode = attr->ia_mode & S_IALLUGO; | 599 | int mode = attr->ia_mode & S_IALLUGO; |
594 | mode |= inode->i_mode & ~S_IALLUGO; | 600 | mode |= inode->i_mode & ~S_IALLUGO; |
@@ -600,12 +606,13 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr) | |||
600 | inode->i_gid = attr->ia_gid; | 606 | inode->i_gid = attr->ia_gid; |
601 | nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS | 607 | nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS |
602 | | NFS_INO_INVALID_ACL); | 608 | | NFS_INO_INVALID_ACL); |
603 | spin_unlock(&inode->i_lock); | ||
604 | } | 609 | } |
605 | if ((attr->ia_valid & ATTR_SIZE) != 0) { | 610 | if ((attr->ia_valid & ATTR_SIZE) != 0) { |
606 | nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC); | 611 | nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC); |
607 | nfs_vmtruncate(inode, attr->ia_size); | 612 | nfs_vmtruncate(inode, attr->ia_size); |
608 | } | 613 | } |
614 | nfs_update_inode(inode, fattr); | ||
615 | spin_unlock(&inode->i_lock); | ||
609 | } | 616 | } |
610 | EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); | 617 | EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); |
611 | 618 | ||
@@ -1028,6 +1035,7 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map | |||
1028 | 1035 | ||
1029 | if (mapping->nrpages != 0) { | 1036 | if (mapping->nrpages != 0) { |
1030 | if (S_ISREG(inode->i_mode)) { | 1037 | if (S_ISREG(inode->i_mode)) { |
1038 | unmap_mapping_range(mapping, 0, 0, 0); | ||
1031 | ret = nfs_sync_mapping(mapping); | 1039 | ret = nfs_sync_mapping(mapping); |
1032 | if (ret < 0) | 1040 | if (ret < 0) |
1033 | return ret; | 1041 | return ret; |
@@ -1060,11 +1068,14 @@ static bool nfs_mapping_need_revalidate_inode(struct inode *inode) | |||
1060 | } | 1068 | } |
1061 | 1069 | ||
1062 | /** | 1070 | /** |
1063 | * nfs_revalidate_mapping - Revalidate the pagecache | 1071 | * __nfs_revalidate_mapping - Revalidate the pagecache |
1064 | * @inode - pointer to host inode | 1072 | * @inode - pointer to host inode |
1065 | * @mapping - pointer to mapping | 1073 | * @mapping - pointer to mapping |
1074 | * @may_lock - take inode->i_mutex? | ||
1066 | */ | 1075 | */ |
1067 | int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) | 1076 | static int __nfs_revalidate_mapping(struct inode *inode, |
1077 | struct address_space *mapping, | ||
1078 | bool may_lock) | ||
1068 | { | 1079 | { |
1069 | struct nfs_inode *nfsi = NFS_I(inode); | 1080 | struct nfs_inode *nfsi = NFS_I(inode); |
1070 | unsigned long *bitlock = &nfsi->flags; | 1081 | unsigned long *bitlock = &nfsi->flags; |
@@ -1113,7 +1124,12 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) | |||
1113 | nfsi->cache_validity &= ~NFS_INO_INVALID_DATA; | 1124 | nfsi->cache_validity &= ~NFS_INO_INVALID_DATA; |
1114 | spin_unlock(&inode->i_lock); | 1125 | spin_unlock(&inode->i_lock); |
1115 | trace_nfs_invalidate_mapping_enter(inode); | 1126 | trace_nfs_invalidate_mapping_enter(inode); |
1116 | ret = nfs_invalidate_mapping(inode, mapping); | 1127 | if (may_lock) { |
1128 | mutex_lock(&inode->i_mutex); | ||
1129 | ret = nfs_invalidate_mapping(inode, mapping); | ||
1130 | mutex_unlock(&inode->i_mutex); | ||
1131 | } else | ||
1132 | ret = nfs_invalidate_mapping(inode, mapping); | ||
1117 | trace_nfs_invalidate_mapping_exit(inode, ret); | 1133 | trace_nfs_invalidate_mapping_exit(inode, ret); |
1118 | 1134 | ||
1119 | clear_bit_unlock(NFS_INO_INVALIDATING, bitlock); | 1135 | clear_bit_unlock(NFS_INO_INVALIDATING, bitlock); |
@@ -1123,6 +1139,29 @@ out: | |||
1123 | return ret; | 1139 | return ret; |
1124 | } | 1140 | } |
1125 | 1141 | ||
1142 | /** | ||
1143 | * nfs_revalidate_mapping - Revalidate the pagecache | ||
1144 | * @inode - pointer to host inode | ||
1145 | * @mapping - pointer to mapping | ||
1146 | */ | ||
1147 | int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) | ||
1148 | { | ||
1149 | return __nfs_revalidate_mapping(inode, mapping, false); | ||
1150 | } | ||
1151 | |||
1152 | /** | ||
1153 | * nfs_revalidate_mapping_protected - Revalidate the pagecache | ||
1154 | * @inode - pointer to host inode | ||
1155 | * @mapping - pointer to mapping | ||
1156 | * | ||
1157 | * Differs from nfs_revalidate_mapping() in that it grabs the inode->i_mutex | ||
1158 | * while invalidating the mapping. | ||
1159 | */ | ||
1160 | int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping) | ||
1161 | { | ||
1162 | return __nfs_revalidate_mapping(inode, mapping, true); | ||
1163 | } | ||
1164 | |||
1126 | static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) | 1165 | static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) |
1127 | { | 1166 | { |
1128 | struct nfs_inode *nfsi = NFS_I(inode); | 1167 | struct nfs_inode *nfsi = NFS_I(inode); |
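The __nfs_revalidate_mapping() split above is the usual "one worker, two entry points" shape: the _protected variant performs the same invalidation but holds inode->i_mutex around it so readers cannot race with O_DIRECT. A sketch of that shape with a pthread mutex standing in for i_mutex; everything here is illustrative, not NFS code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t inode_mutex = PTHREAD_MUTEX_INITIALIZER;

static int invalidate_mapping(void)
{
	return 0;                        /* stand-in for the real cache invalidation */
}

static int revalidate_mapping_common(int may_lock)
{
	int ret;

	if (may_lock) {
		pthread_mutex_lock(&inode_mutex);
		ret = invalidate_mapping();
		pthread_mutex_unlock(&inode_mutex);
	} else {
		ret = invalidate_mapping();
	}
	return ret;
}

int main(void)
{
	printf("%d %d\n", revalidate_mapping_common(0), revalidate_mapping_common(1));
	return 0;
}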
@@ -1231,13 +1270,6 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat | |||
1231 | return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0; | 1270 | return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0; |
1232 | } | 1271 | } |
1233 | 1272 | ||
1234 | static int nfs_size_need_update(const struct inode *inode, const struct nfs_fattr *fattr) | ||
1235 | { | ||
1236 | if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) | ||
1237 | return 0; | ||
1238 | return nfs_size_to_loff_t(fattr->size) > i_size_read(inode); | ||
1239 | } | ||
1240 | |||
1241 | static atomic_long_t nfs_attr_generation_counter; | 1273 | static atomic_long_t nfs_attr_generation_counter; |
1242 | 1274 | ||
1243 | static unsigned long nfs_read_attr_generation_counter(void) | 1275 | static unsigned long nfs_read_attr_generation_counter(void) |
@@ -1249,6 +1281,7 @@ unsigned long nfs_inc_attr_generation_counter(void) | |||
1249 | { | 1281 | { |
1250 | return atomic_long_inc_return(&nfs_attr_generation_counter); | 1282 | return atomic_long_inc_return(&nfs_attr_generation_counter); |
1251 | } | 1283 | } |
1284 | EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter); | ||
1252 | 1285 | ||
1253 | void nfs_fattr_init(struct nfs_fattr *fattr) | 1286 | void nfs_fattr_init(struct nfs_fattr *fattr) |
1254 | { | 1287 | { |
@@ -1260,6 +1293,22 @@ void nfs_fattr_init(struct nfs_fattr *fattr) | |||
1260 | } | 1293 | } |
1261 | EXPORT_SYMBOL_GPL(nfs_fattr_init); | 1294 | EXPORT_SYMBOL_GPL(nfs_fattr_init); |
1262 | 1295 | ||
1296 | /** | ||
1297 | * nfs_fattr_set_barrier | ||
1298 | * @fattr: attributes | ||
1299 | * | ||
1300 | * Used to set a barrier after an attribute was updated. This | ||
1301 | * barrier ensures that older attributes from RPC calls that may | ||
1302 | * have raced with our update cannot clobber these new values. | ||
1303 | * Note that you are still responsible for ensuring that other | ||
1304 | * operations which change the attribute on the server do not | ||
1305 | * collide. | ||
1306 | */ | ||
1307 | void nfs_fattr_set_barrier(struct nfs_fattr *fattr) | ||
1308 | { | ||
1309 | fattr->gencount = nfs_inc_attr_generation_counter(); | ||
1310 | } | ||
1311 | |||
1263 | struct nfs_fattr *nfs_alloc_fattr(void) | 1312 | struct nfs_fattr *nfs_alloc_fattr(void) |
1264 | { | 1313 | { |
1265 | struct nfs_fattr *fattr; | 1314 | struct nfs_fattr *fattr; |
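nfs_fattr_set_barrier() above stamps a reply's attributes with a fresh generation number so that older, racing replies cannot overwrite them, and the update path compares generations with a wrap-safe signed difference. A compact single-threaded model of both halves; the names are illustrative and the kernel uses an atomic counter:

#include <stdbool.h>
#include <stdio.h>

static unsigned long attr_generation_counter;

/* nfs_inc_attr_generation_counter() is an atomic increment in the kernel;
 * a plain increment is enough for this illustration. */
static unsigned long set_barrier(void)
{
	return ++attr_generation_counter;
}

/* True when the reply is newer than what the inode already caches. */
static bool attrs_need_update(unsigned long reply_gencount, unsigned long inode_gencount)
{
	return (long)(reply_gencount - inode_gencount) > 0;
}

int main(void)
{
	unsigned long inode_gen = set_barrier();   /* our own update wins the race */
	unsigned long stale_gen = inode_gen - 1;   /* an older RPC reply */

	printf("stale reply applied? %d\n", attrs_need_update(stale_gen, inode_gen));
	printf("newer reply applied? %d\n", attrs_need_update(set_barrier(), inode_gen));
	return 0;
}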
@@ -1370,7 +1419,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n | |||
1370 | 1419 | ||
1371 | return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 || | 1420 | return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 || |
1372 | nfs_ctime_need_update(inode, fattr) || | 1421 | nfs_ctime_need_update(inode, fattr) || |
1373 | nfs_size_need_update(inode, fattr) || | ||
1374 | ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); | 1422 | ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); |
1375 | } | 1423 | } |
1376 | 1424 | ||
@@ -1460,6 +1508,7 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1460 | int status; | 1508 | int status; |
1461 | 1509 | ||
1462 | spin_lock(&inode->i_lock); | 1510 | spin_lock(&inode->i_lock); |
1511 | nfs_fattr_set_barrier(fattr); | ||
1463 | status = nfs_post_op_update_inode_locked(inode, fattr); | 1512 | status = nfs_post_op_update_inode_locked(inode, fattr); |
1464 | spin_unlock(&inode->i_lock); | 1513 | spin_unlock(&inode->i_lock); |
1465 | 1514 | ||
@@ -1468,7 +1517,7 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1468 | EXPORT_SYMBOL_GPL(nfs_post_op_update_inode); | 1517 | EXPORT_SYMBOL_GPL(nfs_post_op_update_inode); |
1469 | 1518 | ||
1470 | /** | 1519 | /** |
1471 | * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache | 1520 | * nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache |
1472 | * @inode - pointer to inode | 1521 | * @inode - pointer to inode |
1473 | * @fattr - updated attributes | 1522 | * @fattr - updated attributes |
1474 | * | 1523 | * |
@@ -1478,11 +1527,10 @@ EXPORT_SYMBOL_GPL(nfs_post_op_update_inode); | |||
1478 | * | 1527 | * |
1479 | * This function is mainly designed to be used by the ->write_done() functions. | 1528 | * This function is mainly designed to be used by the ->write_done() functions. |
1480 | */ | 1529 | */ |
1481 | int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr) | 1530 | int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr) |
1482 | { | 1531 | { |
1483 | int status; | 1532 | int status; |
1484 | 1533 | ||
1485 | spin_lock(&inode->i_lock); | ||
1486 | /* Don't do a WCC update if these attributes are already stale */ | 1534 | /* Don't do a WCC update if these attributes are already stale */ |
1487 | if ((fattr->valid & NFS_ATTR_FATTR) == 0 || | 1535 | if ((fattr->valid & NFS_ATTR_FATTR) == 0 || |
1488 | !nfs_inode_attrs_need_update(inode, fattr)) { | 1536 | !nfs_inode_attrs_need_update(inode, fattr)) { |
@@ -1514,6 +1562,27 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa | |||
1514 | } | 1562 | } |
1515 | out_noforce: | 1563 | out_noforce: |
1516 | status = nfs_post_op_update_inode_locked(inode, fattr); | 1564 | status = nfs_post_op_update_inode_locked(inode, fattr); |
1565 | return status; | ||
1566 | } | ||
1567 | |||
1568 | /** | ||
1569 | * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache | ||
1570 | * @inode - pointer to inode | ||
1571 | * @fattr - updated attributes | ||
1572 | * | ||
1573 | * After an operation that has changed the inode metadata, mark the | ||
1574 | * attribute cache as being invalid, then try to update it. Fake up | ||
1575 | * weak cache consistency data, if none exist. | ||
1576 | * | ||
1577 | * This function is mainly designed to be used by the ->write_done() functions. | ||
1578 | */ | ||
1579 | int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr) | ||
1580 | { | ||
1581 | int status; | ||
1582 | |||
1583 | spin_lock(&inode->i_lock); | ||
1584 | nfs_fattr_set_barrier(fattr); | ||
1585 | status = nfs_post_op_update_inode_force_wcc_locked(inode, fattr); | ||
1517 | spin_unlock(&inode->i_lock); | 1586 | spin_unlock(&inode->i_lock); |
1518 | return status; | 1587 | return status; |
1519 | } | 1588 | } |
@@ -1715,6 +1784,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1715 | nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE); | 1784 | nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE); |
1716 | nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); | 1785 | nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); |
1717 | nfsi->attrtimeo_timestamp = now; | 1786 | nfsi->attrtimeo_timestamp = now; |
1787 | /* Set barrier to be more recent than all outstanding updates */ | ||
1718 | nfsi->attr_gencount = nfs_inc_attr_generation_counter(); | 1788 | nfsi->attr_gencount = nfs_inc_attr_generation_counter(); |
1719 | } else { | 1789 | } else { |
1720 | if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) { | 1790 | if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) { |
@@ -1722,6 +1792,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1722 | nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode); | 1792 | nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode); |
1723 | nfsi->attrtimeo_timestamp = now; | 1793 | nfsi->attrtimeo_timestamp = now; |
1724 | } | 1794 | } |
1795 | /* Set the barrier to be more recent than this fattr */ | ||
1796 | if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0) | ||
1797 | nfsi->attr_gencount = fattr->gencount; | ||
1725 | } | 1798 | } |
1726 | invalid &= ~NFS_INO_INVALID_ATTR; | 1799 | invalid &= ~NFS_INO_INVALID_ATTR; |
1727 | /* Don't invalidate the data if we were to blame */ | 1800 | /* Don't invalidate the data if we were to blame */ |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index b802fb3a2d99..9e6475bc5ba2 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -459,6 +459,7 @@ void nfs_mark_request_commit(struct nfs_page *req, | |||
459 | struct nfs_commit_info *cinfo, | 459 | struct nfs_commit_info *cinfo, |
460 | u32 ds_commit_idx); | 460 | u32 ds_commit_idx); |
461 | int nfs_write_need_commit(struct nfs_pgio_header *); | 461 | int nfs_write_need_commit(struct nfs_pgio_header *); |
462 | void nfs_writeback_update_inode(struct nfs_pgio_header *hdr); | ||
462 | int nfs_generic_commit_list(struct inode *inode, struct list_head *head, | 463 | int nfs_generic_commit_list(struct inode *inode, struct list_head *head, |
463 | int how, struct nfs_commit_info *cinfo); | 464 | int how, struct nfs_commit_info *cinfo); |
464 | void nfs_retry_commit(struct list_head *page_list, | 465 | void nfs_retry_commit(struct list_head *page_list, |
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 78e557c3ab87..1f11d2533ee4 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c | |||
@@ -138,7 +138,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
138 | nfs_fattr_init(fattr); | 138 | nfs_fattr_init(fattr); |
139 | status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); | 139 | status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); |
140 | if (status == 0) | 140 | if (status == 0) |
141 | nfs_setattr_update_inode(inode, sattr); | 141 | nfs_setattr_update_inode(inode, sattr, fattr); |
142 | dprintk("NFS reply setattr: %d\n", status); | 142 | dprintk("NFS reply setattr: %d\n", status); |
143 | return status; | 143 | return status; |
144 | } | 144 | } |
@@ -834,7 +834,7 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) | |||
834 | if (nfs3_async_handle_jukebox(task, inode)) | 834 | if (nfs3_async_handle_jukebox(task, inode)) |
835 | return -EAGAIN; | 835 | return -EAGAIN; |
836 | if (task->tk_status >= 0) | 836 | if (task->tk_status >= 0) |
837 | nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr); | 837 | nfs_writeback_update_inode(hdr); |
838 | return 0; | 838 | return 0; |
839 | } | 839 | } |
840 | 840 | ||
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 2a932fdc57cb..53852a4bd88b 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c | |||
@@ -1987,6 +1987,11 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, | |||
1987 | if (entry->fattr->valid & NFS_ATTR_FATTR_V3) | 1987 | if (entry->fattr->valid & NFS_ATTR_FATTR_V3) |
1988 | entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); | 1988 | entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); |
1989 | 1989 | ||
1990 | if (entry->fattr->fileid != entry->ino) { | ||
1991 | entry->fattr->mounted_on_fileid = entry->ino; | ||
1992 | entry->fattr->valid |= NFS_ATTR_FATTR_MOUNTED_ON_FILEID; | ||
1993 | } | ||
1994 | |||
1990 | /* In fact, a post_op_fh3: */ | 1995 | /* In fact, a post_op_fh3: */ |
1991 | p = xdr_inline_decode(xdr, 4); | 1996 | p = xdr_inline_decode(xdr, 4); |
1992 | if (unlikely(p == NULL)) | 1997 | if (unlikely(p == NULL)) |
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 8646af9b11d2..86d6214ea022 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
@@ -621,6 +621,9 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
621 | spin_lock(&nn->nfs_client_lock); | 621 | spin_lock(&nn->nfs_client_lock); |
622 | list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { | 622 | list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { |
623 | 623 | ||
624 | if (pos == new) | ||
625 | goto found; | ||
626 | |||
624 | if (pos->rpc_ops != new->rpc_ops) | 627 | if (pos->rpc_ops != new->rpc_ops) |
625 | continue; | 628 | continue; |
626 | 629 | ||
@@ -639,10 +642,6 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
639 | prev = pos; | 642 | prev = pos; |
640 | 643 | ||
641 | status = nfs_wait_client_init_complete(pos); | 644 | status = nfs_wait_client_init_complete(pos); |
642 | if (pos->cl_cons_state == NFS_CS_SESSION_INITING) { | ||
643 | nfs4_schedule_lease_recovery(pos); | ||
644 | status = nfs4_wait_clnt_recover(pos); | ||
645 | } | ||
646 | spin_lock(&nn->nfs_client_lock); | 645 | spin_lock(&nn->nfs_client_lock); |
647 | if (status < 0) | 646 | if (status < 0) |
648 | break; | 647 | break; |
@@ -668,7 +667,7 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
668 | */ | 667 | */ |
669 | if (!nfs4_match_client_owner_id(pos, new)) | 668 | if (!nfs4_match_client_owner_id(pos, new)) |
670 | continue; | 669 | continue; |
671 | 670 | found: | |
672 | atomic_inc(&pos->cl_count); | 671 | atomic_inc(&pos->cl_count); |
673 | *result = pos; | 672 | *result = pos; |
674 | status = 0; | 673 | status = 0; |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 88180ac5ea0e..627f37c44456 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -901,6 +901,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) | |||
901 | if (!cinfo->atomic || cinfo->before != dir->i_version) | 901 | if (!cinfo->atomic || cinfo->before != dir->i_version) |
902 | nfs_force_lookup_revalidate(dir); | 902 | nfs_force_lookup_revalidate(dir); |
903 | dir->i_version = cinfo->after; | 903 | dir->i_version = cinfo->after; |
904 | nfsi->attr_gencount = nfs_inc_attr_generation_counter(); | ||
904 | nfs_fscache_invalidate(dir); | 905 | nfs_fscache_invalidate(dir); |
905 | spin_unlock(&dir->i_lock); | 906 | spin_unlock(&dir->i_lock); |
906 | } | 907 | } |
@@ -1552,6 +1553,9 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod | |||
1552 | 1553 | ||
1553 | opendata->o_arg.open_flags = 0; | 1554 | opendata->o_arg.open_flags = 0; |
1554 | opendata->o_arg.fmode = fmode; | 1555 | opendata->o_arg.fmode = fmode; |
1556 | opendata->o_arg.share_access = nfs4_map_atomic_open_share( | ||
1557 | NFS_SB(opendata->dentry->d_sb), | ||
1558 | fmode, 0); | ||
1555 | memset(&opendata->o_res, 0, sizeof(opendata->o_res)); | 1559 | memset(&opendata->o_res, 0, sizeof(opendata->o_res)); |
1556 | memset(&opendata->c_res, 0, sizeof(opendata->c_res)); | 1560 | memset(&opendata->c_res, 0, sizeof(opendata->c_res)); |
1557 | nfs4_init_opendata_res(opendata); | 1561 | nfs4_init_opendata_res(opendata); |
@@ -2413,8 +2417,8 @@ static int _nfs4_do_open(struct inode *dir, | |||
2413 | opendata->o_res.f_attr, sattr, | 2417 | opendata->o_res.f_attr, sattr, |
2414 | state, label, olabel); | 2418 | state, label, olabel); |
2415 | if (status == 0) { | 2419 | if (status == 0) { |
2416 | nfs_setattr_update_inode(state->inode, sattr); | 2420 | nfs_setattr_update_inode(state->inode, sattr, |
2417 | nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); | 2421 | opendata->o_res.f_attr); |
2418 | nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); | 2422 | nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); |
2419 | } | 2423 | } |
2420 | } | 2424 | } |
@@ -2651,7 +2655,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data) | |||
2651 | case -NFS4ERR_BAD_STATEID: | 2655 | case -NFS4ERR_BAD_STATEID: |
2652 | case -NFS4ERR_EXPIRED: | 2656 | case -NFS4ERR_EXPIRED: |
2653 | if (!nfs4_stateid_match(&calldata->arg.stateid, | 2657 | if (!nfs4_stateid_match(&calldata->arg.stateid, |
2654 | &state->stateid)) { | 2658 | &state->open_stateid)) { |
2655 | rpc_restart_call_prepare(task); | 2659 | rpc_restart_call_prepare(task); |
2656 | goto out_release; | 2660 | goto out_release; |
2657 | } | 2661 | } |
@@ -2687,7 +2691,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) | |||
2687 | is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); | 2691 | is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); |
2688 | is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); | 2692 | is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); |
2689 | is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); | 2693 | is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); |
2690 | nfs4_stateid_copy(&calldata->arg.stateid, &state->stateid); | 2694 | nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid); |
2691 | /* Calculate the change in open mode */ | 2695 | /* Calculate the change in open mode */ |
2692 | calldata->arg.fmode = 0; | 2696 | calldata->arg.fmode = 0; |
2693 | if (state->n_rdwr == 0) { | 2697 | if (state->n_rdwr == 0) { |
@@ -3288,7 +3292,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
3288 | 3292 | ||
3289 | status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); | 3293 | status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); |
3290 | if (status == 0) { | 3294 | if (status == 0) { |
3291 | nfs_setattr_update_inode(inode, sattr); | 3295 | nfs_setattr_update_inode(inode, sattr, fattr); |
3292 | nfs_setsecurity(inode, fattr, label); | 3296 | nfs_setsecurity(inode, fattr, label); |
3293 | } | 3297 | } |
3294 | nfs4_label_free(label); | 3298 | nfs4_label_free(label); |
@@ -4234,7 +4238,7 @@ static int nfs4_write_done_cb(struct rpc_task *task, | |||
4234 | } | 4238 | } |
4235 | if (task->tk_status >= 0) { | 4239 | if (task->tk_status >= 0) { |
4236 | renew_lease(NFS_SERVER(inode), hdr->timestamp); | 4240 | renew_lease(NFS_SERVER(inode), hdr->timestamp); |
4237 | nfs_post_op_update_inode_force_wcc(inode, &hdr->fattr); | 4241 | nfs_writeback_update_inode(hdr); |
4238 | } | 4242 | } |
4239 | return 0; | 4243 | return 0; |
4240 | } | 4244 | } |
@@ -6893,9 +6897,13 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, | |||
6893 | 6897 | ||
6894 | if (status == 0) { | 6898 | if (status == 0) { |
6895 | clp->cl_clientid = res.clientid; | 6899 | clp->cl_clientid = res.clientid; |
6896 | clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R); | 6900 | clp->cl_exchange_flags = res.flags; |
6897 | if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) | 6901 | /* Client ID is not confirmed */ |
6902 | if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { | ||
6903 | clear_bit(NFS4_SESSION_ESTABLISHED, | ||
6904 | &clp->cl_session->session_state); | ||
6898 | clp->cl_seqid = res.seqid; | 6905 | clp->cl_seqid = res.seqid; |
6906 | } | ||
6899 | 6907 | ||
6900 | kfree(clp->cl_serverowner); | 6908 | kfree(clp->cl_serverowner); |
6901 | clp->cl_serverowner = res.server_owner; | 6909 | clp->cl_serverowner = res.server_owner; |
@@ -7227,6 +7235,9 @@ static void nfs4_update_session(struct nfs4_session *session, | |||
7227 | struct nfs41_create_session_res *res) | 7235 | struct nfs41_create_session_res *res) |
7228 | { | 7236 | { |
7229 | nfs4_copy_sessionid(&session->sess_id, &res->sessionid); | 7237 | nfs4_copy_sessionid(&session->sess_id, &res->sessionid); |
7238 | /* Mark client id and session as being confirmed */ | ||
7239 | session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; | ||
7240 | set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); | ||
7230 | session->flags = res->flags; | 7241 | session->flags = res->flags; |
7231 | memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); | 7242 | memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); |
7232 | if (res->flags & SESSION4_BACK_CHAN) | 7243 | if (res->flags & SESSION4_BACK_CHAN) |
@@ -7322,8 +7333,8 @@ int nfs4_proc_destroy_session(struct nfs4_session *session, | |||
7322 | dprintk("--> nfs4_proc_destroy_session\n"); | 7333 | dprintk("--> nfs4_proc_destroy_session\n"); |
7323 | 7334 | ||
7324 | /* session is still being setup */ | 7335 | /* session is still being setup */ |
7325 | if (session->clp->cl_cons_state != NFS_CS_READY) | 7336 | if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) |
7326 | return status; | 7337 | return 0; |
7327 | 7338 | ||
7328 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); | 7339 | status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); |
7329 | trace_nfs4_destroy_session(session->clp, status); | 7340 | trace_nfs4_destroy_session(session->clp, status); |
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index fc46c7455898..e3ea2c5324d6 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h | |||
@@ -70,6 +70,7 @@ struct nfs4_session { | |||
70 | 70 | ||
71 | enum nfs4_session_state { | 71 | enum nfs4_session_state { |
72 | NFS4_SESSION_INITING, | 72 | NFS4_SESSION_INITING, |
73 | NFS4_SESSION_ESTABLISHED, | ||
73 | }; | 74 | }; |
74 | 75 | ||
75 | extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, | 76 | extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5ad908e9ce9c..f95e3b58bbc3 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -346,9 +346,23 @@ int nfs41_discover_server_trunking(struct nfs_client *clp, | |||
346 | status = nfs4_proc_exchange_id(clp, cred); | 346 | status = nfs4_proc_exchange_id(clp, cred); |
347 | if (status != NFS4_OK) | 347 | if (status != NFS4_OK) |
348 | return status; | 348 | return status; |
349 | set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | ||
350 | 349 | ||
351 | return nfs41_walk_client_list(clp, result, cred); | 350 | status = nfs41_walk_client_list(clp, result, cred); |
351 | if (status < 0) | ||
352 | return status; | ||
353 | if (clp != *result) | ||
354 | return 0; | ||
355 | |||
356 | /* Purge state if the client id was established in a prior instance */ | ||
357 | if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R) | ||
358 | set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state); | ||
359 | else | ||
360 | set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); | ||
361 | nfs4_schedule_state_manager(clp); | ||
362 | status = nfs_wait_client_init_complete(clp); | ||
363 | if (status < 0) | ||
364 | nfs_put_client(clp); | ||
365 | return status; | ||
352 | } | 366 | } |
353 | 367 | ||
354 | #endif /* CONFIG_NFS_V4_1 */ | 368 | #endif /* CONFIG_NFS_V4_1 */ |
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index b09cc23d6f43..c63189acd052 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c | |||
@@ -139,7 +139,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
139 | nfs_fattr_init(fattr); | 139 | nfs_fattr_init(fattr); |
140 | status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); | 140 | status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); |
141 | if (status == 0) | 141 | if (status == 0) |
142 | nfs_setattr_update_inode(inode, sattr); | 142 | nfs_setattr_update_inode(inode, sattr, fattr); |
143 | dprintk("NFS reply setattr: %d\n", status); | 143 | dprintk("NFS reply setattr: %d\n", status); |
144 | return status; | 144 | return status; |
145 | } | 145 | } |
@@ -609,10 +609,8 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task, | |||
609 | 609 | ||
610 | static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) | 610 | static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) |
611 | { | 611 | { |
612 | struct inode *inode = hdr->inode; | ||
613 | |||
614 | if (task->tk_status >= 0) | 612 | if (task->tk_status >= 0) |
615 | nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr); | 613 | nfs_writeback_update_inode(hdr); |
616 | return 0; | 614 | return 0; |
617 | } | 615 | } |
618 | 616 | ||
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 595d81e354d1..849ed784d6ac 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -1377,6 +1377,36 @@ static int nfs_should_remove_suid(const struct inode *inode) | |||
1377 | return 0; | 1377 | return 0; |
1378 | } | 1378 | } |
1379 | 1379 | ||
1380 | static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, | ||
1381 | struct nfs_fattr *fattr) | ||
1382 | { | ||
1383 | struct nfs_pgio_args *argp = &hdr->args; | ||
1384 | struct nfs_pgio_res *resp = &hdr->res; | ||
1385 | |||
1386 | if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) | ||
1387 | return; | ||
1388 | if (argp->offset + resp->count != fattr->size) | ||
1389 | return; | ||
1390 | if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) | ||
1391 | return; | ||
1392 | /* Set attribute barrier */ | ||
1393 | nfs_fattr_set_barrier(fattr); | ||
1394 | } | ||
1395 | |||
1396 | void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) | ||
1397 | { | ||
1398 | struct nfs_fattr *fattr = hdr->res.fattr; | ||
1399 | struct inode *inode = hdr->inode; | ||
1400 | |||
1401 | if (fattr == NULL) | ||
1402 | return; | ||
1403 | spin_lock(&inode->i_lock); | ||
1404 | nfs_writeback_check_extend(hdr, fattr); | ||
1405 | nfs_post_op_update_inode_force_wcc_locked(inode, fattr); | ||
1406 | spin_unlock(&inode->i_lock); | ||
1407 | } | ||
1408 | EXPORT_SYMBOL_GPL(nfs_writeback_update_inode); | ||
1409 | |||
1380 | /* | 1410 | /* |
1381 | * This function is called when the WRITE call is complete. | 1411 | * This function is called when the WRITE call is complete. |
1382 | */ | 1412 | */ |
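
The nfs_writeback_check_extend() helper added above only raises an attribute barrier when the WRITE reply plausibly describes a file extension: the reply must carry a size, the write must end exactly at that size, and that size must not be smaller than the locally cached i_size. A minimal sketch of the predicate, assuming those same conditions; the helper name and the numbers in the comment are invented for illustration:

	#include <linux/types.h>

	/*
	 * Editorial sketch, not part of the patch. With a cached i_size of
	 * 0x19000, a 4 KiB write at offset 0x19000 and a server-reported size
	 * of 0x1a000, both tests below pass (the real helper additionally
	 * requires NFS_ATTR_FATTR_SIZE in fattr->valid), so the reply is
	 * treated as an extension and protected by the attribute barrier.
	 */
	static bool looks_like_extension(u64 offset, u32 count, u64 new_size,
					 loff_t cached_isize)
	{
		return offset + count == new_size &&		/* write ends at the new EOF */
		       (loff_t)new_size >= cached_isize;	/* size did not shrink */
	}
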
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index cdbc78c72542..03d647bf195d 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c | |||
@@ -137,7 +137,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, | |||
137 | seg->offset = iomap.offset; | 137 | seg->offset = iomap.offset; |
138 | seg->length = iomap.length; | 138 | seg->length = iomap.length; |
139 | 139 | ||
140 | dprintk("GET: %lld:%lld %d\n", bex->foff, bex->len, bex->es); | 140 | dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es); |
141 | return 0; | 141 | return 0; |
142 | 142 | ||
143 | out_error: | 143 | out_error: |
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c index 9da89fddab33..9aa2796da90d 100644 --- a/fs/nfsd/blocklayoutxdr.c +++ b/fs/nfsd/blocklayoutxdr.c | |||
@@ -122,19 +122,19 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, | |||
122 | 122 | ||
123 | p = xdr_decode_hyper(p, &bex.foff); | 123 | p = xdr_decode_hyper(p, &bex.foff); |
124 | if (bex.foff & (block_size - 1)) { | 124 | if (bex.foff & (block_size - 1)) { |
125 | dprintk("%s: unaligned offset %lld\n", | 125 | dprintk("%s: unaligned offset 0x%llx\n", |
126 | __func__, bex.foff); | 126 | __func__, bex.foff); |
127 | goto fail; | 127 | goto fail; |
128 | } | 128 | } |
129 | p = xdr_decode_hyper(p, &bex.len); | 129 | p = xdr_decode_hyper(p, &bex.len); |
130 | if (bex.len & (block_size - 1)) { | 130 | if (bex.len & (block_size - 1)) { |
131 | dprintk("%s: unaligned length %lld\n", | 131 | dprintk("%s: unaligned length 0x%llx\n", |
132 | __func__, bex.foff); | 132 | __func__, bex.foff); |
133 | goto fail; | 133 | goto fail; |
134 | } | 134 | } |
135 | p = xdr_decode_hyper(p, &bex.soff); | 135 | p = xdr_decode_hyper(p, &bex.soff); |
136 | if (bex.soff & (block_size - 1)) { | 136 | if (bex.soff & (block_size - 1)) { |
137 | dprintk("%s: unaligned disk offset %lld\n", | 137 | dprintk("%s: unaligned disk offset 0x%llx\n", |
138 | __func__, bex.soff); | 138 | __func__, bex.soff); |
139 | goto fail; | 139 | goto fail; |
140 | } | 140 | } |
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 3c1bfa155571..6904213a4363 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c | |||
@@ -118,7 +118,7 @@ void nfsd4_setup_layout_type(struct svc_export *exp) | |||
118 | { | 118 | { |
119 | struct super_block *sb = exp->ex_path.mnt->mnt_sb; | 119 | struct super_block *sb = exp->ex_path.mnt->mnt_sb; |
120 | 120 | ||
121 | if (exp->ex_flags & NFSEXP_NOPNFS) | 121 | if (!(exp->ex_flags & NFSEXP_PNFS)) |
122 | return; | 122 | return; |
123 | 123 | ||
124 | if (sb->s_export_op->get_uuid && | 124 | if (sb->s_export_op->get_uuid && |
@@ -440,15 +440,14 @@ nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg, | |||
440 | list_move_tail(&lp->lo_perstate, reaplist); | 440 | list_move_tail(&lp->lo_perstate, reaplist); |
441 | return; | 441 | return; |
442 | } | 442 | } |
443 | end = seg->offset; | 443 | lo->offset = layout_end(seg); |
444 | } else { | 444 | } else { |
445 | /* retain the whole layout segment on a split. */ | 445 | /* retain the whole layout segment on a split. */ |
446 | if (layout_end(seg) < end) { | 446 | if (layout_end(seg) < end) { |
447 | dprintk("%s: split not supported\n", __func__); | 447 | dprintk("%s: split not supported\n", __func__); |
448 | return; | 448 | return; |
449 | } | 449 | } |
450 | 450 | end = seg->offset; | |
451 | lo->offset = layout_end(seg); | ||
452 | } | 451 | } |
453 | 452 | ||
454 | layout_update_len(lo, end); | 453 | layout_update_len(lo, end); |
@@ -513,6 +512,9 @@ nfsd4_return_client_layouts(struct svc_rqst *rqstp, | |||
513 | 512 | ||
514 | spin_lock(&clp->cl_lock); | 513 | spin_lock(&clp->cl_lock); |
515 | list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) { | 514 | list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) { |
515 | if (ls->ls_layout_type != lrp->lr_layout_type) | ||
516 | continue; | ||
517 | |||
516 | if (lrp->lr_return_type == RETURN_FSID && | 518 | if (lrp->lr_return_type == RETURN_FSID && |
517 | !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle, | 519 | !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle, |
518 | &cstate->current_fh.fh_handle)) | 520 | &cstate->current_fh.fh_handle)) |
@@ -587,7 +589,7 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) | |||
587 | 589 | ||
588 | rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); | 590 | rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); |
589 | 591 | ||
590 | nfsd4_cb_layout_fail(ls); | 592 | trace_layout_recall_fail(&ls->ls_stid.sc_stateid); |
591 | 593 | ||
592 | printk(KERN_WARNING | 594 | printk(KERN_WARNING |
593 | "nfsd: client %s failed to respond to layout recall. " | 595 | "nfsd: client %s failed to respond to layout recall. " |
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index d30bea8d0277..92b9d97aff4f 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c | |||
@@ -1237,8 +1237,8 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp, | |||
1237 | nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp); | 1237 | nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp); |
1238 | 1238 | ||
1239 | gdp->gd_notify_types &= ops->notify_types; | 1239 | gdp->gd_notify_types &= ops->notify_types; |
1240 | exp_put(exp); | ||
1241 | out: | 1240 | out: |
1241 | exp_put(exp); | ||
1242 | return nfserr; | 1242 | return nfserr; |
1243 | } | 1243 | } |
1244 | 1244 | ||
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f6b2a09f793f..8ba1d888f1e6 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -1638,7 +1638,7 @@ __destroy_client(struct nfs4_client *clp) | |||
1638 | nfs4_put_stid(&dp->dl_stid); | 1638 | nfs4_put_stid(&dp->dl_stid); |
1639 | } | 1639 | } |
1640 | while (!list_empty(&clp->cl_revoked)) { | 1640 | while (!list_empty(&clp->cl_revoked)) { |
1641 | dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); | 1641 | dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); |
1642 | list_del_init(&dp->dl_recall_lru); | 1642 | list_del_init(&dp->dl_recall_lru); |
1643 | nfs4_put_stid(&dp->dl_stid); | 1643 | nfs4_put_stid(&dp->dl_stid); |
1644 | } | 1644 | } |
@@ -3221,7 +3221,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, | |||
3221 | } else | 3221 | } else |
3222 | nfs4_free_openowner(&oo->oo_owner); | 3222 | nfs4_free_openowner(&oo->oo_owner); |
3223 | spin_unlock(&clp->cl_lock); | 3223 | spin_unlock(&clp->cl_lock); |
3224 | return oo; | 3224 | return ret; |
3225 | } | 3225 | } |
3226 | 3226 | ||
3227 | static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { | 3227 | static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { |
@@ -5062,7 +5062,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, | |||
5062 | } else | 5062 | } else |
5063 | nfs4_free_lockowner(&lo->lo_owner); | 5063 | nfs4_free_lockowner(&lo->lo_owner); |
5064 | spin_unlock(&clp->cl_lock); | 5064 | spin_unlock(&clp->cl_lock); |
5065 | return lo; | 5065 | return ret; |
5066 | } | 5066 | } |
5067 | 5067 | ||
5068 | static void | 5068 | static void |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index df5e66caf100..5fb7e78169a6 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -1562,7 +1562,11 @@ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp, | |||
1562 | p = xdr_decode_hyper(p, &lgp->lg_seg.offset); | 1562 | p = xdr_decode_hyper(p, &lgp->lg_seg.offset); |
1563 | p = xdr_decode_hyper(p, &lgp->lg_seg.length); | 1563 | p = xdr_decode_hyper(p, &lgp->lg_seg.length); |
1564 | p = xdr_decode_hyper(p, &lgp->lg_minlength); | 1564 | p = xdr_decode_hyper(p, &lgp->lg_minlength); |
1565 | nfsd4_decode_stateid(argp, &lgp->lg_sid); | 1565 | |
1566 | status = nfsd4_decode_stateid(argp, &lgp->lg_sid); | ||
1567 | if (status) | ||
1568 | return status; | ||
1569 | |||
1566 | READ_BUF(4); | 1570 | READ_BUF(4); |
1567 | lgp->lg_maxcount = be32_to_cpup(p++); | 1571 | lgp->lg_maxcount = be32_to_cpup(p++); |
1568 | 1572 | ||
@@ -1580,7 +1584,11 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp, | |||
1580 | p = xdr_decode_hyper(p, &lcp->lc_seg.offset); | 1584 | p = xdr_decode_hyper(p, &lcp->lc_seg.offset); |
1581 | p = xdr_decode_hyper(p, &lcp->lc_seg.length); | 1585 | p = xdr_decode_hyper(p, &lcp->lc_seg.length); |
1582 | lcp->lc_reclaim = be32_to_cpup(p++); | 1586 | lcp->lc_reclaim = be32_to_cpup(p++); |
1583 | nfsd4_decode_stateid(argp, &lcp->lc_sid); | 1587 | |
1588 | status = nfsd4_decode_stateid(argp, &lcp->lc_sid); | ||
1589 | if (status) | ||
1590 | return status; | ||
1591 | |||
1584 | READ_BUF(4); | 1592 | READ_BUF(4); |
1585 | lcp->lc_newoffset = be32_to_cpup(p++); | 1593 | lcp->lc_newoffset = be32_to_cpup(p++); |
1586 | if (lcp->lc_newoffset) { | 1594 | if (lcp->lc_newoffset) { |
@@ -1628,7 +1636,11 @@ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp, | |||
1628 | READ_BUF(16); | 1636 | READ_BUF(16); |
1629 | p = xdr_decode_hyper(p, &lrp->lr_seg.offset); | 1637 | p = xdr_decode_hyper(p, &lrp->lr_seg.offset); |
1630 | p = xdr_decode_hyper(p, &lrp->lr_seg.length); | 1638 | p = xdr_decode_hyper(p, &lrp->lr_seg.length); |
1631 | nfsd4_decode_stateid(argp, &lrp->lr_sid); | 1639 | |
1640 | status = nfsd4_decode_stateid(argp, &lrp->lr_sid); | ||
1641 | if (status) | ||
1642 | return status; | ||
1643 | |||
1632 | READ_BUF(4); | 1644 | READ_BUF(4); |
1633 | lrp->lrf_body_len = be32_to_cpup(p++); | 1645 | lrp->lrf_body_len = be32_to_cpup(p++); |
1634 | if (lrp->lrf_body_len > 0) { | 1646 | if (lrp->lrf_body_len > 0) { |
@@ -4123,7 +4135,7 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr, | |||
4123 | return nfserr_resource; | 4135 | return nfserr_resource; |
4124 | *p++ = cpu_to_be32(lrp->lrs_present); | 4136 | *p++ = cpu_to_be32(lrp->lrs_present); |
4125 | if (lrp->lrs_present) | 4137 | if (lrp->lrs_present) |
4126 | nfsd4_encode_stateid(xdr, &lrp->lr_sid); | 4138 | return nfsd4_encode_stateid(xdr, &lrp->lr_sid); |
4127 | return nfs_ok; | 4139 | return nfs_ok; |
4128 | } | 4140 | } |
4129 | #endif /* CONFIG_NFSD_PNFS */ | 4141 | #endif /* CONFIG_NFSD_PNFS */ |
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 83a9694ec485..46ec934f5dee 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c | |||
@@ -165,13 +165,17 @@ int nfsd_reply_cache_init(void) | |||
165 | { | 165 | { |
166 | unsigned int hashsize; | 166 | unsigned int hashsize; |
167 | unsigned int i; | 167 | unsigned int i; |
168 | int status = 0; | ||
168 | 169 | ||
169 | max_drc_entries = nfsd_cache_size_limit(); | 170 | max_drc_entries = nfsd_cache_size_limit(); |
170 | atomic_set(&num_drc_entries, 0); | 171 | atomic_set(&num_drc_entries, 0); |
171 | hashsize = nfsd_hashsize(max_drc_entries); | 172 | hashsize = nfsd_hashsize(max_drc_entries); |
172 | maskbits = ilog2(hashsize); | 173 | maskbits = ilog2(hashsize); |
173 | 174 | ||
174 | register_shrinker(&nfsd_reply_cache_shrinker); | 175 | status = register_shrinker(&nfsd_reply_cache_shrinker); |
176 | if (status) | ||
177 | return status; | ||
178 | |||
175 | drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), | 179 | drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), |
176 | 0, 0, NULL); | 180 | 0, 0, NULL); |
177 | if (!drc_slab) | 181 | if (!drc_slab) |
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index b2e3ff347620..ecdbae19a766 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c | |||
@@ -31,6 +31,8 @@ | |||
31 | #include "alloc.h" | 31 | #include "alloc.h" |
32 | #include "dat.h" | 32 | #include "dat.h" |
33 | 33 | ||
34 | static void __nilfs_btree_init(struct nilfs_bmap *bmap); | ||
35 | |||
34 | static struct nilfs_btree_path *nilfs_btree_alloc_path(void) | 36 | static struct nilfs_btree_path *nilfs_btree_alloc_path(void) |
35 | { | 37 | { |
36 | struct nilfs_btree_path *path; | 38 | struct nilfs_btree_path *path; |
@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, | |||
368 | return ret; | 370 | return ret; |
369 | } | 371 | } |
370 | 372 | ||
373 | /** | ||
374 | * nilfs_btree_root_broken - verify consistency of btree root node | ||
375 | * @node: btree root node to be examined | ||
376 | * @ino: inode number | ||
377 | * | ||
378 | * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. | ||
379 | */ | ||
380 | static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, | ||
381 | unsigned long ino) | ||
382 | { | ||
383 | int level, flags, nchildren; | ||
384 | int ret = 0; | ||
385 | |||
386 | level = nilfs_btree_node_get_level(node); | ||
387 | flags = nilfs_btree_node_get_flags(node); | ||
388 | nchildren = nilfs_btree_node_get_nchildren(node); | ||
389 | |||
390 | if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || | ||
391 | level > NILFS_BTREE_LEVEL_MAX || | ||
392 | nchildren < 0 || | ||
393 | nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { | ||
394 | pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", | ||
395 | ino, level, flags, nchildren); | ||
396 | ret = 1; | ||
397 | } | ||
398 | return ret; | ||
399 | } | ||
400 | |||
371 | int nilfs_btree_broken_node_block(struct buffer_head *bh) | 401 | int nilfs_btree_broken_node_block(struct buffer_head *bh) |
372 | { | 402 | { |
373 | int ret; | 403 | int ret; |
@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, | |||
1713 | 1743 | ||
1714 | /* convert and insert */ | 1744 | /* convert and insert */ |
1715 | dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; | 1745 | dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; |
1716 | nilfs_btree_init(btree); | 1746 | __nilfs_btree_init(btree); |
1717 | if (nreq != NULL) { | 1747 | if (nreq != NULL) { |
1718 | nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); | 1748 | nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); |
1719 | nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); | 1749 | nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); |
@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { | |||
2294 | .bop_gather_data = NULL, | 2324 | .bop_gather_data = NULL, |
2295 | }; | 2325 | }; |
2296 | 2326 | ||
2297 | int nilfs_btree_init(struct nilfs_bmap *bmap) | 2327 | static void __nilfs_btree_init(struct nilfs_bmap *bmap) |
2298 | { | 2328 | { |
2299 | bmap->b_ops = &nilfs_btree_ops; | 2329 | bmap->b_ops = &nilfs_btree_ops; |
2300 | bmap->b_nchildren_per_block = | 2330 | bmap->b_nchildren_per_block = |
2301 | NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); | 2331 | NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); |
2302 | return 0; | 2332 | } |
2333 | |||
2334 | int nilfs_btree_init(struct nilfs_bmap *bmap) | ||
2335 | { | ||
2336 | int ret = 0; | ||
2337 | |||
2338 | __nilfs_btree_init(bmap); | ||
2339 | |||
2340 | if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), | ||
2341 | bmap->b_inode->i_ino)) | ||
2342 | ret = -EIO; | ||
2343 | return ret; | ||
2303 | } | 2344 | } |
2304 | 2345 | ||
2305 | void nilfs_btree_init_gc(struct nilfs_bmap *bmap) | 2346 | void nilfs_btree_init_gc(struct nilfs_bmap *bmap) |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 469086b9f99b..0c3f303baf32 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, | |||
1907 | struct the_nilfs *nilfs) | 1907 | struct the_nilfs *nilfs) |
1908 | { | 1908 | { |
1909 | struct nilfs_inode_info *ii, *n; | 1909 | struct nilfs_inode_info *ii, *n; |
1910 | int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE); | ||
1910 | int defer_iput = false; | 1911 | int defer_iput = false; |
1911 | 1912 | ||
1912 | spin_lock(&nilfs->ns_inode_lock); | 1913 | spin_lock(&nilfs->ns_inode_lock); |
@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, | |||
1919 | brelse(ii->i_bh); | 1920 | brelse(ii->i_bh); |
1920 | ii->i_bh = NULL; | 1921 | ii->i_bh = NULL; |
1921 | list_del_init(&ii->i_dirty); | 1922 | list_del_init(&ii->i_dirty); |
1922 | if (!ii->vfs_inode.i_nlink) { | 1923 | if (!ii->vfs_inode.i_nlink || during_mount) { |
1923 | /* | 1924 | /* |
1924 | * Defer calling iput() to avoid a deadlock | 1925 | * Defer calling iput() to avoid deadlocks if |
1925 | * over I_SYNC flag for inodes with i_nlink == 0 | 1926 | * i_nlink == 0 or mount is not yet finished. |
1926 | */ | 1927 | */ |
1927 | list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); | 1928 | list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); |
1928 | defer_iput = true; | 1929 | defer_iput = true; |
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 9a66ff79ff27..d2f97ecca6a5 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c | |||
@@ -143,7 +143,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | |||
143 | !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) | 143 | !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) |
144 | return false; | 144 | return false; |
145 | 145 | ||
146 | if (event_mask & marks_mask & ~marks_ignored_mask) | 146 | if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask & |
147 | ~marks_ignored_mask) | ||
147 | return true; | 148 | return true; |
148 | 149 | ||
149 | return false; | 150 | return false; |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 46e0d4e857c7..ba1790e52ff2 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -2394,7 +2394,6 @@ relock: | |||
2394 | /* | 2394 | /* |
2395 | * for completing the rest of the request. | 2395 | * for completing the rest of the request. |
2396 | */ | 2396 | */ |
2397 | *ppos += written; | ||
2398 | count -= written; | 2397 | count -= written; |
2399 | written_buffered = generic_perform_write(file, from, *ppos); | 2398 | written_buffered = generic_perform_write(file, from, *ppos); |
2400 | /* | 2399 | /* |
@@ -2409,7 +2408,6 @@ relock: | |||
2409 | goto out_dio; | 2408 | goto out_dio; |
2410 | } | 2409 | } |
2411 | 2410 | ||
2412 | iocb->ki_pos = *ppos + written_buffered; | ||
2413 | /* We need to ensure that the page cache pages are written to | 2411 | /* We need to ensure that the page cache pages are written to |
2414 | * disk and invalidated to preserve the expected O_DIRECT | 2412 | * disk and invalidated to preserve the expected O_DIRECT |
2415 | * semantics. | 2413 | * semantics. |
@@ -2418,6 +2416,7 @@ relock: | |||
2418 | ret = filemap_write_and_wait_range(file->f_mapping, *ppos, | 2416 | ret = filemap_write_and_wait_range(file->f_mapping, *ppos, |
2419 | endbyte); | 2417 | endbyte); |
2420 | if (ret == 0) { | 2418 | if (ret == 0) { |
2419 | iocb->ki_pos = *ppos + written_buffered; | ||
2421 | written += written_buffered; | 2420 | written += written_buffered; |
2422 | invalidate_mapping_pages(mapping, | 2421 | invalidate_mapping_pages(mapping, |
2423 | *ppos >> PAGE_CACHE_SHIFT, | 2422 | *ppos >> PAGE_CACHE_SHIFT, |
@@ -2440,10 +2439,14 @@ out_dio: | |||
2440 | /* buffered aio wouldn't have proper lock coverage today */ | 2439 | /* buffered aio wouldn't have proper lock coverage today */ |
2441 | BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); | 2440 | BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); |
2442 | 2441 | ||
2442 | if (unlikely(written <= 0)) | ||
2443 | goto no_sync; | ||
2444 | |||
2443 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || | 2445 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || |
2444 | ((file->f_flags & O_DIRECT) && !direct_io)) { | 2446 | ((file->f_flags & O_DIRECT) && !direct_io)) { |
2445 | ret = filemap_fdatawrite_range(file->f_mapping, *ppos, | 2447 | ret = filemap_fdatawrite_range(file->f_mapping, |
2446 | *ppos + count - 1); | 2448 | iocb->ki_pos - written, |
2449 | iocb->ki_pos - 1); | ||
2447 | if (ret < 0) | 2450 | if (ret < 0) |
2448 | written = ret; | 2451 | written = ret; |
2449 | 2452 | ||
@@ -2454,10 +2457,12 @@ out_dio: | |||
2454 | } | 2457 | } |
2455 | 2458 | ||
2456 | if (!ret) | 2459 | if (!ret) |
2457 | ret = filemap_fdatawait_range(file->f_mapping, *ppos, | 2460 | ret = filemap_fdatawait_range(file->f_mapping, |
2458 | *ppos + count - 1); | 2461 | iocb->ki_pos - written, |
2462 | iocb->ki_pos - 1); | ||
2459 | } | 2463 | } |
2460 | 2464 | ||
2465 | no_sync: | ||
2461 | /* | 2466 | /* |
2462 | * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io | 2467 | * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io |
2463 | * function pointer which is called when o_direct io completes so that | 2468 | * function pointer which is called when o_direct io completes so that |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 8490c64d34fe..460c6c37e683 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -502,7 +502,7 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb) | |||
502 | 502 | ||
503 | static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) | 503 | static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) |
504 | { | 504 | { |
505 | if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) | 505 | if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO) |
506 | return 1; | 506 | return 1; |
507 | return 0; | 507 | return 0; |
508 | } | 508 | } |
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 20e37a3ed26f..db64ce2d4667 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h | |||
@@ -102,11 +102,11 @@ | |||
102 | | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ | 102 | | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ |
103 | | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ | 103 | | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ |
104 | | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ | 104 | | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ |
105 | | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO) | 105 | | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \ |
106 | | OCFS2_FEATURE_INCOMPAT_APPEND_DIO) | ||
106 | #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ | 107 | #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ |
107 | | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ | 108 | | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ |
108 | | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \ | 109 | | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) |
109 | | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) | ||
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Heartbeat-only devices are missing journals and other files. The | 112 | * Heartbeat-only devices are missing journals and other files. The |
@@ -179,6 +179,11 @@ | |||
179 | #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 | 179 | #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 |
180 | 180 | ||
181 | /* | 181 | /* |
182 | * Append Direct IO support | ||
183 | */ | ||
184 | #define OCFS2_FEATURE_INCOMPAT_APPEND_DIO 0x8000 | ||
185 | |||
186 | /* | ||
182 | * backup superblock flag is used to indicate that this volume | 187 | * backup superblock flag is used to indicate that this volume |
183 | * has backup superblocks. | 188 | * has backup superblocks. |
184 | */ | 189 | */ |
@@ -200,10 +205,6 @@ | |||
200 | #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 | 205 | #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 |
201 | #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 | 206 | #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 |
202 | 207 | ||
203 | /* | ||
204 | * Append Direct IO support | ||
205 | */ | ||
206 | #define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO 0x0008 | ||
207 | 208 | ||
208 | /* The byte offset of the first backup block will be 1G. | 209 | /* The byte offset of the first backup block will be 1G. |
209 | * The following will be 4G, 16G, 64G, 256G and 1T. | 210 | * The following will be 4G, 16G, 64G, 256G and 1T. |
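
Moving APPEND_DIO from a ro_compat bit to an incompat bit changes how a kernel that does not understand the feature treats a volume with the bit set. A rough sketch of the conventional feature-bit policy behind the move; only the OCFS2_FEATURE_*_SUPP masks come from the header above, the helper itself is illustrative:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include "ocfs2_fs.h"		/* OCFS2_FEATURE_*_SUPP masks */

	/*
	 * Editorial sketch: an unknown INCOMPAT bit must refuse the mount
	 * outright, while an unknown RO_COMPAT bit only forbids mounting
	 * read-write.
	 */
	static int example_check_feature_bits(u32 incompat, u32 ro_compat, bool rdonly)
	{
		if (incompat & ~OCFS2_FEATURE_INCOMPAT_SUPP)
			return -EINVAL;		/* cannot mount at all */
		if (!rdonly && (ro_compat & ~OCFS2_FEATURE_RO_COMPAT_SUPP))
			return -EROFS;		/* read-only mount only */
		return 0;
	}
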
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index b90952f528b1..5f0d1993e6e3 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
@@ -529,8 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data) | |||
529 | { | 529 | { |
530 | struct ovl_fs *ufs = sb->s_fs_info; | 530 | struct ovl_fs *ufs = sb->s_fs_info; |
531 | 531 | ||
532 | if (!(*flags & MS_RDONLY) && | 532 | if (!(*flags & MS_RDONLY) && !ufs->upper_mnt) |
533 | (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY))) | ||
534 | return -EROFS; | 533 | return -EROFS; |
535 | 534 | ||
536 | return 0; | 535 | return 0; |
@@ -615,9 +614,19 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) | |||
615 | break; | 614 | break; |
616 | 615 | ||
617 | default: | 616 | default: |
617 | pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p); | ||
618 | return -EINVAL; | 618 | return -EINVAL; |
619 | } | 619 | } |
620 | } | 620 | } |
621 | |||
622 | /* Workdir is useless in non-upper mount */ | ||
623 | if (!config->upperdir && config->workdir) { | ||
624 | pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignoring it\n", | ||
625 | config->workdir); | ||
626 | kfree(config->workdir); | ||
627 | config->workdir = NULL; | ||
628 | } | ||
629 | |||
621 | return 0; | 630 | return 0; |
622 | } | 631 | } |
623 | 632 | ||
@@ -837,7 +846,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
837 | 846 | ||
838 | sb->s_stack_depth = 0; | 847 | sb->s_stack_depth = 0; |
839 | if (ufs->config.upperdir) { | 848 | if (ufs->config.upperdir) { |
840 | /* FIXME: workdir is not needed for a R/O mount */ | ||
841 | if (!ufs->config.workdir) { | 849 | if (!ufs->config.workdir) { |
842 | pr_err("overlayfs: missing 'workdir'\n"); | 850 | pr_err("overlayfs: missing 'workdir'\n"); |
843 | goto out_free_config; | 851 | goto out_free_config; |
@@ -847,6 +855,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
847 | if (err) | 855 | if (err) |
848 | goto out_free_config; | 856 | goto out_free_config; |
849 | 857 | ||
858 | /* Upper fs should not be r/o */ | ||
859 | if (upperpath.mnt->mnt_sb->s_flags & MS_RDONLY) { | ||
860 | pr_err("overlayfs: upper fs is r/o, try a multi-lower-layer mount instead\n"); | ||
861 | err = -EINVAL; | ||
862 | goto out_put_upperpath; | ||
863 | } | ||
864 | |||
850 | err = ovl_mount_dir(ufs->config.workdir, &workpath); | 865 | err = ovl_mount_dir(ufs->config.workdir, &workpath); |
851 | if (err) | 866 | if (err) |
852 | goto out_put_upperpath; | 867 | goto out_put_upperpath; |
@@ -869,8 +884,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
869 | 884 | ||
870 | err = -EINVAL; | 885 | err = -EINVAL; |
871 | stacklen = ovl_split_lowerdirs(lowertmp); | 886 | stacklen = ovl_split_lowerdirs(lowertmp); |
872 | if (stacklen > OVL_MAX_STACK) | 887 | if (stacklen > OVL_MAX_STACK) { |
888 | pr_err("overlayfs: too many lower directories, limit is %d\n", | ||
889 | OVL_MAX_STACK); | ||
873 | goto out_free_lowertmp; | 890 | goto out_free_lowertmp; |
891 | } else if (!ufs->config.upperdir && stacklen == 1) { | ||
892 | pr_err("overlayfs: at least 2 lowerdirs are needed when no upperdir is specified\n"); | ||
893 | goto out_free_lowertmp; | ||
894 | } | ||
874 | 895 | ||
875 | stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); | 896 | stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); |
876 | if (!stack) | 897 | if (!stack) |
@@ -932,8 +953,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
932 | ufs->numlower++; | 953 | ufs->numlower++; |
933 | } | 954 | } |
934 | 955 | ||
935 | /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ | 956 | /* If the upper fs is nonexistent, we mark overlayfs r/o too */ |
936 | if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) | 957 | if (!ufs->upper_mnt) |
937 | sb->s_flags |= MS_RDONLY; | 958 | sb->s_flags |= MS_RDONLY; |
938 | 959 | ||
939 | sb->s_d_op = &ovl_dentry_operations; | 960 | sb->s_d_op = &ovl_dentry_operations; |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 956b75d61809..6dee68d013ff 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -1325,6 +1325,9 @@ out: | |||
1325 | 1325 | ||
1326 | static int pagemap_open(struct inode *inode, struct file *file) | 1326 | static int pagemap_open(struct inode *inode, struct file *file) |
1327 | { | 1327 | { |
1328 | /* do not disclose physical addresses: attack vector */ | ||
1329 | if (!capable(CAP_SYS_ADMIN)) | ||
1330 | return -EPERM; | ||
1328 | pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " | 1331 | pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " |
1329 | "to stop being page-shift some time soon. See the " | 1332 | "to stop being page-shift some time soon. See the " |
1330 | "linux/Documentation/vm/pagemap.txt for details.\n"); | 1333 | "linux/Documentation/vm/pagemap.txt for details.\n"); |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ce615d12fb44..a2e1cb8a568b 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -397,7 +397,8 @@ STATIC int /* error (positive) */ | |||
397 | xfs_zero_last_block( | 397 | xfs_zero_last_block( |
398 | struct xfs_inode *ip, | 398 | struct xfs_inode *ip, |
399 | xfs_fsize_t offset, | 399 | xfs_fsize_t offset, |
400 | xfs_fsize_t isize) | 400 | xfs_fsize_t isize, |
401 | bool *did_zeroing) | ||
401 | { | 402 | { |
402 | struct xfs_mount *mp = ip->i_mount; | 403 | struct xfs_mount *mp = ip->i_mount; |
403 | xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize); | 404 | xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize); |
@@ -425,6 +426,7 @@ xfs_zero_last_block( | |||
425 | zero_len = mp->m_sb.sb_blocksize - zero_offset; | 426 | zero_len = mp->m_sb.sb_blocksize - zero_offset; |
426 | if (isize + zero_len > offset) | 427 | if (isize + zero_len > offset) |
427 | zero_len = offset - isize; | 428 | zero_len = offset - isize; |
429 | *did_zeroing = true; | ||
428 | return xfs_iozero(ip, isize, zero_len); | 430 | return xfs_iozero(ip, isize, zero_len); |
429 | } | 431 | } |
430 | 432 | ||
@@ -443,7 +445,8 @@ int /* error (positive) */ | |||
443 | xfs_zero_eof( | 445 | xfs_zero_eof( |
444 | struct xfs_inode *ip, | 446 | struct xfs_inode *ip, |
445 | xfs_off_t offset, /* starting I/O offset */ | 447 | xfs_off_t offset, /* starting I/O offset */ |
446 | xfs_fsize_t isize) /* current inode size */ | 448 | xfs_fsize_t isize, /* current inode size */ |
449 | bool *did_zeroing) | ||
447 | { | 450 | { |
448 | struct xfs_mount *mp = ip->i_mount; | 451 | struct xfs_mount *mp = ip->i_mount; |
449 | xfs_fileoff_t start_zero_fsb; | 452 | xfs_fileoff_t start_zero_fsb; |
@@ -465,7 +468,7 @@ xfs_zero_eof( | |||
465 | * We only zero a part of that block so it is handled specially. | 468 | * We only zero a part of that block so it is handled specially. |
466 | */ | 469 | */ |
467 | if (XFS_B_FSB_OFFSET(mp, isize) != 0) { | 470 | if (XFS_B_FSB_OFFSET(mp, isize) != 0) { |
468 | error = xfs_zero_last_block(ip, offset, isize); | 471 | error = xfs_zero_last_block(ip, offset, isize, did_zeroing); |
469 | if (error) | 472 | if (error) |
470 | return error; | 473 | return error; |
471 | } | 474 | } |
@@ -525,6 +528,7 @@ xfs_zero_eof( | |||
525 | if (error) | 528 | if (error) |
526 | return error; | 529 | return error; |
527 | 530 | ||
531 | *did_zeroing = true; | ||
528 | start_zero_fsb = imap.br_startoff + imap.br_blockcount; | 532 | start_zero_fsb = imap.br_startoff + imap.br_blockcount; |
529 | ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); | 533 | ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); |
530 | } | 534 | } |
@@ -567,13 +571,15 @@ restart: | |||
567 | * having to redo all checks before. | 571 | * having to redo all checks before. |
568 | */ | 572 | */ |
569 | if (*pos > i_size_read(inode)) { | 573 | if (*pos > i_size_read(inode)) { |
574 | bool zero = false; | ||
575 | |||
570 | if (*iolock == XFS_IOLOCK_SHARED) { | 576 | if (*iolock == XFS_IOLOCK_SHARED) { |
571 | xfs_rw_iunlock(ip, *iolock); | 577 | xfs_rw_iunlock(ip, *iolock); |
572 | *iolock = XFS_IOLOCK_EXCL; | 578 | *iolock = XFS_IOLOCK_EXCL; |
573 | xfs_rw_ilock(ip, *iolock); | 579 | xfs_rw_ilock(ip, *iolock); |
574 | goto restart; | 580 | goto restart; |
575 | } | 581 | } |
576 | error = xfs_zero_eof(ip, *pos, i_size_read(inode)); | 582 | error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero); |
577 | if (error) | 583 | if (error) |
578 | return error; | 584 | return error; |
579 | } | 585 | } |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index daafa1f6d260..6163767aa856 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -2867,6 +2867,10 @@ xfs_rename( | |||
2867 | * Handle RENAME_EXCHANGE flags | 2867 | * Handle RENAME_EXCHANGE flags |
2868 | */ | 2868 | */ |
2869 | if (flags & RENAME_EXCHANGE) { | 2869 | if (flags & RENAME_EXCHANGE) { |
2870 | if (target_ip == NULL) { | ||
2871 | error = -EINVAL; | ||
2872 | goto error_return; | ||
2873 | } | ||
2870 | error = xfs_cross_rename(tp, src_dp, src_name, src_ip, | 2874 | error = xfs_cross_rename(tp, src_dp, src_name, src_ip, |
2871 | target_dp, target_name, target_ip, | 2875 | target_dp, target_name, target_ip, |
2872 | &free_list, &first_block, spaceres); | 2876 | &free_list, &first_block, spaceres); |
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 86cd6b39bed7..a1cd55f3f351 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -384,10 +384,11 @@ enum xfs_prealloc_flags { | |||
384 | XFS_PREALLOC_INVISIBLE = (1 << 4), | 384 | XFS_PREALLOC_INVISIBLE = (1 << 4), |
385 | }; | 385 | }; |
386 | 386 | ||
387 | int xfs_update_prealloc_flags(struct xfs_inode *, | 387 | int xfs_update_prealloc_flags(struct xfs_inode *ip, |
388 | enum xfs_prealloc_flags); | 388 | enum xfs_prealloc_flags flags); |
389 | int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t); | 389 | int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset, |
390 | int xfs_iozero(struct xfs_inode *, loff_t, size_t); | 390 | xfs_fsize_t isize, bool *did_zeroing); |
391 | int xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count); | ||
391 | 392 | ||
392 | 393 | ||
393 | #define IHOLD(ip) \ | 394 | #define IHOLD(ip) \ |
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index d919ad7b16bf..e53a90331422 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c | |||
@@ -751,6 +751,7 @@ xfs_setattr_size( | |||
751 | int error; | 751 | int error; |
752 | uint lock_flags = 0; | 752 | uint lock_flags = 0; |
753 | uint commit_flags = 0; | 753 | uint commit_flags = 0; |
754 | bool did_zeroing = false; | ||
754 | 755 | ||
755 | trace_xfs_setattr(ip); | 756 | trace_xfs_setattr(ip); |
756 | 757 | ||
@@ -794,20 +795,16 @@ xfs_setattr_size( | |||
794 | return error; | 795 | return error; |
795 | 796 | ||
796 | /* | 797 | /* |
797 | * Now we can make the changes. Before we join the inode to the | 798 | * File data changes must be complete before we start the transaction to |
798 | * transaction, take care of the part of the truncation that must be | 799 | * modify the inode. This needs to be done before joining the inode to |
799 | * done without the inode lock. This needs to be done before joining | 800 | * the transaction because the inode cannot be unlocked once it is a |
800 | * the inode to the transaction, because the inode cannot be unlocked | 801 | * part of the transaction. |
801 | * once it is a part of the transaction. | 802 | * |
803 | * Start with zeroing any data block beyond EOF that we may expose on | ||
804 | * file extension. | ||
802 | */ | 805 | */ |
803 | if (newsize > oldsize) { | 806 | if (newsize > oldsize) { |
804 | /* | 807 | error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing); |
805 | * Do the first part of growing a file: zero any data in the | ||
806 | * last block that is beyond the old EOF. We need to do this | ||
807 | * before the inode is joined to the transaction to modify | ||
808 | * i_size. | ||
809 | */ | ||
810 | error = xfs_zero_eof(ip, newsize, oldsize); | ||
811 | if (error) | 808 | if (error) |
812 | return error; | 809 | return error; |
813 | } | 810 | } |
@@ -817,23 +814,18 @@ xfs_setattr_size( | |||
817 | * any previous writes that are beyond the on disk EOF and the new | 814 | * any previous writes that are beyond the on disk EOF and the new |
818 | * EOF that have not been written out need to be written here. If we | 815 | * EOF that have not been written out need to be written here. If we |
819 | * do not write the data out, we expose ourselves to the null files | 816 | * do not write the data out, we expose ourselves to the null files |
820 | * problem. | 817 | * problem. Note that this includes any block zeroing we did above; |
821 | * | 818 | * otherwise those blocks may not be zeroed after a crash. |
822 | * Only flush from the on disk size to the smaller of the in memory | ||
823 | * file size or the new size as that's the range we really care about | ||
824 | * here and prevents waiting for other data not within the range we | ||
825 | * care about here. | ||
826 | */ | 819 | */ |
827 | if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { | 820 | if (newsize > ip->i_d.di_size && |
821 | (oldsize != ip->i_d.di_size || did_zeroing)) { | ||
828 | error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, | 822 | error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, |
829 | ip->i_d.di_size, newsize); | 823 | ip->i_d.di_size, newsize); |
830 | if (error) | 824 | if (error) |
831 | return error; | 825 | return error; |
832 | } | 826 | } |
833 | 827 | ||
834 | /* | 828 | /* Now wait for all direct I/O to complete. */ |
835 | * Wait for all direct I/O to complete. | ||
836 | */ | ||
837 | inode_dio_wait(inode); | 829 | inode_dio_wait(inode); |
838 | 830 | ||
839 | /* | 831 | /* |
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index 4b33ef112400..365dd57ea760 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c | |||
@@ -300,8 +300,10 @@ xfs_fs_commit_blocks( | |||
300 | 300 | ||
301 | tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); | 301 | tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); |
302 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); | 302 | error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); |
303 | if (error) | 303 | if (error) { |
304 | xfs_trans_cancel(tp, 0); | ||
304 | goto out_drop_iolock; | 305 | goto out_drop_iolock; |
306 | } | ||
305 | 307 | ||
306 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 308 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
307 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 309 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); |
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 53cc2aaf8d2b..fbbb9e62e274 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
@@ -836,6 +836,11 @@ xfs_qm_reset_dqcounts( | |||
836 | */ | 836 | */ |
837 | xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, | 837 | xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, |
838 | "xfs_quotacheck"); | 838 | "xfs_quotacheck"); |
839 | /* | ||
840 | * Reset type in case we are reusing group quota file for | ||
841 | * project quotas or vice versa | ||
842 | */ | ||
843 | ddq->d_flags = type; | ||
839 | ddq->d_bcount = 0; | 844 | ddq->d_bcount = 0; |
840 | ddq->d_icount = 0; | 845 | ddq->d_icount = 0; |
841 | ddq->d_rtbcount = 0; | 846 | ddq->d_rtbcount = 0; |
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index a24addfdfcec..0de6290df4da 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h | |||
@@ -68,8 +68,8 @@ struct drm_mm_node { | |||
68 | unsigned scanned_preceeds_hole : 1; | 68 | unsigned scanned_preceeds_hole : 1; |
69 | unsigned allocated : 1; | 69 | unsigned allocated : 1; |
70 | unsigned long color; | 70 | unsigned long color; |
71 | unsigned long start; | 71 | u64 start; |
72 | unsigned long size; | 72 | u64 size; |
73 | struct drm_mm *mm; | 73 | struct drm_mm *mm; |
74 | }; | 74 | }; |
75 | 75 | ||
@@ -82,16 +82,16 @@ struct drm_mm { | |||
82 | unsigned int scan_check_range : 1; | 82 | unsigned int scan_check_range : 1; |
83 | unsigned scan_alignment; | 83 | unsigned scan_alignment; |
84 | unsigned long scan_color; | 84 | unsigned long scan_color; |
85 | unsigned long scan_size; | 85 | u64 scan_size; |
86 | unsigned long scan_hit_start; | 86 | u64 scan_hit_start; |
87 | unsigned long scan_hit_end; | 87 | u64 scan_hit_end; |
88 | unsigned scanned_blocks; | 88 | unsigned scanned_blocks; |
89 | unsigned long scan_start; | 89 | u64 scan_start; |
90 | unsigned long scan_end; | 90 | u64 scan_end; |
91 | struct drm_mm_node *prev_scanned_node; | 91 | struct drm_mm_node *prev_scanned_node; |
92 | 92 | ||
93 | void (*color_adjust)(struct drm_mm_node *node, unsigned long color, | 93 | void (*color_adjust)(struct drm_mm_node *node, unsigned long color, |
94 | unsigned long *start, unsigned long *end); | 94 | u64 *start, u64 *end); |
95 | }; | 95 | }; |
96 | 96 | ||
97 | /** | 97 | /** |
@@ -124,7 +124,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm) | |||
124 | return mm->hole_stack.next; | 124 | return mm->hole_stack.next; |
125 | } | 125 | } |
126 | 126 | ||
127 | static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node) | 127 | static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) |
128 | { | 128 | { |
129 | return hole_node->start + hole_node->size; | 129 | return hole_node->start + hole_node->size; |
130 | } | 130 | } |
@@ -140,13 +140,13 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no | |||
140 | * Returns: | 140 | * Returns: |
141 | * Start of the subsequent hole. | 141 | * Start of the subsequent hole. |
142 | */ | 142 | */ |
143 | static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) | 143 | static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) |
144 | { | 144 | { |
145 | BUG_ON(!hole_node->hole_follows); | 145 | BUG_ON(!hole_node->hole_follows); |
146 | return __drm_mm_hole_node_start(hole_node); | 146 | return __drm_mm_hole_node_start(hole_node); |
147 | } | 147 | } |
148 | 148 | ||
149 | static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node) | 149 | static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) |
150 | { | 150 | { |
151 | return list_entry(hole_node->node_list.next, | 151 | return list_entry(hole_node->node_list.next, |
152 | struct drm_mm_node, node_list)->start; | 152 | struct drm_mm_node, node_list)->start; |
@@ -163,7 +163,7 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node | |||
163 | * Returns: | 163 | * Returns: |
164 | * End of the subsequent hole. | 164 | * End of the subsequent hole. |
165 | */ | 165 | */ |
166 | static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) | 166 | static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) |
167 | { | 167 | { |
168 | return __drm_mm_hole_node_end(hole_node); | 168 | return __drm_mm_hole_node_end(hole_node); |
169 | } | 169 | } |
@@ -222,7 +222,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); | |||
222 | 222 | ||
223 | int drm_mm_insert_node_generic(struct drm_mm *mm, | 223 | int drm_mm_insert_node_generic(struct drm_mm *mm, |
224 | struct drm_mm_node *node, | 224 | struct drm_mm_node *node, |
225 | unsigned long size, | 225 | u64 size, |
226 | unsigned alignment, | 226 | unsigned alignment, |
227 | unsigned long color, | 227 | unsigned long color, |
228 | enum drm_mm_search_flags sflags, | 228 | enum drm_mm_search_flags sflags, |
@@ -245,7 +245,7 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, | |||
245 | */ | 245 | */ |
246 | static inline int drm_mm_insert_node(struct drm_mm *mm, | 246 | static inline int drm_mm_insert_node(struct drm_mm *mm, |
247 | struct drm_mm_node *node, | 247 | struct drm_mm_node *node, |
248 | unsigned long size, | 248 | u64 size, |
249 | unsigned alignment, | 249 | unsigned alignment, |
250 | enum drm_mm_search_flags flags) | 250 | enum drm_mm_search_flags flags) |
251 | { | 251 | { |
@@ -255,11 +255,11 @@ static inline int drm_mm_insert_node(struct drm_mm *mm, | |||
255 | 255 | ||
256 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, | 256 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, |
257 | struct drm_mm_node *node, | 257 | struct drm_mm_node *node, |
258 | unsigned long size, | 258 | u64 size, |
259 | unsigned alignment, | 259 | unsigned alignment, |
260 | unsigned long color, | 260 | unsigned long color, |
261 | unsigned long start, | 261 | u64 start, |
262 | unsigned long end, | 262 | u64 end, |
263 | enum drm_mm_search_flags sflags, | 263 | enum drm_mm_search_flags sflags, |
264 | enum drm_mm_allocator_flags aflags); | 264 | enum drm_mm_allocator_flags aflags); |
265 | /** | 265 | /** |
@@ -282,10 +282,10 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, | |||
282 | */ | 282 | */ |
283 | static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, | 283 | static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, |
284 | struct drm_mm_node *node, | 284 | struct drm_mm_node *node, |
285 | unsigned long size, | 285 | u64 size, |
286 | unsigned alignment, | 286 | unsigned alignment, |
287 | unsigned long start, | 287 | u64 start, |
288 | unsigned long end, | 288 | u64 end, |
289 | enum drm_mm_search_flags flags) | 289 | enum drm_mm_search_flags flags) |
290 | { | 290 | { |
291 | return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, | 291 | return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, |
@@ -296,21 +296,21 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, | |||
296 | void drm_mm_remove_node(struct drm_mm_node *node); | 296 | void drm_mm_remove_node(struct drm_mm_node *node); |
297 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); | 297 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); |
298 | void drm_mm_init(struct drm_mm *mm, | 298 | void drm_mm_init(struct drm_mm *mm, |
299 | unsigned long start, | 299 | u64 start, |
300 | unsigned long size); | 300 | u64 size); |
301 | void drm_mm_takedown(struct drm_mm *mm); | 301 | void drm_mm_takedown(struct drm_mm *mm); |
302 | bool drm_mm_clean(struct drm_mm *mm); | 302 | bool drm_mm_clean(struct drm_mm *mm); |
303 | 303 | ||
304 | void drm_mm_init_scan(struct drm_mm *mm, | 304 | void drm_mm_init_scan(struct drm_mm *mm, |
305 | unsigned long size, | 305 | u64 size, |
306 | unsigned alignment, | 306 | unsigned alignment, |
307 | unsigned long color); | 307 | unsigned long color); |
308 | void drm_mm_init_scan_with_range(struct drm_mm *mm, | 308 | void drm_mm_init_scan_with_range(struct drm_mm *mm, |
309 | unsigned long size, | 309 | u64 size, |
310 | unsigned alignment, | 310 | unsigned alignment, |
311 | unsigned long color, | 311 | unsigned long color, |
312 | unsigned long start, | 312 | u64 start, |
313 | unsigned long end); | 313 | u64 end); |
314 | bool drm_mm_scan_add_block(struct drm_mm_node *node); | 314 | bool drm_mm_scan_add_block(struct drm_mm_node *node); |
315 | bool drm_mm_scan_remove_block(struct drm_mm_node *node); | 315 | bool drm_mm_scan_remove_block(struct drm_mm_node *node); |
316 | 316 | ||
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 180ad0e6de21..d016dc57f007 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h | |||
@@ -214,9 +214,9 @@ | |||
214 | INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info) | 214 | INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info) |
215 | 215 | ||
216 | #define _INTEL_BDW_M_IDS(gt, info) \ | 216 | #define _INTEL_BDW_M_IDS(gt, info) \ |
217 | _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \ | 217 | _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \ |
218 | _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \ | 218 | _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \ |
219 | _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \ | 219 | _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \ |
220 | _INTEL_BDW_M(gt, 0x160E, info) /* ULX */ | 220 | _INTEL_BDW_M(gt, 0x160E, info) /* ULX */ |
221 | 221 | ||
222 | #define _INTEL_BDW_D_IDS(gt, info) \ | 222 | #define _INTEL_BDW_D_IDS(gt, info) \ |
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 0ccf7f267ff9..c768ddfbe53c 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
@@ -249,7 +249,7 @@ struct ttm_buffer_object { | |||
249 | * either of these locks held. | 249 | * either of these locks held. |
250 | */ | 250 | */ |
251 | 251 | ||
252 | unsigned long offset; | 252 | uint64_t offset; /* GPU address space is independent of CPU word size */ |
253 | uint32_t cur_placement; | 253 | uint32_t cur_placement; |
254 | 254 | ||
255 | struct sg_table *sg; | 255 | struct sg_table *sg; |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 142d752fc450..813042cede57 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -277,7 +277,7 @@ struct ttm_mem_type_manager { | |||
277 | bool has_type; | 277 | bool has_type; |
278 | bool use_type; | 278 | bool use_type; |
279 | uint32_t flags; | 279 | uint32_t flags; |
280 | unsigned long gpu_offset; | 280 | uint64_t gpu_offset; /* GPU address space is independent of CPU word size */ |
281 | uint64_t size; | 281 | uint64_t size; |
282 | uint32_t available_caching; | 282 | uint32_t available_caching; |
283 | uint32_t default_caching; | 283 | uint32_t default_caching; |
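
The unsigned long to u64/uint64_t conversions in drm_mm and TTM above all address the same hazard: on a 32-bit kernel an unsigned long is only 32 bits, while a GPU address space can extend well past 4 GiB. A tiny sketch of the truncation the wider type avoids; the function name and value are illustrative:

	#include <linux/bug.h>
	#include <linux/types.h>

	/* Editorial sketch: why GPU offsets must not pass through unsigned long. */
	static void gpu_offset_truncation_example(void)
	{
		u64 gpu_offset = 0x100000000ULL;	/* 4 GiB, a valid GPU address */
		unsigned long narrow = gpu_offset;	/* becomes 0 on 32-bit builds */

		WARN_ON(sizeof(narrow) == 4 && narrow != gpu_offset);
	}
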
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h index 2fbc804e1a45..226f77246a70 100644 --- a/include/dt-bindings/pinctrl/am33xx.h +++ b/include/dt-bindings/pinctrl/am33xx.h | |||
@@ -13,7 +13,8 @@ | |||
13 | 13 | ||
14 | #define PULL_DISABLE (1 << 3) | 14 | #define PULL_DISABLE (1 << 3) |
15 | #define INPUT_EN (1 << 5) | 15 | #define INPUT_EN (1 << 5) |
16 | #define SLEWCTRL_FAST (1 << 6) | 16 | #define SLEWCTRL_SLOW (1 << 6) |
17 | #define SLEWCTRL_FAST 0 | ||
17 | 18 | ||
18 | /* update macro depending on INPUT_EN and PULL_ENA */ | 19 | /* update macro depending on INPUT_EN and PULL_ENA */ |
19 | #undef PIN_OUTPUT | 20 | #undef PIN_OUTPUT |
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h index 9c2e4f82381e..5f4d01898c9c 100644 --- a/include/dt-bindings/pinctrl/am43xx.h +++ b/include/dt-bindings/pinctrl/am43xx.h | |||
@@ -18,7 +18,8 @@ | |||
18 | #define PULL_DISABLE (1 << 16) | 18 | #define PULL_DISABLE (1 << 16) |
19 | #define PULL_UP (1 << 17) | 19 | #define PULL_UP (1 << 17) |
20 | #define INPUT_EN (1 << 18) | 20 | #define INPUT_EN (1 << 18) |
21 | #define SLEWCTRL_FAST (1 << 19) | 21 | #define SLEWCTRL_SLOW (1 << 19) |
22 | #define SLEWCTRL_FAST 0 | ||
22 | #define DS0_PULL_UP_DOWN_EN (1 << 27) | 23 | #define DS0_PULL_UP_DOWN_EN (1 << 27) |
23 | 24 | ||
24 | #define PIN_OUTPUT (PULL_DISABLE) | 25 | #define PIN_OUTPUT (PULL_DISABLE) |
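
In both pinctrl headers the slew-control bit actually selects the slow rate, so the old SLEWCTRL_FAST macro, which set the bit, was misleading; fast is now the explicit default value 0. A short illustration of combining the corrected macros in a pad definition; the EXAMPLE_* names are invented, PIN_OUTPUT and the SLEWCTRL macros come from the headers above:

	#include <dt-bindings/pinctrl/am33xx.h>

	/* Illustration only (AM33xx: bit 6, AM43xx: bit 19). */
	#define EXAMPLE_FAST_OUTPUT	(PIN_OUTPUT | SLEWCTRL_FAST)	/* slew bit left clear */
	#define EXAMPLE_SLOW_OUTPUT	(PIN_OUTPUT | SLEWCTRL_SLOW)	/* slew bit set        */
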
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 7c55dd5dd2c9..66203b268984 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -114,6 +114,7 @@ struct vgic_ops { | |||
114 | void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); | 114 | void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); |
115 | u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); | 115 | u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); |
116 | u64 (*get_eisr)(const struct kvm_vcpu *vcpu); | 116 | u64 (*get_eisr)(const struct kvm_vcpu *vcpu); |
117 | void (*clear_eisr)(struct kvm_vcpu *vcpu); | ||
117 | u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); | 118 | u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); |
118 | void (*enable_underflow)(struct kvm_vcpu *vcpu); | 119 | void (*enable_underflow)(struct kvm_vcpu *vcpu); |
119 | void (*disable_underflow)(struct kvm_vcpu *vcpu); | 120 | void (*disable_underflow)(struct kvm_vcpu *vcpu); |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index c294e3e25e37..a1b25e35ea5f 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -181,7 +181,9 @@ enum rq_flag_bits { | |||
181 | __REQ_ELVPRIV, /* elevator private data attached */ | 181 | __REQ_ELVPRIV, /* elevator private data attached */ |
182 | __REQ_FAILED, /* set if the request failed */ | 182 | __REQ_FAILED, /* set if the request failed */ |
183 | __REQ_QUIET, /* don't worry about errors */ | 183 | __REQ_QUIET, /* don't worry about errors */ |
184 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | 184 | __REQ_PREEMPT, /* set for "ide_preempt" requests and also |
185 | for requests for which the SCSI "quiesce" | ||
186 | state must be ignored. */ | ||
185 | __REQ_ALLOCED, /* request came from our alloc pool */ | 187 | __REQ_ALLOCED, /* request came from our alloc pool */ |
186 | __REQ_COPY_USER, /* contains copies of user pages */ | 188 | __REQ_COPY_USER, /* contains copies of user pages */ |
187 | __REQ_FLUSH_SEQ, /* request for flush sequence */ | 189 | __REQ_FLUSH_SEQ, /* request for flush sequence */ |
diff --git a/include/linux/clk.h b/include/linux/clk.h index 8381bbfbc308..68c16a6bedb3 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h | |||
@@ -125,6 +125,19 @@ int clk_set_phase(struct clk *clk, int degrees); | |||
125 | */ | 125 | */ |
126 | int clk_get_phase(struct clk *clk); | 126 | int clk_get_phase(struct clk *clk); |
127 | 127 | ||
128 | /** | ||
129 | * clk_is_match - check if two clk's point to the same hardware clock | ||
130 | * @p: clk compared against q | ||
131 | * @q: clk compared against p | ||
132 | * | ||
133 | * Returns true if the two struct clk pointers both point to the same hardware | ||
134 | * clock node. Put differently, returns true if struct clk *p and struct clk *q | ||
135 | * share the same struct clk_core object. | ||
136 | * | ||
137 | * Returns false otherwise. Note that two NULL clks are treated as matching. | ||
138 | */ | ||
139 | bool clk_is_match(const struct clk *p, const struct clk *q); | ||
140 | |||
128 | #else | 141 | #else |
129 | 142 | ||
130 | static inline long clk_get_accuracy(struct clk *clk) | 143 | static inline long clk_get_accuracy(struct clk *clk) |
@@ -142,6 +155,11 @@ static inline long clk_get_phase(struct clk *clk) | |||
142 | return -ENOTSUPP; | 155 | return -ENOTSUPP; |
143 | } | 156 | } |
144 | 157 | ||
158 | static inline bool clk_is_match(const struct clk *p, const struct clk *q) | ||
159 | { | ||
160 | return p == q; | ||
161 | } | ||
162 | |||
145 | #endif | 163 | #endif |
146 | 164 | ||
147 | /** | 165 | /** |
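
The kernel-doc above notes that two struct clk pointers can wrap the same struct clk_core; with per-user clk handles, repeated clk_get() calls may return distinct pointers for the same hardware clock, so a plain pointer comparison is not a reliable identity test. A minimal usage sketch; the device, the "mclk" connection id and the helper name are illustrative:

	#include <linux/clk.h>
	#include <linux/err.h>

	/* Editorial sketch: compare handles by underlying hardware clock, not by pointer. */
	static bool shares_clock(struct device *dev, struct clk *ref)
	{
		struct clk *c = clk_get(dev, "mclk");
		bool same;

		if (IS_ERR(c))
			return false;
		same = clk_is_match(c, ref);	/* true when c and ref share one clk_core */
		clk_put(c);
		return same;
	}
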
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index f551a9299ac9..9c5e89254796 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -77,7 +77,6 @@ struct cpuidle_device { | |||
77 | unsigned int cpu; | 77 | unsigned int cpu; |
78 | 78 | ||
79 | int last_residency; | 79 | int last_residency; |
80 | int state_count; | ||
81 | struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; | 80 | struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; |
82 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; | 81 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; |
83 | struct cpuidle_driver_kobj *kobj_driver; | 82 | struct cpuidle_driver_kobj *kobj_driver; |
@@ -126,6 +125,8 @@ struct cpuidle_driver { | |||
126 | 125 | ||
127 | #ifdef CONFIG_CPU_IDLE | 126 | #ifdef CONFIG_CPU_IDLE |
128 | extern void disable_cpuidle(void); | 127 | extern void disable_cpuidle(void); |
128 | extern bool cpuidle_not_available(struct cpuidle_driver *drv, | ||
129 | struct cpuidle_device *dev); | ||
129 | 130 | ||
130 | extern int cpuidle_select(struct cpuidle_driver *drv, | 131 | extern int cpuidle_select(struct cpuidle_driver *drv, |
131 | struct cpuidle_device *dev); | 132 | struct cpuidle_device *dev); |
@@ -150,11 +151,17 @@ extern void cpuidle_resume(void); | |||
150 | extern int cpuidle_enable_device(struct cpuidle_device *dev); | 151 | extern int cpuidle_enable_device(struct cpuidle_device *dev); |
151 | extern void cpuidle_disable_device(struct cpuidle_device *dev); | 152 | extern void cpuidle_disable_device(struct cpuidle_device *dev); |
152 | extern int cpuidle_play_dead(void); | 153 | extern int cpuidle_play_dead(void); |
153 | extern void cpuidle_enter_freeze(void); | 154 | extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, |
155 | struct cpuidle_device *dev); | ||
156 | extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, | ||
157 | struct cpuidle_device *dev); | ||
154 | 158 | ||
155 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); | 159 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); |
156 | #else | 160 | #else |
157 | static inline void disable_cpuidle(void) { } | 161 | static inline void disable_cpuidle(void) { } |
162 | static inline bool cpuidle_not_available(struct cpuidle_driver *drv, | ||
163 | struct cpuidle_device *dev) | ||
164 | {return true; } | ||
158 | static inline int cpuidle_select(struct cpuidle_driver *drv, | 165 | static inline int cpuidle_select(struct cpuidle_driver *drv, |
159 | struct cpuidle_device *dev) | 166 | struct cpuidle_device *dev) |
160 | {return -ENODEV; } | 167 | {return -ENODEV; } |
@@ -183,7 +190,12 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev) | |||
183 | {return -ENODEV; } | 190 | {return -ENODEV; } |
184 | static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } | 191 | static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } |
185 | static inline int cpuidle_play_dead(void) {return -ENODEV; } | 192 | static inline int cpuidle_play_dead(void) {return -ENODEV; } |
186 | static inline void cpuidle_enter_freeze(void) { } | 193 | static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, |
194 | struct cpuidle_device *dev) | ||
195 | {return -ENODEV; } | ||
196 | static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, | ||
197 | struct cpuidle_device *dev) | ||
198 | {return -ENODEV; } | ||
187 | static inline struct cpuidle_driver *cpuidle_get_cpu_driver( | 199 | static inline struct cpuidle_driver *cpuidle_get_cpu_driver( |
188 | struct cpuidle_device *dev) {return NULL; } | 200 | struct cpuidle_device *dev) {return NULL; } |
189 | #endif | 201 | #endif |
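cpuidle_enter_freeze() now takes the driver/device pair and returns the entered state index or a negative error, cpuidle_not_available() lets callers bail out before trying, and cpuidle_find_deepest_state() is split out so the deepest usable state index can be queried on its own. A hedged sketch of how a suspend-to-idle path might use the pair (the real caller is the idle loop in kernel/sched/idle.c; this is simplified):

    static void example_s2idle_enter(struct cpuidle_driver *drv,
                                     struct cpuidle_device *dev)
    {
            int entered;

            if (cpuidle_not_available(drv, dev))
                    return;                 /* fall back to the default arch idle */

            entered = cpuidle_enter_freeze(drv, dev);
            if (entered < 0)
                    pr_debug("suspend-to-idle: no usable state (%d)\n", entered);
    }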
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 2646aed1d3fe..fd23978d93fe 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -375,6 +375,7 @@ int dm_create(int minor, struct mapped_device **md); | |||
375 | */ | 375 | */ |
376 | struct mapped_device *dm_get_md(dev_t dev); | 376 | struct mapped_device *dm_get_md(dev_t dev); |
377 | void dm_get(struct mapped_device *md); | 377 | void dm_get(struct mapped_device *md); |
378 | int dm_hold(struct mapped_device *md); | ||
378 | void dm_put(struct mapped_device *md); | 379 | void dm_put(struct mapped_device *md); |
379 | 380 | ||
380 | /* | 381 | /* |
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index 022e34fcbd1b..52456aa566a0 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h | |||
@@ -14,6 +14,8 @@ | |||
14 | #include <asm/io.h> | 14 | #include <asm/io.h> |
15 | #include <asm/scatterlist.h> | 15 | #include <asm/scatterlist.h> |
16 | 16 | ||
17 | struct device; | ||
18 | |||
17 | struct dma_pool *dma_pool_create(const char *name, struct device *dev, | 19 | struct dma_pool *dma_pool_create(const char *name, struct device *dev, |
18 | size_t size, size_t align, size_t allocation); | 20 | size_t size, size_t align, size_t allocation); |
19 | 21 | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index b4d71b5e1ff2..52cc4492cb3a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -604,6 +604,7 @@ struct inode { | |||
604 | struct mutex i_mutex; | 604 | struct mutex i_mutex; |
605 | 605 | ||
606 | unsigned long dirtied_when; /* jiffies of first dirtying */ | 606 | unsigned long dirtied_when; /* jiffies of first dirtying */ |
607 | unsigned long dirtied_time_when; | ||
607 | 608 | ||
608 | struct hlist_node i_hash; | 609 | struct hlist_node i_hash; |
609 | struct list_head i_wb_list; /* backing dev IO list */ | 610 | struct list_head i_wb_list; /* backing dev IO list */ |
@@ -1548,7 +1549,7 @@ struct file_operations { | |||
1548 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); | 1549 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); |
1549 | long (*compat_ioctl) (struct file *, unsigned int, unsigned long); | 1550 | long (*compat_ioctl) (struct file *, unsigned int, unsigned long); |
1550 | int (*mmap) (struct file *, struct vm_area_struct *); | 1551 | int (*mmap) (struct file *, struct vm_area_struct *); |
1551 | void (*mremap)(struct file *, struct vm_area_struct *); | 1552 | int (*mremap)(struct file *, struct vm_area_struct *); |
1552 | int (*open) (struct inode *, struct file *); | 1553 | int (*open) (struct inode *, struct file *); |
1553 | int (*flush) (struct file *, fl_owner_t id); | 1554 | int (*flush) (struct file *, fl_owner_t id); |
1554 | int (*release) (struct inode *, struct file *); | 1555 | int (*release) (struct inode *, struct file *); |
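Changing the ->mremap() hook from void to int lets a driver veto relocation of one of its mappings instead of merely being notified. A hedged sketch of a driver that simply refuses mremap() (names are illustrative, and example_mmap is assumed to be defined elsewhere):

    static int example_mremap(struct file *file, struct vm_area_struct *vma)
    {
            /* The original user-space address is cached on the driver side,
             * so letting the mapping move would leave a stale pointer. */
            return -EINVAL;
    }

    static const struct file_operations example_fops = {
            .owner  = THIS_MODULE,
            .mmap   = example_mmap,         /* assumed, not shown here */
            .mremap = example_mremap,
    };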
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 51f7ccadf923..4173a8fdad9e 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h | |||
@@ -33,6 +33,8 @@ | |||
33 | * @units: Measurment unit for this attribute. | 33 | * @units: Measurment unit for this attribute. |
34 | * @unit_expo: Exponent used in the data. | 34 | * @unit_expo: Exponent used in the data. |
35 | * @size: Size in bytes for data size. | 35 | * @size: Size in bytes for data size. |
36 | * @logical_minimum: Logical minimum value for this attribute. | ||
37 | * @logical_maximum: Logical maximum value for this attribute. | ||
36 | */ | 38 | */ |
37 | struct hid_sensor_hub_attribute_info { | 39 | struct hid_sensor_hub_attribute_info { |
38 | u32 usage_id; | 40 | u32 usage_id; |
@@ -146,6 +148,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, | |||
146 | 148 | ||
147 | /** | 149 | /** |
148 | * sensor_hub_input_attr_get_raw_value() - Synchronous read request | 150 | * sensor_hub_input_attr_get_raw_value() - Synchronous read request |
151 | * @hsdev: Hub device instance. | ||
149 | * @usage_id: Attribute usage id of parent physical device as per spec | 152 | * @usage_id: Attribute usage id of parent physical device as per spec |
150 | * @attr_usage_id: Attribute usage id as per spec | 153 | * @attr_usage_id: Attribute usage id as per spec |
151 | * @report_id: Report id to look for | 154 | * @report_id: Report id to look for |
@@ -160,6 +163,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, | |||
160 | u32 attr_usage_id, u32 report_id); | 163 | u32 attr_usage_id, u32 report_id); |
161 | /** | 164 | /** |
162 | * sensor_hub_set_feature() - Feature set request | 165 | * sensor_hub_set_feature() - Feature set request |
166 | * @hsdev: Hub device instance. | ||
163 | * @report_id: Report id to look for | 167 | * @report_id: Report id to look for |
164 | * @field_index: Field index inside a report | 168 | * @field_index: Field index inside a report |
165 | * @value: Value to set | 169 | * @value: Value to set |
@@ -172,6 +176,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, | |||
172 | 176 | ||
173 | /** | 177 | /** |
174 | * sensor_hub_get_feature() - Feature get request | 178 | * sensor_hub_get_feature() - Feature get request |
179 | * @hsdev: Hub device instance. | ||
175 | * @report_id: Report id to look for | 180 | * @report_id: Report id to look for |
176 | * @field_index: Field index inside a report | 181 | * @field_index: Field index inside a report |
177 | * @value: Place holder for return value | 182 | * @value: Place holder for return value |
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h deleted file mode 100644 index 10496bd24c5c..000000000000 --- a/include/linux/intel_mid_dma.h +++ /dev/null | |||
@@ -1,76 +0,0 @@ | |||
1 | /* | ||
2 | * intel_mid_dma.h - Intel MID DMA Drivers | ||
3 | * | ||
4 | * Copyright (C) 2008-10 Intel Corp | ||
5 | * Author: Vinod Koul <vinod.koul@intel.com> | ||
6 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; version 2 of the License. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
20 | * | ||
21 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
22 | * | ||
23 | * | ||
24 | */ | ||
25 | #ifndef __INTEL_MID_DMA_H__ | ||
26 | #define __INTEL_MID_DMA_H__ | ||
27 | |||
28 | #include <linux/dmaengine.h> | ||
29 | |||
30 | #define DMA_PREP_CIRCULAR_LIST (1 << 10) | ||
31 | |||
32 | /*DMA mode configurations*/ | ||
33 | enum intel_mid_dma_mode { | ||
34 | LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/ | ||
35 | LNW_DMA_MEM_TO_PER, /*memory to periphral configuration*/ | ||
36 | LNW_DMA_MEM_TO_MEM, /*mem to mem confg (testing only)*/ | ||
37 | }; | ||
38 | |||
39 | /*DMA handshaking*/ | ||
40 | enum intel_mid_dma_hs_mode { | ||
41 | LNW_DMA_HW_HS = 0, /*HW Handshaking only*/ | ||
42 | LNW_DMA_SW_HS = 1, /*SW Handshaking not recommended*/ | ||
43 | }; | ||
44 | |||
45 | /*Burst size configuration*/ | ||
46 | enum intel_mid_dma_msize { | ||
47 | LNW_DMA_MSIZE_1 = 0x0, | ||
48 | LNW_DMA_MSIZE_4 = 0x1, | ||
49 | LNW_DMA_MSIZE_8 = 0x2, | ||
50 | LNW_DMA_MSIZE_16 = 0x3, | ||
51 | LNW_DMA_MSIZE_32 = 0x4, | ||
52 | LNW_DMA_MSIZE_64 = 0x5, | ||
53 | }; | ||
54 | |||
55 | /** | ||
56 | * struct intel_mid_dma_slave - DMA slave structure | ||
57 | * | ||
58 | * @dirn: DMA trf direction | ||
59 | * @src_width: tx register width | ||
60 | * @dst_width: rx register width | ||
61 | * @hs_mode: HW/SW handshaking mode | ||
62 | * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem) | ||
63 | * @src_msize: Source DMA burst size | ||
64 | * @dst_msize: Dst DMA burst size | ||
65 | * @per_addr: Periphral address | ||
66 | * @device_instance: DMA peripheral device instance, we can have multiple | ||
67 | * peripheral device connected to single DMAC | ||
68 | */ | ||
69 | struct intel_mid_dma_slave { | ||
70 | enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/ | ||
71 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ | ||
72 | unsigned int device_instance; /*0, 1 for periphral instance*/ | ||
73 | struct dma_slave_config dma_slave; | ||
74 | }; | ||
75 | |||
76 | #endif /*__INTEL_MID_DMA_H__*/ | ||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index d9b05b5bf8c7..2e88580194f0 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -52,11 +52,17 @@ | |||
52 | * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. | 52 | * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. |
53 | * Used by threaded interrupts which need to keep the | 53 | * Used by threaded interrupts which need to keep the |
54 | * irq line disabled until the threaded handler has been run. | 54 | * irq line disabled until the threaded handler has been run. |
55 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend | 55 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee |
56 | * that this interrupt will wake the system from a suspended | ||
57 | * state. See Documentation/power/suspend-and-interrupts.txt | ||
56 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set | 58 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set |
57 | * IRQF_NO_THREAD - Interrupt cannot be threaded | 59 | * IRQF_NO_THREAD - Interrupt cannot be threaded |
58 | * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device | 60 | * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device |
59 | * resume time. | 61 | * resume time. |
62 | * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this | ||
63 | * interrupt handler after suspending interrupts. For system | ||
64 | * wakeup devices users need to implement wakeup detection in | ||
65 | * their interrupt handlers. | ||
60 | */ | 66 | */ |
61 | #define IRQF_DISABLED 0x00000020 | 67 | #define IRQF_DISABLED 0x00000020 |
62 | #define IRQF_SHARED 0x00000080 | 68 | #define IRQF_SHARED 0x00000080 |
@@ -70,6 +76,7 @@ | |||
70 | #define IRQF_FORCE_RESUME 0x00008000 | 76 | #define IRQF_FORCE_RESUME 0x00008000 |
71 | #define IRQF_NO_THREAD 0x00010000 | 77 | #define IRQF_NO_THREAD 0x00010000 |
72 | #define IRQF_EARLY_RESUME 0x00020000 | 78 | #define IRQF_EARLY_RESUME 0x00020000 |
79 | #define IRQF_COND_SUSPEND 0x00040000 | ||
73 | 80 | ||
74 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) | 81 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) |
75 | 82 | ||
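IRQF_COND_SUSPEND covers the case where a driver shares a line with an IRQF_NO_SUSPEND user: since the line stays enabled across suspend for the other user's sake, this flag declares that the handler can safely run while the system is suspending. A hedged request sketch (device name and handler body are hypothetical):

    static irqreturn_t example_irq_handler(int irq, void *data)
    {
            /* May run after suspend_device_irqs(); must tolerate a suspended
             * device, e.g. by returning IRQ_NONE when nothing is pending. */
            return IRQ_NONE;
    }

    static int example_request(struct device *dev, int irq)
    {
            return devm_request_irq(dev, irq, example_irq_handler,
                                    IRQF_SHARED | IRQF_COND_SUSPEND,
                                    "example-shared", dev);
    }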
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 800544bc7bfd..ffbc034c8810 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -126,8 +126,23 @@ | |||
126 | #define GICR_PROPBASER_WaWb (5U << 7) | 126 | #define GICR_PROPBASER_WaWb (5U << 7) |
127 | #define GICR_PROPBASER_RaWaWt (6U << 7) | 127 | #define GICR_PROPBASER_RaWaWt (6U << 7) |
128 | #define GICR_PROPBASER_RaWaWb (7U << 7) | 128 | #define GICR_PROPBASER_RaWaWb (7U << 7) |
129 | #define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7) | ||
129 | #define GICR_PROPBASER_IDBITS_MASK (0x1f) | 130 | #define GICR_PROPBASER_IDBITS_MASK (0x1f) |
130 | 131 | ||
132 | #define GICR_PENDBASER_NonShareable (0U << 10) | ||
133 | #define GICR_PENDBASER_InnerShareable (1U << 10) | ||
134 | #define GICR_PENDBASER_OuterShareable (2U << 10) | ||
135 | #define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10) | ||
136 | #define GICR_PENDBASER_nCnB (0U << 7) | ||
137 | #define GICR_PENDBASER_nC (1U << 7) | ||
138 | #define GICR_PENDBASER_RaWt (2U << 7) | ||
139 | #define GICR_PENDBASER_RaWb (3U << 7) | ||
140 | #define GICR_PENDBASER_WaWt (4U << 7) | ||
141 | #define GICR_PENDBASER_WaWb (5U << 7) | ||
142 | #define GICR_PENDBASER_RaWaWt (6U << 7) | ||
143 | #define GICR_PENDBASER_RaWaWb (7U << 7) | ||
144 | #define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7) | ||
145 | |||
131 | /* | 146 | /* |
132 | * Re-Distributor registers, offsets from SGI_base | 147 | * Re-Distributor registers, offsets from SGI_base |
133 | */ | 148 | */ |
@@ -166,6 +181,11 @@ | |||
166 | 181 | ||
167 | #define GITS_TRANSLATER 0x10040 | 182 | #define GITS_TRANSLATER 0x10040 |
168 | 183 | ||
184 | #define GITS_CTLR_ENABLE (1U << 0) | ||
185 | #define GITS_CTLR_QUIESCENT (1U << 31) | ||
186 | |||
187 | #define GITS_TYPER_DEVBITS_SHIFT 13 | ||
188 | #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) | ||
169 | #define GITS_TYPER_PTA (1UL << 19) | 189 | #define GITS_TYPER_PTA (1UL << 19) |
170 | 190 | ||
171 | #define GITS_CBASER_VALID (1UL << 63) | 191 | #define GITS_CBASER_VALID (1UL << 63) |
@@ -177,6 +197,7 @@ | |||
177 | #define GITS_CBASER_WaWb (5UL << 59) | 197 | #define GITS_CBASER_WaWb (5UL << 59) |
178 | #define GITS_CBASER_RaWaWt (6UL << 59) | 198 | #define GITS_CBASER_RaWaWt (6UL << 59) |
179 | #define GITS_CBASER_RaWaWb (7UL << 59) | 199 | #define GITS_CBASER_RaWaWb (7UL << 59) |
200 | #define GITS_CBASER_CACHEABILITY_MASK (7UL << 59) | ||
180 | #define GITS_CBASER_NonShareable (0UL << 10) | 201 | #define GITS_CBASER_NonShareable (0UL << 10) |
181 | #define GITS_CBASER_InnerShareable (1UL << 10) | 202 | #define GITS_CBASER_InnerShareable (1UL << 10) |
182 | #define GITS_CBASER_OuterShareable (2UL << 10) | 203 | #define GITS_CBASER_OuterShareable (2UL << 10) |
@@ -193,6 +214,7 @@ | |||
193 | #define GITS_BASER_WaWb (5UL << 59) | 214 | #define GITS_BASER_WaWb (5UL << 59) |
194 | #define GITS_BASER_RaWaWt (6UL << 59) | 215 | #define GITS_BASER_RaWaWt (6UL << 59) |
195 | #define GITS_BASER_RaWaWb (7UL << 59) | 216 | #define GITS_BASER_RaWaWb (7UL << 59) |
217 | #define GITS_BASER_CACHEABILITY_MASK (7UL << 59) | ||
196 | #define GITS_BASER_TYPE_SHIFT (56) | 218 | #define GITS_BASER_TYPE_SHIFT (56) |
197 | #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) | 219 | #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) |
198 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) | 220 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) |
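The added GICR_PENDBASER_* attributes and the *_CACHEABILITY_MASK definitions let redistributor/ITS setup code program and then verify the cacheability field without open-coding the shifts. A hedged programming sketch (base pointer and physical address are assumed to come from earlier setup):

    static void example_set_pendbaser(void __iomem *rdist_base, phys_addr_t pend_pa)
    {
            u64 val = pend_pa | GICR_PENDBASER_InnerShareable | GICR_PENDBASER_RaWaWb;
            u64 back;

            writeq_relaxed(val, rdist_base + GICR_PENDBASER);
            back = readq_relaxed(rdist_base + GICR_PENDBASER);

            /* Some redistributors silently downgrade the requested attributes;
             * the new mask makes the read-back comparison straightforward. */
            if ((back ^ val) & GICR_PENDBASER_CACHEABILITY_MASK)
                    pr_info("GICR_PENDBASER cacheability downgraded by hardware\n");
    }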
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index faf433af425e..dd1109fb241e 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
@@ -78,6 +78,7 @@ struct irq_desc { | |||
78 | #ifdef CONFIG_PM_SLEEP | 78 | #ifdef CONFIG_PM_SLEEP |
79 | unsigned int nr_actions; | 79 | unsigned int nr_actions; |
80 | unsigned int no_suspend_depth; | 80 | unsigned int no_suspend_depth; |
81 | unsigned int cond_suspend_depth; | ||
81 | unsigned int force_resume_depth; | 82 | unsigned int force_resume_depth; |
82 | #endif | 83 | #endif |
83 | #ifdef CONFIG_PROC_FS | 84 | #ifdef CONFIG_PROC_FS |
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 72ba725ddf9c..5bb074431eb0 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | struct kmem_cache; | 6 | struct kmem_cache; |
7 | struct page; | 7 | struct page; |
8 | struct vm_struct; | ||
8 | 9 | ||
9 | #ifdef CONFIG_KASAN | 10 | #ifdef CONFIG_KASAN |
10 | 11 | ||
@@ -49,15 +50,11 @@ void kasan_krealloc(const void *object, size_t new_size); | |||
49 | void kasan_slab_alloc(struct kmem_cache *s, void *object); | 50 | void kasan_slab_alloc(struct kmem_cache *s, void *object); |
50 | void kasan_slab_free(struct kmem_cache *s, void *object); | 51 | void kasan_slab_free(struct kmem_cache *s, void *object); |
51 | 52 | ||
52 | #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) | ||
53 | |||
54 | int kasan_module_alloc(void *addr, size_t size); | 53 | int kasan_module_alloc(void *addr, size_t size); |
55 | void kasan_module_free(void *addr); | 54 | void kasan_free_shadow(const struct vm_struct *vm); |
56 | 55 | ||
57 | #else /* CONFIG_KASAN */ | 56 | #else /* CONFIG_KASAN */ |
58 | 57 | ||
59 | #define MODULE_ALIGN 1 | ||
60 | |||
61 | static inline void kasan_unpoison_shadow(const void *address, size_t size) {} | 58 | static inline void kasan_unpoison_shadow(const void *address, size_t size) {} |
62 | 59 | ||
63 | static inline void kasan_enable_current(void) {} | 60 | static inline void kasan_enable_current(void) {} |
@@ -82,7 +79,7 @@ static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {} | |||
82 | static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} | 79 | static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} |
83 | 80 | ||
84 | static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } | 81 | static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } |
85 | static inline void kasan_module_free(void *addr) {} | 82 | static inline void kasan_free_shadow(const struct vm_struct *vm) {} |
86 | 83 | ||
87 | #endif /* CONFIG_KASAN */ | 84 | #endif /* CONFIG_KASAN */ |
88 | 85 | ||
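kasan_module_free() is replaced by kasan_free_shadow(), which takes the vm_struct so shadow teardown can move into the generic vmalloc free path (areas owning shadow get tagged; see the VM_KASAN flag added to vmalloc.h later in this diff). A heavily hedged sketch of the allocation side, roughly the shape of an arch module_alloc():

    static void *example_module_alloc(unsigned long size)
    {
            void *p = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);

            if (p && kasan_module_alloc(p, size) < 0) {
                    vfree(p);
                    return NULL;
            }
            /* A later vfree(p) is expected to release the shadow as well,
             * via kasan_free_shadow() on the VM_KASAN-tagged area. */
            return p;
    }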
diff --git a/include/linux/lcm.h b/include/linux/lcm.h index 7bf01d779b45..1ce79a7f1daa 100644 --- a/include/linux/lcm.h +++ b/include/linux/lcm.h | |||
@@ -4,5 +4,6 @@ | |||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | 5 | ||
6 | unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; | 6 | unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; |
7 | unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__; | ||
7 | 8 | ||
8 | #endif /* _LCM_H */ | 9 | #endif /* _LCM_H */ |
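lcm_not_zero() behaves like lcm() except that a zero argument is treated as "no constraint", so the result falls back to the other value instead of collapsing to zero. That makes it convenient for merging optional granularities. A tiny hedged sketch:

    #include <linux/lcm.h>

    /* 0 means "no preference"; the combined granularity must satisfy both. */
    static unsigned long example_combine_granularity(unsigned long a, unsigned long b)
    {
            return lcm_not_zero(a, b);
    }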
diff --git a/include/linux/libata.h b/include/linux/libata.h index fc03efa64ffe..6b08cc106c21 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -232,6 +232,7 @@ enum { | |||
232 | * led */ | 232 | * led */ |
233 | ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ | 233 | ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ |
234 | ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ | 234 | ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ |
235 | ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */ | ||
235 | 236 | ||
236 | /* bits 24:31 of ap->flags are reserved for LLD specific flags */ | 237 | /* bits 24:31 of ap->flags are reserved for LLD specific flags */ |
237 | 238 | ||
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index fb0390a1a498..ee7b1ce7a6f8 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h | |||
@@ -2999,6 +2999,9 @@ enum usb_irq_events { | |||
2999 | #define PALMAS_GPADC_TRIM15 0x0E | 2999 | #define PALMAS_GPADC_TRIM15 0x0E |
3000 | #define PALMAS_GPADC_TRIM16 0x0F | 3000 | #define PALMAS_GPADC_TRIM16 0x0F |
3001 | 3001 | ||
3002 | /* TPS659038 regen2_ctrl offset is different from palmas */ | ||
3003 | #define TPS659038_REGEN2_CTRL 0x12 | ||
3004 | |||
3002 | /* TPS65917 Interrupt registers */ | 3005 | /* TPS65917 Interrupt registers */ |
3003 | 3006 | ||
3004 | /* Registers for function INTERRUPT */ | 3007 | /* Registers for function INTERRUPT */ |
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 2bbc62aa818a..551f85456c11 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
@@ -427,7 +427,7 @@ struct mlx4_wqe_inline_seg { | |||
427 | 427 | ||
428 | enum mlx4_update_qp_attr { | 428 | enum mlx4_update_qp_attr { |
429 | MLX4_UPDATE_QP_SMAC = 1 << 0, | 429 | MLX4_UPDATE_QP_SMAC = 1 << 0, |
430 | MLX4_UPDATE_QP_VSD = 1 << 2, | 430 | MLX4_UPDATE_QP_VSD = 1 << 1, |
431 | MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 | 431 | MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 |
432 | }; | 432 | }; |
433 | 433 | ||
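The VSD flag moves from 1 << 2 to 1 << 1 so that it actually lies inside MLX4_UPDATE_QP_SUPPORTED_ATTRS, which is (1 << 2) - 1 == 0x3; with the old value a VSD-only update would have looked unsupported. A hedged sketch of the kind of mask check this implies (not the driver's exact code):

    static int example_check_update(enum mlx4_update_qp_attr attr)
    {
            /* SMAC (bit 0) and VSD (bit 1) both fit inside the 0x3 mask now;
             * anything outside it is rejected. */
            if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
                    return -EINVAL;

            return 0;
    }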
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f279d9c158cd..2782df47101e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -474,16 +474,15 @@ struct zone { | |||
474 | unsigned long wait_table_bits; | 474 | unsigned long wait_table_bits; |
475 | 475 | ||
476 | ZONE_PADDING(_pad1_) | 476 | ZONE_PADDING(_pad1_) |
477 | |||
478 | /* Write-intensive fields used from the page allocator */ | ||
479 | spinlock_t lock; | ||
480 | |||
481 | /* free areas of different sizes */ | 477 | /* free areas of different sizes */ |
482 | struct free_area free_area[MAX_ORDER]; | 478 | struct free_area free_area[MAX_ORDER]; |
483 | 479 | ||
484 | /* zone flags, see below */ | 480 | /* zone flags, see below */ |
485 | unsigned long flags; | 481 | unsigned long flags; |
486 | 482 | ||
483 | /* Write-intensive fields used from the page allocator */ | ||
484 | spinlock_t lock; | ||
485 | |||
487 | ZONE_PADDING(_pad2_) | 486 | ZONE_PADDING(_pad2_) |
488 | 487 | ||
489 | /* Write-intensive fields used by page reclaim */ | 488 | /* Write-intensive fields used by page reclaim */ |
diff --git a/include/linux/module.h b/include/linux/module.h index 42999fe2dbd0..b03485bcb82a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -344,6 +344,10 @@ struct module { | |||
344 | unsigned long *ftrace_callsites; | 344 | unsigned long *ftrace_callsites; |
345 | #endif | 345 | #endif |
346 | 346 | ||
347 | #ifdef CONFIG_LIVEPATCH | ||
348 | bool klp_alive; | ||
349 | #endif | ||
350 | |||
347 | #ifdef CONFIG_MODULE_UNLOAD | 351 | #ifdef CONFIG_MODULE_UNLOAD |
348 | /* What modules depend on me? */ | 352 | /* What modules depend on me? */ |
349 | struct list_head source_list; | 353 | struct list_head source_list; |
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index f7556261fe3c..4d0cb9bba93e 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h | |||
@@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod); | |||
84 | 84 | ||
85 | /* Any cleanup before freeing mod->module_init */ | 85 | /* Any cleanup before freeing mod->module_init */ |
86 | void module_arch_freeing_init(struct module *mod); | 86 | void module_arch_freeing_init(struct module *mod); |
87 | |||
88 | #ifdef CONFIG_KASAN | ||
89 | #include <linux/kasan.h> | ||
90 | #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) | ||
91 | #else | ||
92 | #define MODULE_ALIGN PAGE_SIZE | ||
93 | #endif | ||
94 | |||
87 | #endif | 95 | #endif |
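Defining MODULE_ALIGN here (and pulling in kasan.h only when CONFIG_KASAN is set) keeps the alignment decision next to the module loader: with KASAN the module area is aligned to PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT so each module's shadow starts on its own page. A hedged, illustration-only calculation (assumes CONFIG_KASAN and the usual KASAN_SHADOW_SCALE_SHIFT of 3):

    /* With 4 KiB pages this alignment is 32 KiB, whose shadow (1/8 of the
     * size) is exactly one 4 KiB page. */
    static unsigned long example_module_shadow_bytes(unsigned long mod_size)
    {
            return ALIGN(mod_size, MODULE_ALIGN) >> KASAN_SHADOW_SCALE_SHIFT;
    }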
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5897b4ea5a3f..278738873703 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -965,9 +965,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
965 | * Used to add FDB entries to dump requests. Implementers should add | 965 | * Used to add FDB entries to dump requests. Implementers should add |
966 | * entries to skb and update idx with the number of entries. | 966 | * entries to skb and update idx with the number of entries. |
967 | * | 967 | * |
968 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) | 968 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, |
969 | * u16 flags) | ||
969 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, | 970 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
970 | * struct net_device *dev, u32 filter_mask) | 971 | * struct net_device *dev, u32 filter_mask) |
972 | * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, | ||
973 | * u16 flags); | ||
971 | * | 974 | * |
972 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); | 975 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); |
973 | * Called to change device carrier. Soft-devices (like dummy, team, etc) | 976 | * Called to change device carrier. Soft-devices (like dummy, team, etc) |
@@ -2182,6 +2185,12 @@ void netdev_freemem(struct net_device *dev); | |||
2182 | void synchronize_net(void); | 2185 | void synchronize_net(void); |
2183 | int init_dummy_netdev(struct net_device *dev); | 2186 | int init_dummy_netdev(struct net_device *dev); |
2184 | 2187 | ||
2188 | DECLARE_PER_CPU(int, xmit_recursion); | ||
2189 | static inline int dev_recursion_level(void) | ||
2190 | { | ||
2191 | return this_cpu_read(xmit_recursion); | ||
2192 | } | ||
2193 | |||
2185 | struct net_device *dev_get_by_index(struct net *net, int ifindex); | 2194 | struct net_device *dev_get_by_index(struct net *net, int ifindex); |
2186 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); | 2195 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); |
2187 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); | 2196 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); |
@@ -2342,6 +2351,7 @@ struct gro_remcsum { | |||
2342 | 2351 | ||
2343 | static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) | 2352 | static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) |
2344 | { | 2353 | { |
2354 | grc->offset = 0; | ||
2345 | grc->delta = 0; | 2355 | grc->delta = 0; |
2346 | } | 2356 | } |
2347 | 2357 | ||
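dev_recursion_level() exposes the per-CPU xmit_recursion counter, so tunnel-like drivers can refuse to transmit when the stack is already nested too deeply rather than recursing further. A hedged sketch, with a made-up limit of 8:

    static netdev_tx_t example_tunnel_xmit(struct sk_buff *skb,
                                           struct net_device *dev)
    {
            if (dev_recursion_level() >= 8) {       /* illustrative limit */
                    dev->stats.tx_dropped++;
                    dev_kfree_skb(skb);
                    return NETDEV_TX_OK;
            }

            /* ... encapsulate and hand the skb to the underlying device ... */
            return NETDEV_TX_OK;
    }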
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 2f77e0c651c8..b01ccf371fdc 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -343,6 +343,7 @@ extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, | |||
343 | extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); | 343 | extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); |
344 | extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); | 344 | extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); |
345 | extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); | 345 | extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); |
346 | extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); | ||
346 | extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 347 | extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
347 | extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); | 348 | extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); |
348 | extern void nfs_access_set_mask(struct nfs_access_entry *, u32); | 349 | extern void nfs_access_set_mask(struct nfs_access_entry *, u32); |
@@ -355,8 +356,9 @@ extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); | |||
355 | extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode); | 356 | extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode); |
356 | extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); | 357 | extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); |
357 | extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); | 358 | extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); |
359 | extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping); | ||
358 | extern int nfs_setattr(struct dentry *, struct iattr *); | 360 | extern int nfs_setattr(struct dentry *, struct iattr *); |
359 | extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); | 361 | extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); |
360 | extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, | 362 | extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, |
361 | struct nfs4_label *label); | 363 | struct nfs4_label *label); |
362 | extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); | 364 | extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); |
@@ -369,6 +371,7 @@ extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ct | |||
369 | extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); | 371 | extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); |
370 | extern u64 nfs_compat_user_ino64(u64 fileid); | 372 | extern u64 nfs_compat_user_ino64(u64 fileid); |
371 | extern void nfs_fattr_init(struct nfs_fattr *fattr); | 373 | extern void nfs_fattr_init(struct nfs_fattr *fattr); |
374 | extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr); | ||
372 | extern unsigned long nfs_inc_attr_generation_counter(void); | 375 | extern unsigned long nfs_inc_attr_generation_counter(void); |
373 | 376 | ||
374 | extern struct nfs_fattr *nfs_alloc_fattr(void); | 377 | extern struct nfs_fattr *nfs_alloc_fattr(void); |
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 8a860f096c35..611a691145c4 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
@@ -84,7 +84,7 @@ static inline int of_platform_populate(struct device_node *root, | |||
84 | static inline void of_platform_depopulate(struct device *parent) { } | 84 | static inline void of_platform_depopulate(struct device *parent) { } |
85 | #endif | 85 | #endif |
86 | 86 | ||
87 | #ifdef CONFIG_OF_DYNAMIC | 87 | #if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS) |
88 | extern void of_platform_register_reconfig_notifier(void); | 88 | extern void of_platform_register_reconfig_notifier(void); |
89 | #else | 89 | #else |
90 | static inline void of_platform_register_reconfig_notifier(void) { } | 90 | static inline void of_platform_register_reconfig_notifier(void) { } |
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 72c0415d6c21..18eccefea06e 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h | |||
@@ -82,7 +82,7 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio) | |||
82 | 82 | ||
83 | static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) | 83 | static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) |
84 | { | 84 | { |
85 | return ERR_PTR(-ENOSYS); | 85 | return NULL; |
86 | } | 86 | } |
87 | 87 | ||
88 | static inline void pinctrl_put(struct pinctrl *p) | 88 | static inline void pinctrl_put(struct pinctrl *p) |
@@ -93,7 +93,7 @@ static inline struct pinctrl_state * __must_check pinctrl_lookup_state( | |||
93 | struct pinctrl *p, | 93 | struct pinctrl *p, |
94 | const char *name) | 94 | const char *name) |
95 | { | 95 | { |
96 | return ERR_PTR(-ENOSYS); | 96 | return NULL; |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline int pinctrl_select_state(struct pinctrl *p, | 99 | static inline int pinctrl_select_state(struct pinctrl *p, |
@@ -104,7 +104,7 @@ static inline int pinctrl_select_state(struct pinctrl *p, | |||
104 | 104 | ||
105 | static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) | 105 | static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) |
106 | { | 106 | { |
107 | return ERR_PTR(-ENOSYS); | 107 | return NULL; |
108 | } | 108 | } |
109 | 109 | ||
110 | static inline void devm_pinctrl_put(struct pinctrl *p) | 110 | static inline void devm_pinctrl_put(struct pinctrl *p) |
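Having the !CONFIG_PINCTRL stubs return NULL instead of ERR_PTR(-ENOSYS) means a driver that treats pin control as optional no longer has to special-case the disabled configuration: the NULL handle just flows through the other stubs. A hedged sketch of that pattern (the state name is illustrative):

    static void example_apply_default_pins(struct device *dev)
    {
            struct pinctrl *p = devm_pinctrl_get(dev);
            struct pinctrl_state *s;

            if (IS_ERR(p))
                    return;         /* a real error from a real pinctrl backend */

            /* With CONFIG_PINCTRL=n both p and s are simply NULL and the
             * select below is expected to be a harmless no-op. */
            s = pinctrl_lookup_state(p, "default");
            if (!IS_ERR(s))
                    pinctrl_select_state(p, s);
    }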
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index d4ad5b5a02bb..045f709cb89b 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
@@ -316,7 +316,7 @@ struct regulator_desc { | |||
316 | * @driver_data: private regulator data | 316 | * @driver_data: private regulator data |
317 | * @of_node: OpenFirmware node to parse for device tree bindings (may be | 317 | * @of_node: OpenFirmware node to parse for device tree bindings (may be |
318 | * NULL). | 318 | * NULL). |
319 | * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is | 319 | * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is |
320 | * insufficient. | 320 | * insufficient. |
321 | * @ena_gpio_initialized: GPIO controlling regulator enable was properly | 321 | * @ena_gpio_initialized: GPIO controlling regulator enable was properly |
322 | * initialized, meaning that >= 0 is a valid gpio | 322 | * initialized, meaning that >= 0 is a valid gpio |
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 58851275fed9..d438eeb08bff 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h | |||
@@ -54,10 +54,11 @@ struct rhash_head { | |||
54 | * @buckets: size * hash buckets | 54 | * @buckets: size * hash buckets |
55 | */ | 55 | */ |
56 | struct bucket_table { | 56 | struct bucket_table { |
57 | size_t size; | 57 | size_t size; |
58 | unsigned int locks_mask; | 58 | unsigned int locks_mask; |
59 | spinlock_t *locks; | 59 | spinlock_t *locks; |
60 | struct rhash_head __rcu *buckets[]; | 60 | |
61 | struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; | ||
61 | }; | 62 | }; |
62 | 63 | ||
63 | typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); | 64 | typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); |
@@ -78,12 +79,6 @@ struct rhashtable; | |||
78 | * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) | 79 | * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) |
79 | * @hashfn: Function to hash key | 80 | * @hashfn: Function to hash key |
80 | * @obj_hashfn: Function to hash object | 81 | * @obj_hashfn: Function to hash object |
81 | * @grow_decision: If defined, may return true if table should expand | ||
82 | * @shrink_decision: If defined, may return true if table should shrink | ||
83 | * | ||
84 | * Note: when implementing the grow and shrink decision function, min/max | ||
85 | * shift must be enforced, otherwise, resizing watermarks they set may be | ||
86 | * useless. | ||
87 | */ | 82 | */ |
88 | struct rhashtable_params { | 83 | struct rhashtable_params { |
89 | size_t nelem_hint; | 84 | size_t nelem_hint; |
@@ -97,10 +92,6 @@ struct rhashtable_params { | |||
97 | size_t locks_mul; | 92 | size_t locks_mul; |
98 | rht_hashfn_t hashfn; | 93 | rht_hashfn_t hashfn; |
99 | rht_obj_hashfn_t obj_hashfn; | 94 | rht_obj_hashfn_t obj_hashfn; |
100 | bool (*grow_decision)(const struct rhashtable *ht, | ||
101 | size_t new_size); | ||
102 | bool (*shrink_decision)(const struct rhashtable *ht, | ||
103 | size_t new_size); | ||
104 | }; | 95 | }; |
105 | 96 | ||
106 | /** | 97 | /** |
@@ -192,9 +183,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); | |||
192 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); | 183 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); |
193 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); | 184 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); |
194 | 185 | ||
195 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); | ||
196 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); | ||
197 | |||
198 | int rhashtable_expand(struct rhashtable *ht); | 186 | int rhashtable_expand(struct rhashtable *ht); |
199 | int rhashtable_shrink(struct rhashtable *ht); | 187 | int rhashtable_shrink(struct rhashtable *ht); |
200 | 188 | ||
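With the grow/shrink callbacks gone, the core applies its own 75 %/30 % utilisation watermarks and rhashtable_params shrinks to the fields shown above. A hedged initialisation sketch using that reduced parameter set (the object layout and hint value are made up; jhash is just one valid rht_hashfn_t):

    #include <linux/jhash.h>
    #include <linux/rhashtable.h>

    struct example_obj {
            u32                     key;
            struct rhash_head       node;
    };

    static struct rhashtable example_ht;

    static int example_ht_init(void)
    {
            struct rhashtable_params params = {
                    .nelem_hint     = 64,
                    .head_offset    = offsetof(struct example_obj, node),
                    .key_offset     = offsetof(struct example_obj, key),
                    .key_len        = sizeof(u32),
                    .hashfn         = jhash,
            };

            return rhashtable_init(&example_ht, &params);
    }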
diff --git a/include/linux/sched.h b/include/linux/sched.h index 6d77432e14ff..a419b65770d6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1625,11 +1625,11 @@ struct task_struct { | |||
1625 | 1625 | ||
1626 | /* | 1626 | /* |
1627 | * numa_faults_locality tracks if faults recorded during the last | 1627 | * numa_faults_locality tracks if faults recorded during the last |
1628 | * scan window were remote/local. The task scan period is adapted | 1628 | * scan window were remote/local or failed to migrate. The task scan |
1629 | * based on the locality of the faults with different weights | 1629 | * period is adapted based on the locality of the faults with different |
1630 | * depending on whether they were shared or private faults | 1630 | * weights depending on whether they were shared or private faults |
1631 | */ | 1631 | */ |
1632 | unsigned long numa_faults_locality[2]; | 1632 | unsigned long numa_faults_locality[3]; |
1633 | 1633 | ||
1634 | unsigned long numa_pages_migrated; | 1634 | unsigned long numa_pages_migrated; |
1635 | #endif /* CONFIG_NUMA_BALANCING */ | 1635 | #endif /* CONFIG_NUMA_BALANCING */ |
@@ -1719,6 +1719,7 @@ struct task_struct { | |||
1719 | #define TNF_NO_GROUP 0x02 | 1719 | #define TNF_NO_GROUP 0x02 |
1720 | #define TNF_SHARED 0x04 | 1720 | #define TNF_SHARED 0x04 |
1721 | #define TNF_FAULT_LOCAL 0x08 | 1721 | #define TNF_FAULT_LOCAL 0x08 |
1722 | #define TNF_MIGRATE_FAIL 0x10 | ||
1722 | 1723 | ||
1723 | #ifdef CONFIG_NUMA_BALANCING | 1724 | #ifdef CONFIG_NUMA_BALANCING |
1724 | extern void task_numa_fault(int last_node, int node, int pages, int flags); | 1725 | extern void task_numa_fault(int last_node, int node, int pages, int flags); |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index baf3e1d08416..d10965f0d8a4 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -143,13 +143,13 @@ struct uart_port { | |||
143 | unsigned char iotype; /* io access style */ | 143 | unsigned char iotype; /* io access style */ |
144 | unsigned char unused1; | 144 | unsigned char unused1; |
145 | 145 | ||
146 | #define UPIO_PORT (0) /* 8b I/O port access */ | 146 | #define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */ |
147 | #define UPIO_HUB6 (1) /* Hub6 ISA card */ | 147 | #define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */ |
148 | #define UPIO_MEM (2) /* 8b MMIO access */ | 148 | #define UPIO_MEM (SERIAL_IO_MEM) /* 8b MMIO access */ |
149 | #define UPIO_MEM32 (3) /* 32b little endian */ | 149 | #define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */ |
150 | #define UPIO_MEM32BE (4) /* 32b big endian */ | 150 | #define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */ |
151 | #define UPIO_AU (5) /* Au1x00 and RT288x type IO */ | 151 | #define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */ |
152 | #define UPIO_TSI (6) /* Tsi108/109 type IO */ | 152 | #define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */ |
153 | 153 | ||
154 | unsigned int read_status_mask; /* driver specific */ | 154 | unsigned int read_status_mask; /* driver specific */ |
155 | unsigned int ignore_status_mask; /* driver specific */ | 155 | unsigned int ignore_status_mask; /* driver specific */ |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 30007afe70b3..f54d6659713a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -948,6 +948,13 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) | |||
948 | to->l4_hash = from->l4_hash; | 948 | to->l4_hash = from->l4_hash; |
949 | }; | 949 | }; |
950 | 950 | ||
951 | static inline void skb_sender_cpu_clear(struct sk_buff *skb) | ||
952 | { | ||
953 | #ifdef CONFIG_XPS | ||
954 | skb->sender_cpu = 0; | ||
955 | #endif | ||
956 | } | ||
957 | |||
951 | #ifdef NET_SKBUFF_DATA_USES_OFFSET | 958 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
952 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) | 959 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) |
953 | { | 960 | { |
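skb_sender_cpu_clear() resets the XPS transmit-queue hint; it is intended for places where an skb changes context (forwarding, redirection across devices or namespaces) and the recorded sender CPU would steer it to the wrong queue. With CONFIG_XPS=n it compiles to nothing. A hedged call-site sketch:

    static void example_forward(struct sk_buff *skb, struct net_device *out)
    {
            skb->dev = out;
            skb_sender_cpu_clear(skb);      /* drop the stale hint from ingress */
            dev_queue_xmit(skb);
    }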
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index ed9489d893a4..d673072346f2 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -162,8 +162,6 @@ struct spi_transfer; | |||
162 | * @remove: Unbinds this driver from the spi device | 162 | * @remove: Unbinds this driver from the spi device |
163 | * @shutdown: Standard shutdown callback used during system state | 163 | * @shutdown: Standard shutdown callback used during system state |
164 | * transitions such as powerdown/halt and kexec | 164 | * transitions such as powerdown/halt and kexec |
165 | * @suspend: Standard suspend callback used during system state transitions | ||
166 | * @resume: Standard resume callback used during system state transitions | ||
167 | * @driver: SPI device drivers should initialize the name and owner | 165 | * @driver: SPI device drivers should initialize the name and owner |
168 | * field of this structure. | 166 | * field of this structure. |
169 | * | 167 | * |
@@ -184,8 +182,6 @@ struct spi_driver { | |||
184 | int (*probe)(struct spi_device *spi); | 182 | int (*probe)(struct spi_device *spi); |
185 | int (*remove)(struct spi_device *spi); | 183 | int (*remove)(struct spi_device *spi); |
186 | void (*shutdown)(struct spi_device *spi); | 184 | void (*shutdown)(struct spi_device *spi); |
187 | int (*suspend)(struct spi_device *spi, pm_message_t mesg); | ||
188 | int (*resume)(struct spi_device *spi); | ||
189 | struct device_driver driver; | 185 | struct device_driver driver; |
190 | }; | 186 | }; |
191 | 187 | ||
@@ -294,6 +290,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
294 | * transfer_one_message are mutually exclusive; when both | 290 | * transfer_one_message are mutually exclusive; when both |
295 | * are set, the generic subsystem does not call your | 291 | * are set, the generic subsystem does not call your |
296 | * transfer_one callback. | 292 | * transfer_one callback. |
293 | * @handle_err: the subsystem calls the driver to handle an error that occurs | ||
294 | * in the generic implementation of transfer_one_message(). | ||
297 | * @unprepare_message: undo any work done by prepare_message(). | 295 | * @unprepare_message: undo any work done by prepare_message(). |
298 | * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS | 296 | * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS |
299 | * number. Any individual value may be -ENOENT for CS lines that | 297 | * number. Any individual value may be -ENOENT for CS lines that |
@@ -448,6 +446,8 @@ struct spi_master { | |||
448 | void (*set_cs)(struct spi_device *spi, bool enable); | 446 | void (*set_cs)(struct spi_device *spi, bool enable); |
449 | int (*transfer_one)(struct spi_master *master, struct spi_device *spi, | 447 | int (*transfer_one)(struct spi_master *master, struct spi_device *spi, |
450 | struct spi_transfer *transfer); | 448 | struct spi_transfer *transfer); |
449 | void (*handle_err)(struct spi_master *master, | ||
450 | struct spi_message *message); | ||
451 | 451 | ||
452 | /* gpio chip select */ | 452 | /* gpio chip select */ |
453 | int *cs_gpios; | 453 | int *cs_gpios; |
@@ -649,7 +649,7 @@ struct spi_transfer { | |||
649 | * sequence completes. On some systems, many such sequences can execute as | 649 | * sequence completes. On some systems, many such sequences can execute as |
650 | * as single programmed DMA transfer. On all systems, these messages are | 650 | * as single programmed DMA transfer. On all systems, these messages are |
651 | * queued, and might complete after transactions to other devices. Messages | 651 | * queued, and might complete after transactions to other devices. Messages |
652 | * sent to a given spi_device are alway executed in FIFO order. | 652 | * sent to a given spi_device are always executed in FIFO order. |
653 | * | 653 | * |
654 | * The code that submits an spi_message (and its spi_transfers) | 654 | * The code that submits an spi_message (and its spi_transfers) |
655 | * to the lower layers is responsible for managing its memory. | 655 | * to the lower layers is responsible for managing its memory. |
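The new handle_err() hook gives a controller driver a place to clean up, typically by terminating an in-flight DMA, when a transfer fails or times out inside the generic transfer_one_message() path. A hedged controller-side sketch (struct example_spi and its DMA channels are assumptions):

    #include <linux/dmaengine.h>
    #include <linux/spi/spi.h>

    struct example_spi {
            struct dma_chan *dma_tx;
            struct dma_chan *dma_rx;
    };

    static void example_spi_handle_err(struct spi_master *master,
                                       struct spi_message *msg)
    {
            struct example_spi *ctlr = spi_master_get_devdata(master);

            /* Abort whatever DMA the failed transfer left running. */
            dmaengine_terminate_all(ctlr->dma_tx);
            dmaengine_terminate_all(ctlr->dma_rx);
    }

    /* In probe: master->handle_err = example_spi_handle_err; */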
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index c57d8ea0716c..59a7889e15db 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h | |||
@@ -60,17 +60,17 @@ struct rpc_xprt; | |||
60 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 60 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
61 | void rpc_register_sysctl(void); | 61 | void rpc_register_sysctl(void); |
62 | void rpc_unregister_sysctl(void); | 62 | void rpc_unregister_sysctl(void); |
63 | int sunrpc_debugfs_init(void); | 63 | void sunrpc_debugfs_init(void); |
64 | void sunrpc_debugfs_exit(void); | 64 | void sunrpc_debugfs_exit(void); |
65 | int rpc_clnt_debugfs_register(struct rpc_clnt *); | 65 | void rpc_clnt_debugfs_register(struct rpc_clnt *); |
66 | void rpc_clnt_debugfs_unregister(struct rpc_clnt *); | 66 | void rpc_clnt_debugfs_unregister(struct rpc_clnt *); |
67 | int rpc_xprt_debugfs_register(struct rpc_xprt *); | 67 | void rpc_xprt_debugfs_register(struct rpc_xprt *); |
68 | void rpc_xprt_debugfs_unregister(struct rpc_xprt *); | 68 | void rpc_xprt_debugfs_unregister(struct rpc_xprt *); |
69 | #else | 69 | #else |
70 | static inline int | 70 | static inline void |
71 | sunrpc_debugfs_init(void) | 71 | sunrpc_debugfs_init(void) |
72 | { | 72 | { |
73 | return 0; | 73 | return; |
74 | } | 74 | } |
75 | 75 | ||
76 | static inline void | 76 | static inline void |
@@ -79,10 +79,10 @@ sunrpc_debugfs_exit(void) | |||
79 | return; | 79 | return; |
80 | } | 80 | } |
81 | 81 | ||
82 | static inline int | 82 | static inline void |
83 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | 83 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) |
84 | { | 84 | { |
85 | return 0; | 85 | return; |
86 | } | 86 | } |
87 | 87 | ||
88 | static inline void | 88 | static inline void |
@@ -91,10 +91,10 @@ rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) | |||
91 | return; | 91 | return; |
92 | } | 92 | } |
93 | 93 | ||
94 | static inline int | 94 | static inline void |
95 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) | 95 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) |
96 | { | 96 | { |
97 | return 0; | 97 | return; |
98 | } | 98 | } |
99 | 99 | ||
100 | static inline void | 100 | static inline void |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index fc52e307efab..5eac316490ea 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -314,6 +314,8 @@ void thermal_zone_of_sensor_unregister(struct device *dev, | |||
314 | } | 314 | } |
315 | 315 | ||
316 | #endif | 316 | #endif |
317 | |||
318 | #if IS_ENABLED(CONFIG_THERMAL) | ||
317 | struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, | 319 | struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, |
318 | void *, struct thermal_zone_device_ops *, | 320 | void *, struct thermal_zone_device_ops *, |
319 | const struct thermal_zone_params *, int, int); | 321 | const struct thermal_zone_params *, int, int); |
@@ -340,8 +342,58 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, | |||
340 | struct thermal_cooling_device *, int); | 342 | struct thermal_cooling_device *, int); |
341 | void thermal_cdev_update(struct thermal_cooling_device *); | 343 | void thermal_cdev_update(struct thermal_cooling_device *); |
342 | void thermal_notify_framework(struct thermal_zone_device *, int); | 344 | void thermal_notify_framework(struct thermal_zone_device *, int); |
343 | 345 | #else | |
344 | #ifdef CONFIG_NET | 346 | static inline struct thermal_zone_device *thermal_zone_device_register( |
347 | const char *type, int trips, int mask, void *devdata, | ||
348 | struct thermal_zone_device_ops *ops, | ||
349 | const struct thermal_zone_params *tzp, | ||
350 | int passive_delay, int polling_delay) | ||
351 | { return ERR_PTR(-ENODEV); } | ||
352 | static inline void thermal_zone_device_unregister( | ||
353 | struct thermal_zone_device *tz) | ||
354 | { } | ||
355 | static inline int thermal_zone_bind_cooling_device( | ||
356 | struct thermal_zone_device *tz, int trip, | ||
357 | struct thermal_cooling_device *cdev, | ||
358 | unsigned long upper, unsigned long lower) | ||
359 | { return -ENODEV; } | ||
360 | static inline int thermal_zone_unbind_cooling_device( | ||
361 | struct thermal_zone_device *tz, int trip, | ||
362 | struct thermal_cooling_device *cdev) | ||
363 | { return -ENODEV; } | ||
364 | static inline void thermal_zone_device_update(struct thermal_zone_device *tz) | ||
365 | { } | ||
366 | static inline struct thermal_cooling_device * | ||
367 | thermal_cooling_device_register(char *type, void *devdata, | ||
368 | const struct thermal_cooling_device_ops *ops) | ||
369 | { return ERR_PTR(-ENODEV); } | ||
370 | static inline struct thermal_cooling_device * | ||
371 | thermal_of_cooling_device_register(struct device_node *np, | ||
372 | char *type, void *devdata, const struct thermal_cooling_device_ops *ops) | ||
373 | { return ERR_PTR(-ENODEV); } | ||
374 | static inline void thermal_cooling_device_unregister( | ||
375 | struct thermal_cooling_device *cdev) | ||
376 | { } | ||
377 | static inline struct thermal_zone_device *thermal_zone_get_zone_by_name( | ||
378 | const char *name) | ||
379 | { return ERR_PTR(-ENODEV); } | ||
380 | static inline int thermal_zone_get_temp( | ||
381 | struct thermal_zone_device *tz, unsigned long *temp) | ||
382 | { return -ENODEV; } | ||
383 | static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) | ||
384 | { return -ENODEV; } | ||
385 | static inline struct thermal_instance * | ||
386 | get_thermal_instance(struct thermal_zone_device *tz, | ||
387 | struct thermal_cooling_device *cdev, int trip) | ||
388 | { return ERR_PTR(-ENODEV); } | ||
389 | static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) | ||
390 | { } | ||
391 | static inline void thermal_notify_framework(struct thermal_zone_device *tz, | ||
392 | int trip) | ||
393 | { } | ||
394 | #endif /* CONFIG_THERMAL */ | ||
395 | |||
396 | #if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL) | ||
345 | extern int thermal_generate_netlink_event(struct thermal_zone_device *tz, | 397 | extern int thermal_generate_netlink_event(struct thermal_zone_device *tz, |
346 | enum events event); | 398 | enum events event); |
347 | #else | 399 | #else |
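The !CONFIG_THERMAL stubs let a driver register its thermal zone unconditionally and simply cope with the ERR_PTR(-ENODEV) result, instead of wrapping the calls in #ifdefs. A hedged sketch (the chip structure and ops are hypothetical):

    struct example_chip {
            struct thermal_zone_device *tz;
    };

    static void example_register_tz(struct example_chip *chip, void *devdata,
                                    struct thermal_zone_device_ops *ops)
    {
            chip->tz = thermal_zone_device_register("example", 0, 0, devdata,
                                                    ops, NULL, 0, 0);
            if (IS_ERR(chip->tz))
                    chip->tz = NULL;        /* stubbed out: run without a zone */
    }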
diff --git a/include/linux/uio.h b/include/linux/uio.h index 07a022641996..71880299ed48 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
@@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, | |||
98 | size_t maxsize, size_t *start); | 98 | size_t maxsize, size_t *start); |
99 | int iov_iter_npages(const struct iov_iter *i, int maxpages); | 99 | int iov_iter_npages(const struct iov_iter *i, int maxpages); |
100 | 100 | ||
101 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); | ||
102 | |||
101 | static inline size_t iov_iter_count(struct iov_iter *i) | 103 | static inline size_t iov_iter_count(struct iov_iter *i) |
102 | { | 104 | { |
103 | return i->count; | 105 | return i->count; |
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 9bb547c7bce7..704a1ab8240c 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
@@ -190,8 +190,7 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data) | |||
190 | * @num_ports: the number of different ports this device will have. | 190 | * @num_ports: the number of different ports this device will have. |
191 | * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer | 191 | * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer |
192 | * (0 = end-point size) | 192 | * (0 = end-point size) |
193 | * @bulk_out_size: minimum number of bytes to allocate for bulk-out buffer | 193 | * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size) |
194 | * (0 = end-point size) | ||
195 | * @calc_num_ports: pointer to a function to determine how many ports this | 194 | * @calc_num_ports: pointer to a function to determine how many ports this |
196 | * device has dynamically. It will be called after the probe() | 195 | * device has dynamically. It will be called after the probe() |
197 | * callback is called, but before attach() | 196 | * callback is called, but before attach() |
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index d9a4905e01d0..6e0ce8c7b8cb 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
@@ -227,9 +227,23 @@ struct skb_data { /* skb->cb is one of these */ | |||
227 | struct urb *urb; | 227 | struct urb *urb; |
228 | struct usbnet *dev; | 228 | struct usbnet *dev; |
229 | enum skb_state state; | 229 | enum skb_state state; |
230 | size_t length; | 230 | long length; |
231 | unsigned long packets; | ||
231 | }; | 232 | }; |
232 | 233 | ||
234 | /* Drivers that set FLAG_MULTI_PACKET must call this in their | ||
235 | * tx_fixup method before returning an skb. | ||
236 | */ | ||
237 | static inline void | ||
238 | usbnet_set_skb_tx_stats(struct sk_buff *skb, | ||
239 | unsigned long packets, long bytes_delta) | ||
240 | { | ||
241 | struct skb_data *entry = (struct skb_data *) skb->cb; | ||
242 | |||
243 | entry->packets = packets; | ||
244 | entry->length = bytes_delta; | ||
245 | } | ||
246 | |||
233 | extern int usbnet_open(struct net_device *net); | 247 | extern int usbnet_open(struct net_device *net); |
234 | extern int usbnet_stop(struct net_device *net); | 248 | extern int usbnet_stop(struct net_device *net); |
235 | extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, | 249 | extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, |
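Since a FLAG_MULTI_PACKET skb may carry several aggregated frames, the usbnet core can no longer infer tx_packets/tx_bytes from the skb alone; minidrivers now report them through usbnet_set_skb_tx_stats(). A hedged tx_fixup sketch; the framing step is omitted and the byte delta simply mirrors the payload-minus-framed-length pattern used by real multi-packet drivers:

    static struct sk_buff *example_tx_fixup(struct usbnet *dev,
                                            struct sk_buff *skb, gfp_t flags)
    {
            unsigned long packets = 1;      /* frames aggregated into this URB */
            long payload = skb->len;        /* payload bytes before framing */

            /* ... device-specific framing/aggregation would happen here ... */

            usbnet_set_skb_tx_stats(skb, packets, payload - skb->len);
            return skb;
    }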
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 7d7acb35603d..0ec598381f97 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -17,6 +17,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ | |||
17 | #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ | 17 | #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ |
18 | #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ | 18 | #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ |
19 | #define VM_NO_GUARD 0x00000040 /* don't add guard page */ | 19 | #define VM_NO_GUARD 0x00000040 /* don't add guard page */ |
20 | #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ | ||
20 | /* bits [20..32] reserved for arch specific ioremap internals */ | 21 | /* bits [20..32] reserved for arch specific ioremap internals */ |
21 | 22 | ||
22 | /* | 23 | /* |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 74db135f9957..f597846ff605 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -70,7 +70,8 @@ enum { | |||
70 | /* data contains off-queue information when !WORK_STRUCT_PWQ */ | 70 | /* data contains off-queue information when !WORK_STRUCT_PWQ */ |
71 | WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, | 71 | WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, |
72 | 72 | ||
73 | WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), | 73 | __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, |
74 | WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING), | ||
74 | 75 | ||
75 | /* | 76 | /* |
76 | * When a work item is off queue, its high bits point to the last | 77 | * When a work item is off queue, its high bits point to the last |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 00048339c23e..b2dd371ec0ca 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -130,6 +130,7 @@ extern int vm_dirty_ratio; | |||
130 | extern unsigned long vm_dirty_bytes; | 130 | extern unsigned long vm_dirty_bytes; |
131 | extern unsigned int dirty_writeback_interval; | 131 | extern unsigned int dirty_writeback_interval; |
132 | extern unsigned int dirty_expire_interval; | 132 | extern unsigned int dirty_expire_interval; |
133 | extern unsigned int dirtytime_expire_interval; | ||
133 | extern int vm_highmem_is_dirtyable; | 134 | extern int vm_highmem_is_dirtyable; |
134 | extern int block_dump; | 135 | extern int block_dump; |
135 | extern int laptop_mode; | 136 | extern int laptop_mode; |
@@ -146,6 +147,8 @@ extern int dirty_ratio_handler(struct ctl_table *table, int write, | |||
146 | extern int dirty_bytes_handler(struct ctl_table *table, int write, | 147 | extern int dirty_bytes_handler(struct ctl_table *table, int write, |
147 | void __user *buffer, size_t *lenp, | 148 | void __user *buffer, size_t *lenp, |
148 | loff_t *ppos); | 149 | loff_t *ppos); |
150 | int dirtytime_interval_handler(struct ctl_table *table, int write, | ||
151 | void __user *buffer, size_t *lenp, loff_t *ppos); | ||
149 | 152 | ||
150 | struct ctl_table; | 153 | struct ctl_table; |
151 | int dirty_writeback_centisecs_handler(struct ctl_table *, int, | 154 | int dirty_writeback_centisecs_handler(struct ctl_table *, int, |
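Editor's note: the dirtytime_expire_interval knob and dirtytime_interval_handler() declared above are the kind of pair normally wired up through a sysctl table. A sketch of such an entry, assuming it is exposed as dirtytime_expire_seconds; the table name and placement are illustrative.

#include <linux/sysctl.h>
#include <linux/writeback.h>

static struct ctl_table example_vm_table[] = {
	{
		.procname	= "dirtytime_expire_seconds",
		.data		= &dirtytime_expire_interval,
		.maxlen		= sizeof(dirtytime_expire_interval),
		.mode		= 0644,
		.proc_handler	= dirtytime_interval_handler,
	},
	{ }
};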
diff --git a/include/media/atmel-isi.h b/include/media/atmel-isi.h index c2e570336269..6008b0985b7b 100644 --- a/include/media/atmel-isi.h +++ b/include/media/atmel-isi.h | |||
@@ -59,6 +59,10 @@ | |||
59 | #define ISI_CFG1_FRATE_DIV_MASK (7 << 8) | 59 | #define ISI_CFG1_FRATE_DIV_MASK (7 << 8) |
60 | #define ISI_CFG1_DISCR (1 << 11) | 60 | #define ISI_CFG1_DISCR (1 << 11) |
61 | #define ISI_CFG1_FULL_MODE (1 << 12) | 61 | #define ISI_CFG1_FULL_MODE (1 << 12) |
62 | /* Definition for THMASK(ISI_V2) */ | ||
63 | #define ISI_CFG1_THMASK_BEATS_4 (0 << 13) | ||
64 | #define ISI_CFG1_THMASK_BEATS_8 (1 << 13) | ||
65 | #define ISI_CFG1_THMASK_BEATS_16 (2 << 13) | ||
62 | 66 | ||
63 | /* Bitfields in CFG2 */ | 67 | /* Bitfields in CFG2 */ |
64 | #define ISI_CFG2_GRAYSCALE (1 << 13) | 68 | #define ISI_CFG2_GRAYSCALE (1 << 13) |
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h index 1c1ad46250d5..fe328c52c46b 100644 --- a/include/net/caif/cfpkt.h +++ b/include/net/caif/cfpkt.h | |||
@@ -171,7 +171,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos); | |||
171 | * @return Checksum of buffer. | 171 | * @return Checksum of buffer. |
172 | */ | 172 | */ |
173 | 173 | ||
174 | u16 cfpkt_iterate(struct cfpkt *pkt, | 174 | int cfpkt_iterate(struct cfpkt *pkt, |
175 | u16 (*iter_func)(u16 chks, void *buf, u16 len), | 175 | u16 (*iter_func)(u16 chks, void *buf, u16 len), |
176 | u16 data); | 176 | u16 data); |
177 | 177 | ||
diff --git a/include/net/dst.h b/include/net/dst.h index a8ae4e760778..0fb99a26e973 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -481,6 +481,7 @@ void dst_init(void); | |||
481 | enum { | 481 | enum { |
482 | XFRM_LOOKUP_ICMP = 1 << 0, | 482 | XFRM_LOOKUP_ICMP = 1 << 0, |
483 | XFRM_LOOKUP_QUEUE = 1 << 1, | 483 | XFRM_LOOKUP_QUEUE = 1 << 1, |
484 | XFRM_LOOKUP_KEEP_DST_REF = 1 << 2, | ||
484 | }; | 485 | }; |
485 | 486 | ||
486 | struct flowi; | 487 | struct flowi; |
diff --git a/include/net/ip.h b/include/net/ip.h index 025c61c0dffb..6cc1eafb153a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
@@ -453,22 +453,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk) | |||
453 | 453 | ||
454 | #endif | 454 | #endif |
455 | 455 | ||
456 | static inline int sk_mc_loop(struct sock *sk) | ||
457 | { | ||
458 | if (!sk) | ||
459 | return 1; | ||
460 | switch (sk->sk_family) { | ||
461 | case AF_INET: | ||
462 | return inet_sk(sk)->mc_loop; | ||
463 | #if IS_ENABLED(CONFIG_IPV6) | ||
464 | case AF_INET6: | ||
465 | return inet6_sk(sk)->mc_loop; | ||
466 | #endif | ||
467 | } | ||
468 | WARN_ON(1); | ||
469 | return 1; | ||
470 | } | ||
471 | |||
472 | bool ip_call_ra_chain(struct sk_buff *skb); | 456 | bool ip_call_ra_chain(struct sk_buff *skb); |
473 | 457 | ||
474 | /* | 458 | /* |
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 1d09b46c1e48..eda131d179d9 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h | |||
@@ -174,7 +174,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); | |||
174 | 174 | ||
175 | static inline int ip6_skb_dst_mtu(struct sk_buff *skb) | 175 | static inline int ip6_skb_dst_mtu(struct sk_buff *skb) |
176 | { | 176 | { |
177 | struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; | 177 | struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? |
178 | inet6_sk(skb->sk) : NULL; | ||
178 | 179 | ||
179 | return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ? | 180 | return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ? |
180 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); | 181 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); |
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 534e1f2ac4fc..57639fca223a 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h | |||
@@ -79,6 +79,16 @@ void nf_log_packet(struct net *net, | |||
79 | const struct nf_loginfo *li, | 79 | const struct nf_loginfo *li, |
80 | const char *fmt, ...); | 80 | const char *fmt, ...); |
81 | 81 | ||
82 | __printf(8, 9) | ||
83 | void nf_log_trace(struct net *net, | ||
84 | u_int8_t pf, | ||
85 | unsigned int hooknum, | ||
86 | const struct sk_buff *skb, | ||
87 | const struct net_device *in, | ||
88 | const struct net_device *out, | ||
89 | const struct nf_loginfo *li, | ||
90 | const char *fmt, ...); | ||
91 | |||
82 | struct nf_log_buf; | 92 | struct nf_log_buf; |
83 | 93 | ||
84 | struct nf_log_buf *nf_log_buf_open(void); | 94 | struct nf_log_buf *nf_log_buf_open(void); |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 9eaaa7884586..decb9a095ae7 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -119,6 +119,22 @@ int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg, | |||
119 | const struct nft_data *data, | 119 | const struct nft_data *data, |
120 | enum nft_data_types type); | 120 | enum nft_data_types type); |
121 | 121 | ||
122 | |||
123 | /** | ||
124 | * struct nft_userdata - user defined data associated with an object | ||
125 | * | ||
126 | * @len: length of the data | ||
127 | * @data: content | ||
128 | * | ||
129 | * The presence of user data is indicated in an object specific fashion, | ||
130 | * so a length of zero can't occur and the value "len" indicates data | ||
131 | * of length len + 1. | ||
132 | */ | ||
133 | struct nft_userdata { | ||
134 | u8 len; | ||
135 | unsigned char data[0]; | ||
136 | }; | ||
137 | |||
122 | /** | 138 | /** |
123 | * struct nft_set_elem - generic representation of set elements | 139 | * struct nft_set_elem - generic representation of set elements |
124 | * | 140 | * |
@@ -380,7 +396,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr) | |||
380 | * @handle: rule handle | 396 | * @handle: rule handle |
381 | * @genmask: generation mask | 397 | * @genmask: generation mask |
382 | * @dlen: length of expression data | 398 | * @dlen: length of expression data |
383 | * @ulen: length of user data (used for comments) | 399 | * @udata: user data is appended to the rule |
384 | * @data: expression data | 400 | * @data: expression data |
385 | */ | 401 | */ |
386 | struct nft_rule { | 402 | struct nft_rule { |
@@ -388,7 +404,7 @@ struct nft_rule { | |||
388 | u64 handle:42, | 404 | u64 handle:42, |
389 | genmask:2, | 405 | genmask:2, |
390 | dlen:12, | 406 | dlen:12, |
391 | ulen:8; | 407 | udata:1; |
392 | unsigned char data[] | 408 | unsigned char data[] |
393 | __attribute__((aligned(__alignof__(struct nft_expr)))); | 409 | __attribute__((aligned(__alignof__(struct nft_expr)))); |
394 | }; | 410 | }; |
@@ -476,7 +492,7 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule) | |||
476 | return (struct nft_expr *)&rule->data[rule->dlen]; | 492 | return (struct nft_expr *)&rule->data[rule->dlen]; |
477 | } | 493 | } |
478 | 494 | ||
479 | static inline void *nft_userdata(const struct nft_rule *rule) | 495 | static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) |
480 | { | 496 | { |
481 | return (void *)&rule->data[rule->dlen]; | 497 | return (void *)&rule->data[rule->dlen]; |
482 | } | 498 | } |
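Editor's note: the struct nft_userdata comment above encodes lengths as len + 1, since a zero-length attachment cannot occur. A short sketch of reading the attachment back under that convention, assuming the rule's udata bit is the presence flag; the helper name is illustrative.

#include <net/netfilter/nf_tables.h>

static unsigned int example_userdata_size(const struct nft_rule *rule)
{
	const struct nft_userdata *udata;

	if (!rule->udata)		/* no user data attached */
		return 0;

	udata = nft_userdata(rule);
	return udata->len + 1;		/* stored len of 0 means 1 byte */
}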
diff --git a/include/net/sock.h b/include/net/sock.h index ab186b1d31ff..e4079c28e6b8 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1762,6 +1762,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); | |||
1762 | 1762 | ||
1763 | struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); | 1763 | struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); |
1764 | 1764 | ||
1765 | bool sk_mc_loop(struct sock *sk); | ||
1766 | |||
1765 | static inline bool sk_can_gso(const struct sock *sk) | 1767 | static inline bool sk_can_gso(const struct sock *sk) |
1766 | { | 1768 | { |
1767 | return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); | 1769 | return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); |
diff --git a/include/net/vxlan.h b/include/net/vxlan.h index eabd3a038674..c73e7abbbaa5 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h | |||
@@ -91,6 +91,7 @@ struct vxlanhdr { | |||
91 | 91 | ||
92 | #define VXLAN_N_VID (1u << 24) | 92 | #define VXLAN_N_VID (1u << 24) |
93 | #define VXLAN_VID_MASK (VXLAN_N_VID - 1) | 93 | #define VXLAN_VID_MASK (VXLAN_N_VID - 1) |
94 | #define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8) | ||
94 | #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) | 95 | #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) |
95 | 96 | ||
96 | struct vxlan_metadata { | 97 | struct vxlan_metadata { |
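Editor's note: VXLAN_VNI_MASK above selects the 24-bit VNI inside the 32-bit vx_vni field, whose low byte is reserved. A hedged sketch of extracting the VNI with the new mask, assuming vxh points at a parsed struct vxlanhdr.

#include <net/vxlan.h>

static u32 example_vxlan_vni(const struct vxlanhdr *vxh)
{
	/* Drop the reserved low byte, keep the 24-bit identifier. */
	return (ntohl(vxh->vx_vni) & VXLAN_VNI_MASK) >> 8;
}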
diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h index 0210797abf2e..dc10c52e0e91 100644 --- a/include/soc/at91/at91sam9_ddrsdr.h +++ b/include/soc/at91/at91sam9_ddrsdr.h | |||
@@ -92,7 +92,7 @@ | |||
92 | #define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ | 92 | #define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ |
93 | 93 | ||
94 | #define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ | 94 | #define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ |
95 | #define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */ | 95 | #define AT91_DDRSDRC_MD (7 << 0) /* Memory Device Type */ |
96 | #define AT91_DDRSDRC_MD_SDR 0 | 96 | #define AT91_DDRSDRC_MD_SDR 0 |
97 | #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 | 97 | #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 |
98 | #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 | 98 | #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 |
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index db81c65b8f48..d61be7297b2c 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
@@ -111,6 +111,7 @@ void array_free(void *array, int n); | |||
111 | void target_core_setup_sub_cits(struct se_subsystem_api *); | 111 | void target_core_setup_sub_cits(struct se_subsystem_api *); |
112 | 112 | ||
113 | /* attribute helpers from target_core_device.c for backend drivers */ | 113 | /* attribute helpers from target_core_device.c for backend drivers */ |
114 | bool se_dev_check_wce(struct se_device *); | ||
114 | int se_dev_set_max_unmap_lba_count(struct se_device *, u32); | 115 | int se_dev_set_max_unmap_lba_count(struct se_device *, u32); |
115 | int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); | 116 | int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); |
116 | int se_dev_set_unmap_granularity(struct se_device *, u32); | 117 | int se_dev_set_unmap_granularity(struct se_device *, u32); |
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h index 23d561512f64..22317d2b52ab 100644 --- a/include/trace/events/regmap.h +++ b/include/trace/events/regmap.h | |||
@@ -7,27 +7,26 @@ | |||
7 | #include <linux/ktime.h> | 7 | #include <linux/ktime.h> |
8 | #include <linux/tracepoint.h> | 8 | #include <linux/tracepoint.h> |
9 | 9 | ||
10 | struct device; | 10 | #include "../../../drivers/base/regmap/internal.h" |
11 | struct regmap; | ||
12 | 11 | ||
13 | /* | 12 | /* |
14 | * Log register events | 13 | * Log register events |
15 | */ | 14 | */ |
16 | DECLARE_EVENT_CLASS(regmap_reg, | 15 | DECLARE_EVENT_CLASS(regmap_reg, |
17 | 16 | ||
18 | TP_PROTO(struct device *dev, unsigned int reg, | 17 | TP_PROTO(struct regmap *map, unsigned int reg, |
19 | unsigned int val), | 18 | unsigned int val), |
20 | 19 | ||
21 | TP_ARGS(dev, reg, val), | 20 | TP_ARGS(map, reg, val), |
22 | 21 | ||
23 | TP_STRUCT__entry( | 22 | TP_STRUCT__entry( |
24 | __string( name, dev_name(dev) ) | 23 | __string( name, regmap_name(map) ) |
25 | __field( unsigned int, reg ) | 24 | __field( unsigned int, reg ) |
26 | __field( unsigned int, val ) | 25 | __field( unsigned int, val ) |
27 | ), | 26 | ), |
28 | 27 | ||
29 | TP_fast_assign( | 28 | TP_fast_assign( |
30 | __assign_str(name, dev_name(dev)); | 29 | __assign_str(name, regmap_name(map)); |
31 | __entry->reg = reg; | 30 | __entry->reg = reg; |
32 | __entry->val = val; | 31 | __entry->val = val; |
33 | ), | 32 | ), |
@@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg, | |||
39 | 38 | ||
40 | DEFINE_EVENT(regmap_reg, regmap_reg_write, | 39 | DEFINE_EVENT(regmap_reg, regmap_reg_write, |
41 | 40 | ||
42 | TP_PROTO(struct device *dev, unsigned int reg, | 41 | TP_PROTO(struct regmap *map, unsigned int reg, |
43 | unsigned int val), | 42 | unsigned int val), |
44 | 43 | ||
45 | TP_ARGS(dev, reg, val) | 44 | TP_ARGS(map, reg, val) |
46 | 45 | ||
47 | ); | 46 | ); |
48 | 47 | ||
49 | DEFINE_EVENT(regmap_reg, regmap_reg_read, | 48 | DEFINE_EVENT(regmap_reg, regmap_reg_read, |
50 | 49 | ||
51 | TP_PROTO(struct device *dev, unsigned int reg, | 50 | TP_PROTO(struct regmap *map, unsigned int reg, |
52 | unsigned int val), | 51 | unsigned int val), |
53 | 52 | ||
54 | TP_ARGS(dev, reg, val) | 53 | TP_ARGS(map, reg, val) |
55 | 54 | ||
56 | ); | 55 | ); |
57 | 56 | ||
58 | DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, | 57 | DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, |
59 | 58 | ||
60 | TP_PROTO(struct device *dev, unsigned int reg, | 59 | TP_PROTO(struct regmap *map, unsigned int reg, |
61 | unsigned int val), | 60 | unsigned int val), |
62 | 61 | ||
63 | TP_ARGS(dev, reg, val) | 62 | TP_ARGS(map, reg, val) |
64 | 63 | ||
65 | ); | 64 | ); |
66 | 65 | ||
67 | DECLARE_EVENT_CLASS(regmap_block, | 66 | DECLARE_EVENT_CLASS(regmap_block, |
68 | 67 | ||
69 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 68 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
70 | 69 | ||
71 | TP_ARGS(dev, reg, count), | 70 | TP_ARGS(map, reg, count), |
72 | 71 | ||
73 | TP_STRUCT__entry( | 72 | TP_STRUCT__entry( |
74 | __string( name, dev_name(dev) ) | 73 | __string( name, regmap_name(map) ) |
75 | __field( unsigned int, reg ) | 74 | __field( unsigned int, reg ) |
76 | __field( int, count ) | 75 | __field( int, count ) |
77 | ), | 76 | ), |
78 | 77 | ||
79 | TP_fast_assign( | 78 | TP_fast_assign( |
80 | __assign_str(name, dev_name(dev)); | 79 | __assign_str(name, regmap_name(map)); |
81 | __entry->reg = reg; | 80 | __entry->reg = reg; |
82 | __entry->count = count; | 81 | __entry->count = count; |
83 | ), | 82 | ), |
@@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block, | |||
89 | 88 | ||
90 | DEFINE_EVENT(regmap_block, regmap_hw_read_start, | 89 | DEFINE_EVENT(regmap_block, regmap_hw_read_start, |
91 | 90 | ||
92 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 91 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
93 | 92 | ||
94 | TP_ARGS(dev, reg, count) | 93 | TP_ARGS(map, reg, count) |
95 | ); | 94 | ); |
96 | 95 | ||
97 | DEFINE_EVENT(regmap_block, regmap_hw_read_done, | 96 | DEFINE_EVENT(regmap_block, regmap_hw_read_done, |
98 | 97 | ||
99 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 98 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
100 | 99 | ||
101 | TP_ARGS(dev, reg, count) | 100 | TP_ARGS(map, reg, count) |
102 | ); | 101 | ); |
103 | 102 | ||
104 | DEFINE_EVENT(regmap_block, regmap_hw_write_start, | 103 | DEFINE_EVENT(regmap_block, regmap_hw_write_start, |
105 | 104 | ||
106 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 105 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
107 | 106 | ||
108 | TP_ARGS(dev, reg, count) | 107 | TP_ARGS(map, reg, count) |
109 | ); | 108 | ); |
110 | 109 | ||
111 | DEFINE_EVENT(regmap_block, regmap_hw_write_done, | 110 | DEFINE_EVENT(regmap_block, regmap_hw_write_done, |
112 | 111 | ||
113 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 112 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
114 | 113 | ||
115 | TP_ARGS(dev, reg, count) | 114 | TP_ARGS(map, reg, count) |
116 | ); | 115 | ); |
117 | 116 | ||
118 | TRACE_EVENT(regcache_sync, | 117 | TRACE_EVENT(regcache_sync, |
119 | 118 | ||
120 | TP_PROTO(struct device *dev, const char *type, | 119 | TP_PROTO(struct regmap *map, const char *type, |
121 | const char *status), | 120 | const char *status), |
122 | 121 | ||
123 | TP_ARGS(dev, type, status), | 122 | TP_ARGS(map, type, status), |
124 | 123 | ||
125 | TP_STRUCT__entry( | 124 | TP_STRUCT__entry( |
126 | __string( name, dev_name(dev) ) | 125 | __string( name, regmap_name(map) ) |
127 | __string( status, status ) | 126 | __string( status, status ) |
128 | __string( type, type ) | 127 | __string( type, type ) |
129 | __field( int, type ) | 128 | __field( int, type ) |
130 | ), | 129 | ), |
131 | 130 | ||
132 | TP_fast_assign( | 131 | TP_fast_assign( |
133 | __assign_str(name, dev_name(dev)); | 132 | __assign_str(name, regmap_name(map)); |
134 | __assign_str(status, status); | 133 | __assign_str(status, status); |
135 | __assign_str(type, type); | 134 | __assign_str(type, type); |
136 | ), | 135 | ), |
@@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync, | |||
141 | 140 | ||
142 | DECLARE_EVENT_CLASS(regmap_bool, | 141 | DECLARE_EVENT_CLASS(regmap_bool, |
143 | 142 | ||
144 | TP_PROTO(struct device *dev, bool flag), | 143 | TP_PROTO(struct regmap *map, bool flag), |
145 | 144 | ||
146 | TP_ARGS(dev, flag), | 145 | TP_ARGS(map, flag), |
147 | 146 | ||
148 | TP_STRUCT__entry( | 147 | TP_STRUCT__entry( |
149 | __string( name, dev_name(dev) ) | 148 | __string( name, regmap_name(map) ) |
150 | __field( int, flag ) | 149 | __field( int, flag ) |
151 | ), | 150 | ), |
152 | 151 | ||
153 | TP_fast_assign( | 152 | TP_fast_assign( |
154 | __assign_str(name, dev_name(dev)); | 153 | __assign_str(name, regmap_name(map)); |
155 | __entry->flag = flag; | 154 | __entry->flag = flag; |
156 | ), | 155 | ), |
157 | 156 | ||
@@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool, | |||
161 | 160 | ||
162 | DEFINE_EVENT(regmap_bool, regmap_cache_only, | 161 | DEFINE_EVENT(regmap_bool, regmap_cache_only, |
163 | 162 | ||
164 | TP_PROTO(struct device *dev, bool flag), | 163 | TP_PROTO(struct regmap *map, bool flag), |
165 | 164 | ||
166 | TP_ARGS(dev, flag) | 165 | TP_ARGS(map, flag) |
167 | 166 | ||
168 | ); | 167 | ); |
169 | 168 | ||
170 | DEFINE_EVENT(regmap_bool, regmap_cache_bypass, | 169 | DEFINE_EVENT(regmap_bool, regmap_cache_bypass, |
171 | 170 | ||
172 | TP_PROTO(struct device *dev, bool flag), | 171 | TP_PROTO(struct regmap *map, bool flag), |
173 | 172 | ||
174 | TP_ARGS(dev, flag) | 173 | TP_ARGS(map, flag) |
175 | 174 | ||
176 | ); | 175 | ); |
177 | 176 | ||
178 | DECLARE_EVENT_CLASS(regmap_async, | 177 | DECLARE_EVENT_CLASS(regmap_async, |
179 | 178 | ||
180 | TP_PROTO(struct device *dev), | 179 | TP_PROTO(struct regmap *map), |
181 | 180 | ||
182 | TP_ARGS(dev), | 181 | TP_ARGS(map), |
183 | 182 | ||
184 | TP_STRUCT__entry( | 183 | TP_STRUCT__entry( |
185 | __string( name, dev_name(dev) ) | 184 | __string( name, regmap_name(map) ) |
186 | ), | 185 | ), |
187 | 186 | ||
188 | TP_fast_assign( | 187 | TP_fast_assign( |
189 | __assign_str(name, dev_name(dev)); | 188 | __assign_str(name, regmap_name(map)); |
190 | ), | 189 | ), |
191 | 190 | ||
192 | TP_printk("%s", __get_str(name)) | 191 | TP_printk("%s", __get_str(name)) |
@@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async, | |||
194 | 193 | ||
195 | DEFINE_EVENT(regmap_block, regmap_async_write_start, | 194 | DEFINE_EVENT(regmap_block, regmap_async_write_start, |
196 | 195 | ||
197 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 196 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
198 | 197 | ||
199 | TP_ARGS(dev, reg, count) | 198 | TP_ARGS(map, reg, count) |
200 | ); | 199 | ); |
201 | 200 | ||
202 | DEFINE_EVENT(regmap_async, regmap_async_io_complete, | 201 | DEFINE_EVENT(regmap_async, regmap_async_io_complete, |
203 | 202 | ||
204 | TP_PROTO(struct device *dev), | 203 | TP_PROTO(struct regmap *map), |
205 | 204 | ||
206 | TP_ARGS(dev) | 205 | TP_ARGS(map) |
207 | 206 | ||
208 | ); | 207 | ); |
209 | 208 | ||
210 | DEFINE_EVENT(regmap_async, regmap_async_complete_start, | 209 | DEFINE_EVENT(regmap_async, regmap_async_complete_start, |
211 | 210 | ||
212 | TP_PROTO(struct device *dev), | 211 | TP_PROTO(struct regmap *map), |
213 | 212 | ||
214 | TP_ARGS(dev) | 213 | TP_ARGS(map) |
215 | 214 | ||
216 | ); | 215 | ); |
217 | 216 | ||
218 | DEFINE_EVENT(regmap_async, regmap_async_complete_done, | 217 | DEFINE_EVENT(regmap_async, regmap_async_complete_done, |
219 | 218 | ||
220 | TP_PROTO(struct device *dev), | 219 | TP_PROTO(struct regmap *map), |
221 | 220 | ||
222 | TP_ARGS(dev) | 221 | TP_ARGS(map) |
223 | 222 | ||
224 | ); | 223 | ); |
225 | 224 | ||
226 | TRACE_EVENT(regcache_drop_region, | 225 | TRACE_EVENT(regcache_drop_region, |
227 | 226 | ||
228 | TP_PROTO(struct device *dev, unsigned int from, | 227 | TP_PROTO(struct regmap *map, unsigned int from, |
229 | unsigned int to), | 228 | unsigned int to), |
230 | 229 | ||
231 | TP_ARGS(dev, from, to), | 230 | TP_ARGS(map, from, to), |
232 | 231 | ||
233 | TP_STRUCT__entry( | 232 | TP_STRUCT__entry( |
234 | __string( name, dev_name(dev) ) | 233 | __string( name, regmap_name(map) ) |
235 | __field( unsigned int, from ) | 234 | __field( unsigned int, from ) |
236 | __field( unsigned int, to ) | 235 | __field( unsigned int, to ) |
237 | ), | 236 | ), |
238 | 237 | ||
239 | TP_fast_assign( | 238 | TP_fast_assign( |
240 | __assign_str(name, dev_name(dev)); | 239 | __assign_str(name, regmap_name(map)); |
241 | __entry->from = from; | 240 | __entry->from = from; |
242 | __entry->to = to; | 241 | __entry->to = to; |
243 | ), | 242 | ), |
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index b0a813079852..2f62ab2d7bf9 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h | |||
@@ -973,7 +973,8 @@ struct input_keymap_entry { | |||
973 | */ | 973 | */ |
974 | #define MT_TOOL_FINGER 0 | 974 | #define MT_TOOL_FINGER 0 |
975 | #define MT_TOOL_PEN 1 | 975 | #define MT_TOOL_PEN 1 |
976 | #define MT_TOOL_MAX 1 | 976 | #define MT_TOOL_PALM 2 |
977 | #define MT_TOOL_MAX 2 | ||
977 | 978 | ||
978 | /* | 979 | /* |
979 | * Values describing the status of a force-feedback effect | 980 | * Values describing the status of a force-feedback effect |
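Editor's note: MT_TOOL_PALM lets a multitouch driver flag a contact as a palm rather than a finger or pen. A minimal sketch of reporting it through the usual MT slot protocol; the function name, and the assumption that the device already uses input_mt_* slots, are illustrative.

#include <linux/input/mt.h>

static void example_report_palm(struct input_dev *dev, int slot)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_state(dev, MT_TOOL_PALM, true);
	input_sync(dev);
}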
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h index 4742f2cb42f2..d3bd6ffec041 100644 --- a/include/uapi/linux/nfsd/export.h +++ b/include/uapi/linux/nfsd/export.h | |||
@@ -47,7 +47,7 @@ | |||
47 | * exported filesystem. | 47 | * exported filesystem. |
48 | */ | 48 | */ |
49 | #define NFSEXP_V4ROOT 0x10000 | 49 | #define NFSEXP_V4ROOT 0x10000 |
50 | #define NFSEXP_NOPNFS 0x20000 | 50 | #define NFSEXP_PNFS 0x20000 |
51 | 51 | ||
52 | /* All flags that we claim to support. (Note we don't support NOACL.) */ | 52 | /* All flags that we claim to support. (Note we don't support NOACL.) */ |
53 | #define NFSEXP_ALLFLAGS 0x3FE7F | 53 | #define NFSEXP_ALLFLAGS 0x3FE7F |
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h index 5e0d0ed61cf3..25331f9faa76 100644 --- a/include/uapi/linux/serial.h +++ b/include/uapi/linux/serial.h | |||
@@ -65,6 +65,10 @@ struct serial_struct { | |||
65 | #define SERIAL_IO_PORT 0 | 65 | #define SERIAL_IO_PORT 0 |
66 | #define SERIAL_IO_HUB6 1 | 66 | #define SERIAL_IO_HUB6 1 |
67 | #define SERIAL_IO_MEM 2 | 67 | #define SERIAL_IO_MEM 2 |
68 | #define SERIAL_IO_MEM32 3 | ||
69 | #define SERIAL_IO_AU 4 | ||
70 | #define SERIAL_IO_TSI 5 | ||
71 | #define SERIAL_IO_MEM32BE 6 | ||
68 | 72 | ||
69 | #define UART_CLEAR_FIFO 0x01 | 73 | #define UART_CLEAR_FIFO 0x01 |
70 | #define UART_USE_FIFO 0x02 | 74 | #define UART_USE_FIFO 0x02 |
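Editor's note: the new SERIAL_IO_* values export further iomem access types to userspace. A sketch of checking for one of them from userspace via TIOCGSERIAL; error handling is trimmed and the function name is illustrative.

#include <sys/ioctl.h>
#include <linux/serial.h>

static int example_is_mem32(int fd)
{
	struct serial_struct ss;

	if (ioctl(fd, TIOCGSERIAL, &ss) < 0)
		return -1;
	return ss.io_type == SERIAL_IO_MEM32;
}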
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild index 19d5219b0b99..242cf0c6e33d 100644 --- a/include/uapi/linux/tc_act/Kbuild +++ b/include/uapi/linux/tc_act/Kbuild | |||
@@ -9,3 +9,4 @@ header-y += tc_pedit.h | |||
9 | header-y += tc_skbedit.h | 9 | header-y += tc_skbedit.h |
10 | header-y += tc_vlan.h | 10 | header-y += tc_vlan.h |
11 | header-y += tc_bpf.h | 11 | header-y += tc_bpf.h |
12 | header-y += tc_connmark.h | ||
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index 3c53eec4ae22..19c66fcbab8a 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h | |||
@@ -60,7 +60,7 @@ struct virtio_blk_config { | |||
60 | __u32 size_max; | 60 | __u32 size_max; |
61 | /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ | 61 | /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ |
62 | __u32 seg_max; | 62 | __u32 seg_max; |
63 | /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */ | 63 | /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */ |
64 | struct virtio_blk_geometry { | 64 | struct virtio_blk_geometry { |
65 | __u16 cylinders; | 65 | __u16 cylinders; |
66 | __u8 heads; | 66 | __u8 heads; |
@@ -119,7 +119,11 @@ struct virtio_blk_config { | |||
119 | #define VIRTIO_BLK_T_BARRIER 0x80000000 | 119 | #define VIRTIO_BLK_T_BARRIER 0x80000000 |
120 | #endif /* !VIRTIO_BLK_NO_LEGACY */ | 120 | #endif /* !VIRTIO_BLK_NO_LEGACY */ |
121 | 121 | ||
122 | /* This is the first element of the read scatter-gather list. */ | 122 | /* |
123 | * This comes first in the read scatter-gather list. | ||
124 | * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, | ||
125 | * this is the first element of the read scatter-gather list. | ||
126 | */ | ||
123 | struct virtio_blk_outhdr { | 127 | struct virtio_blk_outhdr { |
124 | /* VIRTIO_BLK_T* */ | 128 | /* VIRTIO_BLK_T* */ |
125 | __virtio32 type; | 129 | __virtio32 type; |
diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h index 42b9370771b0..cc18ef8825c0 100644 --- a/include/uapi/linux/virtio_scsi.h +++ b/include/uapi/linux/virtio_scsi.h | |||
@@ -29,8 +29,16 @@ | |||
29 | 29 | ||
30 | #include <linux/virtio_types.h> | 30 | #include <linux/virtio_types.h> |
31 | 31 | ||
32 | #define VIRTIO_SCSI_CDB_SIZE 32 | 32 | /* Default values of the CDB and sense data size configuration fields */ |
33 | #define VIRTIO_SCSI_SENSE_SIZE 96 | 33 | #define VIRTIO_SCSI_CDB_DEFAULT_SIZE 32 |
34 | #define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96 | ||
35 | |||
36 | #ifndef VIRTIO_SCSI_CDB_SIZE | ||
37 | #define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE | ||
38 | #endif | ||
39 | #ifndef VIRTIO_SCSI_SENSE_SIZE | ||
40 | #define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE | ||
41 | #endif | ||
34 | 42 | ||
35 | /* SCSI command request, followed by data-out */ | 43 | /* SCSI command request, followed by data-out */ |
36 | struct virtio_scsi_cmd_req { | 44 | struct virtio_scsi_cmd_req { |
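Editor's note: with the #ifndef guards above, a consumer of this header can select larger CDB or sense buffer sizes at build time while the defaults remain 32 and 96 bytes. A sketch of such an override; the 64-byte value is purely illustrative.

#define VIRTIO_SCSI_CDB_SIZE 64		/* must precede the include */
#include <linux/virtio_scsi.h>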
diff --git a/include/video/omapdss.h b/include/video/omapdss.h index 60de61fea8e3..c8ed15daad02 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h | |||
@@ -689,6 +689,7 @@ struct omapdss_dsi_ops { | |||
689 | }; | 689 | }; |
690 | 690 | ||
691 | struct omap_dss_device { | 691 | struct omap_dss_device { |
692 | struct kobject kobj; | ||
692 | struct device *dev; | 693 | struct device *dev; |
693 | 694 | ||
694 | struct module *owner; | 695 | struct module *owner; |
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 7491ee5d8164..83338210ee04 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h | |||
@@ -46,4 +46,30 @@ static inline efi_system_table_t __init *xen_efi_probe(void) | |||
46 | } | 46 | } |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #ifdef CONFIG_PREEMPT | ||
50 | |||
51 | static inline void xen_preemptible_hcall_begin(void) | ||
52 | { | ||
53 | } | ||
54 | |||
55 | static inline void xen_preemptible_hcall_end(void) | ||
56 | { | ||
57 | } | ||
58 | |||
59 | #else | ||
60 | |||
61 | DECLARE_PER_CPU(bool, xen_in_preemptible_hcall); | ||
62 | |||
63 | static inline void xen_preemptible_hcall_begin(void) | ||
64 | { | ||
65 | __this_cpu_write(xen_in_preemptible_hcall, true); | ||
66 | } | ||
67 | |||
68 | static inline void xen_preemptible_hcall_end(void) | ||
69 | { | ||
70 | __this_cpu_write(xen_in_preemptible_hcall, false); | ||
71 | } | ||
72 | |||
73 | #endif /* CONFIG_PREEMPT */ | ||
74 | |||
49 | #endif /* INCLUDE_XEN_OPS_H */ | 75 | #endif /* INCLUDE_XEN_OPS_H */ |
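Editor's note: xen_preemptible_hcall_begin()/end() bracket a potentially long-running hypercall: on kernels without CONFIG_PREEMPT they set a per-CPU flag that lets the critical section be rescheduled, and with CONFIG_PREEMPT they compile to no-ops. A sketch of the intended bracketing; do_long_hypercall() is a placeholder, not a real interface.

#include <xen/xen-ops.h>

static long example_issue_hypercall(void *arg)
{
	long ret;

	xen_preemptible_hcall_begin();
	ret = do_long_hypercall(arg);	/* placeholder for the actual hypercall */
	xen_preemptible_hcall_end();

	return ret;
}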
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index b78f21caf55a..b0f1c9e5d687 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h | |||
@@ -114,9 +114,9 @@ int __must_check __xenbus_register_backend(struct xenbus_driver *drv, | |||
114 | const char *mod_name); | 114 | const char *mod_name); |
115 | 115 | ||
116 | #define xenbus_register_frontend(drv) \ | 116 | #define xenbus_register_frontend(drv) \ |
117 | __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); | 117 | __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME) |
118 | #define xenbus_register_backend(drv) \ | 118 | #define xenbus_register_backend(drv) \ |
119 | __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); | 119 | __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME) |
120 | 120 | ||
121 | void xenbus_unregister_driver(struct xenbus_driver *drv); | 121 | void xenbus_unregister_driver(struct xenbus_driver *drv); |
122 | 122 | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 1d1fe9361d29..fc7f4748d34a 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr, | |||
548 | 548 | ||
549 | rcu_read_lock(); | 549 | rcu_read_lock(); |
550 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { | 550 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
551 | if (cp == root_cs) | ||
552 | continue; | ||
553 | |||
554 | /* skip the whole subtree if @cp doesn't have any CPU */ | 551 | /* skip the whole subtree if @cp doesn't have any CPU */ |
555 | if (cpumask_empty(cp->cpus_allowed)) { | 552 | if (cpumask_empty(cp->cpus_allowed)) { |
556 | pos_css = css_rightmost_descendant(pos_css); | 553 | pos_css = css_rightmost_descendant(pos_css); |
@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) | |||
873 | * If it becomes empty, inherit the effective mask of the | 870 | * If it becomes empty, inherit the effective mask of the |
874 | * parent, which is guaranteed to have some CPUs. | 871 | * parent, which is guaranteed to have some CPUs. |
875 | */ | 872 | */ |
876 | if (cpumask_empty(new_cpus)) | 873 | if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus)) |
877 | cpumask_copy(new_cpus, parent->effective_cpus); | 874 | cpumask_copy(new_cpus, parent->effective_cpus); |
878 | 875 | ||
879 | /* Skip the whole subtree if the cpumask remains the same. */ | 876 | /* Skip the whole subtree if the cpumask remains the same. */ |
@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) | |||
1129 | * If it becomes empty, inherit the effective mask of the | 1126 | * If it becomes empty, inherit the effective mask of the |
1130 | * parent, which is guaranteed to have some MEMs. | 1127 | * parent, which is guaranteed to have some MEMs. |
1131 | */ | 1128 | */ |
1132 | if (nodes_empty(*new_mems)) | 1129 | if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems)) |
1133 | *new_mems = parent->effective_mems; | 1130 | *new_mems = parent->effective_mems; |
1134 | 1131 | ||
1135 | /* Skip the whole subtree if the nodemask remains the same. */ | 1132 | /* Skip the whole subtree if the nodemask remains the same. */ |
@@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) | |||
1979 | 1976 | ||
1980 | spin_lock_irq(&callback_lock); | 1977 | spin_lock_irq(&callback_lock); |
1981 | cs->mems_allowed = parent->mems_allowed; | 1978 | cs->mems_allowed = parent->mems_allowed; |
1979 | cs->effective_mems = parent->mems_allowed; | ||
1982 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); | 1980 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
1981 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); | ||
1983 | spin_unlock_irq(&callback_lock); | 1982 | spin_unlock_irq(&callback_lock); |
1984 | out_unlock: | 1983 | out_unlock: |
1985 | mutex_unlock(&cpuset_mutex); | 1984 | mutex_unlock(&cpuset_mutex); |
diff --git a/kernel/events/core.c b/kernel/events/core.c index f04daabfd1cf..2fabc0627165 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -3591,7 +3591,7 @@ static void put_event(struct perf_event *event) | |||
3591 | ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); | 3591 | ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); |
3592 | WARN_ON_ONCE(ctx->parent_ctx); | 3592 | WARN_ON_ONCE(ctx->parent_ctx); |
3593 | perf_remove_from_context(event, true); | 3593 | perf_remove_from_context(event, true); |
3594 | mutex_unlock(&ctx->mutex); | 3594 | perf_event_ctx_unlock(event, ctx); |
3595 | 3595 | ||
3596 | _free_event(event); | 3596 | _free_event(event); |
3597 | } | 3597 | } |
@@ -4574,6 +4574,13 @@ static void perf_pending_event(struct irq_work *entry) | |||
4574 | { | 4574 | { |
4575 | struct perf_event *event = container_of(entry, | 4575 | struct perf_event *event = container_of(entry, |
4576 | struct perf_event, pending); | 4576 | struct perf_event, pending); |
4577 | int rctx; | ||
4578 | |||
4579 | rctx = perf_swevent_get_recursion_context(); | ||
4580 | /* | ||
4581 | * If we 'fail' here, that's OK, it means recursion is already disabled | ||
4582 | * and we won't recurse 'further'. | ||
4583 | */ | ||
4577 | 4584 | ||
4578 | if (event->pending_disable) { | 4585 | if (event->pending_disable) { |
4579 | event->pending_disable = 0; | 4586 | event->pending_disable = 0; |
@@ -4584,6 +4591,9 @@ static void perf_pending_event(struct irq_work *entry) | |||
4584 | event->pending_wakeup = 0; | 4591 | event->pending_wakeup = 0; |
4585 | perf_event_wakeup(event); | 4592 | perf_event_wakeup(event); |
4586 | } | 4593 | } |
4594 | |||
4595 | if (rctx >= 0) | ||
4596 | perf_swevent_put_recursion_context(rctx); | ||
4587 | } | 4597 | } |
4588 | 4598 | ||
4589 | /* | 4599 | /* |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 196a06fbc122..886d09e691d5 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -1474,8 +1474,13 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
1474 | * otherwise we'll have trouble later trying to figure out | 1474 | * otherwise we'll have trouble later trying to figure out |
1475 | * which interrupt is which (messes up the interrupt freeing | 1475 | * which interrupt is which (messes up the interrupt freeing |
1476 | * logic etc). | 1476 | * logic etc). |
1477 | * | ||
1478 | * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and | ||
1479 | * it cannot be set along with IRQF_NO_SUSPEND. | ||
1477 | */ | 1480 | */ |
1478 | if ((irqflags & IRQF_SHARED) && !dev_id) | 1481 | if (((irqflags & IRQF_SHARED) && !dev_id) || |
1482 | (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || | ||
1483 | ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) | ||
1479 | return -EINVAL; | 1484 | return -EINVAL; |
1480 | 1485 | ||
1481 | desc = irq_to_desc(irq); | 1486 | desc = irq_to_desc(irq); |
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 3ca532592704..5204a6d1b985 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c | |||
@@ -43,9 +43,12 @@ void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) | |||
43 | 43 | ||
44 | if (action->flags & IRQF_NO_SUSPEND) | 44 | if (action->flags & IRQF_NO_SUSPEND) |
45 | desc->no_suspend_depth++; | 45 | desc->no_suspend_depth++; |
46 | else if (action->flags & IRQF_COND_SUSPEND) | ||
47 | desc->cond_suspend_depth++; | ||
46 | 48 | ||
47 | WARN_ON_ONCE(desc->no_suspend_depth && | 49 | WARN_ON_ONCE(desc->no_suspend_depth && |
48 | desc->no_suspend_depth != desc->nr_actions); | 50 | (desc->no_suspend_depth + |
51 | desc->cond_suspend_depth) != desc->nr_actions); | ||
49 | } | 52 | } |
50 | 53 | ||
51 | /* | 54 | /* |
@@ -61,6 +64,8 @@ void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) | |||
61 | 64 | ||
62 | if (action->flags & IRQF_NO_SUSPEND) | 65 | if (action->flags & IRQF_NO_SUSPEND) |
63 | desc->no_suspend_depth--; | 66 | desc->no_suspend_depth--; |
67 | else if (action->flags & IRQF_COND_SUSPEND) | ||
68 | desc->cond_suspend_depth--; | ||
64 | } | 69 | } |
65 | 70 | ||
66 | static bool suspend_device_irq(struct irq_desc *desc, int irq) | 71 | static bool suspend_device_irq(struct irq_desc *desc, int irq) |
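Editor's note: per the depth accounting above and the validation added to request_threaded_irq() earlier, IRQF_COND_SUSPEND only makes sense on shared lines and must not be combined with IRQF_NO_SUSPEND. A sketch of a conforming request; the handler and names are illustrative.

#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *dev_id)
{
	/* Shared line whose handler tolerates running while suspend is in progress. */
	return request_irq(irq, example_handler,
			   IRQF_SHARED | IRQF_COND_SUSPEND,
			   "example-cond-suspend", dev_id);
}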
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index ff7f47d026ac..3f9f1d6b4c2e 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c | |||
@@ -89,16 +89,28 @@ static bool klp_is_object_loaded(struct klp_object *obj) | |||
89 | /* sets obj->mod if object is not vmlinux and module is found */ | 89 | /* sets obj->mod if object is not vmlinux and module is found */ |
90 | static void klp_find_object_module(struct klp_object *obj) | 90 | static void klp_find_object_module(struct klp_object *obj) |
91 | { | 91 | { |
92 | struct module *mod; | ||
93 | |||
92 | if (!klp_is_module(obj)) | 94 | if (!klp_is_module(obj)) |
93 | return; | 95 | return; |
94 | 96 | ||
95 | mutex_lock(&module_mutex); | 97 | mutex_lock(&module_mutex); |
96 | /* | 98 | /* |
97 | * We don't need to take a reference on the module here because we have | 99 | * We do not want to block removal of patched modules and therefore |
98 | * the klp_mutex, which is also taken by the module notifier. This | 100 | * we do not take a reference here. The patches are removed by |
99 | * prevents any module from unloading until we release the klp_mutex. | 101 | * a going module handler instead. |
102 | */ | ||
103 | mod = find_module(obj->name); | ||
104 | /* | ||
105 | * Do not mess work of the module coming and going notifiers. | ||
106 | * Note that the patch might still be needed before the going handler | ||
107 | * is called. Module functions can be called even in the GOING state | ||
108 | * until mod->exit() finishes. This is especially important for | ||
109 | * patches that modify semantic of the functions. | ||
100 | */ | 110 | */ |
101 | obj->mod = find_module(obj->name); | 111 | if (mod && mod->klp_alive) |
112 | obj->mod = mod; | ||
113 | |||
102 | mutex_unlock(&module_mutex); | 114 | mutex_unlock(&module_mutex); |
103 | } | 115 | } |
104 | 116 | ||
@@ -248,11 +260,12 @@ static int klp_find_external_symbol(struct module *pmod, const char *name, | |||
248 | /* first, check if it's an exported symbol */ | 260 | /* first, check if it's an exported symbol */ |
249 | preempt_disable(); | 261 | preempt_disable(); |
250 | sym = find_symbol(name, NULL, NULL, true, true); | 262 | sym = find_symbol(name, NULL, NULL, true, true); |
251 | preempt_enable(); | ||
252 | if (sym) { | 263 | if (sym) { |
253 | *addr = sym->value; | 264 | *addr = sym->value; |
265 | preempt_enable(); | ||
254 | return 0; | 266 | return 0; |
255 | } | 267 | } |
268 | preempt_enable(); | ||
256 | 269 | ||
257 | /* otherwise check if it's in another .o within the patch module */ | 270 | /* otherwise check if it's in another .o within the patch module */ |
258 | return klp_find_object_symbol(pmod->name, name, addr); | 271 | return klp_find_object_symbol(pmod->name, name, addr); |
@@ -314,12 +327,12 @@ static void notrace klp_ftrace_handler(unsigned long ip, | |||
314 | rcu_read_lock(); | 327 | rcu_read_lock(); |
315 | func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, | 328 | func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, |
316 | stack_node); | 329 | stack_node); |
317 | rcu_read_unlock(); | ||
318 | |||
319 | if (WARN_ON_ONCE(!func)) | 330 | if (WARN_ON_ONCE(!func)) |
320 | return; | 331 | goto unlock; |
321 | 332 | ||
322 | klp_arch_set_pc(regs, (unsigned long)func->new_func); | 333 | klp_arch_set_pc(regs, (unsigned long)func->new_func); |
334 | unlock: | ||
335 | rcu_read_unlock(); | ||
323 | } | 336 | } |
324 | 337 | ||
325 | static int klp_disable_func(struct klp_func *func) | 338 | static int klp_disable_func(struct klp_func *func) |
@@ -731,7 +744,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) | |||
731 | func->state = KLP_DISABLED; | 744 | func->state = KLP_DISABLED; |
732 | 745 | ||
733 | return kobject_init_and_add(&func->kobj, &klp_ktype_func, | 746 | return kobject_init_and_add(&func->kobj, &klp_ktype_func, |
734 | obj->kobj, func->old_name); | 747 | obj->kobj, "%s", func->old_name); |
735 | } | 748 | } |
736 | 749 | ||
737 | /* parts of the initialization that is done only when the object is loaded */ | 750 | /* parts of the initialization that is done only when the object is loaded */ |
@@ -766,6 +779,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) | |||
766 | return -EINVAL; | 779 | return -EINVAL; |
767 | 780 | ||
768 | obj->state = KLP_DISABLED; | 781 | obj->state = KLP_DISABLED; |
782 | obj->mod = NULL; | ||
769 | 783 | ||
770 | klp_find_object_module(obj); | 784 | klp_find_object_module(obj); |
771 | 785 | ||
@@ -807,7 +821,7 @@ static int klp_init_patch(struct klp_patch *patch) | |||
807 | patch->state = KLP_DISABLED; | 821 | patch->state = KLP_DISABLED; |
808 | 822 | ||
809 | ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch, | 823 | ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch, |
810 | klp_root_kobj, patch->mod->name); | 824 | klp_root_kobj, "%s", patch->mod->name); |
811 | if (ret) | 825 | if (ret) |
812 | goto unlock; | 826 | goto unlock; |
813 | 827 | ||
@@ -960,6 +974,15 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action, | |||
960 | 974 | ||
961 | mutex_lock(&klp_mutex); | 975 | mutex_lock(&klp_mutex); |
962 | 976 | ||
977 | /* | ||
978 | * Each module has to know that the notifier has been called. | ||
979 | * We never know what module will get patched by a new patch. | ||
980 | */ | ||
981 | if (action == MODULE_STATE_COMING) | ||
982 | mod->klp_alive = true; | ||
983 | else /* MODULE_STATE_GOING */ | ||
984 | mod->klp_alive = false; | ||
985 | |||
963 | list_for_each_entry(patch, &klp_patches, list) { | 986 | list_for_each_entry(patch, &klp_patches, list) { |
964 | for (obj = patch->objs; obj->funcs; obj++) { | 987 | for (obj = patch->objs; obj->funcs; obj++) { |
965 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) | 988 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 88d0d4420ad2..ba77ab5f64dd 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class) | |||
633 | if (!new_class->name) | 633 | if (!new_class->name) |
634 | return 0; | 634 | return 0; |
635 | 635 | ||
636 | list_for_each_entry(class, &all_lock_classes, lock_entry) { | 636 | list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) { |
637 | if (new_class->key - new_class->subclass == class->key) | 637 | if (new_class->key - new_class->subclass == class->key) |
638 | return class->name_version; | 638 | return class->name_version; |
639 | if (class->name && !strcmp(class->name, new_class->name)) | 639 | if (class->name && !strcmp(class->name, new_class->name)) |
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
700 | hash_head = classhashentry(key); | 700 | hash_head = classhashentry(key); |
701 | 701 | ||
702 | /* | 702 | /* |
703 | * We can walk the hash lockfree, because the hash only | 703 | * We do an RCU walk of the hash, see lockdep_free_key_range(). |
704 | * grows, and we are careful when adding entries to the end: | ||
705 | */ | 704 | */ |
706 | list_for_each_entry(class, hash_head, hash_entry) { | 705 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
706 | return NULL; | ||
707 | |||
708 | list_for_each_entry_rcu(class, hash_head, hash_entry) { | ||
707 | if (class->key == key) { | 709 | if (class->key == key) { |
708 | /* | 710 | /* |
709 | * Huh! same key, different name? Did someone trample | 711 | * Huh! same key, different name? Did someone trample |
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
728 | struct lockdep_subclass_key *key; | 730 | struct lockdep_subclass_key *key; |
729 | struct list_head *hash_head; | 731 | struct list_head *hash_head; |
730 | struct lock_class *class; | 732 | struct lock_class *class; |
731 | unsigned long flags; | 733 | |
734 | DEBUG_LOCKS_WARN_ON(!irqs_disabled()); | ||
732 | 735 | ||
733 | class = look_up_lock_class(lock, subclass); | 736 | class = look_up_lock_class(lock, subclass); |
734 | if (likely(class)) | 737 | if (likely(class)) |
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
750 | key = lock->key->subkeys + subclass; | 753 | key = lock->key->subkeys + subclass; |
751 | hash_head = classhashentry(key); | 754 | hash_head = classhashentry(key); |
752 | 755 | ||
753 | raw_local_irq_save(flags); | ||
754 | if (!graph_lock()) { | 756 | if (!graph_lock()) { |
755 | raw_local_irq_restore(flags); | ||
756 | return NULL; | 757 | return NULL; |
757 | } | 758 | } |
758 | /* | 759 | /* |
759 | * We have to do the hash-walk again, to avoid races | 760 | * We have to do the hash-walk again, to avoid races |
760 | * with another CPU: | 761 | * with another CPU: |
761 | */ | 762 | */ |
762 | list_for_each_entry(class, hash_head, hash_entry) | 763 | list_for_each_entry_rcu(class, hash_head, hash_entry) { |
763 | if (class->key == key) | 764 | if (class->key == key) |
764 | goto out_unlock_set; | 765 | goto out_unlock_set; |
766 | } | ||
767 | |||
765 | /* | 768 | /* |
766 | * Allocate a new key from the static array, and add it to | 769 | * Allocate a new key from the static array, and add it to |
767 | * the hash: | 770 | * the hash: |
768 | */ | 771 | */ |
769 | if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { | 772 | if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { |
770 | if (!debug_locks_off_graph_unlock()) { | 773 | if (!debug_locks_off_graph_unlock()) { |
771 | raw_local_irq_restore(flags); | ||
772 | return NULL; | 774 | return NULL; |
773 | } | 775 | } |
774 | raw_local_irq_restore(flags); | ||
775 | 776 | ||
776 | print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); | 777 | print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); |
777 | dump_stack(); | 778 | dump_stack(); |
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
798 | 799 | ||
799 | if (verbose(class)) { | 800 | if (verbose(class)) { |
800 | graph_unlock(); | 801 | graph_unlock(); |
801 | raw_local_irq_restore(flags); | ||
802 | 802 | ||
803 | printk("\nnew class %p: %s", class->key, class->name); | 803 | printk("\nnew class %p: %s", class->key, class->name); |
804 | if (class->name_version > 1) | 804 | if (class->name_version > 1) |
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
806 | printk("\n"); | 806 | printk("\n"); |
807 | dump_stack(); | 807 | dump_stack(); |
808 | 808 | ||
809 | raw_local_irq_save(flags); | ||
810 | if (!graph_lock()) { | 809 | if (!graph_lock()) { |
811 | raw_local_irq_restore(flags); | ||
812 | return NULL; | 810 | return NULL; |
813 | } | 811 | } |
814 | } | 812 | } |
815 | out_unlock_set: | 813 | out_unlock_set: |
816 | graph_unlock(); | 814 | graph_unlock(); |
817 | raw_local_irq_restore(flags); | ||
818 | 815 | ||
819 | out_set_class_cache: | 816 | out_set_class_cache: |
820 | if (!subclass || force) | 817 | if (!subclass || force) |
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | |||
870 | entry->distance = distance; | 867 | entry->distance = distance; |
871 | entry->trace = *trace; | 868 | entry->trace = *trace; |
872 | /* | 869 | /* |
873 | * Since we never remove from the dependency list, the list can | 870 | * Both allocation and removal are done under the graph lock; but |
874 | * be walked lockless by other CPUs, it's only allocation | 871 | * iteration is under RCU-sched; see look_up_lock_class() and |
875 | * that must be protected by the spinlock. But this also means | 872 | * lockdep_free_key_range(). |
876 | * we must make new entries visible only once writes to the | ||
877 | * entry become visible - hence the RCU op: | ||
878 | */ | 873 | */ |
879 | list_add_tail_rcu(&entry->entry, head); | 874 | list_add_tail_rcu(&entry->entry, head); |
880 | 875 | ||
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry, | |||
1025 | else | 1020 | else |
1026 | head = &lock->class->locks_before; | 1021 | head = &lock->class->locks_before; |
1027 | 1022 | ||
1028 | list_for_each_entry(entry, head, entry) { | 1023 | DEBUG_LOCKS_WARN_ON(!irqs_disabled()); |
1024 | |||
1025 | list_for_each_entry_rcu(entry, head, entry) { | ||
1029 | if (!lock_accessed(entry)) { | 1026 | if (!lock_accessed(entry)) { |
1030 | unsigned int cq_depth; | 1027 | unsigned int cq_depth; |
1031 | mark_lock_accessed(entry, lock); | 1028 | mark_lock_accessed(entry, lock); |
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, | |||
2022 | * We can walk it lock-free, because entries only get added | 2019 | * We can walk it lock-free, because entries only get added |
2023 | * to the hash: | 2020 | * to the hash: |
2024 | */ | 2021 | */ |
2025 | list_for_each_entry(chain, hash_head, entry) { | 2022 | list_for_each_entry_rcu(chain, hash_head, entry) { |
2026 | if (chain->chain_key == chain_key) { | 2023 | if (chain->chain_key == chain_key) { |
2027 | cache_hit: | 2024 | cache_hit: |
2028 | debug_atomic_inc(chain_lookup_hits); | 2025 | debug_atomic_inc(chain_lookup_hits); |
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
2996 | if (unlikely(!debug_locks)) | 2993 | if (unlikely(!debug_locks)) |
2997 | return; | 2994 | return; |
2998 | 2995 | ||
2999 | if (subclass) | 2996 | if (subclass) { |
2997 | unsigned long flags; | ||
2998 | |||
2999 | if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) | ||
3000 | return; | ||
3001 | |||
3002 | raw_local_irq_save(flags); | ||
3003 | current->lockdep_recursion = 1; | ||
3000 | register_lock_class(lock, subclass, 1); | 3004 | register_lock_class(lock, subclass, 1); |
3005 | current->lockdep_recursion = 0; | ||
3006 | raw_local_irq_restore(flags); | ||
3007 | } | ||
3001 | } | 3008 | } |
3002 | EXPORT_SYMBOL_GPL(lockdep_init_map); | 3009 | EXPORT_SYMBOL_GPL(lockdep_init_map); |
3003 | 3010 | ||
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size) | |||
3887 | return addr >= start && addr < start + size; | 3894 | return addr >= start && addr < start + size; |
3888 | } | 3895 | } |
3889 | 3896 | ||
3897 | /* | ||
3898 | * Used in module.c to remove lock classes from memory that is going to be | ||
3899 | * freed; and possibly re-used by other modules. | ||
3900 | * | ||
3901 | * We will have had one sync_sched() before getting here, so we're guaranteed | ||
3902 | * nobody will look up these exact classes -- they're properly dead but still | ||
3903 | * allocated. | ||
3904 | */ | ||
3890 | void lockdep_free_key_range(void *start, unsigned long size) | 3905 | void lockdep_free_key_range(void *start, unsigned long size) |
3891 | { | 3906 | { |
3892 | struct lock_class *class, *next; | 3907 | struct lock_class *class; |
3893 | struct list_head *head; | 3908 | struct list_head *head; |
3894 | unsigned long flags; | 3909 | unsigned long flags; |
3895 | int i; | 3910 | int i; |
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size) | |||
3905 | head = classhash_table + i; | 3920 | head = classhash_table + i; |
3906 | if (list_empty(head)) | 3921 | if (list_empty(head)) |
3907 | continue; | 3922 | continue; |
3908 | list_for_each_entry_safe(class, next, head, hash_entry) { | 3923 | list_for_each_entry_rcu(class, head, hash_entry) { |
3909 | if (within(class->key, start, size)) | 3924 | if (within(class->key, start, size)) |
3910 | zap_class(class); | 3925 | zap_class(class); |
3911 | else if (within(class->name, start, size)) | 3926 | else if (within(class->name, start, size)) |
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size) | |||
3916 | if (locked) | 3931 | if (locked) |
3917 | graph_unlock(); | 3932 | graph_unlock(); |
3918 | raw_local_irq_restore(flags); | 3933 | raw_local_irq_restore(flags); |
3934 | |||
3935 | /* | ||
3936 | * Wait for any possible iterators from look_up_lock_class() to pass | ||
3937 | * before continuing to free the memory they refer to. | ||
3938 | * | ||
3939 | * sync_sched() is sufficient because the read-side is IRQ disable. | ||
3940 | */ | ||
3941 | synchronize_sched(); | ||
3942 | |||
3943 | /* | ||
3944 | * XXX at this point we could return the resources to the pool; | ||
3945 | * instead we leak them. We would need to change to bitmap allocators | ||
3946 | * instead of the linear allocators we have now. | ||
3947 | */ | ||
3919 | } | 3948 | } |
3920 | 3949 | ||
3921 | void lockdep_reset_lock(struct lockdep_map *lock) | 3950 | void lockdep_reset_lock(struct lockdep_map *lock) |
3922 | { | 3951 | { |
3923 | struct lock_class *class, *next; | 3952 | struct lock_class *class; |
3924 | struct list_head *head; | 3953 | struct list_head *head; |
3925 | unsigned long flags; | 3954 | unsigned long flags; |
3926 | int i, j; | 3955 | int i, j; |
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock) | |||
3948 | head = classhash_table + i; | 3977 | head = classhash_table + i; |
3949 | if (list_empty(head)) | 3978 | if (list_empty(head)) |
3950 | continue; | 3979 | continue; |
3951 | list_for_each_entry_safe(class, next, head, hash_entry) { | 3980 | list_for_each_entry_rcu(class, head, hash_entry) { |
3952 | int match = 0; | 3981 | int match = 0; |
3953 | 3982 | ||
3954 | for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) | 3983 | for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) |
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index e16e5542bf13..6357265a31ad 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
@@ -1193,6 +1193,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, | |||
1193 | ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); | 1193 | ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); |
1194 | 1194 | ||
1195 | if (unlikely(ret)) { | 1195 | if (unlikely(ret)) { |
1196 | __set_current_state(TASK_RUNNING); | ||
1196 | if (rt_mutex_has_waiters(lock)) | 1197 | if (rt_mutex_has_waiters(lock)) |
1197 | remove_waiter(lock, &waiter); | 1198 | remove_waiter(lock, &waiter); |
1198 | rt_mutex_handle_deadlock(ret, chwalk, &waiter); | 1199 | rt_mutex_handle_deadlock(ret, chwalk, &waiter); |
diff --git a/kernel/module.c b/kernel/module.c index b34813f725e9..ec53f594e9c9 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -56,7 +56,6 @@ | |||
56 | #include <linux/async.h> | 56 | #include <linux/async.h> |
57 | #include <linux/percpu.h> | 57 | #include <linux/percpu.h> |
58 | #include <linux/kmemleak.h> | 58 | #include <linux/kmemleak.h> |
59 | #include <linux/kasan.h> | ||
60 | #include <linux/jump_label.h> | 59 | #include <linux/jump_label.h> |
61 | #include <linux/pfn.h> | 60 | #include <linux/pfn.h> |
62 | #include <linux/bsearch.h> | 61 | #include <linux/bsearch.h> |
@@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { } | |||
1814 | void __weak module_memfree(void *module_region) | 1813 | void __weak module_memfree(void *module_region) |
1815 | { | 1814 | { |
1816 | vfree(module_region); | 1815 | vfree(module_region); |
1817 | kasan_module_free(module_region); | ||
1818 | } | 1816 | } |
1819 | 1817 | ||
1820 | void __weak module_arch_cleanup(struct module *mod) | 1818 | void __weak module_arch_cleanup(struct module *mod) |
@@ -1867,7 +1865,7 @@ static void free_module(struct module *mod) | |||
1867 | kfree(mod->args); | 1865 | kfree(mod->args); |
1868 | percpu_modfree(mod); | 1866 | percpu_modfree(mod); |
1869 | 1867 | ||
1870 | /* Free lock-classes: */ | 1868 | /* Free lock-classes; relies on the preceding sync_rcu(). */ |
1871 | lockdep_free_key_range(mod->module_core, mod->core_size); | 1869 | lockdep_free_key_range(mod->module_core, mod->core_size); |
1872 | 1870 | ||
1873 | /* Finally, free the core (containing the module structure) */ | 1871 | /* Finally, free the core (containing the module structure) */ |
@@ -2313,11 +2311,13 @@ static void layout_symtab(struct module *mod, struct load_info *info) | |||
2313 | info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); | 2311 | info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); |
2314 | info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); | 2312 | info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); |
2315 | mod->core_size += strtab_size; | 2313 | mod->core_size += strtab_size; |
2314 | mod->core_size = debug_align(mod->core_size); | ||
2316 | 2315 | ||
2317 | /* Put string table section at end of init part of module. */ | 2316 | /* Put string table section at end of init part of module. */ |
2318 | strsect->sh_flags |= SHF_ALLOC; | 2317 | strsect->sh_flags |= SHF_ALLOC; |
2319 | strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, | 2318 | strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, |
2320 | info->index.str) | INIT_OFFSET_MASK; | 2319 | info->index.str) | INIT_OFFSET_MASK; |
2320 | mod->init_size = debug_align(mod->init_size); | ||
2321 | pr_debug("\t%s\n", info->secstrings + strsect->sh_name); | 2321 | pr_debug("\t%s\n", info->secstrings + strsect->sh_name); |
2322 | } | 2322 | } |
2323 | 2323 | ||
@@ -2479,6 +2479,23 @@ static int elf_header_check(struct load_info *info) | |||
2479 | return 0; | 2479 | return 0; |
2480 | } | 2480 | } |
2481 | 2481 | ||
2482 | #define COPY_CHUNK_SIZE (16*PAGE_SIZE) | ||
2483 | |||
2484 | static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) | ||
2485 | { | ||
2486 | do { | ||
2487 | unsigned long n = min(len, COPY_CHUNK_SIZE); | ||
2488 | |||
2489 | if (copy_from_user(dst, usrc, n) != 0) | ||
2490 | return -EFAULT; | ||
2491 | cond_resched(); | ||
2492 | dst += n; | ||
2493 | usrc += n; | ||
2494 | len -= n; | ||
2495 | } while (len); | ||
2496 | return 0; | ||
2497 | } | ||
2498 | |||
2482 | /* Sets info->hdr and info->len. */ | 2499 | /* Sets info->hdr and info->len. */ |
2483 | static int copy_module_from_user(const void __user *umod, unsigned long len, | 2500 | static int copy_module_from_user(const void __user *umod, unsigned long len, |
2484 | struct load_info *info) | 2501 | struct load_info *info) |
@@ -2498,7 +2515,7 @@ static int copy_module_from_user(const void __user *umod, unsigned long len, | |||
2498 | if (!info->hdr) | 2515 | if (!info->hdr) |
2499 | return -ENOMEM; | 2516 | return -ENOMEM; |
2500 | 2517 | ||
2501 | if (copy_from_user(info->hdr, umod, info->len) != 0) { | 2518 | if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { |
2502 | vfree(info->hdr); | 2519 | vfree(info->hdr); |
2503 | return -EFAULT; | 2520 | return -EFAULT; |
2504 | } | 2521 | } |
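
copy_chunked_from_user() above bounds each copy_from_user() call to sixteen pages and drops a cond_resched() between chunks, so copying a huge module image can no longer hog the CPU for the whole transfer. A hedged user-space analogue of the same chunking idea, with sched_yield() standing in for cond_resched() and a hard-coded 4 KiB page size as an assumption:

        #include <sched.h>
        #include <string.h>

        #define CHUNK (16 * 4096)       /* mirrors COPY_CHUNK_SIZE for 4 KiB pages */

        /*
         * Copy len bytes in bounded chunks, yielding between chunks so other
         * runnable tasks are not starved by one long copy.
         */
        static void copy_chunked(void *dst, const void *src, size_t len)
        {
                while (len) {
                        size_t n = len < CHUNK ? len : CHUNK;

                        memcpy(dst, src, n);
                        sched_yield();          /* stand-in for cond_resched() */
                        dst = (char *)dst + n;
                        src = (const char *)src + n;
                        len -= n;
                }
        }
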
@@ -3349,9 +3366,6 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3349 | module_bug_cleanup(mod); | 3366 | module_bug_cleanup(mod); |
3350 | mutex_unlock(&module_mutex); | 3367 | mutex_unlock(&module_mutex); |
3351 | 3368 | ||
3352 | /* Free lock-classes: */ | ||
3353 | lockdep_free_key_range(mod->module_core, mod->core_size); | ||
3354 | |||
3355 | /* we can't deallocate the module until we clear memory protection */ | 3369 | /* we can't deallocate the module until we clear memory protection */ |
3356 | unset_module_init_ro_nx(mod); | 3370 | unset_module_init_ro_nx(mod); |
3357 | unset_module_core_ro_nx(mod); | 3371 | unset_module_core_ro_nx(mod); |
@@ -3375,6 +3389,9 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3375 | synchronize_rcu(); | 3389 | synchronize_rcu(); |
3376 | mutex_unlock(&module_mutex); | 3390 | mutex_unlock(&module_mutex); |
3377 | free_module: | 3391 | free_module: |
3392 | /* Free lock-classes; relies on the preceding sync_rcu() */ | ||
3393 | lockdep_free_key_range(mod->module_core, mod->core_size); | ||
3394 | |||
3378 | module_deallocate(mod, info); | 3395 | module_deallocate(mod, info); |
3379 | free_copy: | 3396 | free_copy: |
3380 | free_copy(info); | 3397 | free_copy(info); |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index c24d5a23bf93..5235dd4e1e2f 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -955,25 +955,6 @@ static void mark_nosave_pages(struct memory_bitmap *bm) | |||
955 | } | 955 | } |
956 | } | 956 | } |
957 | 957 | ||
958 | static bool is_nosave_page(unsigned long pfn) | ||
959 | { | ||
960 | struct nosave_region *region; | ||
961 | |||
962 | list_for_each_entry(region, &nosave_regions, list) { | ||
963 | if (pfn >= region->start_pfn && pfn < region->end_pfn) { | ||
964 | pr_err("PM: %#010llx in e820 nosave region: " | ||
965 | "[mem %#010llx-%#010llx]\n", | ||
966 | (unsigned long long) pfn << PAGE_SHIFT, | ||
967 | (unsigned long long) region->start_pfn << PAGE_SHIFT, | ||
968 | ((unsigned long long) region->end_pfn << PAGE_SHIFT) | ||
969 | - 1); | ||
970 | return true; | ||
971 | } | ||
972 | } | ||
973 | |||
974 | return false; | ||
975 | } | ||
976 | |||
977 | /** | 958 | /** |
978 | * create_basic_memory_bitmaps - create bitmaps needed for marking page | 959 | * create_basic_memory_bitmaps - create bitmaps needed for marking page |
979 | * frames that should not be saved and free page frames. The pointers | 960 | * frames that should not be saved and free page frames. The pointers |
@@ -2042,7 +2023,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm) | |||
2042 | do { | 2023 | do { |
2043 | pfn = memory_bm_next_pfn(bm); | 2024 | pfn = memory_bm_next_pfn(bm); |
2044 | if (likely(pfn != BM_END_OF_MAP)) { | 2025 | if (likely(pfn != BM_END_OF_MAP)) { |
2045 | if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn)) | 2026 | if (likely(pfn_valid(pfn))) |
2046 | swsusp_set_page_free(pfn_to_page(pfn)); | 2027 | swsusp_set_page_free(pfn_to_page(pfn)); |
2047 | else | 2028 | else |
2048 | return -EFAULT; | 2029 | return -EFAULT; |
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h index cbd69d842341..2ca4a8b5fe57 100644 --- a/kernel/printk/console_cmdline.h +++ b/kernel/printk/console_cmdline.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | struct console_cmdline | 4 | struct console_cmdline |
5 | { | 5 | { |
6 | char name[8]; /* Name of the driver */ | 6 | char name[16]; /* Name of the driver */ |
7 | int index; /* Minor dev. to use */ | 7 | int index; /* Minor dev. to use */ |
8 | char *options; /* Options for the driver */ | 8 | char *options; /* Options for the driver */ |
9 | #ifdef CONFIG_A11Y_BRAILLE_CONSOLE | 9 | #ifdef CONFIG_A11Y_BRAILLE_CONSOLE |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 01cfd69c54c6..bb0635bd74f2 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -2464,6 +2464,7 @@ void register_console(struct console *newcon) | |||
2464 | for (i = 0, c = console_cmdline; | 2464 | for (i = 0, c = console_cmdline; |
2465 | i < MAX_CMDLINECONSOLES && c->name[0]; | 2465 | i < MAX_CMDLINECONSOLES && c->name[0]; |
2466 | i++, c++) { | 2466 | i++, c++) { |
2467 | BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); | ||
2467 | if (strcmp(c->name, newcon->name) != 0) | 2468 | if (strcmp(c->name, newcon->name) != 0) |
2468 | continue; | 2469 | continue; |
2469 | if (newcon->index >= 0 && | 2470 | if (newcon->index >= 0 && |
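
Growing console_cmdline's name[] from 8 to 16 bytes makes it at least as large as console->name, and the new BUILD_BUG_ON lets the compiler enforce that the two never drift apart again. The negative-array-size idiom behind such compile-time checks, sketched with hypothetical stand-in structs and macro name:

        #include <stdio.h>

        struct console_cmdline_like { char name[16]; };
        struct console_like         { char name[16]; };

        /* Poor man's BUILD_BUG_ON: fails to compile if the condition is true. */
        #define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

        int main(void)
        {
                /* Compiles only while both name fields keep the same size. */
                BUILD_BUG_ON_SKETCH(sizeof(((struct console_cmdline_like *)0)->name) !=
                                    sizeof(((struct console_like *)0)->name));
                puts("sizes match");
                return 0;
        }
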
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f0f831e8a345..62671f53202a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
3034 | } else { | 3034 | } else { |
3035 | if (dl_prio(oldprio)) | 3035 | if (dl_prio(oldprio)) |
3036 | p->dl.dl_boosted = 0; | 3036 | p->dl.dl_boosted = 0; |
3037 | if (rt_prio(oldprio)) | ||
3038 | p->rt.timeout = 0; | ||
3037 | p->sched_class = &fair_sched_class; | 3039 | p->sched_class = &fair_sched_class; |
3038 | } | 3040 | } |
3039 | 3041 | ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7ce18f3c097a..241213be507c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -1609,9 +1609,11 @@ static void update_task_scan_period(struct task_struct *p, | |||
1609 | /* | 1609 | /* |
1610 | * If there were no record hinting faults then either the task is | 1610 | * If there were no record hinting faults then either the task is |
1611 | * completely idle or all activity is areas that are not of interest | 1611 | * completely idle or all activity is areas that are not of interest |
1612 | * to automatic numa balancing. Scan slower | 1612 | * to automatic numa balancing. Related to that, if there were failed |
1613 | * migrations then it implies we are migrating too quickly or the local | ||
1614 | * node is overloaded. In either case, scan slower | ||
1613 | */ | 1615 | */ |
1614 | if (local + shared == 0) { | 1616 | if (local + shared == 0 || p->numa_faults_locality[2]) { |
1615 | p->numa_scan_period = min(p->numa_scan_period_max, | 1617 | p->numa_scan_period = min(p->numa_scan_period_max, |
1616 | p->numa_scan_period << 1); | 1618 | p->numa_scan_period << 1); |
1617 | 1619 | ||
@@ -2080,6 +2082,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) | |||
2080 | 2082 | ||
2081 | if (migrated) | 2083 | if (migrated) |
2082 | p->numa_pages_migrated += pages; | 2084 | p->numa_pages_migrated += pages; |
2085 | if (flags & TNF_MIGRATE_FAIL) | ||
2086 | p->numa_faults_locality[2] += pages; | ||
2083 | 2087 | ||
2084 | p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; | 2088 | p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; |
2085 | p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; | 2089 | p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; |
@@ -2161,8 +2165,10 @@ void task_numa_work(struct callback_head *work) | |||
2161 | vma = mm->mmap; | 2165 | vma = mm->mmap; |
2162 | } | 2166 | } |
2163 | for (; vma; vma = vma->vm_next) { | 2167 | for (; vma; vma = vma->vm_next) { |
2164 | if (!vma_migratable(vma) || !vma_policy_mof(vma)) | 2168 | if (!vma_migratable(vma) || !vma_policy_mof(vma) || |
2169 | is_vm_hugetlb_page(vma)) { | ||
2165 | continue; | 2170 | continue; |
2171 | } | ||
2166 | 2172 | ||
2167 | /* | 2173 | /* |
2168 | * Shared library pages mapped by multiple processes are not | 2174 | * Shared library pages mapped by multiple processes are not |
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 94b2d7b88a27..80014a178342 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c | |||
@@ -82,6 +82,7 @@ static void cpuidle_idle_call(void) | |||
82 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | 82 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
83 | int next_state, entered_state; | 83 | int next_state, entered_state; |
84 | unsigned int broadcast; | 84 | unsigned int broadcast; |
85 | bool reflect; | ||
85 | 86 | ||
86 | /* | 87 | /* |
87 | * Check if the idle task must be rescheduled. If it is the | 88 | * Check if the idle task must be rescheduled. If it is the |
@@ -105,6 +106,9 @@ static void cpuidle_idle_call(void) | |||
105 | */ | 106 | */ |
106 | rcu_idle_enter(); | 107 | rcu_idle_enter(); |
107 | 108 | ||
109 | if (cpuidle_not_available(drv, dev)) | ||
110 | goto use_default; | ||
111 | |||
108 | /* | 112 | /* |
109 | * Suspend-to-idle ("freeze") is a system state in which all user space | 113 | * Suspend-to-idle ("freeze") is a system state in which all user space |
110 | * has been frozen, all I/O devices have been suspended and the only | 114 | * has been frozen, all I/O devices have been suspended and the only |
@@ -115,30 +119,24 @@ static void cpuidle_idle_call(void) | |||
115 | * until a proper wakeup interrupt happens. | 119 | * until a proper wakeup interrupt happens. |
116 | */ | 120 | */ |
117 | if (idle_should_freeze()) { | 121 | if (idle_should_freeze()) { |
118 | cpuidle_enter_freeze(); | 122 | entered_state = cpuidle_enter_freeze(drv, dev); |
119 | local_irq_enable(); | 123 | if (entered_state >= 0) { |
120 | goto exit_idle; | 124 | local_irq_enable(); |
121 | } | 125 | goto exit_idle; |
126 | } | ||
122 | 127 | ||
123 | /* | 128 | reflect = false; |
124 | * Ask the cpuidle framework to choose a convenient idle state. | 129 | next_state = cpuidle_find_deepest_state(drv, dev); |
125 | * Fall back to the default arch idle method on errors. | 130 | } else { |
126 | */ | 131 | reflect = true; |
127 | next_state = cpuidle_select(drv, dev); | ||
128 | if (next_state < 0) { | ||
129 | use_default: | ||
130 | /* | 132 | /* |
131 | * We can't use the cpuidle framework, let's use the default | 133 | * Ask the cpuidle framework to choose a convenient idle state. |
132 | * idle routine. | ||
133 | */ | 134 | */ |
134 | if (current_clr_polling_and_test()) | 135 | next_state = cpuidle_select(drv, dev); |
135 | local_irq_enable(); | ||
136 | else | ||
137 | arch_cpu_idle(); | ||
138 | |||
139 | goto exit_idle; | ||
140 | } | 136 | } |
141 | 137 | /* Fall back to the default arch idle method on errors. */ | |
138 | if (next_state < 0) | ||
139 | goto use_default; | ||
142 | 140 | ||
143 | /* | 141 | /* |
144 | * The idle task must be scheduled, it is pointless to | 142 | * The idle task must be scheduled, it is pointless to |
@@ -183,7 +181,8 @@ use_default: | |||
183 | /* | 181 | /* |
184 | * Give the governor an opportunity to reflect on the outcome | 182 | * Give the governor an opportunity to reflect on the outcome |
185 | */ | 183 | */ |
186 | cpuidle_reflect(dev, entered_state); | 184 | if (reflect) |
185 | cpuidle_reflect(dev, entered_state); | ||
187 | 186 | ||
188 | exit_idle: | 187 | exit_idle: |
189 | __current_set_polling(); | 188 | __current_set_polling(); |
@@ -196,6 +195,19 @@ exit_idle: | |||
196 | 195 | ||
197 | rcu_idle_exit(); | 196 | rcu_idle_exit(); |
198 | start_critical_timings(); | 197 | start_critical_timings(); |
198 | return; | ||
199 | |||
200 | use_default: | ||
201 | /* | ||
202 | * We can't use the cpuidle framework, let's use the default | ||
203 | * idle routine. | ||
204 | */ | ||
205 | if (current_clr_polling_and_test()) | ||
206 | local_irq_enable(); | ||
207 | else | ||
208 | arch_cpu_idle(); | ||
209 | |||
210 | goto exit_idle; | ||
199 | } | 211 | } |
200 | 212 | ||
201 | /* | 213 | /* |
diff --git a/kernel/sys.c b/kernel/sys.c index 667b2e62fad2..a03d9cd23ed7 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1108,6 +1108,7 @@ DECLARE_RWSEM(uts_sem); | |||
1108 | /* | 1108 | /* |
1109 | * Work around broken programs that cannot handle "Linux 3.0". | 1109 | * Work around broken programs that cannot handle "Linux 3.0". |
1110 | * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 | 1110 | * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 |
1111 | * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60. | ||
1111 | */ | 1112 | */ |
1112 | static int override_release(char __user *release, size_t len) | 1113 | static int override_release(char __user *release, size_t len) |
1113 | { | 1114 | { |
@@ -1127,7 +1128,7 @@ static int override_release(char __user *release, size_t len) | |||
1127 | break; | 1128 | break; |
1128 | rest++; | 1129 | rest++; |
1129 | } | 1130 | } |
1130 | v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; | 1131 | v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60; |
1131 | copy = clamp_t(size_t, len, 1, sizeof(buf)); | 1132 | copy = clamp_t(size_t, len, 1, sizeof(buf)); |
1132 | copy = scnprintf(buf, copy, "2.6.%u%s", v, rest); | 1133 | copy = scnprintf(buf, copy, "2.6.%u%s", v, rest); |
1133 | ret = copy_to_user(release, buf, copy + 1); | 1134 | ret = copy_to_user(release, buf, copy + 1); |
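
The +40 to +60 bump keeps the uname workaround in step with 4.x kernels: the minor version is pulled out of LINUX_VERSION_CODE ((major << 16) | (minor << 8) | patch) and added to 60, so 4.0 reports itself as 2.6.60 and 4.2 as 2.6.62. A quick stand-alone check of that arithmetic, with KERNEL_VERSION redefined locally for the example:

        #include <stdio.h>

        #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

        int main(void)
        {
                unsigned int code = KERNEL_VERSION(4, 2, 0);    /* e.g. v4.2 */
                unsigned int v = ((code >> 8) & 0xff) + 60;

                printf("2.6.%u\n", v);  /* prints 2.6.62 */
                return 0;
        }
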
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 88ea2d6e0031..ce410bb9f2e1 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -1228,6 +1228,14 @@ static struct ctl_table vm_table[] = { | |||
1228 | .extra1 = &zero, | 1228 | .extra1 = &zero, |
1229 | }, | 1229 | }, |
1230 | { | 1230 | { |
1231 | .procname = "dirtytime_expire_seconds", | ||
1232 | .data = &dirtytime_expire_interval, | ||
1233 | .maxlen = sizeof(dirty_expire_interval), | ||
1234 | .mode = 0644, | ||
1235 | .proc_handler = dirtytime_interval_handler, | ||
1236 | .extra1 = &zero, | ||
1237 | }, | ||
1238 | { | ||
1231 | .procname = "nr_pdflush_threads", | 1239 | .procname = "nr_pdflush_threads", |
1232 | .mode = 0444 /* read-only */, | 1240 | .mode = 0444 /* read-only */, |
1233 | .proc_handler = pdflush_proc_obsolete, | 1241 | .proc_handler = pdflush_proc_obsolete, |
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c index eb682d5c697c..6aac4beedbbe 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c | |||
@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode, | |||
49 | */ | 49 | */ |
50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | 50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) |
51 | { | 51 | { |
52 | int bc_moved; | ||
52 | /* | 53 | /* |
53 | * We try to cancel the timer first. If the callback is on | 54 | * We try to cancel the timer first. If the callback is on |
54 | * flight on some other cpu then we let it handle it. If we | 55 | * flight on some other cpu then we let it handle it. If we |
@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | |||
60 | * restart the timer because we are in the callback, but we | 61 | * restart the timer because we are in the callback, but we |
61 | * can set the expiry time and let the callback return | 62 | * can set the expiry time and let the callback return |
62 | * HRTIMER_RESTART. | 63 | * HRTIMER_RESTART. |
64 | * | ||
65 | * Since we are in the idle loop at this point and because | ||
66 | * hrtimer_{start/cancel} functions call into tracing, | ||
67 | * calls to these functions must be bound within RCU_NONIDLE. | ||
63 | */ | 68 | */ |
64 | if (hrtimer_try_to_cancel(&bctimer) >= 0) { | 69 | RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ? |
65 | hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); | 70 | !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) : |
71 | 0); | ||
72 | if (bc_moved) { | ||
66 | /* Bind the "device" to the cpu */ | 73 | /* Bind the "device" to the cpu */ |
67 | bc->bound_on = smp_processor_id(); | 74 | bc->bound_on = smp_processor_id(); |
68 | } else if (bc->bound_on == smp_processor_id()) { | 75 | } else if (bc->bound_on == smp_processor_id()) { |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 45e5cb143d17..4f228024055b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | |||
1059 | 1059 | ||
1060 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | 1060 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; |
1061 | 1061 | ||
1062 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1063 | static int ftrace_graph_active; | ||
1064 | #else | ||
1065 | # define ftrace_graph_active 0 | ||
1066 | #endif | ||
1067 | |||
1062 | #ifdef CONFIG_DYNAMIC_FTRACE | 1068 | #ifdef CONFIG_DYNAMIC_FTRACE |
1063 | 1069 | ||
1064 | static struct ftrace_ops *removed_ops; | 1070 | static struct ftrace_ops *removed_ops; |
@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
2041 | if (!ftrace_rec_count(rec)) | 2047 | if (!ftrace_rec_count(rec)) |
2042 | rec->flags = 0; | 2048 | rec->flags = 0; |
2043 | else | 2049 | else |
2044 | /* Just disable the record (keep REGS state) */ | 2050 | /* |
2045 | rec->flags &= ~FTRACE_FL_ENABLED; | 2051 | * Just disable the record, but keep the ops TRAMP |
2052 | * and REGS states. The _EN flags must be disabled though. | ||
2053 | */ | ||
2054 | rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | | ||
2055 | FTRACE_FL_REGS_EN); | ||
2046 | } | 2056 | } |
2047 | 2057 | ||
2048 | return FTRACE_UPDATE_MAKE_NOP; | 2058 | return FTRACE_UPDATE_MAKE_NOP; |
@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2688 | 2698 | ||
2689 | static void ftrace_startup_sysctl(void) | 2699 | static void ftrace_startup_sysctl(void) |
2690 | { | 2700 | { |
2701 | int command; | ||
2702 | |||
2691 | if (unlikely(ftrace_disabled)) | 2703 | if (unlikely(ftrace_disabled)) |
2692 | return; | 2704 | return; |
2693 | 2705 | ||
2694 | /* Force update next time */ | 2706 | /* Force update next time */ |
2695 | saved_ftrace_func = NULL; | 2707 | saved_ftrace_func = NULL; |
2696 | /* ftrace_start_up is true if we want ftrace running */ | 2708 | /* ftrace_start_up is true if we want ftrace running */ |
2697 | if (ftrace_start_up) | 2709 | if (ftrace_start_up) { |
2698 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | 2710 | command = FTRACE_UPDATE_CALLS; |
2711 | if (ftrace_graph_active) | ||
2712 | command |= FTRACE_START_FUNC_RET; | ||
2713 | ftrace_startup_enable(command); | ||
2714 | } | ||
2699 | } | 2715 | } |
2700 | 2716 | ||
2701 | static void ftrace_shutdown_sysctl(void) | 2717 | static void ftrace_shutdown_sysctl(void) |
2702 | { | 2718 | { |
2719 | int command; | ||
2720 | |||
2703 | if (unlikely(ftrace_disabled)) | 2721 | if (unlikely(ftrace_disabled)) |
2704 | return; | 2722 | return; |
2705 | 2723 | ||
2706 | /* ftrace_start_up is true if ftrace is running */ | 2724 | /* ftrace_start_up is true if ftrace is running */ |
2707 | if (ftrace_start_up) | 2725 | if (ftrace_start_up) { |
2708 | ftrace_run_update_code(FTRACE_DISABLE_CALLS); | 2726 | command = FTRACE_DISABLE_CALLS; |
2727 | if (ftrace_graph_active) | ||
2728 | command |= FTRACE_STOP_FUNC_RET; | ||
2729 | ftrace_run_update_code(command); | ||
2730 | } | ||
2709 | } | 2731 | } |
2710 | 2732 | ||
2711 | static cycle_t ftrace_update_time; | 2733 | static cycle_t ftrace_update_time; |
@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
5558 | 5580 | ||
5559 | if (ftrace_enabled) { | 5581 | if (ftrace_enabled) { |
5560 | 5582 | ||
5561 | ftrace_startup_sysctl(); | ||
5562 | |||
5563 | /* we are starting ftrace again */ | 5583 | /* we are starting ftrace again */ |
5564 | if (ftrace_ops_list != &ftrace_list_end) | 5584 | if (ftrace_ops_list != &ftrace_list_end) |
5565 | update_ftrace_function(); | 5585 | update_ftrace_function(); |
5566 | 5586 | ||
5587 | ftrace_startup_sysctl(); | ||
5588 | |||
5567 | } else { | 5589 | } else { |
5568 | /* stopping ftrace calls (just send to ftrace_stub) */ | 5590 | /* stopping ftrace calls (just send to ftrace_stub) */ |
5569 | ftrace_trace_function = ftrace_stub; | 5591 | ftrace_trace_function = ftrace_stub; |
@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = { | |||
5590 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | 5612 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) |
5591 | }; | 5613 | }; |
5592 | 5614 | ||
5593 | static int ftrace_graph_active; | ||
5594 | |||
5595 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | 5615 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
5596 | { | 5616 | { |
5597 | return 0; | 5617 | return 0; |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f28849394791..41ff75b478c6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work) | |||
2728 | } | 2728 | } |
2729 | EXPORT_SYMBOL_GPL(flush_work); | 2729 | EXPORT_SYMBOL_GPL(flush_work); |
2730 | 2730 | ||
2731 | struct cwt_wait { | ||
2732 | wait_queue_t wait; | ||
2733 | struct work_struct *work; | ||
2734 | }; | ||
2735 | |||
2736 | static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) | ||
2737 | { | ||
2738 | struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); | ||
2739 | |||
2740 | if (cwait->work != key) | ||
2741 | return 0; | ||
2742 | return autoremove_wake_function(wait, mode, sync, key); | ||
2743 | } | ||
2744 | |||
2731 | static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) | 2745 | static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) |
2732 | { | 2746 | { |
2747 | static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); | ||
2733 | unsigned long flags; | 2748 | unsigned long flags; |
2734 | int ret; | 2749 | int ret; |
2735 | 2750 | ||
2736 | do { | 2751 | do { |
2737 | ret = try_to_grab_pending(work, is_dwork, &flags); | 2752 | ret = try_to_grab_pending(work, is_dwork, &flags); |
2738 | /* | 2753 | /* |
2739 | * If someone else is canceling, wait for the same event it | 2754 | * If someone else is already canceling, wait for it to |
2740 | * would be waiting for before retrying. | 2755 | * finish. flush_work() doesn't work for PREEMPT_NONE |
2756 | * because we may get scheduled between @work's completion | ||
2757 | * and the other canceling task resuming and clearing | ||
2758 | * CANCELING - flush_work() will return false immediately | ||
2759 | * as @work is no longer busy, try_to_grab_pending() will | ||
2760 | * return -ENOENT as @work is still being canceled and the | ||
2761 | * other canceling task won't be able to clear CANCELING as | ||
2762 | * we're hogging the CPU. | ||
2763 | * | ||
2764 | * Let's wait for completion using a waitqueue. As this | ||
2765 | * may lead to the thundering herd problem, use a custom | ||
2766 | * wake function which matches @work along with exclusive | ||
2767 | * wait and wakeup. | ||
2741 | */ | 2768 | */ |
2742 | if (unlikely(ret == -ENOENT)) | 2769 | if (unlikely(ret == -ENOENT)) { |
2743 | flush_work(work); | 2770 | struct cwt_wait cwait; |
2771 | |||
2772 | init_wait(&cwait.wait); | ||
2773 | cwait.wait.func = cwt_wakefn; | ||
2774 | cwait.work = work; | ||
2775 | |||
2776 | prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, | ||
2777 | TASK_UNINTERRUPTIBLE); | ||
2778 | if (work_is_canceling(work)) | ||
2779 | schedule(); | ||
2780 | finish_wait(&cancel_waitq, &cwait.wait); | ||
2781 | } | ||
2744 | } while (unlikely(ret < 0)); | 2782 | } while (unlikely(ret < 0)); |
2745 | 2783 | ||
2746 | /* tell other tasks trying to grab @work to back off */ | 2784 | /* tell other tasks trying to grab @work to back off */ |
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) | |||
2749 | 2787 | ||
2750 | flush_work(work); | 2788 | flush_work(work); |
2751 | clear_work_data(work); | 2789 | clear_work_data(work); |
2790 | |||
2791 | /* | ||
2792 | * Paired with prepare_to_wait() above so that either | ||
2793 | * waitqueue_active() is visible here or !work_is_canceling() is | ||
2794 | * visible there. | ||
2795 | */ | ||
2796 | smp_mb(); | ||
2797 | if (waitqueue_active(&cancel_waitq)) | ||
2798 | __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); | ||
2799 | |||
2752 | return ret; | 2800 | return ret; |
2753 | } | 2801 | } |
2754 | 2802 | ||
diff --git a/lib/Makefile b/lib/Makefile index 87eb3bffc283..58f74d2dd396 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -24,7 +24,7 @@ obj-y += lockref.o | |||
24 | 24 | ||
25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
26 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ | 26 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ |
27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \ | 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ |
28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ | 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ |
29 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o | 29 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o |
30 | obj-y += string_helpers.o | 30 | obj-y += string_helpers.o |
diff --git a/mm/iov_iter.c b/lib/iov_iter.c index 827732047da1..9d96e283520c 100644 --- a/mm/iov_iter.c +++ b/lib/iov_iter.c | |||
@@ -751,3 +751,18 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) | |||
751 | return npages; | 751 | return npages; |
752 | } | 752 | } |
753 | EXPORT_SYMBOL(iov_iter_npages); | 753 | EXPORT_SYMBOL(iov_iter_npages); |
754 | |||
755 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) | ||
756 | { | ||
757 | *new = *old; | ||
758 | if (new->type & ITER_BVEC) | ||
759 | return new->bvec = kmemdup(new->bvec, | ||
760 | new->nr_segs * sizeof(struct bio_vec), | ||
761 | flags); | ||
762 | else | ||
763 | /* iovec and kvec have identical layout */ | ||
764 | return new->iov = kmemdup(new->iov, | ||
765 | new->nr_segs * sizeof(struct iovec), | ||
766 | flags); | ||
767 | } | ||
768 | EXPORT_SYMBOL(dup_iter); | ||
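
dup_iter() copies the iterator by value and then kmemdup()s the segment array it points at, so the clone can be advanced without disturbing the original. The same deep-copy shape for a struct with an out-of-line array, sketched with hypothetical types and malloc() standing in for kmemdup():

        #include <stdlib.h>
        #include <string.h>

        struct span { void *base; size_t len; };
        struct iter { struct span *segs; size_t nr_segs; size_t offset; };

        /*
         * Clone 'old' into 'new': shallow-copy the struct, then duplicate the
         * array it points to so the two iterators can advance independently.
         */
        static struct span *dup_iter_sketch(struct iter *new, const struct iter *old)
        {
                *new = *old;
                new->segs = malloc(old->nr_segs * sizeof(*old->segs));
                if (new->segs)
                        memcpy(new->segs, old->segs,
                               old->nr_segs * sizeof(*old->segs));
                return new->segs;       /* NULL on allocation failure, like dup_iter() */
        }
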
diff --git a/lib/lcm.c b/lib/lcm.c --- a/lib/lcm.c +++ b/lib/lcm.c | |||
@@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b) | |||
12 | return 0; | 12 | return 0; |
13 | } | 13 | } |
14 | EXPORT_SYMBOL_GPL(lcm); | 14 | EXPORT_SYMBOL_GPL(lcm); |
15 | |||
16 | unsigned long lcm_not_zero(unsigned long a, unsigned long b) | ||
17 | { | ||
18 | unsigned long l = lcm(a, b); | ||
19 | |||
20 | if (l) | ||
21 | return l; | ||
22 | |||
23 | return (b ? : a); | ||
24 | } | ||
25 | EXPORT_SYMBOL_GPL(lcm_not_zero); | ||
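
lcm_not_zero() returns the ordinary lcm when it is non-zero and otherwise falls back to whichever operand is set, so lcm_not_zero(0, 512) yields 512 where lcm(0, 512) yields 0. A self-contained illustration that supplies its own gcd-based lcm(); the in-kernel lcm() lives earlier in lib/lcm.c:

        #include <assert.h>

        static unsigned long gcd(unsigned long a, unsigned long b)
        {
                while (b) {
                        unsigned long t = a % b;
                        a = b;
                        b = t;
                }
                return a;
        }

        static unsigned long lcm(unsigned long a, unsigned long b)
        {
                return (a && b) ? (a / gcd(a, b)) * b : 0;
        }

        static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
        {
                unsigned long l = lcm(a, b);

                return l ? l : (b ? b : a);
        }

        int main(void)
        {
                assert(lcm(4, 6) == 12);
                assert(lcm_not_zero(0, 512) == 512);    /* lcm() alone gives 0 */
                assert(lcm_not_zero(4, 6) == 12);
                return 0;
        }
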
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 7a85967060a5..f0f5c5c3de12 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c | |||
@@ -139,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize) | |||
139 | /* Error: request to write beyond destination buffer */ | 139 | /* Error: request to write beyond destination buffer */ |
140 | if (cpy > oend) | 140 | if (cpy > oend) |
141 | goto _output_error; | 141 | goto _output_error; |
142 | if ((ref + COPYLENGTH) > oend || | ||
143 | (op + COPYLENGTH) > oend) | ||
144 | goto _output_error; | ||
142 | LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); | 145 | LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); |
143 | while (op < cpy) | 146 | while (op < cpy) |
144 | *op++ = *ref++; | 147 | *op++ = *ref++; |
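
The added lz4 checks reject a match whose fixed-stride copy would run past the end of the output buffer, closing an overrun on crafted input. The general shape of such a guard before an unchecked fixed-size copy step, sketched with a hypothetical helper and COPYLENGTH assumed to be 8 as lz4 defines it:

        #include <string.h>

        #define COPYLENGTH 8

        /*
         * Refuse a copy that the fast path would perform in fixed 8-byte steps
         * if either pointer could step past the end of the output buffer.
         */
        static int copy_within_bounds(char *op, const char *ref, char *oend)
        {
                if (ref + COPYLENGTH > oend || op + COPYLENGTH > oend)
                        return -1;      /* would overrun: treat input as corrupt */
                memcpy(op, ref, COPYLENGTH);
                return 0;
        }
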
diff --git a/lib/nlattr.c b/lib/nlattr.c index 76a1b59523ab..f5907d23272d 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
@@ -279,6 +279,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count) | |||
279 | int minlen = min_t(int, count, nla_len(src)); | 279 | int minlen = min_t(int, count, nla_len(src)); |
280 | 280 | ||
281 | memcpy(dest, nla_data(src), minlen); | 281 | memcpy(dest, nla_data(src), minlen); |
282 | if (count > minlen) | ||
283 | memset(dest + minlen, 0, count - minlen); | ||
282 | 284 | ||
283 | return minlen; | 285 | return minlen; |
284 | } | 286 | } |
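
The nla_memcpy() change zero-fills the tail of the destination whenever the attribute payload is shorter than the requested count, so callers no longer see stale bytes past the copy. The same copy-then-pad pattern in plain C, with memcpy_padded as an invented name for illustration:

        #include <string.h>

        /*
         * Copy at most count bytes from a src of src_len bytes and zero the
         * remainder of dst, returning how many bytes were actually copied.
         */
        static int memcpy_padded(void *dst, const void *src, int src_len, int count)
        {
                int minlen = count < src_len ? count : src_len;

                memcpy(dst, src, minlen);
                if (count > minlen)
                        memset((char *)dst + minlen, 0, count - minlen);
                return minlen;
        }
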
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 9cc4c4a90d00..b5344ef4c684 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/log2.h> | 19 | #include <linux/log2.h> |
20 | #include <linux/sched.h> | ||
20 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
21 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
22 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
@@ -217,15 +218,15 @@ static void bucket_table_free(const struct bucket_table *tbl) | |||
217 | static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | 218 | static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, |
218 | size_t nbuckets) | 219 | size_t nbuckets) |
219 | { | 220 | { |
220 | struct bucket_table *tbl; | 221 | struct bucket_table *tbl = NULL; |
221 | size_t size; | 222 | size_t size; |
222 | int i; | 223 | int i; |
223 | 224 | ||
224 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); | 225 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); |
225 | tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); | 226 | if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) |
227 | tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); | ||
226 | if (tbl == NULL) | 228 | if (tbl == NULL) |
227 | tbl = vzalloc(size); | 229 | tbl = vzalloc(size); |
228 | |||
229 | if (tbl == NULL) | 230 | if (tbl == NULL) |
230 | return NULL; | 231 | return NULL; |
231 | 232 | ||
@@ -247,26 +248,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | |||
247 | * @ht: hash table | 248 | * @ht: hash table |
248 | * @new_size: new table size | 249 | * @new_size: new table size |
249 | */ | 250 | */ |
250 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) | 251 | static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) |
251 | { | 252 | { |
252 | /* Expand table when exceeding 75% load */ | 253 | /* Expand table when exceeding 75% load */ |
253 | return atomic_read(&ht->nelems) > (new_size / 4 * 3) && | 254 | return atomic_read(&ht->nelems) > (new_size / 4 * 3) && |
254 | (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift); | 255 | (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift); |
255 | } | 256 | } |
256 | EXPORT_SYMBOL_GPL(rht_grow_above_75); | ||
257 | 257 | ||
258 | /** | 258 | /** |
259 | * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size | 259 | * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size |
260 | * @ht: hash table | 260 | * @ht: hash table |
261 | * @new_size: new table size | 261 | * @new_size: new table size |
262 | */ | 262 | */ |
263 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) | 263 | static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) |
264 | { | 264 | { |
265 | /* Shrink table beneath 30% load */ | 265 | /* Shrink table beneath 30% load */ |
266 | return atomic_read(&ht->nelems) < (new_size * 3 / 10) && | 266 | return atomic_read(&ht->nelems) < (new_size * 3 / 10) && |
267 | (atomic_read(&ht->shift) > ht->p.min_shift); | 267 | (atomic_read(&ht->shift) > ht->p.min_shift); |
268 | } | 268 | } |
269 | EXPORT_SYMBOL_GPL(rht_shrink_below_30); | ||
270 | 269 | ||
271 | static void lock_buckets(struct bucket_table *new_tbl, | 270 | static void lock_buckets(struct bucket_table *new_tbl, |
272 | struct bucket_table *old_tbl, unsigned int hash) | 271 | struct bucket_table *old_tbl, unsigned int hash) |
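
With rht_grow_above_75() and rht_shrink_below_30() now the only (static) policies, a 1024-bucket table is queued for expansion once it holds more than 1024 / 4 * 3 = 768 entries and for shrinking once it falls below 1024 * 3 / 10 = 307, subject to the max_shift/min_shift limits the real functions also consult. The thresholds in isolation, as a quick check:

        #include <stdio.h>

        static int grow_above_75(unsigned long nelems, unsigned long size)
        {
                return nelems > (size / 4 * 3);         /* > 75 % load */
        }

        static int shrink_below_30(unsigned long nelems, unsigned long size)
        {
                return nelems < (size * 3 / 10);        /* < 30 % load */
        }

        int main(void)
        {
                printf("%d %d\n", grow_above_75(769, 1024),
                       shrink_below_30(306, 1024));
                /* prints "1 1": 769 > 768 triggers growth, 306 < 307 shrink */
                return 0;
        }
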
@@ -414,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht) | |||
414 | } | 413 | } |
415 | } | 414 | } |
416 | unlock_buckets(new_tbl, old_tbl, new_hash); | 415 | unlock_buckets(new_tbl, old_tbl, new_hash); |
416 | cond_resched(); | ||
417 | } | 417 | } |
418 | 418 | ||
419 | /* Unzip interleaved hash chains */ | 419 | /* Unzip interleaved hash chains */ |
@@ -437,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht) | |||
437 | complete = false; | 437 | complete = false; |
438 | 438 | ||
439 | unlock_buckets(new_tbl, old_tbl, old_hash); | 439 | unlock_buckets(new_tbl, old_tbl, old_hash); |
440 | cond_resched(); | ||
440 | } | 441 | } |
441 | } | 442 | } |
442 | 443 | ||
@@ -495,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht) | |||
495 | tbl->buckets[new_hash + new_tbl->size]); | 496 | tbl->buckets[new_hash + new_tbl->size]); |
496 | 497 | ||
497 | unlock_buckets(new_tbl, tbl, new_hash); | 498 | unlock_buckets(new_tbl, tbl, new_hash); |
499 | cond_resched(); | ||
498 | } | 500 | } |
499 | 501 | ||
500 | /* Publish the new, valid hash table */ | 502 | /* Publish the new, valid hash table */ |
@@ -528,31 +530,19 @@ static void rht_deferred_worker(struct work_struct *work) | |||
528 | list_for_each_entry(walker, &ht->walkers, list) | 530 | list_for_each_entry(walker, &ht->walkers, list) |
529 | walker->resize = true; | 531 | walker->resize = true; |
530 | 532 | ||
531 | if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) | 533 | if (rht_grow_above_75(ht, tbl->size)) |
532 | rhashtable_expand(ht); | 534 | rhashtable_expand(ht); |
533 | else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) | 535 | else if (rht_shrink_below_30(ht, tbl->size)) |
534 | rhashtable_shrink(ht); | 536 | rhashtable_shrink(ht); |
535 | |||
536 | unlock: | 537 | unlock: |
537 | mutex_unlock(&ht->mutex); | 538 | mutex_unlock(&ht->mutex); |
538 | } | 539 | } |
539 | 540 | ||
540 | static void rhashtable_wakeup_worker(struct rhashtable *ht) | ||
541 | { | ||
542 | struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | ||
543 | struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht); | ||
544 | size_t size = tbl->size; | ||
545 | |||
546 | /* Only adjust the table if no resizing is currently in progress. */ | ||
547 | if (tbl == new_tbl && | ||
548 | ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) || | ||
549 | (ht->p.shrink_decision && ht->p.shrink_decision(ht, size)))) | ||
550 | schedule_work(&ht->run_work); | ||
551 | } | ||
552 | |||
553 | static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, | 541 | static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, |
554 | struct bucket_table *tbl, u32 hash) | 542 | struct bucket_table *tbl, |
543 | const struct bucket_table *old_tbl, u32 hash) | ||
555 | { | 544 | { |
545 | bool no_resize_running = tbl == old_tbl; | ||
556 | struct rhash_head *head; | 546 | struct rhash_head *head; |
557 | 547 | ||
558 | hash = rht_bucket_index(tbl, hash); | 548 | hash = rht_bucket_index(tbl, hash); |
@@ -568,8 +558,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, | |||
568 | rcu_assign_pointer(tbl->buckets[hash], obj); | 558 | rcu_assign_pointer(tbl->buckets[hash], obj); |
569 | 559 | ||
570 | atomic_inc(&ht->nelems); | 560 | atomic_inc(&ht->nelems); |
571 | 561 | if (no_resize_running && rht_grow_above_75(ht, tbl->size)) | |
572 | rhashtable_wakeup_worker(ht); | 562 | schedule_work(&ht->run_work); |
573 | } | 563 | } |
574 | 564 | ||
575 | /** | 565 | /** |
@@ -599,7 +589,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) | |||
599 | hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); | 589 | hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); |
600 | 590 | ||
601 | lock_buckets(tbl, old_tbl, hash); | 591 | lock_buckets(tbl, old_tbl, hash); |
602 | __rhashtable_insert(ht, obj, tbl, hash); | 592 | __rhashtable_insert(ht, obj, tbl, old_tbl, hash); |
603 | unlock_buckets(tbl, old_tbl, hash); | 593 | unlock_buckets(tbl, old_tbl, hash); |
604 | 594 | ||
605 | rcu_read_unlock(); | 595 | rcu_read_unlock(); |
@@ -681,8 +671,11 @@ found: | |||
681 | unlock_buckets(new_tbl, old_tbl, new_hash); | 671 | unlock_buckets(new_tbl, old_tbl, new_hash); |
682 | 672 | ||
683 | if (ret) { | 673 | if (ret) { |
674 | bool no_resize_running = new_tbl == old_tbl; | ||
675 | |||
684 | atomic_dec(&ht->nelems); | 676 | atomic_dec(&ht->nelems); |
685 | rhashtable_wakeup_worker(ht); | 677 | if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size)) |
678 | schedule_work(&ht->run_work); | ||
686 | } | 679 | } |
687 | 680 | ||
688 | rcu_read_unlock(); | 681 | rcu_read_unlock(); |
@@ -852,7 +845,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht, | |||
852 | goto exit; | 845 | goto exit; |
853 | } | 846 | } |
854 | 847 | ||
855 | __rhashtable_insert(ht, obj, new_tbl, new_hash); | 848 | __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash); |
856 | 849 | ||
857 | exit: | 850 | exit: |
858 | unlock_buckets(new_tbl, old_tbl, new_hash); | 851 | unlock_buckets(new_tbl, old_tbl, new_hash); |
@@ -894,6 +887,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) | |||
894 | if (!iter->walker) | 887 | if (!iter->walker) |
895 | return -ENOMEM; | 888 | return -ENOMEM; |
896 | 889 | ||
890 | INIT_LIST_HEAD(&iter->walker->list); | ||
891 | iter->walker->resize = false; | ||
892 | |||
897 | mutex_lock(&ht->mutex); | 893 | mutex_lock(&ht->mutex); |
898 | list_add(&iter->walker->list, &ht->walkers); | 894 | list_add(&iter->walker->list, &ht->walkers); |
899 | mutex_unlock(&ht->mutex); | 895 | mutex_unlock(&ht->mutex); |
@@ -1111,8 +1107,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) | |||
1111 | if (!ht->p.hash_rnd) | 1107 | if (!ht->p.hash_rnd) |
1112 | get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); | 1108 | get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); |
1113 | 1109 | ||
1114 | if (ht->p.grow_decision || ht->p.shrink_decision) | 1110 | INIT_WORK(&ht->run_work, rht_deferred_worker); |
1115 | INIT_WORK(&ht->run_work, rht_deferred_worker); | ||
1116 | 1111 | ||
1117 | return 0; | 1112 | return 0; |
1118 | } | 1113 | } |
@@ -1130,8 +1125,7 @@ void rhashtable_destroy(struct rhashtable *ht) | |||
1130 | { | 1125 | { |
1131 | ht->being_destroyed = true; | 1126 | ht->being_destroyed = true; |
1132 | 1127 | ||
1133 | if (ht->p.grow_decision || ht->p.shrink_decision) | 1128 | cancel_work_sync(&ht->run_work); |
1134 | cancel_work_sync(&ht->run_work); | ||
1135 | 1129 | ||
1136 | mutex_lock(&ht->mutex); | 1130 | mutex_lock(&ht->mutex); |
1137 | bucket_table_free(rht_dereference(ht->tbl, ht)); | 1131 | bucket_table_free(rht_dereference(ht->tbl, ht)); |
diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 88c0854bd752..5c94e1012a91 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c | |||
@@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) | |||
61 | 61 | ||
62 | if (s->len < s->size) { | 62 | if (s->len < s->size) { |
63 | len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); | 63 | len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); |
64 | if (seq_buf_can_fit(s, len)) { | 64 | if (s->len + len < s->size) { |
65 | s->len += len; | 65 | s->len += len; |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
@@ -118,7 +118,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) | |||
118 | 118 | ||
119 | if (s->len < s->size) { | 119 | if (s->len < s->size) { |
120 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | 120 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); |
121 | if (seq_buf_can_fit(s, ret)) { | 121 | if (s->len + ret < s->size) { |
122 | s->len += ret; | 122 | s->len += ret; |
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 1dfeba73fc74..67c7593d1dd6 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c | |||
@@ -191,18 +191,18 @@ error: | |||
191 | return err; | 191 | return err; |
192 | } | 192 | } |
193 | 193 | ||
194 | static struct rhashtable ht; | ||
195 | |||
194 | static int __init test_rht_init(void) | 196 | static int __init test_rht_init(void) |
195 | { | 197 | { |
196 | struct rhashtable ht; | ||
197 | struct rhashtable_params params = { | 198 | struct rhashtable_params params = { |
198 | .nelem_hint = TEST_HT_SIZE, | 199 | .nelem_hint = TEST_HT_SIZE, |
199 | .head_offset = offsetof(struct test_obj, node), | 200 | .head_offset = offsetof(struct test_obj, node), |
200 | .key_offset = offsetof(struct test_obj, value), | 201 | .key_offset = offsetof(struct test_obj, value), |
201 | .key_len = sizeof(int), | 202 | .key_len = sizeof(int), |
202 | .hashfn = jhash, | 203 | .hashfn = jhash, |
204 | .max_shift = 1, /* we expand/shrink manually here */ | ||
203 | .nulls_base = (3U << RHT_BASE_SHIFT), | 205 | .nulls_base = (3U << RHT_BASE_SHIFT), |
204 | .grow_decision = rht_grow_above_75, | ||
205 | .shrink_decision = rht_shrink_below_30, | ||
206 | }; | 206 | }; |
207 | int err; | 207 | int err; |
208 | 208 | ||
@@ -222,6 +222,11 @@ static int __init test_rht_init(void) | |||
222 | return err; | 222 | return err; |
223 | } | 223 | } |
224 | 224 | ||
225 | static void __exit test_rht_exit(void) | ||
226 | { | ||
227 | } | ||
228 | |||
225 | module_init(test_rht_init); | 229 | module_init(test_rht_init); |
230 | module_exit(test_rht_exit); | ||
226 | 231 | ||
227 | MODULE_LICENSE("GPL v2"); | 232 | MODULE_LICENSE("GPL v2"); |
diff --git a/mm/Makefile b/mm/Makefile index 3c1caa2693bd..15dbe9903c27 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ | |||
21 | mm_init.o mmu_context.o percpu.o slab_common.o \ | 21 | mm_init.o mmu_context.o percpu.o slab_common.o \ |
22 | compaction.o vmacache.o \ | 22 | compaction.o vmacache.o \ |
23 | interval_tree.o list_lru.o workingset.o \ | 23 | interval_tree.o list_lru.o workingset.o \ |
24 | iov_iter.o debug.o $(mmu-y) | 24 | debug.o $(mmu-y) |
25 | 25 | ||
26 | obj-y += init-mm.o | 26 | obj-y += init-mm.o |
27 | 27 | ||
diff --git a/mm/cma.c b/mm/cma.c --- a/mm/cma.c +++ b/mm/cma.c | |||
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order) | |||
64 | return (1UL << (align_order - cma->order_per_bit)) - 1; | 64 | return (1UL << (align_order - cma->order_per_bit)) - 1; |
65 | } | 65 | } |
66 | 66 | ||
67 | /* | ||
68 | * Find a PFN aligned to the specified order and return an offset represented in | ||
69 | * order_per_bits. | ||
70 | */ | ||
67 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) | 71 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) |
68 | { | 72 | { |
69 | unsigned int alignment; | ||
70 | |||
71 | if (align_order <= cma->order_per_bit) | 73 | if (align_order <= cma->order_per_bit) |
72 | return 0; | 74 | return 0; |
73 | alignment = 1UL << (align_order - cma->order_per_bit); | 75 | |
74 | return ALIGN(cma->base_pfn, alignment) - | 76 | return (ALIGN(cma->base_pfn, (1UL << align_order)) |
75 | (cma->base_pfn >> cma->order_per_bit); | 77 | - cma->base_pfn) >> cma->order_per_bit; |
76 | } | 78 | } |
77 | 79 | ||
78 | static unsigned long cma_bitmap_maxno(struct cma *cma) | 80 | static unsigned long cma_bitmap_maxno(struct cma *cma) |
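
The rewritten cma_bitmap_aligned_offset() aligns base_pfn up to the full (1 << align_order) boundary first and only then converts the distance into bitmap bits. Worked through with hypothetical numbers: base_pfn 0x2f0, align_order 5 and order_per_bit 1 give (0x300 - 0x2f0) >> 1 = 8 bits, as the snippet below confirms:

        #include <stdio.h>

        #define ALIGN(x, a)     (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

        int main(void)
        {
                unsigned long base_pfn = 0x2f0; /* hypothetical CMA base */
                int align_order = 5;            /* want a 32-page-aligned PFN */
                int order_per_bit = 1;          /* each bitmap bit covers 2 pages */
                unsigned long off;

                off = (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
                        >> order_per_bit;
                printf("%lu\n", off);           /* 8: (0x300 - 0x2f0) / 2 */
                return 0;
        }
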
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fc00c8cb5a82..6817b0350c71 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1260 | int target_nid, last_cpupid = -1; | 1260 | int target_nid, last_cpupid = -1; |
1261 | bool page_locked; | 1261 | bool page_locked; |
1262 | bool migrated = false; | 1262 | bool migrated = false; |
1263 | bool was_writable; | ||
1263 | int flags = 0; | 1264 | int flags = 0; |
1264 | 1265 | ||
1265 | /* A PROT_NONE fault should not end up here */ | 1266 | /* A PROT_NONE fault should not end up here */ |
@@ -1291,12 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1291 | flags |= TNF_FAULT_LOCAL; | 1292 | flags |= TNF_FAULT_LOCAL; |
1292 | } | 1293 | } |
1293 | 1294 | ||
1294 | /* | 1295 | /* See similar comment in do_numa_page for explanation */ |
1295 | * Avoid grouping on DSO/COW pages in specific and RO pages | 1296 | if (!(vma->vm_flags & VM_WRITE)) |
1296 | * in general, RO pages shouldn't hurt as much anyway since | ||
1297 | * they can be in shared cache state. | ||
1298 | */ | ||
1299 | if (!pmd_write(pmd)) | ||
1300 | flags |= TNF_NO_GROUP; | 1297 | flags |= TNF_NO_GROUP; |
1301 | 1298 | ||
1302 | /* | 1299 | /* |
@@ -1353,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1353 | if (migrated) { | 1350 | if (migrated) { |
1354 | flags |= TNF_MIGRATED; | 1351 | flags |= TNF_MIGRATED; |
1355 | page_nid = target_nid; | 1352 | page_nid = target_nid; |
1356 | } | 1353 | } else |
1354 | flags |= TNF_MIGRATE_FAIL; | ||
1357 | 1355 | ||
1358 | goto out; | 1356 | goto out; |
1359 | clear_pmdnuma: | 1357 | clear_pmdnuma: |
1360 | BUG_ON(!PageLocked(page)); | 1358 | BUG_ON(!PageLocked(page)); |
1359 | was_writable = pmd_write(pmd); | ||
1361 | pmd = pmd_modify(pmd, vma->vm_page_prot); | 1360 | pmd = pmd_modify(pmd, vma->vm_page_prot); |
1361 | pmd = pmd_mkyoung(pmd); | ||
1362 | if (was_writable) | ||
1363 | pmd = pmd_mkwrite(pmd); | ||
1362 | set_pmd_at(mm, haddr, pmdp, pmd); | 1364 | set_pmd_at(mm, haddr, pmdp, pmd); |
1363 | update_mmu_cache_pmd(vma, addr, pmdp); | 1365 | update_mmu_cache_pmd(vma, addr, pmdp); |
1364 | unlock_page(page); | 1366 | unlock_page(page); |
@@ -1482,6 +1484,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1482 | 1484 | ||
1483 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { | 1485 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { |
1484 | pmd_t entry; | 1486 | pmd_t entry; |
1487 | bool preserve_write = prot_numa && pmd_write(*pmd); | ||
1488 | ret = 1; | ||
1485 | 1489 | ||
1486 | /* | 1490 | /* |
1487 | * Avoid trapping faults against the zero page. The read-only | 1491 | * Avoid trapping faults against the zero page. The read-only |
@@ -1490,16 +1494,17 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1490 | */ | 1494 | */ |
1491 | if (prot_numa && is_huge_zero_pmd(*pmd)) { | 1495 | if (prot_numa && is_huge_zero_pmd(*pmd)) { |
1492 | spin_unlock(ptl); | 1496 | spin_unlock(ptl); |
1493 | return 0; | 1497 | return ret; |
1494 | } | 1498 | } |
1495 | 1499 | ||
1496 | if (!prot_numa || !pmd_protnone(*pmd)) { | 1500 | if (!prot_numa || !pmd_protnone(*pmd)) { |
1497 | ret = 1; | ||
1498 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); | 1501 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); |
1499 | entry = pmd_modify(entry, newprot); | 1502 | entry = pmd_modify(entry, newprot); |
1503 | if (preserve_write) | ||
1504 | entry = pmd_mkwrite(entry); | ||
1500 | ret = HPAGE_PMD_NR; | 1505 | ret = HPAGE_PMD_NR; |
1501 | set_pmd_at(mm, addr, pmd, entry); | 1506 | set_pmd_at(mm, addr, pmd, entry); |
1502 | BUG_ON(pmd_write(entry)); | 1507 | BUG_ON(!preserve_write && pmd_write(entry)); |
1503 | } | 1508 | } |
1504 | spin_unlock(ptl); | 1509 | spin_unlock(ptl); |
1505 | } | 1510 | } |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0a9ac6c26832..c41b2a0ee273 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) | |||
917 | __SetPageHead(page); | 917 | __SetPageHead(page); |
918 | __ClearPageReserved(page); | 918 | __ClearPageReserved(page); |
919 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { | 919 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { |
920 | __SetPageTail(p); | ||
921 | /* | 920 | /* |
922 | * For gigantic hugepages allocated through bootmem at | 921 | * For gigantic hugepages allocated through bootmem at |
923 | * boot, it's safer to be consistent with the not-gigantic | 922 | * boot, it's safer to be consistent with the not-gigantic |
@@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) | |||
933 | __ClearPageReserved(p); | 932 | __ClearPageReserved(p); |
934 | set_page_count(p, 0); | 933 | set_page_count(p, 0); |
935 | p->first_page = page; | 934 | p->first_page = page; |
935 | /* Make sure p->first_page is always valid for PageTail() */ | ||
936 | smp_wmb(); | ||
937 | __SetPageTail(p); | ||
936 | } | 938 | } |
937 | } | 939 | } |
938 | 940 | ||
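
Moving __SetPageTail() after the smp_wmb() turns tail-page setup into the usual initialise-then-publish sequence: first_page must be visible before any PageTail() test can succeed. A hedged C11-atomics analogue of that ordering; the struct and function names are invented for the sketch, and the kernel relies on smp_wmb() plus the page flag rather than an atomic_int:

        #include <stdatomic.h>

        struct page_like {
                struct page_like *first_page;
                atomic_int tail;                /* stands in for the tail flag */
        };

        /*
         * Writer: fill in the field, then publish with release ordering
         * (the kernel uses smp_wmb() before __SetPageTail()).
         */
        static void make_tail(struct page_like *p, struct page_like *head)
        {
                p->first_page = head;
                atomic_store_explicit(&p->tail, 1, memory_order_release);
        }

        /* Reader: only dereference first_page after seeing the flag. */
        static struct page_like *compound_head(struct page_like *p)
        {
                if (atomic_load_explicit(&p->tail, memory_order_acquire))
                        return p->first_page;   /* guaranteed initialised */
                return p;
        }
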
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 78fee632a7ee..936d81661c47 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/stacktrace.h> | 29 | #include <linux/stacktrace.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
32 | #include <linux/vmalloc.h> | ||
32 | #include <linux/kasan.h> | 33 | #include <linux/kasan.h> |
33 | 34 | ||
34 | #include "kasan.h" | 35 | #include "kasan.h" |
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size) | |||
414 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | 415 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, |
415 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, | 416 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, |
416 | __builtin_return_address(0)); | 417 | __builtin_return_address(0)); |
417 | return ret ? 0 : -ENOMEM; | 418 | |
419 | if (ret) { | ||
420 | find_vm_area(addr)->flags |= VM_KASAN; | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | return -ENOMEM; | ||
418 | } | 425 | } |
419 | 426 | ||
420 | void kasan_module_free(void *addr) | 427 | void kasan_free_shadow(const struct vm_struct *vm) |
421 | { | 428 | { |
422 | vfree(kasan_mem_to_shadow(addr)); | 429 | if (vm->flags & VM_KASAN) |
430 | vfree(kasan_mem_to_shadow(vm->addr)); | ||
423 | } | 431 | } |
424 | 432 | ||
425 | static void register_global(struct kasan_global *global) | 433 | static void register_global(struct kasan_global *global) |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d18d3a6e7337..b34ef4a32a3b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) | |||
5232 | * on for the root memcg is enough. | 5232 | * on for the root memcg is enough. |
5233 | */ | 5233 | */ |
5234 | if (cgroup_on_dfl(root_css->cgroup)) | 5234 | if (cgroup_on_dfl(root_css->cgroup)) |
5235 | mem_cgroup_from_css(root_css)->use_hierarchy = true; | 5235 | root_mem_cgroup->use_hierarchy = true; |
5236 | else | ||
5237 | root_mem_cgroup->use_hierarchy = false; | ||
5236 | } | 5238 | } |
5237 | 5239 | ||
5238 | static u64 memory_current_read(struct cgroup_subsys_state *css, | 5240 | static u64 memory_current_read(struct cgroup_subsys_state *css, |
@@ -5247,7 +5249,7 @@ static int memory_low_show(struct seq_file *m, void *v) | |||
5247 | unsigned long low = ACCESS_ONCE(memcg->low); | 5249 | unsigned long low = ACCESS_ONCE(memcg->low); |
5248 | 5250 | ||
5249 | if (low == PAGE_COUNTER_MAX) | 5251 | if (low == PAGE_COUNTER_MAX) |
5250 | seq_puts(m, "infinity\n"); | 5252 | seq_puts(m, "max\n"); |
5251 | else | 5253 | else |
5252 | seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); | 5254 | seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); |
5253 | 5255 | ||
@@ -5262,7 +5264,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of, | |||
5262 | int err; | 5264 | int err; |
5263 | 5265 | ||
5264 | buf = strstrip(buf); | 5266 | buf = strstrip(buf); |
5265 | err = page_counter_memparse(buf, "infinity", &low); | 5267 | err = page_counter_memparse(buf, "max", &low); |
5266 | if (err) | 5268 | if (err) |
5267 | return err; | 5269 | return err; |
5268 | 5270 | ||
@@ -5277,7 +5279,7 @@ static int memory_high_show(struct seq_file *m, void *v) | |||
5277 | unsigned long high = ACCESS_ONCE(memcg->high); | 5279 | unsigned long high = ACCESS_ONCE(memcg->high); |
5278 | 5280 | ||
5279 | if (high == PAGE_COUNTER_MAX) | 5281 | if (high == PAGE_COUNTER_MAX) |
5280 | seq_puts(m, "infinity\n"); | 5282 | seq_puts(m, "max\n"); |
5281 | else | 5283 | else |
5282 | seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); | 5284 | seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); |
5283 | 5285 | ||
@@ -5292,7 +5294,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of, | |||
5292 | int err; | 5294 | int err; |
5293 | 5295 | ||
5294 | buf = strstrip(buf); | 5296 | buf = strstrip(buf); |
5295 | err = page_counter_memparse(buf, "infinity", &high); | 5297 | err = page_counter_memparse(buf, "max", &high); |
5296 | if (err) | 5298 | if (err) |
5297 | return err; | 5299 | return err; |
5298 | 5300 | ||
@@ -5307,7 +5309,7 @@ static int memory_max_show(struct seq_file *m, void *v) | |||
5307 | unsigned long max = ACCESS_ONCE(memcg->memory.limit); | 5309 | unsigned long max = ACCESS_ONCE(memcg->memory.limit); |
5308 | 5310 | ||
5309 | if (max == PAGE_COUNTER_MAX) | 5311 | if (max == PAGE_COUNTER_MAX) |
5310 | seq_puts(m, "infinity\n"); | 5312 | seq_puts(m, "max\n"); |
5311 | else | 5313 | else |
5312 | seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); | 5314 | seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); |
5313 | 5315 | ||
@@ -5322,7 +5324,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of, | |||
5322 | int err; | 5324 | int err; |
5323 | 5325 | ||
5324 | buf = strstrip(buf); | 5326 | buf = strstrip(buf); |
5325 | err = page_counter_memparse(buf, "infinity", &max); | 5327 | err = page_counter_memparse(buf, "max", &max); |
5326 | if (err) | 5328 | if (err) |
5327 | return err; | 5329 | return err; |
5328 | 5330 | ||
@@ -5426,7 +5428,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) | |||
5426 | if (memcg == root_mem_cgroup) | 5428 | if (memcg == root_mem_cgroup) |
5427 | return false; | 5429 | return false; |
5428 | 5430 | ||
5429 | if (page_counter_read(&memcg->memory) > memcg->low) | 5431 | if (page_counter_read(&memcg->memory) >= memcg->low) |
5430 | return false; | 5432 | return false; |
5431 | 5433 | ||
5432 | while (memcg != root) { | 5434 | while (memcg != root) { |
@@ -5435,7 +5437,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) | |||
5435 | if (memcg == root_mem_cgroup) | 5437 | if (memcg == root_mem_cgroup) |
5436 | break; | 5438 | break; |
5437 | 5439 | ||
5438 | if (page_counter_read(&memcg->memory) > memcg->low) | 5440 | if (page_counter_read(&memcg->memory) >= memcg->low) |
5439 | return false; | 5441 | return false; |
5440 | } | 5442 | } |
5441 | return true; | 5443 | return true; |
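
Note on the relational change above: switching the low-limit test from ">" to ">=" only alters the boundary case where usage sits exactly at memory.low, but that case matters because a group whose usage equals its low setting (for instance both zero, the default) was previously still treated as "below its low limit". A minimal user-space sketch of that boundary, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_low_old(unsigned long usage, unsigned long low)
    {
            return !(usage > low);   /* old test: "equal" still counts as low */
    }

    static bool is_low_new(unsigned long usage, unsigned long low)
    {
            return !(usage >= low);  /* new test: "equal" no longer counts */
    }

    int main(void)
    {
            /* a group sitting exactly at its memory.low, e.g. both zero */
            printf("old=%d new=%d\n", is_low_old(0, 0), is_low_new(0, 0));
            /* prints: old=1 new=0 */
            return 0;
    }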
diff --git a/mm/memory.c b/mm/memory.c index 8068893697bb..97839f5c8c30 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3035 | int last_cpupid; | 3035 | int last_cpupid; |
3036 | int target_nid; | 3036 | int target_nid; |
3037 | bool migrated = false; | 3037 | bool migrated = false; |
3038 | bool was_writable = pte_write(pte); | ||
3038 | int flags = 0; | 3039 | int flags = 0; |
3039 | 3040 | ||
3040 | /* A PROT_NONE fault should not end up here */ | 3041 | /* A PROT_NONE fault should not end up here */ |
@@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3059 | /* Make it present again */ | 3060 | /* Make it present again */ |
3060 | pte = pte_modify(pte, vma->vm_page_prot); | 3061 | pte = pte_modify(pte, vma->vm_page_prot); |
3061 | pte = pte_mkyoung(pte); | 3062 | pte = pte_mkyoung(pte); |
3063 | if (was_writable) | ||
3064 | pte = pte_mkwrite(pte); | ||
3062 | set_pte_at(mm, addr, ptep, pte); | 3065 | set_pte_at(mm, addr, ptep, pte); |
3063 | update_mmu_cache(vma, addr, ptep); | 3066 | update_mmu_cache(vma, addr, ptep); |
3064 | 3067 | ||
@@ -3069,11 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3069 | } | 3072 | } |
3070 | 3073 | ||
3071 | /* | 3074 | /* |
3072 | * Avoid grouping on DSO/COW pages in specific and RO pages | 3075 | * Avoid grouping on RO pages in general. RO pages shouldn't hurt as |
3073 | * in general, RO pages shouldn't hurt as much anyway since | 3076 | * much anyway since they can be in shared cache state. This misses |
3074 | * they can be in shared cache state. | 3077 | * the case where a mapping is writable but the process never writes |
3078 | * to it but pte_write gets cleared during protection updates and | ||
3079 | * pte_dirty has unpredictable behaviour between PTE scan updates, | ||
3080 | * background writeback, dirty balancing and application behaviour. | ||
3075 | */ | 3081 | */ |
3076 | if (!pte_write(pte)) | 3082 | if (!(vma->vm_flags & VM_WRITE)) |
3077 | flags |= TNF_NO_GROUP; | 3083 | flags |= TNF_NO_GROUP; |
3078 | 3084 | ||
3079 | /* | 3085 | /* |
@@ -3097,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3097 | if (migrated) { | 3103 | if (migrated) { |
3098 | page_nid = target_nid; | 3104 | page_nid = target_nid; |
3099 | flags |= TNF_MIGRATED; | 3105 | flags |= TNF_MIGRATED; |
3100 | } | 3106 | } else |
3107 | flags |= TNF_MIGRATE_FAIL; | ||
3101 | 3108 | ||
3102 | out: | 3109 | out: |
3103 | if (page_nid != -1) | 3110 | if (page_nid != -1) |
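
The do_numa_page() hunks above rest on the distinction spelled out in the new comment: a protection update can clear the per-PTE write bit even though the mapping itself (VM_WRITE) stays writable, so the write bit is saved and restored around the protection change and the grouping decision now keys off the VMA flag. A toy user-space model of that distinction (two independent flags; an assumed simplification, not the kernel's page tables):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_pte { bool write; };        /* per-PTE write bit */
    struct toy_vma { bool vm_write; };     /* mapping-level permission */

    int main(void)
    {
            struct toy_vma vma = { .vm_write = true };
            struct toy_pte pte = { .write = true };

            pte.write = false;  /* NUMA hinting scan write-protects the PTE */

            /* old test keyed off the PTE bit: writable mapping wrongly
             * gets TNF_NO_GROUP after the scan */
            printf("old no_group=%d\n", !pte.write);
            /* new test keys off the VMA flag: grouping stays allowed */
            printf("new no_group=%d\n", !vma.vm_write);
            return 0;
    }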
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9fab10795bea..65842d688b7c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) | |||
1092 | return NULL; | 1092 | return NULL; |
1093 | 1093 | ||
1094 | arch_refresh_nodedata(nid, pgdat); | 1094 | arch_refresh_nodedata(nid, pgdat); |
1095 | } else { | ||
1096 | /* Reset the nr_zones and classzone_idx to 0 before reuse */ | ||
1097 | pgdat->nr_zones = 0; | ||
1098 | pgdat->classzone_idx = 0; | ||
1095 | } | 1099 | } |
1096 | 1100 | ||
1097 | /* we can use NODE_DATA(nid) from here */ | 1101 | /* we can use NODE_DATA(nid) from here */ |
@@ -1977,15 +1981,6 @@ void try_offline_node(int nid) | |||
1977 | if (is_vmalloc_addr(zone->wait_table)) | 1981 | if (is_vmalloc_addr(zone->wait_table)) |
1978 | vfree(zone->wait_table); | 1982 | vfree(zone->wait_table); |
1979 | } | 1983 | } |
1980 | |||
1981 | /* | ||
1982 | * Since there is no way to guarentee the address of pgdat/zone is not | ||
1983 | * on stack of any kernel threads or used by other kernel objects | ||
1984 | * without reference counting or other symchronizing method, do not | ||
1985 | * reset node_data and free pgdat here. Just reset it to 0 and reuse | ||
1986 | * the memory when the node is online again. | ||
1987 | */ | ||
1988 | memset(pgdat, 0, sizeof(*pgdat)); | ||
1989 | } | 1984 | } |
1990 | EXPORT_SYMBOL(try_offline_node); | 1985 | EXPORT_SYMBOL(try_offline_node); |
1991 | 1986 | ||
diff --git a/mm/mlock.c b/mm/mlock.c index 73cf0987088c..8a54cd214925 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -26,10 +26,10 @@ | |||
26 | 26 | ||
27 | int can_do_mlock(void) | 27 | int can_do_mlock(void) |
28 | { | 28 | { |
29 | if (capable(CAP_IPC_LOCK)) | ||
30 | return 1; | ||
31 | if (rlimit(RLIMIT_MEMLOCK) != 0) | 29 | if (rlimit(RLIMIT_MEMLOCK) != 0) |
32 | return 1; | 30 | return 1; |
31 | if (capable(CAP_IPC_LOCK)) | ||
32 | return 1; | ||
33 | return 0; | 33 | return 0; |
34 | } | 34 | } |
35 | EXPORT_SYMBOL(can_do_mlock); | 35 | EXPORT_SYMBOL(can_do_mlock); |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -774,10 +774,8 @@ again: remove_next = 1 + (end > next->vm_end); | |||
774 | 774 | ||
775 | importer->anon_vma = exporter->anon_vma; | 775 | importer->anon_vma = exporter->anon_vma; |
776 | error = anon_vma_clone(importer, exporter); | 776 | error = anon_vma_clone(importer, exporter); |
777 | if (error) { | 777 | if (error) |
778 | importer->anon_vma = NULL; | ||
779 | return error; | 778 | return error; |
780 | } | ||
781 | } | 779 | } |
782 | } | 780 | } |
783 | 781 | ||
diff --git a/mm/mprotect.c b/mm/mprotect.c index 44727811bf4c..88584838e704 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
@@ -75,6 +75,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
75 | oldpte = *pte; | 75 | oldpte = *pte; |
76 | if (pte_present(oldpte)) { | 76 | if (pte_present(oldpte)) { |
77 | pte_t ptent; | 77 | pte_t ptent; |
78 | bool preserve_write = prot_numa && pte_write(oldpte); | ||
78 | 79 | ||
79 | /* | 80 | /* |
80 | * Avoid trapping faults against the zero or KSM | 81 | * Avoid trapping faults against the zero or KSM |
@@ -94,6 +95,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
94 | 95 | ||
95 | ptent = ptep_modify_prot_start(mm, addr, pte); | 96 | ptent = ptep_modify_prot_start(mm, addr, pte); |
96 | ptent = pte_modify(ptent, newprot); | 97 | ptent = pte_modify(ptent, newprot); |
98 | if (preserve_write) | ||
99 | ptent = pte_mkwrite(ptent); | ||
97 | 100 | ||
98 | /* Avoid taking write faults for known dirty pages */ | 101 | /* Avoid taking write faults for known dirty pages */ |
99 | if (dirty_accountable && pte_dirty(ptent) && | 102 | if (dirty_accountable && pte_dirty(ptent) && |
diff --git a/mm/mremap.c b/mm/mremap.c index 57dadc025c64..2dc44b1cb1df 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -286,8 +286,14 @@ static unsigned long move_vma(struct vm_area_struct *vma, | |||
286 | old_len = new_len; | 286 | old_len = new_len; |
287 | old_addr = new_addr; | 287 | old_addr = new_addr; |
288 | new_addr = -ENOMEM; | 288 | new_addr = -ENOMEM; |
289 | } else if (vma->vm_file && vma->vm_file->f_op->mremap) | 289 | } else if (vma->vm_file && vma->vm_file->f_op->mremap) { |
290 | vma->vm_file->f_op->mremap(vma->vm_file, new_vma); | 290 | err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma); |
291 | if (err < 0) { | ||
292 | move_page_tables(new_vma, new_addr, vma, old_addr, | ||
293 | moved_len, true); | ||
294 | return err; | ||
295 | } | ||
296 | } | ||
291 | 297 | ||
292 | /* Conceal VM_ACCOUNT so old reservation is not undone */ | 298 | /* Conceal VM_ACCOUNT so old reservation is not undone */ |
293 | if (vm_flags & VM_ACCOUNT) { | 299 | if (vm_flags & VM_ACCOUNT) { |
diff --git a/mm/nommu.c b/mm/nommu.c index 7296360fc057..3fba2dc97c44 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -62,6 +62,7 @@ void *high_memory; | |||
62 | EXPORT_SYMBOL(high_memory); | 62 | EXPORT_SYMBOL(high_memory); |
63 | struct page *mem_map; | 63 | struct page *mem_map; |
64 | unsigned long max_mapnr; | 64 | unsigned long max_mapnr; |
65 | EXPORT_SYMBOL(max_mapnr); | ||
65 | unsigned long highest_memmap_pfn; | 66 | unsigned long highest_memmap_pfn; |
66 | struct percpu_counter vm_committed_as; | 67 | struct percpu_counter vm_committed_as; |
67 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | 68 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ |
@@ -1213,11 +1214,9 @@ static int do_mmap_private(struct vm_area_struct *vma, | |||
1213 | if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) { | 1214 | if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) { |
1214 | total = point; | 1215 | total = point; |
1215 | kdebug("try to alloc exact %lu pages", total); | 1216 | kdebug("try to alloc exact %lu pages", total); |
1216 | base = alloc_pages_exact(len, GFP_KERNEL); | ||
1217 | } else { | ||
1218 | base = (void *)__get_free_pages(GFP_KERNEL, order); | ||
1219 | } | 1217 | } |
1220 | 1218 | ||
1219 | base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL); | ||
1221 | if (!base) | 1220 | if (!base) |
1222 | goto enomem; | 1221 | goto enomem; |
1223 | 1222 | ||
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 45e187b2d971..644bcb665773 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, | |||
857 | * bw * elapsed + write_bandwidth * (period - elapsed) | 857 | * bw * elapsed + write_bandwidth * (period - elapsed) |
858 | * write_bandwidth = --------------------------------------------------- | 858 | * write_bandwidth = --------------------------------------------------- |
859 | * period | 859 | * period |
860 | * | ||
861 | * @written may have decreased due to account_page_redirty(). | ||
862 | * Avoid underflowing @bw calculation. | ||
860 | */ | 863 | */ |
861 | bw = written - bdi->written_stamp; | 864 | bw = written - min(written, bdi->written_stamp); |
862 | bw *= HZ; | 865 | bw *= HZ; |
863 | if (unlikely(elapsed > period)) { | 866 | if (unlikely(elapsed > period)) { |
864 | do_div(bw, elapsed); | 867 | do_div(bw, elapsed); |
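
The min() added above guards the unsigned subtraction: as the new comment says, @written can fall behind bdi->written_stamp after account_page_redirty(), and "written - written_stamp" would then wrap to a huge value. A small user-space sketch of the wrap and the clamp (variable names mirror the kernel's, but this is not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned long written = 100;
            unsigned long written_stamp = 130; /* stamp ran ahead after redirty accounting */

            unsigned long bad  = written - written_stamp;  /* wraps to a huge value */
            unsigned long good = written -
                    (written < written_stamp ? written : written_stamp); /* clamped to 0 */

            printf("bad=%lu good=%lu\n", bad, good);
            return 0;
    }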
@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh, | |||
922 | unsigned long now) | 925 | unsigned long now) |
923 | { | 926 | { |
924 | static DEFINE_SPINLOCK(dirty_lock); | 927 | static DEFINE_SPINLOCK(dirty_lock); |
925 | static unsigned long update_time; | 928 | static unsigned long update_time = INITIAL_JIFFIES; |
926 | 929 | ||
927 | /* | 930 | /* |
928 | * check locklessly first to optimize away locking for the most time | 931 | * check locklessly first to optimize away locking for the most time |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a47f0b229a1a..40e29429e7b0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2353,8 +2353,15 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, | |||
2353 | if (ac->high_zoneidx < ZONE_NORMAL) | 2353 | if (ac->high_zoneidx < ZONE_NORMAL) |
2354 | goto out; | 2354 | goto out; |
2355 | /* The OOM killer does not compensate for light reclaim */ | 2355 | /* The OOM killer does not compensate for light reclaim */ |
2356 | if (!(gfp_mask & __GFP_FS)) | 2356 | if (!(gfp_mask & __GFP_FS)) { |
2357 | /* | ||
2358 | * XXX: Page reclaim didn't yield anything, | ||
2359 | * and the OOM killer can't be invoked, but | ||
2360 | * keep looping as per should_alloc_retry(). | ||
2361 | */ | ||
2362 | *did_some_progress = 1; | ||
2357 | goto out; | 2363 | goto out; |
2364 | } | ||
2358 | /* | 2365 | /* |
2359 | * GFP_THISNODE contains __GFP_NORETRY and we never hit this. | 2366 | * GFP_THISNODE contains __GFP_NORETRY and we never hit this. |
2360 | * Sanity check for bare calls of __GFP_THISNODE, not real OOM. | 2367 | * Sanity check for bare calls of __GFP_THISNODE, not real OOM. |
@@ -2366,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, | |||
2366 | goto out; | 2373 | goto out; |
2367 | } | 2374 | } |
2368 | /* Exhausted what can be done so it's blamo time */ | 2375 | /* Exhausted what can be done so it's blamo time */ |
2369 | if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)) | 2376 | if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) |
2377 | || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) | ||
2370 | *did_some_progress = 1; | 2378 | *did_some_progress = 1; |
2371 | out: | 2379 | out: |
2372 | oom_zonelist_unlock(ac->zonelist, gfp_mask); | 2380 | oom_zonelist_unlock(ac->zonelist, gfp_mask); |
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 72f5ac381ab3..755a42c76eb4 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c | |||
@@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) | |||
103 | 103 | ||
104 | if (!is_migrate_isolate_page(buddy)) { | 104 | if (!is_migrate_isolate_page(buddy)) { |
105 | __isolate_free_page(page, order); | 105 | __isolate_free_page(page, order); |
106 | kernel_map_pages(page, (1 << order), 1); | ||
106 | set_page_refcounted(page); | 107 | set_page_refcounted(page); |
107 | isolated_page = page; | 108 | isolated_page = page; |
108 | } | 109 | } |
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 75c1f2878519..29f2f8b853ae 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
@@ -265,8 +265,15 @@ int walk_page_range(unsigned long start, unsigned long end, | |||
265 | vma = vma->vm_next; | 265 | vma = vma->vm_next; |
266 | 266 | ||
267 | err = walk_page_test(start, next, walk); | 267 | err = walk_page_test(start, next, walk); |
268 | if (err > 0) | 268 | if (err > 0) { |
269 | /* | ||
270 | * positive return values are purely for | ||
271 | * controlling the pagewalk, so should never | ||
272 | * be passed to the callers. | ||
273 | */ | ||
274 | err = 0; | ||
269 | continue; | 275 | continue; |
276 | } | ||
270 | if (err < 0) | 277 | if (err < 0) |
271 | break; | 278 | break; |
272 | } | 279 | } |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) | |||
287 | return 0; | 287 | return 0; |
288 | 288 | ||
289 | enomem_failure: | 289 | enomem_failure: |
290 | /* | ||
291 | * dst->anon_vma is dropped here otherwise its degree can be incorrectly | ||
292 | * decremented in unlink_anon_vmas(). | ||
293 | * We can safely do this because callers of anon_vma_clone() don't care | ||
294 | * about dst->anon_vma if anon_vma_clone() failed. | ||
295 | */ | ||
296 | dst->anon_vma = NULL; | ||
290 | unlink_anon_vmas(dst); | 297 | unlink_anon_vmas(dst); |
291 | return -ENOMEM; | 298 | return -ENOMEM; |
292 | } | 299 | } |
diff --git a/mm/shmem.c b/mm/shmem.c index 2f17cb5f00a4..cf2d0ca010bc 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -1455,6 +1455,9 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode | |||
1455 | 1455 | ||
1456 | bool shmem_mapping(struct address_space *mapping) | 1456 | bool shmem_mapping(struct address_space *mapping) |
1457 | { | 1457 | { |
1458 | if (!mapping->host) | ||
1459 | return false; | ||
1460 | |||
1458 | return mapping->host->i_sb->s_op == &shmem_ops; | 1461 | return mapping->host->i_sb->s_op == &shmem_ops; |
1459 | } | 1462 | } |
1460 | 1463 | ||
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -2449,7 +2449,8 @@ redo: | |||
2449 | do { | 2449 | do { |
2450 | tid = this_cpu_read(s->cpu_slab->tid); | 2450 | tid = this_cpu_read(s->cpu_slab->tid); |
2451 | c = raw_cpu_ptr(s->cpu_slab); | 2451 | c = raw_cpu_ptr(s->cpu_slab); |
2452 | } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); | 2452 | } while (IS_ENABLED(CONFIG_PREEMPT) && |
2453 | unlikely(tid != READ_ONCE(c->tid))); | ||
2453 | 2454 | ||
2454 | /* | 2455 | /* |
2455 | * Irqless object alloc/free algorithm used here depends on sequence | 2456 | * Irqless object alloc/free algorithm used here depends on sequence |
@@ -2718,7 +2719,8 @@ redo: | |||
2718 | do { | 2719 | do { |
2719 | tid = this_cpu_read(s->cpu_slab->tid); | 2720 | tid = this_cpu_read(s->cpu_slab->tid); |
2720 | c = raw_cpu_ptr(s->cpu_slab); | 2721 | c = raw_cpu_ptr(s->cpu_slab); |
2721 | } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); | 2722 | } while (IS_ENABLED(CONFIG_PREEMPT) && |
2723 | unlikely(tid != READ_ONCE(c->tid))); | ||
2722 | 2724 | ||
2723 | /* Same with comment on barrier() in slab_alloc_node() */ | 2725 | /* Same with comment on barrier() in slab_alloc_node() */ |
2724 | barrier(); | 2726 | barrier(); |
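
The READ_ONCE() added to the retry loops above forces c->tid to be fetched with a single volatile load inside the comparison, so the compiler cannot fold it into an earlier read or re-fetch it after the check while the task may have been preempted and migrated. A simplified stand-in for the pattern (assumed simplification, not the SLUB fast path itself):

    #include <stdio.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct cpu_slab { unsigned long tid; };

    /* Keep sampling until the snapshot and the structure's tid agree;
     * READ_ONCE makes the second read a single, untorn load. */
    static unsigned long stable_tid(struct cpu_slab *c)
    {
            unsigned long tid;

            do {
                    tid = READ_ONCE(c->tid);
            } while (tid != READ_ONCE(c->tid));

            return tid;
    }

    int main(void)
    {
            struct cpu_slab c = { .tid = 42 };

            printf("%lu\n", stable_tid(&c));
            return 0;
    }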
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 35b25e1340ca..49abccf29a29 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr) | |||
1418 | spin_unlock(&vmap_area_lock); | 1418 | spin_unlock(&vmap_area_lock); |
1419 | 1419 | ||
1420 | vmap_debug_free_range(va->va_start, va->va_end); | 1420 | vmap_debug_free_range(va->va_start, va->va_end); |
1421 | kasan_free_shadow(vm); | ||
1421 | free_unmap_vmap_area(va); | 1422 | free_unmap_vmap_area(va); |
1422 | vm->size -= PAGE_SIZE; | 1423 | vm->size -= PAGE_SIZE; |
1423 | 1424 | ||
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index d8e376a5f0f1..36a1a739ad68 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -658,14 +658,30 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args) | |||
658 | static void p9_virtio_remove(struct virtio_device *vdev) | 658 | static void p9_virtio_remove(struct virtio_device *vdev) |
659 | { | 659 | { |
660 | struct virtio_chan *chan = vdev->priv; | 660 | struct virtio_chan *chan = vdev->priv; |
661 | 661 | unsigned long warning_time; | |
662 | if (chan->inuse) | ||
663 | p9_virtio_close(chan->client); | ||
664 | vdev->config->del_vqs(vdev); | ||
665 | 662 | ||
666 | mutex_lock(&virtio_9p_lock); | 663 | mutex_lock(&virtio_9p_lock); |
664 | |||
665 | /* Remove self from list so we don't get new users. */ | ||
667 | list_del(&chan->chan_list); | 666 | list_del(&chan->chan_list); |
667 | warning_time = jiffies; | ||
668 | |||
669 | /* Wait for existing users to close. */ | ||
670 | while (chan->inuse) { | ||
671 | mutex_unlock(&virtio_9p_lock); | ||
672 | msleep(250); | ||
673 | if (time_after(jiffies, warning_time + 10 * HZ)) { | ||
674 | dev_emerg(&vdev->dev, | ||
675 | "p9_virtio_remove: waiting for device in use.\n"); | ||
676 | warning_time = jiffies; | ||
677 | } | ||
678 | mutex_lock(&virtio_9p_lock); | ||
679 | } | ||
680 | |||
668 | mutex_unlock(&virtio_9p_lock); | 681 | mutex_unlock(&virtio_9p_lock); |
682 | |||
683 | vdev->config->del_vqs(vdev); | ||
684 | |||
669 | sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); | 685 | sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); |
670 | kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); | 686 | kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); |
671 | kfree(chan->tag); | 687 | kfree(chan->tag); |
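
The p9_virtio_remove() change above waits for existing users with a classic "poll under a lock, but sleep without it" loop: the mutex is dropped around msleep() so the users being waited for can acquire it and clear chan->inuse, with a warning every ten seconds. A user-space analogue of the pattern (pthreads, compile with -pthread; an assumed simplification, not the 9p code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool inuse = true;

    static void *user(void *arg)
    {
            (void)arg;
            sleep(1);
            pthread_mutex_lock(&lock);
            inuse = false;                    /* last user goes away */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, user, NULL);

            pthread_mutex_lock(&lock);
            while (inuse) {
                    pthread_mutex_unlock(&lock);  /* never sleep holding the lock */
                    usleep(250 * 1000);
                    pthread_mutex_lock(&lock);
            }
            pthread_mutex_unlock(&lock);

            pthread_join(t, NULL);
            puts("all users gone, safe to tear down");
            return 0;
    }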
diff --git a/net/bridge/br.c b/net/bridge/br.c index fb57ab6b24f9..02c24cf63c34 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c | |||
@@ -190,6 +190,8 @@ static int __init br_init(void) | |||
190 | { | 190 | { |
191 | int err; | 191 | int err; |
192 | 192 | ||
193 | BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); | ||
194 | |||
193 | err = stp_proto_register(&br_stp_proto); | 195 | err = stp_proto_register(&br_stp_proto); |
194 | if (err < 0) { | 196 | if (err < 0) { |
195 | pr_err("bridge: can't register sap for STP\n"); | 197 | pr_err("bridge: can't register sap for STP\n"); |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index b087d278c679..1849d96b3c91 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -563,6 +563,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) | |||
563 | */ | 563 | */ |
564 | del_nbp(p); | 564 | del_nbp(p); |
565 | 565 | ||
566 | dev_set_mtu(br->dev, br_min_mtu(br)); | ||
567 | |||
566 | spin_lock_bh(&br->lock); | 568 | spin_lock_bh(&br->lock); |
567 | changed_addr = br_stp_recalculate_bridge_id(br); | 569 | changed_addr = br_stp_recalculate_bridge_id(br); |
568 | spin_unlock_bh(&br->lock); | 570 | spin_unlock_bh(&br->lock); |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 769b185fefbd..a6e2da0bc718 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -281,7 +281,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
281 | int copylen; | 281 | int copylen; |
282 | 282 | ||
283 | ret = -EOPNOTSUPP; | 283 | ret = -EOPNOTSUPP; |
284 | if (m->msg_flags&MSG_OOB) | 284 | if (flags & MSG_OOB) |
285 | goto read_error; | 285 | goto read_error; |
286 | 286 | ||
287 | skb = skb_recv_datagram(sk, flags, 0 , &ret); | 287 | skb = skb_recv_datagram(sk, flags, 0 , &ret); |
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c index 8bc7caa28e64..434ba8557826 100644 --- a/net/caif/cffrml.c +++ b/net/caif/cffrml.c | |||
@@ -84,7 +84,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt) | |||
84 | u16 tmp; | 84 | u16 tmp; |
85 | u16 len; | 85 | u16 len; |
86 | u16 hdrchks; | 86 | u16 hdrchks; |
87 | u16 pktchks; | 87 | int pktchks; |
88 | struct cffrml *this; | 88 | struct cffrml *this; |
89 | this = container_obj(layr); | 89 | this = container_obj(layr); |
90 | 90 | ||
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 1be0b521ac49..f6c3b2137eea 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c | |||
@@ -255,9 +255,9 @@ inline u16 cfpkt_getlen(struct cfpkt *pkt) | |||
255 | return skb->len; | 255 | return skb->len; |
256 | } | 256 | } |
257 | 257 | ||
258 | inline u16 cfpkt_iterate(struct cfpkt *pkt, | 258 | int cfpkt_iterate(struct cfpkt *pkt, |
259 | u16 (*iter_func)(u16, void *, u16), | 259 | u16 (*iter_func)(u16, void *, u16), |
260 | u16 data) | 260 | u16 data) |
261 | { | 261 | { |
262 | /* | 262 | /* |
263 | * Don't care about the performance hit of linearizing, | 263 | * Don't care about the performance hit of linearizing, |
diff --git a/net/can/af_can.c b/net/can/af_can.c index 66e08040ced7..32d710eaf1fc 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop) | |||
259 | goto inval_skb; | 259 | goto inval_skb; |
260 | } | 260 | } |
261 | 261 | ||
262 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
263 | |||
264 | skb_reset_mac_header(skb); | ||
262 | skb_reset_network_header(skb); | 265 | skb_reset_network_header(skb); |
263 | skb_reset_transport_header(skb); | 266 | skb_reset_transport_header(skb); |
264 | 267 | ||
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 6b3f54ed65ba..a9f4ae45b7fb 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con) | |||
484 | IPPROTO_TCP, &sock); | 484 | IPPROTO_TCP, &sock); |
485 | if (ret) | 485 | if (ret) |
486 | return ret; | 486 | return ret; |
487 | sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC; | 487 | sock->sk->sk_allocation = GFP_NOFS; |
488 | 488 | ||
489 | #ifdef CONFIG_LOCKDEP | 489 | #ifdef CONFIG_LOCKDEP |
490 | lockdep_set_class(&sock->sk->sk_lock, &socket_class); | 490 | lockdep_set_class(&sock->sk->sk_lock, &socket_class); |
@@ -520,8 +520,6 @@ static int ceph_tcp_connect(struct ceph_connection *con) | |||
520 | ret); | 520 | ret); |
521 | } | 521 | } |
522 | 522 | ||
523 | sk_set_memalloc(sock->sk); | ||
524 | |||
525 | con->sock = sock; | 523 | con->sock = sock; |
526 | return 0; | 524 | return 0; |
527 | } | 525 | } |
@@ -2808,11 +2806,8 @@ static void con_work(struct work_struct *work) | |||
2808 | { | 2806 | { |
2809 | struct ceph_connection *con = container_of(work, struct ceph_connection, | 2807 | struct ceph_connection *con = container_of(work, struct ceph_connection, |
2810 | work.work); | 2808 | work.work); |
2811 | unsigned long pflags = current->flags; | ||
2812 | bool fault; | 2809 | bool fault; |
2813 | 2810 | ||
2814 | current->flags |= PF_MEMALLOC; | ||
2815 | |||
2816 | mutex_lock(&con->mutex); | 2811 | mutex_lock(&con->mutex); |
2817 | while (true) { | 2812 | while (true) { |
2818 | int ret; | 2813 | int ret; |
@@ -2866,8 +2861,6 @@ static void con_work(struct work_struct *work) | |||
2866 | con_fault_finish(con); | 2861 | con_fault_finish(con); |
2867 | 2862 | ||
2868 | con->ops->put(con); | 2863 | con->ops->put(con); |
2869 | |||
2870 | tsk_restore_flags(current, pflags, PF_MEMALLOC); | ||
2871 | } | 2864 | } |
2872 | 2865 | ||
2873 | /* | 2866 | /* |
diff --git a/net/compat.c b/net/compat.c index 3236b4167a32..f7bd286a8280 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -49,6 +49,13 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg, | |||
49 | __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || | 49 | __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || |
50 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) | 50 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) |
51 | return -EFAULT; | 51 | return -EFAULT; |
52 | |||
53 | if (!uaddr) | ||
54 | kmsg->msg_namelen = 0; | ||
55 | |||
56 | if (kmsg->msg_namelen < 0) | ||
57 | return -EINVAL; | ||
58 | |||
52 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | 59 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
53 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); | 60 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); |
54 | kmsg->msg_control = compat_ptr(tmp3); | 61 | kmsg->msg_control = compat_ptr(tmp3); |
@@ -711,24 +718,18 @@ static unsigned char nas[21] = { | |||
711 | 718 | ||
712 | COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) | 719 | COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) |
713 | { | 720 | { |
714 | if (flags & MSG_CMSG_COMPAT) | ||
715 | return -EINVAL; | ||
716 | return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); | 721 | return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); |
717 | } | 722 | } |
718 | 723 | ||
719 | COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, | 724 | COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, |
720 | unsigned int, vlen, unsigned int, flags) | 725 | unsigned int, vlen, unsigned int, flags) |
721 | { | 726 | { |
722 | if (flags & MSG_CMSG_COMPAT) | ||
723 | return -EINVAL; | ||
724 | return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, | 727 | return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, |
725 | flags | MSG_CMSG_COMPAT); | 728 | flags | MSG_CMSG_COMPAT); |
726 | } | 729 | } |
727 | 730 | ||
728 | COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) | 731 | COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) |
729 | { | 732 | { |
730 | if (flags & MSG_CMSG_COMPAT) | ||
731 | return -EINVAL; | ||
732 | return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); | 733 | return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); |
733 | } | 734 | } |
734 | 735 | ||
@@ -751,9 +752,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, | |||
751 | int datagrams; | 752 | int datagrams; |
752 | struct timespec ktspec; | 753 | struct timespec ktspec; |
753 | 754 | ||
754 | if (flags & MSG_CMSG_COMPAT) | ||
755 | return -EINVAL; | ||
756 | |||
757 | if (timeout == NULL) | 755 | if (timeout == NULL) |
758 | return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, | 756 | return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, |
759 | flags | MSG_CMSG_COMPAT, NULL); | 757 | flags | MSG_CMSG_COMPAT, NULL); |
diff --git a/net/core/dev.c b/net/core/dev.c index 8f9710c62e20..45109b70664e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -946,7 +946,7 @@ bool dev_valid_name(const char *name) | |||
946 | return false; | 946 | return false; |
947 | 947 | ||
948 | while (*name) { | 948 | while (*name) { |
949 | if (*name == '/' || isspace(*name)) | 949 | if (*name == '/' || *name == ':' || isspace(*name)) |
950 | return false; | 950 | return false; |
951 | name++; | 951 | name++; |
952 | } | 952 | } |
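
dev_valid_name() now rejects ':' alongside '/' and whitespace; ':' is commonly used as a separator when interface names are parsed (e.g. the "name:" column in /proc/net/dev), which is presumably why it is disallowed here. A user-space restatement of the loop shown above (simplified: the kernel also rejects names of IFNAMSIZ or longer):

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool valid_name(const char *name)
    {
            if (*name == '\0' || !strcmp(name, ".") || !strcmp(name, ".."))
                    return false;
            for (; *name; name++)
                    if (*name == '/' || *name == ':' ||
                        isspace((unsigned char)*name))
                            return false;
            return true;
    }

    int main(void)
    {
            printf("%d %d\n", valid_name("eth0"), valid_name("eth0:1")); /* 1 0 */
            return 0;
    }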
@@ -2848,7 +2848,9 @@ static void skb_update_prio(struct sk_buff *skb) | |||
2848 | #define skb_update_prio(skb) | 2848 | #define skb_update_prio(skb) |
2849 | #endif | 2849 | #endif |
2850 | 2850 | ||
2851 | static DEFINE_PER_CPU(int, xmit_recursion); | 2851 | DEFINE_PER_CPU(int, xmit_recursion); |
2852 | EXPORT_SYMBOL(xmit_recursion); | ||
2853 | |||
2852 | #define RECURSION_LIMIT 10 | 2854 | #define RECURSION_LIMIT 10 |
2853 | 2855 | ||
2854 | /** | 2856 | /** |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 91f74f3eb204..aa378ecef186 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -98,6 +98,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] | |||
98 | [NETIF_F_RXALL_BIT] = "rx-all", | 98 | [NETIF_F_RXALL_BIT] = "rx-all", |
99 | [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload", | 99 | [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload", |
100 | [NETIF_F_BUSY_POLL_BIT] = "busy-poll", | 100 | [NETIF_F_BUSY_POLL_BIT] = "busy-poll", |
101 | [NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload", | ||
101 | }; | 102 | }; |
102 | 103 | ||
103 | static const char | 104 | static const char |
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 44706e81b2e0..e4fdc9dfb2c7 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -175,9 +175,9 @@ void fib_rules_unregister(struct fib_rules_ops *ops) | |||
175 | 175 | ||
176 | spin_lock(&net->rules_mod_lock); | 176 | spin_lock(&net->rules_mod_lock); |
177 | list_del_rcu(&ops->list); | 177 | list_del_rcu(&ops->list); |
178 | fib_rules_cleanup_ops(ops); | ||
179 | spin_unlock(&net->rules_mod_lock); | 178 | spin_unlock(&net->rules_mod_lock); |
180 | 179 | ||
180 | fib_rules_cleanup_ops(ops); | ||
181 | call_rcu(&ops->rcu, fib_rules_put_rcu); | 181 | call_rcu(&ops->rcu, fib_rules_put_rcu); |
182 | } | 182 | } |
183 | EXPORT_SYMBOL_GPL(fib_rules_unregister); | 183 | EXPORT_SYMBOL_GPL(fib_rules_unregister); |
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 0c08062d1796..1e2f46a69d50 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c | |||
@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) | |||
32 | return 0; | 32 | return 0; |
33 | 33 | ||
34 | nla_put_failure: | 34 | nla_put_failure: |
35 | kfree(d->xstats); | ||
36 | d->xstats = NULL; | ||
37 | d->xstats_len = 0; | ||
35 | spin_unlock_bh(d->lock); | 38 | spin_unlock_bh(d->lock); |
36 | return -1; | 39 | return -1; |
37 | } | 40 | } |
@@ -305,7 +308,9 @@ int | |||
305 | gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) | 308 | gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) |
306 | { | 309 | { |
307 | if (d->compat_xstats) { | 310 | if (d->compat_xstats) { |
308 | d->xstats = st; | 311 | d->xstats = kmemdup(st, len, GFP_ATOMIC); |
312 | if (!d->xstats) | ||
313 | goto err_out; | ||
309 | d->xstats_len = len; | 314 | d->xstats_len = len; |
310 | } | 315 | } |
311 | 316 | ||
@@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) | |||
313 | return gnet_stats_copy(d, TCA_STATS_APP, st, len); | 318 | return gnet_stats_copy(d, TCA_STATS_APP, st, len); |
314 | 319 | ||
315 | return 0; | 320 | return 0; |
321 | |||
322 | err_out: | ||
323 | d->xstats_len = 0; | ||
324 | spin_unlock_bh(d->lock); | ||
325 | return -1; | ||
316 | } | 326 | } |
317 | EXPORT_SYMBOL(gnet_stats_copy_app); | 327 | EXPORT_SYMBOL(gnet_stats_copy_app); |
318 | 328 | ||
@@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump *d) | |||
345 | return -1; | 355 | return -1; |
346 | } | 356 | } |
347 | 357 | ||
358 | kfree(d->xstats); | ||
359 | d->xstats = NULL; | ||
360 | d->xstats_len = 0; | ||
348 | spin_unlock_bh(d->lock); | 361 | spin_unlock_bh(d->lock); |
349 | return 0; | 362 | return 0; |
350 | } | 363 | } |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index cb5290b8c428..70d3450588b2 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -198,8 +198,10 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc) | |||
198 | */ | 198 | */ |
199 | int peernet2id(struct net *net, struct net *peer) | 199 | int peernet2id(struct net *net, struct net *peer) |
200 | { | 200 | { |
201 | int id = __peernet2id(net, peer, true); | 201 | bool alloc = atomic_read(&peer->count) == 0 ? false : true; |
202 | int id; | ||
202 | 203 | ||
204 | id = __peernet2id(net, peer, alloc); | ||
203 | return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; | 205 | return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; |
204 | } | 206 | } |
205 | EXPORT_SYMBOL(peernet2id); | 207 | EXPORT_SYMBOL(peernet2id); |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b4899f5b7388..508155b283dd 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct file *file, | |||
1134 | return len; | 1134 | return len; |
1135 | 1135 | ||
1136 | i += len; | 1136 | i += len; |
1137 | if ((value > 1) && | ||
1138 | (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING))) | ||
1139 | return -ENOTSUPP; | ||
1137 | pkt_dev->burst = value < 1 ? 1 : value; | 1140 | pkt_dev->burst = value < 1 ? 1 : value; |
1138 | sprintf(pg_result, "OK: burst=%d", pkt_dev->burst); | 1141 | sprintf(pg_result, "OK: burst=%d", pkt_dev->burst); |
1139 | return count; | 1142 | return count; |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index ab293a3066b3..7ebed55b5f7d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -1300,7 +1300,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
1300 | s_h = cb->args[0]; | 1300 | s_h = cb->args[0]; |
1301 | s_idx = cb->args[1]; | 1301 | s_idx = cb->args[1]; |
1302 | 1302 | ||
1303 | rcu_read_lock(); | ||
1304 | cb->seq = net->dev_base_seq; | 1303 | cb->seq = net->dev_base_seq; |
1305 | 1304 | ||
1306 | /* A hack to preserve kernel<->userspace interface. | 1305 | /* A hack to preserve kernel<->userspace interface. |
@@ -1322,7 +1321,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
1322 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { | 1321 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
1323 | idx = 0; | 1322 | idx = 0; |
1324 | head = &net->dev_index_head[h]; | 1323 | head = &net->dev_index_head[h]; |
1325 | hlist_for_each_entry_rcu(dev, head, index_hlist) { | 1324 | hlist_for_each_entry(dev, head, index_hlist) { |
1326 | if (idx < s_idx) | 1325 | if (idx < s_idx) |
1327 | goto cont; | 1326 | goto cont; |
1328 | err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, | 1327 | err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, |
@@ -1344,7 +1343,6 @@ cont: | |||
1344 | } | 1343 | } |
1345 | } | 1344 | } |
1346 | out: | 1345 | out: |
1347 | rcu_read_unlock(); | ||
1348 | cb->args[1] = idx; | 1346 | cb->args[1] = idx; |
1349 | cb->args[0] = h; | 1347 | cb->args[0] = h; |
1350 | 1348 | ||
@@ -1934,10 +1932,10 @@ static int rtnl_group_changelink(const struct sk_buff *skb, | |||
1934 | struct ifinfomsg *ifm, | 1932 | struct ifinfomsg *ifm, |
1935 | struct nlattr **tb) | 1933 | struct nlattr **tb) |
1936 | { | 1934 | { |
1937 | struct net_device *dev; | 1935 | struct net_device *dev, *aux; |
1938 | int err; | 1936 | int err; |
1939 | 1937 | ||
1940 | for_each_netdev(net, dev) { | 1938 | for_each_netdev_safe(net, dev, aux) { |
1941 | if (dev->group == group) { | 1939 | if (dev->group == group) { |
1942 | err = do_setlink(skb, dev, ifm, tb, NULL, 0); | 1940 | err = do_setlink(skb, dev, ifm, tb, NULL, 0); |
1943 | if (err < 0) | 1941 | if (err < 0) |
@@ -2012,8 +2010,8 @@ replay: | |||
2012 | } | 2010 | } |
2013 | 2011 | ||
2014 | if (1) { | 2012 | if (1) { |
2015 | struct nlattr *attr[ops ? ops->maxtype + 1 : 0]; | 2013 | struct nlattr *attr[ops ? ops->maxtype + 1 : 1]; |
2016 | struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0]; | 2014 | struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1]; |
2017 | struct nlattr **data = NULL; | 2015 | struct nlattr **data = NULL; |
2018 | struct nlattr **slave_data = NULL; | 2016 | struct nlattr **slave_data = NULL; |
2019 | struct net *dest_net, *link_net = NULL; | 2017 | struct net *dest_net, *link_net = NULL; |
@@ -2122,6 +2120,10 @@ replay: | |||
2122 | if (IS_ERR(dest_net)) | 2120 | if (IS_ERR(dest_net)) |
2123 | return PTR_ERR(dest_net); | 2121 | return PTR_ERR(dest_net); |
2124 | 2122 | ||
2123 | err = -EPERM; | ||
2124 | if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN)) | ||
2125 | goto out; | ||
2126 | |||
2125 | if (tb[IFLA_LINK_NETNSID]) { | 2127 | if (tb[IFLA_LINK_NETNSID]) { |
2126 | int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); | 2128 | int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); |
2127 | 2129 | ||
@@ -2130,6 +2132,9 @@ replay: | |||
2130 | err = -EINVAL; | 2132 | err = -EINVAL; |
2131 | goto out; | 2133 | goto out; |
2132 | } | 2134 | } |
2135 | err = -EPERM; | ||
2136 | if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) | ||
2137 | goto out; | ||
2133 | } | 2138 | } |
2134 | 2139 | ||
2135 | dev = rtnl_create_link(link_net ? : dest_net, ifname, | 2140 | dev = rtnl_create_link(link_net ? : dest_net, ifname, |
@@ -2161,28 +2166,28 @@ replay: | |||
2161 | } | 2166 | } |
2162 | } | 2167 | } |
2163 | err = rtnl_configure_link(dev, ifm); | 2168 | err = rtnl_configure_link(dev, ifm); |
2164 | if (err < 0) { | 2169 | if (err < 0) |
2165 | if (ops->newlink) { | 2170 | goto out_unregister; |
2166 | LIST_HEAD(list_kill); | ||
2167 | |||
2168 | ops->dellink(dev, &list_kill); | ||
2169 | unregister_netdevice_many(&list_kill); | ||
2170 | } else { | ||
2171 | unregister_netdevice(dev); | ||
2172 | } | ||
2173 | goto out; | ||
2174 | } | ||
2175 | |||
2176 | if (link_net) { | 2171 | if (link_net) { |
2177 | err = dev_change_net_namespace(dev, dest_net, ifname); | 2172 | err = dev_change_net_namespace(dev, dest_net, ifname); |
2178 | if (err < 0) | 2173 | if (err < 0) |
2179 | unregister_netdevice(dev); | 2174 | goto out_unregister; |
2180 | } | 2175 | } |
2181 | out: | 2176 | out: |
2182 | if (link_net) | 2177 | if (link_net) |
2183 | put_net(link_net); | 2178 | put_net(link_net); |
2184 | put_net(dest_net); | 2179 | put_net(dest_net); |
2185 | return err; | 2180 | return err; |
2181 | out_unregister: | ||
2182 | if (ops->newlink) { | ||
2183 | LIST_HEAD(list_kill); | ||
2184 | |||
2185 | ops->dellink(dev, &list_kill); | ||
2186 | unregister_netdevice_many(&list_kill); | ||
2187 | } else { | ||
2188 | unregister_netdevice(dev); | ||
2189 | } | ||
2190 | goto out; | ||
2186 | } | 2191 | } |
2187 | } | 2192 | } |
2188 | 2193 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 88c613eab142..8e4ac97c8477 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3621,13 +3621,14 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk) | |||
3621 | { | 3621 | { |
3622 | struct sk_buff_head *q = &sk->sk_error_queue; | 3622 | struct sk_buff_head *q = &sk->sk_error_queue; |
3623 | struct sk_buff *skb, *skb_next; | 3623 | struct sk_buff *skb, *skb_next; |
3624 | unsigned long flags; | ||
3624 | int err = 0; | 3625 | int err = 0; |
3625 | 3626 | ||
3626 | spin_lock_bh(&q->lock); | 3627 | spin_lock_irqsave(&q->lock, flags); |
3627 | skb = __skb_dequeue(q); | 3628 | skb = __skb_dequeue(q); |
3628 | if (skb && (skb_next = skb_peek(q))) | 3629 | if (skb && (skb_next = skb_peek(q))) |
3629 | err = SKB_EXT_ERR(skb_next)->ee.ee_errno; | 3630 | err = SKB_EXT_ERR(skb_next)->ee.ee_errno; |
3630 | spin_unlock_bh(&q->lock); | 3631 | spin_unlock_irqrestore(&q->lock, flags); |
3631 | 3632 | ||
3632 | sk->sk_err = err; | 3633 | sk->sk_err = err; |
3633 | if (err) | 3634 | if (err) |
@@ -3732,9 +3733,13 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, | |||
3732 | struct sock *sk, int tstype) | 3733 | struct sock *sk, int tstype) |
3733 | { | 3734 | { |
3734 | struct sk_buff *skb; | 3735 | struct sk_buff *skb; |
3735 | bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; | 3736 | bool tsonly; |
3737 | |||
3738 | if (!sk) | ||
3739 | return; | ||
3736 | 3740 | ||
3737 | if (!sk || !skb_may_tx_timestamp(sk, tsonly)) | 3741 | tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; |
3742 | if (!skb_may_tx_timestamp(sk, tsonly)) | ||
3738 | return; | 3743 | return; |
3739 | 3744 | ||
3740 | if (tsonly) | 3745 | if (tsonly) |
@@ -4172,7 +4177,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) | |||
4172 | skb->ignore_df = 0; | 4177 | skb->ignore_df = 0; |
4173 | skb_dst_drop(skb); | 4178 | skb_dst_drop(skb); |
4174 | skb->mark = 0; | 4179 | skb->mark = 0; |
4175 | skb->sender_cpu = 0; | 4180 | skb_sender_cpu_clear(skb); |
4176 | skb_init_secmark(skb); | 4181 | skb_init_secmark(skb); |
4177 | secpath_reset(skb); | 4182 | secpath_reset(skb); |
4178 | nf_reset(skb); | 4183 | nf_reset(skb); |
diff --git a/net/core/sock.c b/net/core/sock.c index 93c8b20c91e4..71e3e5f1eaa0 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -653,6 +653,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) | |||
653 | sock_reset_flag(sk, bit); | 653 | sock_reset_flag(sk, bit); |
654 | } | 654 | } |
655 | 655 | ||
656 | bool sk_mc_loop(struct sock *sk) | ||
657 | { | ||
658 | if (dev_recursion_level()) | ||
659 | return false; | ||
660 | if (!sk) | ||
661 | return true; | ||
662 | switch (sk->sk_family) { | ||
663 | case AF_INET: | ||
664 | return inet_sk(sk)->mc_loop; | ||
665 | #if IS_ENABLED(CONFIG_IPV6) | ||
666 | case AF_INET6: | ||
667 | return inet6_sk(sk)->mc_loop; | ||
668 | #endif | ||
669 | } | ||
670 | WARN_ON(1); | ||
671 | return true; | ||
672 | } | ||
673 | EXPORT_SYMBOL(sk_mc_loop); | ||
674 | |||
656 | /* | 675 | /* |
657 | * This is meant for all protocols to use and covers goings on | 676 | * This is meant for all protocols to use and covers goings on |
658 | * at the socket level. Everything here is generic. | 677 | * at the socket level. Everything here is generic. |
@@ -1655,6 +1674,10 @@ void sock_rfree(struct sk_buff *skb) | |||
1655 | } | 1674 | } |
1656 | EXPORT_SYMBOL(sock_rfree); | 1675 | EXPORT_SYMBOL(sock_rfree); |
1657 | 1676 | ||
1677 | /* | ||
1678 | * Buffer destructor for skbs that are not used directly in read or write | ||
1679 | * path, e.g. for error handler skbs. Automatically called from kfree_skb. | ||
1680 | */ | ||
1658 | void sock_efree(struct sk_buff *skb) | 1681 | void sock_efree(struct sk_buff *skb) |
1659 | { | 1682 | { |
1660 | sock_put(skb->sk); | 1683 | sock_put(skb->sk); |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 433424804284..8ce351ffceb1 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -25,6 +25,8 @@ | |||
25 | static int zero = 0; | 25 | static int zero = 0; |
26 | static int one = 1; | 26 | static int one = 1; |
27 | static int ushort_max = USHRT_MAX; | 27 | static int ushort_max = USHRT_MAX; |
28 | static int min_sndbuf = SOCK_MIN_SNDBUF; | ||
29 | static int min_rcvbuf = SOCK_MIN_RCVBUF; | ||
28 | 30 | ||
29 | static int net_msg_warn; /* Unused, but still a sysctl */ | 31 | static int net_msg_warn; /* Unused, but still a sysctl */ |
30 | 32 | ||
@@ -237,7 +239,7 @@ static struct ctl_table net_core_table[] = { | |||
237 | .maxlen = sizeof(int), | 239 | .maxlen = sizeof(int), |
238 | .mode = 0644, | 240 | .mode = 0644, |
239 | .proc_handler = proc_dointvec_minmax, | 241 | .proc_handler = proc_dointvec_minmax, |
240 | .extra1 = &one, | 242 | .extra1 = &min_sndbuf, |
241 | }, | 243 | }, |
242 | { | 244 | { |
243 | .procname = "rmem_max", | 245 | .procname = "rmem_max", |
@@ -245,7 +247,7 @@ static struct ctl_table net_core_table[] = { | |||
245 | .maxlen = sizeof(int), | 247 | .maxlen = sizeof(int), |
246 | .mode = 0644, | 248 | .mode = 0644, |
247 | .proc_handler = proc_dointvec_minmax, | 249 | .proc_handler = proc_dointvec_minmax, |
248 | .extra1 = &one, | 250 | .extra1 = &min_rcvbuf, |
249 | }, | 251 | }, |
250 | { | 252 | { |
251 | .procname = "wmem_default", | 253 | .procname = "wmem_default", |
@@ -253,7 +255,7 @@ static struct ctl_table net_core_table[] = { | |||
253 | .maxlen = sizeof(int), | 255 | .maxlen = sizeof(int), |
254 | .mode = 0644, | 256 | .mode = 0644, |
255 | .proc_handler = proc_dointvec_minmax, | 257 | .proc_handler = proc_dointvec_minmax, |
256 | .extra1 = &one, | 258 | .extra1 = &min_sndbuf, |
257 | }, | 259 | }, |
258 | { | 260 | { |
259 | .procname = "rmem_default", | 261 | .procname = "rmem_default", |
@@ -261,7 +263,7 @@ static struct ctl_table net_core_table[] = { | |||
261 | .maxlen = sizeof(int), | 263 | .maxlen = sizeof(int), |
262 | .mode = 0644, | 264 | .mode = 0644, |
263 | .proc_handler = proc_dointvec_minmax, | 265 | .proc_handler = proc_dointvec_minmax, |
264 | .extra1 = &one, | 266 | .extra1 = &min_rcvbuf, |
265 | }, | 267 | }, |
266 | { | 268 | { |
267 | .procname = "dev_weight", | 269 | .procname = "dev_weight", |
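
With extra1 now pointing at SOCK_MIN_SNDBUF/SOCK_MIN_RCVBUF instead of the constant 1, proc_dointvec_minmax should reject attempts to set wmem_max, rmem_max, wmem_default or rmem_default below the smallest usable socket buffer; for example, writing a tiny value such as 64 to /proc/sys/net/core/wmem_max would previously be accepted but should now fail rather than produce an unusably small limit.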
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 1d7c1256e845..3b81092771f8 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -1062,7 +1062,7 @@ source_ok: | |||
1062 | if (decnet_debug_level & 16) | 1062 | if (decnet_debug_level & 16) |
1063 | printk(KERN_DEBUG | 1063 | printk(KERN_DEBUG |
1064 | "dn_route_output_slow: initial checks complete." | 1064 | "dn_route_output_slow: initial checks complete." |
1065 | " dst=%o4x src=%04x oif=%d try_hard=%d\n", | 1065 | " dst=%04x src=%04x oif=%d try_hard=%d\n", |
1066 | le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr), | 1066 | le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr), |
1067 | fld.flowidn_oif, try_hard); | 1067 | fld.flowidn_oif, try_hard); |
1068 | 1068 | ||
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index faf7cc3483fe..9d66a0f72f90 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c | |||
@@ -248,7 +248,9 @@ void __init dn_fib_rules_init(void) | |||
248 | 248 | ||
249 | void __exit dn_fib_rules_cleanup(void) | 249 | void __exit dn_fib_rules_cleanup(void) |
250 | { | 250 | { |
251 | rtnl_lock(); | ||
251 | fib_rules_unregister(dn_fib_rules_ops); | 252 | fib_rules_unregister(dn_fib_rules_ops); |
253 | rtnl_unlock(); | ||
252 | rcu_barrier(); | 254 | rcu_barrier(); |
253 | } | 255 | } |
254 | 256 | ||
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 2173402d87e0..4dea2e0681d1 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -501,12 +501,10 @@ static struct net_device *dev_to_net_device(struct device *dev) | |||
501 | #ifdef CONFIG_OF | 501 | #ifdef CONFIG_OF |
502 | static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, | 502 | static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, |
503 | struct dsa_chip_data *cd, | 503 | struct dsa_chip_data *cd, |
504 | int chip_index, | 504 | int chip_index, int port_index, |
505 | struct device_node *link) | 505 | struct device_node *link) |
506 | { | 506 | { |
507 | int ret; | ||
508 | const __be32 *reg; | 507 | const __be32 *reg; |
509 | int link_port_addr; | ||
510 | int link_sw_addr; | 508 | int link_sw_addr; |
511 | struct device_node *parent_sw; | 509 | struct device_node *parent_sw; |
512 | int len; | 510 | int len; |
@@ -519,6 +517,10 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, | |||
519 | if (!reg || (len != sizeof(*reg) * 2)) | 517 | if (!reg || (len != sizeof(*reg) * 2)) |
520 | return -EINVAL; | 518 | return -EINVAL; |
521 | 519 | ||
520 | /* | ||
521 | * Get the destination switch number from the second field of its 'reg' | ||
522 | * property, i.e. for "reg = <0x19 1>" sw_addr is '1'. | ||
523 | */ | ||
522 | link_sw_addr = be32_to_cpup(reg + 1); | 524 | link_sw_addr = be32_to_cpup(reg + 1); |
523 | 525 | ||
524 | if (link_sw_addr >= pd->nr_chips) | 526 | if (link_sw_addr >= pd->nr_chips) |
@@ -535,20 +537,9 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, | |||
535 | memset(cd->rtable, -1, pd->nr_chips * sizeof(s8)); | 537 | memset(cd->rtable, -1, pd->nr_chips * sizeof(s8)); |
536 | } | 538 | } |
537 | 539 | ||
538 | reg = of_get_property(link, "reg", NULL); | 540 | cd->rtable[link_sw_addr] = port_index; |
539 | if (!reg) { | ||
540 | ret = -EINVAL; | ||
541 | goto out; | ||
542 | } | ||
543 | |||
544 | link_port_addr = be32_to_cpup(reg); | ||
545 | |||
546 | cd->rtable[link_sw_addr] = link_port_addr; | ||
547 | 541 | ||
548 | return 0; | 542 | return 0; |
549 | out: | ||
550 | kfree(cd->rtable); | ||
551 | return ret; | ||
552 | } | 543 | } |
553 | 544 | ||
554 | static void dsa_of_free_platform_data(struct dsa_platform_data *pd) | 545 | static void dsa_of_free_platform_data(struct dsa_platform_data *pd) |
@@ -658,7 +649,7 @@ static int dsa_of_probe(struct platform_device *pdev) | |||
658 | if (!strcmp(port_name, "dsa") && link && | 649 | if (!strcmp(port_name, "dsa") && link && |
659 | pd->nr_chips > 1) { | 650 | pd->nr_chips > 1) { |
660 | ret = dsa_of_setup_routing_table(pd, cd, | 651 | ret = dsa_of_setup_routing_table(pd, cd, |
661 | chip_index, link); | 652 | chip_index, port_index, link); |
662 | if (ret) | 653 | if (ret) |
663 | goto out_free_chip; | 654 | goto out_free_chip; |
664 | } | 655 | } |
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index a138d75751df..44d27469ae55 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c | |||
@@ -359,8 +359,11 @@ static void hsr_dev_destroy(struct net_device *hsr_dev) | |||
359 | struct hsr_port *port; | 359 | struct hsr_port *port; |
360 | 360 | ||
361 | hsr = netdev_priv(hsr_dev); | 361 | hsr = netdev_priv(hsr_dev); |
362 | |||
363 | rtnl_lock(); | ||
362 | hsr_for_each_port(hsr, port) | 364 | hsr_for_each_port(hsr, port) |
363 | hsr_del_port(port); | 365 | hsr_del_port(port); |
366 | rtnl_unlock(); | ||
364 | 367 | ||
365 | del_timer_sync(&hsr->prune_timer); | 368 | del_timer_sync(&hsr->prune_timer); |
366 | del_timer_sync(&hsr->announce_timer); | 369 | del_timer_sync(&hsr->announce_timer); |
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c index 779d28b65417..cd37d0011b42 100644 --- a/net/hsr/hsr_main.c +++ b/net/hsr/hsr_main.c | |||
@@ -36,6 +36,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, | |||
36 | return NOTIFY_DONE; /* Not an HSR device */ | 36 | return NOTIFY_DONE; /* Not an HSR device */ |
37 | hsr = netdev_priv(dev); | 37 | hsr = netdev_priv(dev); |
38 | port = hsr_port_get_hsr(hsr, HSR_PT_MASTER); | 38 | port = hsr_port_get_hsr(hsr, HSR_PT_MASTER); |
39 | if (port == NULL) { | ||
40 | /* Resend of notification concerning removed device? */ | ||
41 | return NOTIFY_DONE; | ||
42 | } | ||
39 | } else { | 43 | } else { |
40 | hsr = port->hsr; | 44 | hsr = port->hsr; |
41 | } | 45 | } |
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index a348dcbcd683..7d37366cc695 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c | |||
@@ -181,8 +181,10 @@ void hsr_del_port(struct hsr_port *port) | |||
181 | list_del_rcu(&port->port_list); | 181 | list_del_rcu(&port->port_list); |
182 | 182 | ||
183 | if (port != master) { | 183 | if (port != master) { |
184 | netdev_update_features(master->dev); | 184 | if (master != NULL) { |
185 | dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); | 185 | netdev_update_features(master->dev); |
186 | dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); | ||
187 | } | ||
186 | netdev_rx_handler_unregister(port->dev); | 188 | netdev_rx_handler_unregister(port->dev); |
187 | dev_set_promiscuity(port->dev, -1); | 189 | dev_set_promiscuity(port->dev, -1); |
188 | } | 190 | } |
@@ -192,5 +194,7 @@ void hsr_del_port(struct hsr_port *port) | |||
192 | */ | 194 | */ |
193 | 195 | ||
194 | synchronize_rcu(); | 196 | synchronize_rcu(); |
195 | dev_put(port->dev); | 197 | |
198 | if (port != master) | ||
199 | dev_put(port->dev); | ||
196 | } | 200 | } |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 57be71dd6a9e..23b9b3e86f4c 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -1111,11 +1111,10 @@ static void ip_fib_net_exit(struct net *net) | |||
1111 | { | 1111 | { |
1112 | unsigned int i; | 1112 | unsigned int i; |
1113 | 1113 | ||
1114 | rtnl_lock(); | ||
1114 | #ifdef CONFIG_IP_MULTIPLE_TABLES | 1115 | #ifdef CONFIG_IP_MULTIPLE_TABLES |
1115 | fib4_rules_exit(net); | 1116 | fib4_rules_exit(net); |
1116 | #endif | 1117 | #endif |
1117 | |||
1118 | rtnl_lock(); | ||
1119 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) { | 1118 | for (i = 0; i < FIB_TABLE_HASHSZ; i++) { |
1120 | struct fib_table *tb; | 1119 | struct fib_table *tb; |
1121 | struct hlist_head *head; | 1120 | struct hlist_head *head; |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 14d02ea905b6..3e44b9b0b78e 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -268,6 +268,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo) | |||
268 | release_sock(sk); | 268 | release_sock(sk); |
269 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) | 269 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) |
270 | timeo = schedule_timeout(timeo); | 270 | timeo = schedule_timeout(timeo); |
271 | sched_annotate_sleep(); | ||
271 | lock_sock(sk); | 272 | lock_sock(sk); |
272 | err = 0; | 273 | err = 0; |
273 | if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) | 274 | if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 81751f12645f..592aff37366b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler( | |||
71 | mutex_unlock(&inet_diag_table_mutex); | 71 | mutex_unlock(&inet_diag_table_mutex); |
72 | } | 72 | } |
73 | 73 | ||
74 | static size_t inet_sk_attr_size(void) | ||
75 | { | ||
76 | return nla_total_size(sizeof(struct tcp_info)) | ||
77 | + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ | ||
78 | + nla_total_size(1) /* INET_DIAG_TOS */ | ||
79 | + nla_total_size(1) /* INET_DIAG_TCLASS */ | ||
80 | + nla_total_size(sizeof(struct inet_diag_meminfo)) | ||
81 | + nla_total_size(sizeof(struct inet_diag_msg)) | ||
82 | + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) | ||
83 | + nla_total_size(TCP_CA_NAME_MAX) | ||
84 | + nla_total_size(sizeof(struct tcpvegas_info)) | ||
85 | + 64; | ||
86 | } | ||
87 | |||
74 | int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | 88 | int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, |
75 | struct sk_buff *skb, struct inet_diag_req_v2 *req, | 89 | struct sk_buff *skb, struct inet_diag_req_v2 *req, |
76 | struct user_namespace *user_ns, | 90 | struct user_namespace *user_ns, |
@@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s | |||
326 | if (err) | 340 | if (err) |
327 | goto out; | 341 | goto out; |
328 | 342 | ||
329 | rep = nlmsg_new(sizeof(struct inet_diag_msg) + | 343 | rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL); |
330 | sizeof(struct inet_diag_meminfo) + | ||
331 | sizeof(struct tcp_info) + 64, GFP_KERNEL); | ||
332 | if (!rep) { | 344 | if (!rep) { |
333 | err = -ENOMEM; | 345 | err = -ENOMEM; |
334 | goto out; | 346 | goto out; |
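
inet_sk_attr_size() above replaces the hand-summed nlmsg_new() size with one aligned term per attribute plus 64 bytes of slack. Assuming the usual netlink layout (4-byte attribute header, 4-byte alignment), nla_total_size(payload) = NLA_ALIGN(NLA_HDRLEN + payload), so each of the one-byte attributes (SHUTDOWN, TOS, TCLASS) reserves 8 bytes. A small sketch of that arithmetic, not the kernel headers:

    #include <stdio.h>

    struct nlattr { unsigned short nla_len, nla_type; };

    #define NLA_ALIGNTO     4
    #define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN      ((int)NLA_ALIGN(sizeof(struct nlattr)))

    static int nla_total_size(int payload)
    {
            return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
            printf("%d\n", nla_total_size(1));  /* -> 8 */
            return 0;
    }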
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 787b3c294ce6..d9bc28ac5d1b 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -67,6 +67,7 @@ static int ip_forward_finish(struct sk_buff *skb) | |||
67 | if (unlikely(opt->optlen)) | 67 | if (unlikely(opt->optlen)) |
68 | ip_forward_options(skb); | 68 | ip_forward_options(skb); |
69 | 69 | ||
70 | skb_sender_cpu_clear(skb); | ||
70 | return dst_output(skb); | 71 | return dst_output(skb); |
71 | } | 72 | } |
72 | 73 | ||
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index e5b6d0ddcb58..145a50c4d566 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag); | |||
659 | struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) | 659 | struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) |
660 | { | 660 | { |
661 | struct iphdr iph; | 661 | struct iphdr iph; |
662 | int netoff; | ||
662 | u32 len; | 663 | u32 len; |
663 | 664 | ||
664 | if (skb->protocol != htons(ETH_P_IP)) | 665 | if (skb->protocol != htons(ETH_P_IP)) |
665 | return skb; | 666 | return skb; |
666 | 667 | ||
667 | if (!skb_copy_bits(skb, 0, &iph, sizeof(iph))) | 668 | netoff = skb_network_offset(skb); |
669 | |||
670 | if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0) | ||
668 | return skb; | 671 | return skb; |
669 | 672 | ||
670 | if (iph.ihl < 5 || iph.version != 4) | 673 | if (iph.ihl < 5 || iph.version != 4) |
671 | return skb; | 674 | return skb; |
672 | 675 | ||
673 | len = ntohs(iph.tot_len); | 676 | len = ntohs(iph.tot_len); |
674 | if (skb->len < len || len < (iph.ihl * 4)) | 677 | if (skb->len < netoff + len || len < (iph.ihl * 4)) |
675 | return skb; | 678 | return skb; |
676 | 679 | ||
677 | if (ip_is_fragment(&iph)) { | 680 | if (ip_is_fragment(&iph)) { |
678 | skb = skb_share_check(skb, GFP_ATOMIC); | 681 | skb = skb_share_check(skb, GFP_ATOMIC); |
679 | if (skb) { | 682 | if (skb) { |
680 | if (!pskb_may_pull(skb, iph.ihl*4)) | 683 | if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) |
681 | return skb; | 684 | return skb; |
682 | if (pskb_trim_rcsum(skb, len)) | 685 | if (pskb_trim_rcsum(skb, netoff + len)) |
683 | return skb; | 686 | return skb; |
684 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); | 687 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); |
685 | if (ip_defrag(skb, user)) | 688 | if (ip_defrag(skb, user)) |
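The ip_check_defrag() change stops assuming the IP header sits at offset 0 in the buffer: it reads the header at skb_network_offset() and validates every length against that offset. A standalone sketch of the same check on a flat byte buffer is below; the structure layout and helper are made up for illustration, not kernel API.

/* sketch: validate an IPv4 header that starts at an offset inside a buffer */
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <arpa/inet.h>

struct ipv4_hdr {
	uint8_t  ver_ihl;     /* version (4 bits) + header length in 32-bit words */
	uint8_t  tos;
	uint16_t tot_len;     /* total length, network byte order */
	/* remaining header fields omitted for the sketch */
};

static bool check_at_offset(const uint8_t *buf, size_t buf_len, size_t netoff)
{
	struct ipv4_hdr iph;

	if (buf_len < netoff + sizeof(iph))
		return false;                     /* cannot even read the fixed part */
	memcpy(&iph, buf + netoff, sizeof(iph));

	unsigned int version = iph.ver_ihl >> 4;
	unsigned int ihl = iph.ver_ihl & 0x0f;
	if (version != 4 || ihl < 5)
		return false;

	size_t tot_len = ntohs(iph.tot_len);
	/* the fix: compare against netoff + tot_len, not tot_len alone */
	if (buf_len < netoff + tot_len || tot_len < ihl * 4)
		return false;

	return true;
}

int main(void)
{
	uint8_t buf[64] = { 0 };

	buf[14] = 0x45;                 /* version 4, ihl 5, header at offset 14 */
	buf[16] = 0; buf[17] = 40;      /* tot_len = 40, big-endian */
	return check_at_offset(buf, sizeof(buf), 14) ? 0 : 1;
}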
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index d68199d9b2b0..a7aea2048a0d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -888,7 +888,8 @@ static int __ip_append_data(struct sock *sk, | |||
888 | cork->length += length; | 888 | cork->length += length; |
889 | if (((length > mtu) || (skb && skb_is_gso(skb))) && | 889 | if (((length > mtu) || (skb && skb_is_gso(skb))) && |
890 | (sk->sk_protocol == IPPROTO_UDP) && | 890 | (sk->sk_protocol == IPPROTO_UDP) && |
891 | (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) { | 891 | (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && |
892 | (sk->sk_type == SOCK_DGRAM)) { | ||
892 | err = ip_ufo_append_data(sk, queue, getfrag, from, length, | 893 | err = ip_ufo_append_data(sk, queue, getfrag, from, length, |
893 | hh_len, fragheaderlen, transhdrlen, | 894 | hh_len, fragheaderlen, transhdrlen, |
894 | maxfraglen, flags); | 895 | maxfraglen, flags); |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 31d8c71986b4..5cd99271d3a6 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -432,17 +432,32 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf | |||
432 | kfree_skb(skb); | 432 | kfree_skb(skb); |
433 | } | 433 | } |
434 | 434 | ||
435 | static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk, | 435 | /* IPv4 supports cmsg on all icmp errors and some timestamps |
436 | const struct sk_buff *skb, | 436 | * |
437 | int ee_origin) | 437 | * Timestamp code paths do not initialize the fields expected by cmsg: |
438 | * the PKTINFO fields in skb->cb[]. Fill those in here. | ||
439 | */ | ||
440 | static bool ipv4_datagram_support_cmsg(const struct sock *sk, | ||
441 | struct sk_buff *skb, | ||
442 | int ee_origin) | ||
438 | { | 443 | { |
439 | struct in_pktinfo *info = PKTINFO_SKB_CB(skb); | 444 | struct in_pktinfo *info; |
445 | |||
446 | if (ee_origin == SO_EE_ORIGIN_ICMP) | ||
447 | return true; | ||
440 | 448 | ||
441 | if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) || | 449 | if (ee_origin == SO_EE_ORIGIN_LOCAL) |
442 | (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || | 450 | return false; |
451 | |||
452 | /* Support IP_PKTINFO on tstamp packets if requested, to correlate | ||
453 | * timestamp with egress dev. Not possible for packets without dev | ||
454 | * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). | ||
455 | */ | ||
456 | if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || | ||
443 | (!skb->dev)) | 457 | (!skb->dev)) |
444 | return false; | 458 | return false; |
445 | 459 | ||
460 | info = PKTINFO_SKB_CB(skb); | ||
446 | info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; | 461 | info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; |
447 | info->ipi_ifindex = skb->dev->ifindex; | 462 | info->ipi_ifindex = skb->dev->ifindex; |
448 | return true; | 463 | return true; |
@@ -483,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
483 | 498 | ||
484 | serr = SKB_EXT_ERR(skb); | 499 | serr = SKB_EXT_ERR(skb); |
485 | 500 | ||
486 | if (sin && skb->len) { | 501 | if (sin && serr->port) { |
487 | sin->sin_family = AF_INET; | 502 | sin->sin_family = AF_INET; |
488 | sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + | 503 | sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + |
489 | serr->addr_offset); | 504 | serr->addr_offset); |
@@ -496,9 +511,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
496 | sin = &errhdr.offender; | 511 | sin = &errhdr.offender; |
497 | memset(sin, 0, sizeof(*sin)); | 512 | memset(sin, 0, sizeof(*sin)); |
498 | 513 | ||
499 | if (skb->len && | 514 | if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) { |
500 | (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || | ||
501 | ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) { | ||
502 | sin->sin_family = AF_INET; | 515 | sin->sin_family = AF_INET; |
503 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; | 516 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; |
504 | if (inet_sk(sk)->cmsg_flags) | 517 | if (inet_sk(sk)->cmsg_flags) |
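The rewrite collapses the old two-part test into one helper: ICMP-originated errors always carry valid address data, locally generated ones never do, and timestamp errors qualify only when the socket opted into SOF_TIMESTAMPING_OPT_CMSG and the skb still has an egress device. A compact user-space model of that decision, with stand-in types rather than kernel ones:

/* sketch: which error-queue packets can carry IP_PKTINFO-style cmsg data */
#include <stdbool.h>

enum origin { ORIGIN_LOCAL, ORIGIN_ICMP, ORIGIN_TIMESTAMPING };

struct fake_sock { bool want_tstamp_cmsg; };   /* models SOF_TIMESTAMPING_OPT_CMSG */
struct fake_skb  { bool has_dev; };            /* models skb->dev != NULL */

static bool datagram_support_cmsg(const struct fake_sock *sk,
				  const struct fake_skb *skb,
				  enum origin origin)
{
	if (origin == ORIGIN_ICMP)
		return true;            /* ICMP errors: fields are already valid */
	if (origin == ORIGIN_LOCAL)
		return false;           /* local errors: nothing useful to report */

	/* timestamp errors: only with opt-in and a known egress device */
	return sk->want_tstamp_cmsg && skb->has_dev;
}

int main(void)
{
	struct fake_sock sk = { .want_tstamp_cmsg = true };
	struct fake_skb skb = { .has_dev = true };

	return datagram_support_cmsg(&sk, &skb, ORIGIN_TIMESTAMPING) ? 0 : 1;
}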
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9d78427652d2..fe54eba6d00d 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -268,7 +268,7 @@ static int __net_init ipmr_rules_init(struct net *net) | |||
268 | return 0; | 268 | return 0; |
269 | 269 | ||
270 | err2: | 270 | err2: |
271 | kfree(mrt); | 271 | ipmr_free_table(mrt); |
272 | err1: | 272 | err1: |
273 | fib_rules_unregister(ops); | 273 | fib_rules_unregister(ops); |
274 | return err; | 274 | return err; |
@@ -278,11 +278,13 @@ static void __net_exit ipmr_rules_exit(struct net *net) | |||
278 | { | 278 | { |
279 | struct mr_table *mrt, *next; | 279 | struct mr_table *mrt, *next; |
280 | 280 | ||
281 | rtnl_lock(); | ||
281 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { | 282 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { |
282 | list_del(&mrt->list); | 283 | list_del(&mrt->list); |
283 | ipmr_free_table(mrt); | 284 | ipmr_free_table(mrt); |
284 | } | 285 | } |
285 | fib_rules_unregister(net->ipv4.mr_rules_ops); | 286 | fib_rules_unregister(net->ipv4.mr_rules_ops); |
287 | rtnl_unlock(); | ||
286 | } | 288 | } |
287 | #else | 289 | #else |
288 | #define ipmr_for_each_table(mrt, net) \ | 290 | #define ipmr_for_each_table(mrt, net) \ |
@@ -308,7 +310,10 @@ static int __net_init ipmr_rules_init(struct net *net) | |||
308 | 310 | ||
309 | static void __net_exit ipmr_rules_exit(struct net *net) | 311 | static void __net_exit ipmr_rules_exit(struct net *net) |
310 | { | 312 | { |
313 | rtnl_lock(); | ||
311 | ipmr_free_table(net->ipv4.mrt); | 314 | ipmr_free_table(net->ipv4.mrt); |
315 | net->ipv4.mrt = NULL; | ||
316 | rtnl_unlock(); | ||
312 | } | 317 | } |
313 | #endif | 318 | #endif |
314 | 319 | ||
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 99e810f84671..cf5e82f39d3b 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -272,9 +272,9 @@ static void trace_packet(const struct sk_buff *skb, | |||
272 | &chainname, &comment, &rulenum) != 0) | 272 | &chainname, &comment, &rulenum) != 0) |
273 | break; | 273 | break; |
274 | 274 | ||
275 | nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo, | 275 | nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo, |
276 | "TRACE: %s:%s:%s:%u ", | 276 | "TRACE: %s:%s:%s:%u ", |
277 | tablename, chainname, comment, rulenum); | 277 | tablename, chainname, comment, rulenum); |
278 | } | 278 | } |
279 | #endif | 279 | #endif |
280 | 280 | ||
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index e9f66e1cda50..208d5439e59b 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk) | |||
259 | kgid_t low, high; | 259 | kgid_t low, high; |
260 | int ret = 0; | 260 | int ret = 0; |
261 | 261 | ||
262 | if (sk->sk_family == AF_INET6) | ||
263 | sk->sk_ipv6only = 1; | ||
264 | |||
262 | inet_get_ping_group_range_net(net, &low, &high); | 265 | inet_get_ping_group_range_net(net, &low, &high); |
263 | if (gid_lte(low, group) && gid_lte(group, high)) | 266 | if (gid_lte(low, group) && gid_lte(group, high)) |
264 | return 0; | 267 | return 0; |
@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, | |||
305 | if (addr_len < sizeof(*addr)) | 308 | if (addr_len < sizeof(*addr)) |
306 | return -EINVAL; | 309 | return -EINVAL; |
307 | 310 | ||
311 | if (addr->sin_family != AF_INET && | ||
312 | !(addr->sin_family == AF_UNSPEC && | ||
313 | addr->sin_addr.s_addr == htonl(INADDR_ANY))) | ||
314 | return -EAFNOSUPPORT; | ||
315 | |||
308 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", | 316 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", |
309 | sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); | 317 | sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); |
310 | 318 | ||
@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, | |||
330 | return -EINVAL; | 338 | return -EINVAL; |
331 | 339 | ||
332 | if (addr->sin6_family != AF_INET6) | 340 | if (addr->sin6_family != AF_INET6) |
333 | return -EINVAL; | 341 | return -EAFNOSUPPORT; |
334 | 342 | ||
335 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", | 343 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", |
336 | sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); | 344 | sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); |
@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
716 | if (msg->msg_namelen < sizeof(*usin)) | 724 | if (msg->msg_namelen < sizeof(*usin)) |
717 | return -EINVAL; | 725 | return -EINVAL; |
718 | if (usin->sin_family != AF_INET) | 726 | if (usin->sin_family != AF_INET) |
719 | return -EINVAL; | 727 | return -EAFNOSUPPORT; |
720 | daddr = usin->sin_addr.s_addr; | 728 | daddr = usin->sin_addr.s_addr; |
721 | /* no remote port */ | 729 | /* no remote port */ |
722 | } else { | 730 | } else { |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9d72a0fcd928..995a2259bcfc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -835,17 +835,13 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, | |||
835 | int large_allowed) | 835 | int large_allowed) |
836 | { | 836 | { |
837 | struct tcp_sock *tp = tcp_sk(sk); | 837 | struct tcp_sock *tp = tcp_sk(sk); |
838 | u32 new_size_goal, size_goal, hlen; | 838 | u32 new_size_goal, size_goal; |
839 | 839 | ||
840 | if (!large_allowed || !sk_can_gso(sk)) | 840 | if (!large_allowed || !sk_can_gso(sk)) |
841 | return mss_now; | 841 | return mss_now; |
842 | 842 | ||
843 | /* Maybe we should/could use sk->sk_prot->max_header here ? */ | 843 | /* Note : tcp_tso_autosize() will eventually split this later */ |
844 | hlen = inet_csk(sk)->icsk_af_ops->net_header_len + | 844 | new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; |
845 | inet_csk(sk)->icsk_ext_hdr_len + | ||
846 | tp->tcp_header_len; | ||
847 | |||
848 | new_size_goal = sk->sk_gso_max_size - 1 - hlen; | ||
849 | new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); | 845 | new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); |
850 | 846 | ||
851 | /* We try hard to avoid divides here */ | 847 | /* We try hard to avoid divides here */ |
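The tcp.c change bases the GSO size goal on the worst-case MAX_TCP_HEADER rather than the exact per-connection header length, since tcp_tso_autosize() will split oversized chunks later anyway. A small sketch of the surrounding arithmetic, with illustrative constants that are not the kernel's exact values:

/* sketch: derive a GSO size goal as a multiple of the MSS, illustrative numbers */
#include <stdio.h>

#define MAX_TCP_HEADER 320      /* worst-case reserved header space (illustrative) */

static unsigned int size_goal(unsigned int gso_max_size, unsigned int mss)
{
	unsigned int goal = gso_max_size - 1 - MAX_TCP_HEADER;

	/* one divide when the goal changes; the kernel caches the segment count
	 * (tp->gso_segs) so the common case avoids this division entirely */
	unsigned int segs = goal / mss;
	if (segs == 0)
		segs = 1;

	return segs * mss;          /* an exact multiple of the MSS */
}

int main(void)
{
	printf("%u\n", size_goal(65536, 1448));
	return 0;
}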
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index d694088214cd..62856e185a93 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -378,6 +378,12 @@ EXPORT_SYMBOL_GPL(tcp_slow_start); | |||
378 | */ | 378 | */ |
379 | void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) | 379 | void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) |
380 | { | 380 | { |
381 | /* If credits accumulated at a higher w, apply them gently now. */ | ||
382 | if (tp->snd_cwnd_cnt >= w) { | ||
383 | tp->snd_cwnd_cnt = 0; | ||
384 | tp->snd_cwnd++; | ||
385 | } | ||
386 | |||
381 | tp->snd_cwnd_cnt += acked; | 387 | tp->snd_cwnd_cnt += acked; |
382 | if (tp->snd_cwnd_cnt >= w) { | 388 | if (tp->snd_cwnd_cnt >= w) { |
383 | u32 delta = tp->snd_cwnd_cnt / w; | 389 | u32 delta = tp->snd_cwnd_cnt / w; |
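tcp_cong_avoid_ai() now spends any credits carried over from a larger window gently, one cwnd step, before accumulating new ones, so a window reduction cannot convert stale credits into a burst of growth. A user-space model of the updated logic (same structure, ordinary integers instead of kernel types):

/* sketch: additive-increase step with carried-over credit handling */
#include <stdio.h>

struct cc { unsigned int snd_cwnd; unsigned int snd_cwnd_cnt; };

static void cong_avoid_ai(struct cc *tp, unsigned int w, unsigned int acked)
{
	/* credits accumulated while w was larger: apply them as a single step */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		unsigned int delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
}

int main(void)
{
	struct cc tp = { .snd_cwnd = 10, .snd_cwnd_cnt = 40 };

	cong_avoid_ai(&tp, 10, 3);      /* stale credits give +1, then 3 acked */
	printf("cwnd=%u cnt=%u\n", tp.snd_cwnd, tp.snd_cwnd_cnt);
	return 0;
}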
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 4b276d1ed980..06d3d665a9fd 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -306,8 +306,10 @@ tcp_friendliness: | |||
306 | } | 306 | } |
307 | } | 307 | } |
308 | 308 | ||
309 | if (ca->cnt == 0) /* cannot be zero */ | 309 | /* The maximum rate of cwnd increase CUBIC allows is 1 packet per |
310 | ca->cnt = 1; | 310 | * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT. |
311 | */ | ||
312 | ca->cnt = max(ca->cnt, 2U); | ||
311 | } | 313 | } |
312 | 314 | ||
313 | static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) | 315 | static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) |
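The clamp to ca->cnt >= 2 encodes the growth bound stated in the new comment: with cnt = 2, the additive-increase machinery adds one segment to cwnd for every 2 segments ACKed, so over one RTT in which roughly cwnd segments are ACKed the window grows by cwnd / 2, i.e. cwnd -> 1.5 * cwnd per RTT. The old floor of cnt = 1 would have permitted one increment per ACK, a full doubling per RTT, which is faster than CUBIC intends to allow.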
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8fdd27b17306..f501ac048366 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -3105,10 +3105,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
3105 | if (!first_ackt.v64) | 3105 | if (!first_ackt.v64) |
3106 | first_ackt = last_ackt; | 3106 | first_ackt = last_ackt; |
3107 | 3107 | ||
3108 | if (!(sacked & TCPCB_SACKED_ACKED)) | 3108 | if (!(sacked & TCPCB_SACKED_ACKED)) { |
3109 | reord = min(pkts_acked, reord); | 3109 | reord = min(pkts_acked, reord); |
3110 | if (!after(scb->end_seq, tp->high_seq)) | 3110 | if (!after(scb->end_seq, tp->high_seq)) |
3111 | flag |= FLAG_ORIG_SACK_ACKED; | 3111 | flag |= FLAG_ORIG_SACK_ACKED; |
3112 | } | ||
3112 | } | 3113 | } |
3113 | 3114 | ||
3114 | if (sacked & TCPCB_SACKED_ACKED) | 3115 | if (sacked & TCPCB_SACKED_ACKED) |
@@ -4770,7 +4771,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk) | |||
4770 | return false; | 4771 | return false; |
4771 | 4772 | ||
4772 | /* If we filled the congestion window, do not expand. */ | 4773 | /* If we filled the congestion window, do not expand. */ |
4773 | if (tp->packets_out >= tp->snd_cwnd) | 4774 | if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) |
4774 | return false; | 4775 | return false; |
4775 | 4776 | ||
4776 | return true; | 4777 | return true; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5a2dfed4783b..f1756ee02207 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1518,7 +1518,7 @@ void tcp_v4_early_demux(struct sk_buff *skb) | |||
1518 | skb->sk = sk; | 1518 | skb->sk = sk; |
1519 | skb->destructor = sock_edemux; | 1519 | skb->destructor = sock_edemux; |
1520 | if (sk->sk_state != TCP_TIME_WAIT) { | 1520 | if (sk->sk_state != TCP_TIME_WAIT) { |
1521 | struct dst_entry *dst = sk->sk_rx_dst; | 1521 | struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); |
1522 | 1522 | ||
1523 | if (dst) | 1523 | if (dst) |
1524 | dst = dst_check(dst, 0); | 1524 | dst = dst_check(dst, 0); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a2a796c5536b..1db253e36045 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2773,15 +2773,11 @@ void tcp_send_fin(struct sock *sk) | |||
2773 | } else { | 2773 | } else { |
2774 | /* Socket is locked, keep trying until memory is available. */ | 2774 | /* Socket is locked, keep trying until memory is available. */ |
2775 | for (;;) { | 2775 | for (;;) { |
2776 | skb = alloc_skb_fclone(MAX_TCP_HEADER, | 2776 | skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); |
2777 | sk->sk_allocation); | ||
2778 | if (skb) | 2777 | if (skb) |
2779 | break; | 2778 | break; |
2780 | yield(); | 2779 | yield(); |
2781 | } | 2780 | } |
2782 | |||
2783 | /* Reserve space for headers and prepare control bits. */ | ||
2784 | skb_reserve(skb, MAX_TCP_HEADER); | ||
2785 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ | 2781 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ |
2786 | tcp_init_nondata_skb(skb, tp->write_seq, | 2782 | tcp_init_nondata_skb(skb, tp->write_seq, |
2787 | TCPHDR_ACK | TCPHDR_FIN); | 2783 | TCPHDR_ACK | TCPHDR_FIN); |
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index d5f6bd9a210a..dab73813cb92 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -63,6 +63,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) | |||
63 | return err; | 63 | return err; |
64 | 64 | ||
65 | IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; | 65 | IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; |
66 | skb->protocol = htons(ETH_P_IP); | ||
66 | 67 | ||
67 | return x->outer_mode->output2(x, skb); | 68 | return x->outer_mode->output2(x, skb); |
68 | } | 69 | } |
@@ -71,7 +72,6 @@ EXPORT_SYMBOL(xfrm4_prepare_output); | |||
71 | int xfrm4_output_finish(struct sk_buff *skb) | 72 | int xfrm4_output_finish(struct sk_buff *skb) |
72 | { | 73 | { |
73 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); | 74 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); |
74 | skb->protocol = htons(ETH_P_IP); | ||
75 | 75 | ||
76 | #ifdef CONFIG_NETFILTER | 76 | #ifdef CONFIG_NETFILTER |
77 | IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; | 77 | IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 98e4a63d72bb..b6030025f411 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -4903,6 +4903,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write, | |||
4903 | return ret; | 4903 | return ret; |
4904 | } | 4904 | } |
4905 | 4905 | ||
4906 | static | ||
4907 | int addrconf_sysctl_mtu(struct ctl_table *ctl, int write, | ||
4908 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
4909 | { | ||
4910 | struct inet6_dev *idev = ctl->extra1; | ||
4911 | int min_mtu = IPV6_MIN_MTU; | ||
4912 | struct ctl_table lctl; | ||
4913 | |||
4914 | lctl = *ctl; | ||
4915 | lctl.extra1 = &min_mtu; | ||
4916 | lctl.extra2 = idev ? &idev->dev->mtu : NULL; | ||
4917 | |||
4918 | return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos); | ||
4919 | } | ||
4920 | |||
4906 | static void dev_disable_change(struct inet6_dev *idev) | 4921 | static void dev_disable_change(struct inet6_dev *idev) |
4907 | { | 4922 | { |
4908 | struct netdev_notifier_info info; | 4923 | struct netdev_notifier_info info; |
@@ -5054,7 +5069,7 @@ static struct addrconf_sysctl_table | |||
5054 | .data = &ipv6_devconf.mtu6, | 5069 | .data = &ipv6_devconf.mtu6, |
5055 | .maxlen = sizeof(int), | 5070 | .maxlen = sizeof(int), |
5056 | .mode = 0644, | 5071 | .mode = 0644, |
5057 | .proc_handler = proc_dointvec, | 5072 | .proc_handler = addrconf_sysctl_mtu, |
5058 | }, | 5073 | }, |
5059 | { | 5074 | { |
5060 | .procname = "accept_ra", | 5075 | .procname = "accept_ra", |
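The new addrconf_sysctl_mtu() handler routes writes through proc_dointvec_minmax with IPV6_MIN_MTU as the floor and the device MTU as the ceiling, so out-of-range values are rejected instead of silently accepted by plain proc_dointvec. A standalone sketch of that validate-on-write pattern follows; the function and names are illustrative, not the kernel sysctl API.

/* sketch: accept an integer setting only if it falls inside [min, max] */
#include <stdio.h>
#include <stdbool.h>

#define IPV6_MIN_MTU 1280

static bool write_bounded_int(int *target, int value, int min, const int *max)
{
	if (value < min)
		return false;           /* below the protocol minimum */
	if (max && value > *max)
		return false;           /* above the current device MTU */
	*target = value;
	return true;
}

int main(void)
{
	int mtu6 = 1500, dev_mtu = 1500;

	printf("%d\n", write_bounded_int(&mtu6, 1400, IPV6_MIN_MTU, &dev_mtu)); /* 1 */
	printf("%d\n", write_bounded_int(&mtu6, 900,  IPV6_MIN_MTU, &dev_mtu)); /* 0 */
	printf("%d\n", write_bounded_int(&mtu6, 9000, IPV6_MIN_MTU, &dev_mtu)); /* 0 */
	return 0;
}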
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index c215be70cac0..ace8daca5c83 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -325,14 +325,34 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) | |||
325 | kfree_skb(skb); | 325 | kfree_skb(skb); |
326 | } | 326 | } |
327 | 327 | ||
328 | static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb) | 328 | /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL. |
329 | * | ||
330 | * At one point, excluding local errors was a quick test to identify icmp/icmp6 | ||
331 | * errors. This is no longer true, but the test remained, so the v6 stack, | ||
332 | * unlike v4, also honors cmsg requests on all wifi and timestamp errors. | ||
333 | * | ||
334 | * Timestamp code paths do not initialize the fields expected by cmsg: | ||
335 | * the PKTINFO fields in skb->cb[]. Fill those in here. | ||
336 | */ | ||
337 | static bool ip6_datagram_support_cmsg(struct sk_buff *skb, | ||
338 | struct sock_exterr_skb *serr) | ||
329 | { | 339 | { |
330 | int ifindex = skb->dev ? skb->dev->ifindex : -1; | 340 | if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || |
341 | serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) | ||
342 | return true; | ||
343 | |||
344 | if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) | ||
345 | return false; | ||
346 | |||
347 | if (!skb->dev) | ||
348 | return false; | ||
331 | 349 | ||
332 | if (skb->protocol == htons(ETH_P_IPV6)) | 350 | if (skb->protocol == htons(ETH_P_IPV6)) |
333 | IP6CB(skb)->iif = ifindex; | 351 | IP6CB(skb)->iif = skb->dev->ifindex; |
334 | else | 352 | else |
335 | PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex; | 353 | PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex; |
354 | |||
355 | return true; | ||
336 | } | 356 | } |
337 | 357 | ||
338 | /* | 358 | /* |
@@ -369,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
369 | 389 | ||
370 | serr = SKB_EXT_ERR(skb); | 390 | serr = SKB_EXT_ERR(skb); |
371 | 391 | ||
372 | if (sin && skb->len) { | 392 | if (sin && serr->port) { |
373 | const unsigned char *nh = skb_network_header(skb); | 393 | const unsigned char *nh = skb_network_header(skb); |
374 | sin->sin6_family = AF_INET6; | 394 | sin->sin6_family = AF_INET6; |
375 | sin->sin6_flowinfo = 0; | 395 | sin->sin6_flowinfo = 0; |
@@ -394,14 +414,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
394 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); | 414 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); |
395 | sin = &errhdr.offender; | 415 | sin = &errhdr.offender; |
396 | memset(sin, 0, sizeof(*sin)); | 416 | memset(sin, 0, sizeof(*sin)); |
397 | if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) { | 417 | |
418 | if (ip6_datagram_support_cmsg(skb, serr)) { | ||
398 | sin->sin6_family = AF_INET6; | 419 | sin->sin6_family = AF_INET6; |
399 | if (np->rxopt.all) { | 420 | if (np->rxopt.all) |
400 | if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP && | ||
401 | serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6) | ||
402 | ip6_datagram_prepare_pktinfo_errqueue(skb); | ||
403 | ip6_datagram_recv_common_ctl(sk, msg, skb); | 421 | ip6_datagram_recv_common_ctl(sk, msg, skb); |
404 | } | ||
405 | if (skb->protocol == htons(ETH_P_IPV6)) { | 422 | if (skb->protocol == htons(ETH_P_IPV6)) { |
406 | sin->sin6_addr = ipv6_hdr(skb)->saddr; | 423 | sin->sin6_addr = ipv6_hdr(skb)->saddr; |
407 | if (np->rxopt.all) | 424 | if (np->rxopt.all) |
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index b4d5e1d97c1b..70bc6abc0639 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
104 | goto again; | 104 | goto again; |
105 | flp6->saddr = saddr; | 105 | flp6->saddr = saddr; |
106 | } | 106 | } |
107 | err = rt->dst.error; | ||
107 | goto out; | 108 | goto out; |
108 | } | 109 | } |
109 | again: | 110 | again: |
@@ -321,7 +322,9 @@ out_fib6_rules_ops: | |||
321 | 322 | ||
322 | static void __net_exit fib6_rules_net_exit(struct net *net) | 323 | static void __net_exit fib6_rules_net_exit(struct net *net) |
323 | { | 324 | { |
325 | rtnl_lock(); | ||
324 | fib_rules_unregister(net->ipv6.fib6_rules_ops); | 326 | fib_rules_unregister(net->ipv6.fib6_rules_ops); |
327 | rtnl_unlock(); | ||
325 | } | 328 | } |
326 | 329 | ||
327 | static struct pernet_operations fib6_rules_net_ops = { | 330 | static struct pernet_operations fib6_rules_net_ops = { |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 7deebf102cba..36cf0ab685a0 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -318,6 +318,7 @@ static int ip6_forward_proxy_check(struct sk_buff *skb) | |||
318 | 318 | ||
319 | static inline int ip6_forward_finish(struct sk_buff *skb) | 319 | static inline int ip6_forward_finish(struct sk_buff *skb) |
320 | { | 320 | { |
321 | skb_sender_cpu_clear(skb); | ||
321 | return dst_output(skb); | 322 | return dst_output(skb); |
322 | } | 323 | } |
323 | 324 | ||
@@ -541,7 +542,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
541 | { | 542 | { |
542 | struct sk_buff *frag; | 543 | struct sk_buff *frag; |
543 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); | 544 | struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); |
544 | struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; | 545 | struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? |
546 | inet6_sk(skb->sk) : NULL; | ||
545 | struct ipv6hdr *tmp_hdr; | 547 | struct ipv6hdr *tmp_hdr; |
546 | struct frag_hdr *fh; | 548 | struct frag_hdr *fh; |
547 | unsigned int mtu, hlen, left, len; | 549 | unsigned int mtu, hlen, left, len; |
@@ -1298,7 +1300,8 @@ emsgsize: | |||
1298 | if (((length > mtu) || | 1300 | if (((length > mtu) || |
1299 | (skb && skb_is_gso(skb))) && | 1301 | (skb && skb_is_gso(skb))) && |
1300 | (sk->sk_protocol == IPPROTO_UDP) && | 1302 | (sk->sk_protocol == IPPROTO_UDP) && |
1301 | (rt->dst.dev->features & NETIF_F_UFO)) { | 1303 | (rt->dst.dev->features & NETIF_F_UFO) && |
1304 | (sk->sk_type == SOCK_DGRAM)) { | ||
1302 | err = ip6_ufo_append_data(sk, queue, getfrag, from, length, | 1305 | err = ip6_ufo_append_data(sk, queue, getfrag, from, length, |
1303 | hh_len, fragheaderlen, | 1306 | hh_len, fragheaderlen, |
1304 | transhdrlen, mtu, flags, rt); | 1307 | transhdrlen, mtu, flags, rt); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 266a264ec212..ddd94eca19b3 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -314,7 +314,7 @@ out: | |||
314 | * Create tunnel matching given parameters. | 314 | * Create tunnel matching given parameters. |
315 | * | 315 | * |
316 | * Return: | 316 | * Return: |
317 | * created tunnel or NULL | 317 | * created tunnel or error pointer |
318 | **/ | 318 | **/ |
319 | 319 | ||
320 | static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) | 320 | static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) |
@@ -322,7 +322,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) | |||
322 | struct net_device *dev; | 322 | struct net_device *dev; |
323 | struct ip6_tnl *t; | 323 | struct ip6_tnl *t; |
324 | char name[IFNAMSIZ]; | 324 | char name[IFNAMSIZ]; |
325 | int err; | 325 | int err = -ENOMEM; |
326 | 326 | ||
327 | if (p->name[0]) | 327 | if (p->name[0]) |
328 | strlcpy(name, p->name, IFNAMSIZ); | 328 | strlcpy(name, p->name, IFNAMSIZ); |
@@ -348,7 +348,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) | |||
348 | failed_free: | 348 | failed_free: |
349 | ip6_dev_free(dev); | 349 | ip6_dev_free(dev); |
350 | failed: | 350 | failed: |
351 | return NULL; | 351 | return ERR_PTR(err); |
352 | } | 352 | } |
353 | 353 | ||
354 | /** | 354 | /** |
@@ -362,7 +362,7 @@ failed: | |||
362 | * tunnel device is created and registered for use. | 362 | * tunnel device is created and registered for use. |
363 | * | 363 | * |
364 | * Return: | 364 | * Return: |
365 | * matching tunnel or NULL | 365 | * matching tunnel or error pointer |
366 | **/ | 366 | **/ |
367 | 367 | ||
368 | static struct ip6_tnl *ip6_tnl_locate(struct net *net, | 368 | static struct ip6_tnl *ip6_tnl_locate(struct net *net, |
@@ -380,13 +380,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net, | |||
380 | if (ipv6_addr_equal(local, &t->parms.laddr) && | 380 | if (ipv6_addr_equal(local, &t->parms.laddr) && |
381 | ipv6_addr_equal(remote, &t->parms.raddr)) { | 381 | ipv6_addr_equal(remote, &t->parms.raddr)) { |
382 | if (create) | 382 | if (create) |
383 | return NULL; | 383 | return ERR_PTR(-EEXIST); |
384 | 384 | ||
385 | return t; | 385 | return t; |
386 | } | 386 | } |
387 | } | 387 | } |
388 | if (!create) | 388 | if (!create) |
389 | return NULL; | 389 | return ERR_PTR(-ENODEV); |
390 | return ip6_tnl_create(net, p); | 390 | return ip6_tnl_create(net, p); |
391 | } | 391 | } |
392 | 392 | ||
@@ -1420,7 +1420,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1420 | } | 1420 | } |
1421 | ip6_tnl_parm_from_user(&p1, &p); | 1421 | ip6_tnl_parm_from_user(&p1, &p); |
1422 | t = ip6_tnl_locate(net, &p1, 0); | 1422 | t = ip6_tnl_locate(net, &p1, 0); |
1423 | if (t == NULL) | 1423 | if (IS_ERR(t)) |
1424 | t = netdev_priv(dev); | 1424 | t = netdev_priv(dev); |
1425 | } else { | 1425 | } else { |
1426 | memset(&p, 0, sizeof(p)); | 1426 | memset(&p, 0, sizeof(p)); |
@@ -1445,7 +1445,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1445 | ip6_tnl_parm_from_user(&p1, &p); | 1445 | ip6_tnl_parm_from_user(&p1, &p); |
1446 | t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); | 1446 | t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); |
1447 | if (cmd == SIOCCHGTUNNEL) { | 1447 | if (cmd == SIOCCHGTUNNEL) { |
1448 | if (t != NULL) { | 1448 | if (!IS_ERR(t)) { |
1449 | if (t->dev != dev) { | 1449 | if (t->dev != dev) { |
1450 | err = -EEXIST; | 1450 | err = -EEXIST; |
1451 | break; | 1451 | break; |
@@ -1457,14 +1457,15 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1457 | else | 1457 | else |
1458 | err = ip6_tnl_update(t, &p1); | 1458 | err = ip6_tnl_update(t, &p1); |
1459 | } | 1459 | } |
1460 | if (t) { | 1460 | if (!IS_ERR(t)) { |
1461 | err = 0; | 1461 | err = 0; |
1462 | ip6_tnl_parm_to_user(&p, &t->parms); | 1462 | ip6_tnl_parm_to_user(&p, &t->parms); |
1463 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 1463 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) |
1464 | err = -EFAULT; | 1464 | err = -EFAULT; |
1465 | 1465 | ||
1466 | } else | 1466 | } else { |
1467 | err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); | 1467 | err = PTR_ERR(t); |
1468 | } | ||
1468 | break; | 1469 | break; |
1469 | case SIOCDELTUNNEL: | 1470 | case SIOCDELTUNNEL: |
1470 | err = -EPERM; | 1471 | err = -EPERM; |
@@ -1478,7 +1479,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1478 | err = -ENOENT; | 1479 | err = -ENOENT; |
1479 | ip6_tnl_parm_from_user(&p1, &p); | 1480 | ip6_tnl_parm_from_user(&p1, &p); |
1480 | t = ip6_tnl_locate(net, &p1, 0); | 1481 | t = ip6_tnl_locate(net, &p1, 0); |
1481 | if (t == NULL) | 1482 | if (IS_ERR(t)) |
1482 | break; | 1483 | break; |
1483 | err = -EPERM; | 1484 | err = -EPERM; |
1484 | if (t->dev == ip6n->fb_tnl_dev) | 1485 | if (t->dev == ip6n->fb_tnl_dev) |
@@ -1672,12 +1673,13 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, | |||
1672 | struct nlattr *tb[], struct nlattr *data[]) | 1673 | struct nlattr *tb[], struct nlattr *data[]) |
1673 | { | 1674 | { |
1674 | struct net *net = dev_net(dev); | 1675 | struct net *net = dev_net(dev); |
1675 | struct ip6_tnl *nt; | 1676 | struct ip6_tnl *nt, *t; |
1676 | 1677 | ||
1677 | nt = netdev_priv(dev); | 1678 | nt = netdev_priv(dev); |
1678 | ip6_tnl_netlink_parms(data, &nt->parms); | 1679 | ip6_tnl_netlink_parms(data, &nt->parms); |
1679 | 1680 | ||
1680 | if (ip6_tnl_locate(net, &nt->parms, 0)) | 1681 | t = ip6_tnl_locate(net, &nt->parms, 0); |
1682 | if (!IS_ERR(t)) | ||
1681 | return -EEXIST; | 1683 | return -EEXIST; |
1682 | 1684 | ||
1683 | return ip6_tnl_create2(dev); | 1685 | return ip6_tnl_create2(dev); |
@@ -1697,8 +1699,7 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], | |||
1697 | ip6_tnl_netlink_parms(data, &p); | 1699 | ip6_tnl_netlink_parms(data, &p); |
1698 | 1700 | ||
1699 | t = ip6_tnl_locate(net, &p, 0); | 1701 | t = ip6_tnl_locate(net, &p, 0); |
1700 | 1702 | if (!IS_ERR(t)) { | |
1701 | if (t) { | ||
1702 | if (t->dev != dev) | 1703 | if (t->dev != dev) |
1703 | return -EEXIST; | 1704 | return -EEXIST; |
1704 | } else | 1705 | } else |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 34b682617f50..312e0ff47339 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -252,7 +252,7 @@ static int __net_init ip6mr_rules_init(struct net *net) | |||
252 | return 0; | 252 | return 0; |
253 | 253 | ||
254 | err2: | 254 | err2: |
255 | kfree(mrt); | 255 | ip6mr_free_table(mrt); |
256 | err1: | 256 | err1: |
257 | fib_rules_unregister(ops); | 257 | fib_rules_unregister(ops); |
258 | return err; | 258 | return err; |
@@ -267,8 +267,8 @@ static void __net_exit ip6mr_rules_exit(struct net *net) | |||
267 | list_del(&mrt->list); | 267 | list_del(&mrt->list); |
268 | ip6mr_free_table(mrt); | 268 | ip6mr_free_table(mrt); |
269 | } | 269 | } |
270 | rtnl_unlock(); | ||
271 | fib_rules_unregister(net->ipv6.mr6_rules_ops); | 270 | fib_rules_unregister(net->ipv6.mr6_rules_ops); |
271 | rtnl_unlock(); | ||
272 | } | 272 | } |
273 | #else | 273 | #else |
274 | #define ip6mr_for_each_table(mrt, net) \ | 274 | #define ip6mr_for_each_table(mrt, net) \ |
@@ -336,7 +336,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) | |||
336 | 336 | ||
337 | static void ip6mr_free_table(struct mr6_table *mrt) | 337 | static void ip6mr_free_table(struct mr6_table *mrt) |
338 | { | 338 | { |
339 | del_timer(&mrt->ipmr_expire_timer); | 339 | del_timer_sync(&mrt->ipmr_expire_timer); |
340 | mroute_clean_tables(mrt); | 340 | mroute_clean_tables(mrt); |
341 | kfree(mrt); | 341 | kfree(mrt); |
342 | } | 342 | } |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 471ed24aabae..14ecdaf06bf7 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -1218,7 +1218,14 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
1218 | if (rt) | 1218 | if (rt) |
1219 | rt6_set_expires(rt, jiffies + (HZ * lifetime)); | 1219 | rt6_set_expires(rt, jiffies + (HZ * lifetime)); |
1220 | if (ra_msg->icmph.icmp6_hop_limit) { | 1220 | if (ra_msg->icmph.icmp6_hop_limit) { |
1221 | in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; | 1221 | /* Only set hop_limit on the interface if it is higher than |
1222 | * the current hop_limit. | ||
1223 | */ | ||
1224 | if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) { | ||
1225 | in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; | ||
1226 | } else { | ||
1227 | ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n"); | ||
1228 | } | ||
1222 | if (rt) | 1229 | if (rt) |
1223 | dst_metric_set(&rt->dst, RTAX_HOPLIMIT, | 1230 | dst_metric_set(&rt->dst, RTAX_HOPLIMIT, |
1224 | ra_msg->icmph.icmp6_hop_limit); | 1231 | ra_msg->icmph.icmp6_hop_limit); |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index e080fbbbc0e5..bb00c6f2a885 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -298,9 +298,9 @@ static void trace_packet(const struct sk_buff *skb, | |||
298 | &chainname, &comment, &rulenum) != 0) | 298 | &chainname, &comment, &rulenum) != 0) |
299 | break; | 299 | break; |
300 | 300 | ||
301 | nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo, | 301 | nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo, |
302 | "TRACE: %s:%s:%s:%u ", | 302 | "TRACE: %s:%s:%s:%u ", |
303 | tablename, chainname, comment, rulenum); | 303 | tablename, chainname, comment, rulenum); |
304 | } | 304 | } |
305 | #endif | 305 | #endif |
306 | 306 | ||
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index bd46f736f61d..a2dfff6ff227 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c | |||
@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
102 | 102 | ||
103 | if (msg->msg_name) { | 103 | if (msg->msg_name) { |
104 | DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); | 104 | DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); |
105 | if (msg->msg_namelen < sizeof(struct sockaddr_in6) || | 105 | if (msg->msg_namelen < sizeof(*u)) |
106 | u->sin6_family != AF_INET6) { | ||
107 | return -EINVAL; | 106 | return -EINVAL; |
107 | if (u->sin6_family != AF_INET6) { | ||
108 | return -EAFNOSUPPORT; | ||
108 | } | 109 | } |
109 | if (sk->sk_bound_dev_if && | 110 | if (sk->sk_bound_dev_if && |
110 | sk->sk_bound_dev_if != u->sin6_scope_id) { | 111 | sk->sk_bound_dev_if != u->sin6_scope_id) { |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5d46832c6f72..1f5e62229aaa 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1411,6 +1411,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, | |||
1411 | TCP_SKB_CB(skb)->sacked = 0; | 1411 | TCP_SKB_CB(skb)->sacked = 0; |
1412 | } | 1412 | } |
1413 | 1413 | ||
1414 | static void tcp_v6_restore_cb(struct sk_buff *skb) | ||
1415 | { | ||
1416 | /* We need to move header back to the beginning if xfrm6_policy_check() | ||
1417 | * and tcp_v6_fill_cb() are going to be called again. | ||
1418 | */ | ||
1419 | memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, | ||
1420 | sizeof(struct inet6_skb_parm)); | ||
1421 | } | ||
1422 | |||
1414 | static int tcp_v6_rcv(struct sk_buff *skb) | 1423 | static int tcp_v6_rcv(struct sk_buff *skb) |
1415 | { | 1424 | { |
1416 | const struct tcphdr *th; | 1425 | const struct tcphdr *th; |
@@ -1543,6 +1552,7 @@ do_time_wait: | |||
1543 | inet_twsk_deschedule(tw, &tcp_death_row); | 1552 | inet_twsk_deschedule(tw, &tcp_death_row); |
1544 | inet_twsk_put(tw); | 1553 | inet_twsk_put(tw); |
1545 | sk = sk2; | 1554 | sk = sk2; |
1555 | tcp_v6_restore_cb(skb); | ||
1546 | goto process; | 1556 | goto process; |
1547 | } | 1557 | } |
1548 | /* Fall through to ACK */ | 1558 | /* Fall through to ACK */ |
@@ -1551,6 +1561,7 @@ do_time_wait: | |||
1551 | tcp_v6_timewait_ack(sk, skb); | 1561 | tcp_v6_timewait_ack(sk, skb); |
1552 | break; | 1562 | break; |
1553 | case TCP_TW_RST: | 1563 | case TCP_TW_RST: |
1564 | tcp_v6_restore_cb(skb); | ||
1554 | goto no_tcp_socket; | 1565 | goto no_tcp_socket; |
1555 | case TCP_TW_SUCCESS: | 1566 | case TCP_TW_SUCCESS: |
1556 | ; | 1567 | ; |
@@ -1585,7 +1596,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb) | |||
1585 | skb->sk = sk; | 1596 | skb->sk = sk; |
1586 | skb->destructor = sock_edemux; | 1597 | skb->destructor = sock_edemux; |
1587 | if (sk->sk_state != TCP_TIME_WAIT) { | 1598 | if (sk->sk_state != TCP_TIME_WAIT) { |
1588 | struct dst_entry *dst = sk->sk_rx_dst; | 1599 | struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); |
1589 | 1600 | ||
1590 | if (dst) | 1601 | if (dst) |
1591 | dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); | 1602 | dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); |
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index ab889bb16b3c..be2c0ba82c85 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c | |||
@@ -112,11 +112,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, | |||
112 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); | 112 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); |
113 | fptr->nexthdr = nexthdr; | 113 | fptr->nexthdr = nexthdr; |
114 | fptr->reserved = 0; | 114 | fptr->reserved = 0; |
115 | if (skb_shinfo(skb)->ip6_frag_id) | 115 | if (!skb_shinfo(skb)->ip6_frag_id) |
116 | fptr->identification = skb_shinfo(skb)->ip6_frag_id; | 116 | ipv6_proxy_select_ident(skb); |
117 | else | 117 | fptr->identification = skb_shinfo(skb)->ip6_frag_id; |
118 | ipv6_select_ident(fptr, | ||
119 | (struct rt6_info *)skb_dst(skb)); | ||
120 | 118 | ||
121 | /* Fragment the skb. ipv6 header and the remaining fields of the | 119 | /* Fragment the skb. ipv6 header and the remaining fields of the |
122 | * fragment header are updated in ipv6_gso_segment() | 120 | * fragment header are updated in ipv6_gso_segment() |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index ca3f29b98ae5..010f8bd2d577 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -114,6 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) | |||
114 | return err; | 114 | return err; |
115 | 115 | ||
116 | skb->ignore_df = 1; | 116 | skb->ignore_df = 1; |
117 | skb->protocol = htons(ETH_P_IPV6); | ||
117 | 118 | ||
118 | return x->outer_mode->output2(x, skb); | 119 | return x->outer_mode->output2(x, skb); |
119 | } | 120 | } |
@@ -122,7 +123,6 @@ EXPORT_SYMBOL(xfrm6_prepare_output); | |||
122 | int xfrm6_output_finish(struct sk_buff *skb) | 123 | int xfrm6_output_finish(struct sk_buff *skb) |
123 | { | 124 | { |
124 | memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); | 125 | memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); |
125 | skb->protocol = htons(ETH_P_IPV6); | ||
126 | 126 | ||
127 | #ifdef CONFIG_NETFILTER | 127 | #ifdef CONFIG_NETFILTER |
128 | IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; | 128 | IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 48bf5a06847b..8d2d01b4800a 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -200,6 +200,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
200 | 200 | ||
201 | #if IS_ENABLED(CONFIG_IPV6_MIP6) | 201 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
202 | case IPPROTO_MH: | 202 | case IPPROTO_MH: |
203 | offset += ipv6_optlen(exthdr); | ||
203 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { | 204 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { |
204 | struct ip6_mh *mh; | 205 | struct ip6_mh *mh; |
205 | 206 | ||
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 40695b9751c1..683346d2d633 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
@@ -798,7 +798,9 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) | |||
798 | orig_jiffies = jiffies; | 798 | orig_jiffies = jiffies; |
799 | 799 | ||
800 | /* Set poll time to 200 ms */ | 800 | /* Set poll time to 200 ms */ |
801 | poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200)); | 801 | poll_time = msecs_to_jiffies(200); |
802 | if (timeout) | ||
803 | poll_time = min_t(unsigned long, timeout, poll_time); | ||
802 | 804 | ||
803 | spin_lock_irqsave(&self->spinlock, flags); | 805 | spin_lock_irqsave(&self->spinlock, flags); |
804 | while (self->tx_skb && self->tx_skb->len) { | 806 | while (self->tx_skb && self->tx_skb->len) { |
@@ -811,7 +813,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) | |||
811 | break; | 813 | break; |
812 | } | 814 | } |
813 | spin_unlock_irqrestore(&self->spinlock, flags); | 815 | spin_unlock_irqrestore(&self->spinlock, flags); |
814 | current->state = TASK_RUNNING; | 816 | __set_current_state(TASK_RUNNING); |
815 | } | 817 | } |
816 | 818 | ||
817 | /* | 819 | /* |
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 3c83a1e5ab03..1215693fdd22 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c | |||
@@ -305,7 +305,7 @@ irnet_ctrl_read(irnet_socket * ap, | |||
305 | 305 | ||
306 | /* Put ourselves on the wait queue to be woken up */ | 306 | /* Put ourselves on the wait queue to be woken up */ |
307 | add_wait_queue(&irnet_events.rwait, &wait); | 307 | add_wait_queue(&irnet_events.rwait, &wait); |
308 | current->state = TASK_INTERRUPTIBLE; | 308 | set_current_state(TASK_INTERRUPTIBLE); |
309 | for(;;) | 309 | for(;;) |
310 | { | 310 | { |
311 | /* If there is unread events */ | 311 | /* If there is unread events */ |
@@ -321,7 +321,7 @@ irnet_ctrl_read(irnet_socket * ap, | |||
321 | /* Yield and wait to be woken up */ | 321 | /* Yield and wait to be woken up */ |
322 | schedule(); | 322 | schedule(); |
323 | } | 323 | } |
324 | current->state = TASK_RUNNING; | 324 | __set_current_state(TASK_RUNNING); |
325 | remove_wait_queue(&irnet_events.rwait, &wait); | 325 | remove_wait_queue(&irnet_events.rwait, &wait); |
326 | 326 | ||
327 | /* Did we got it ? */ | 327 | /* Did we got it ? */ |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 2e9953b2db84..53d931172088 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -1114,10 +1114,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1114 | noblock, &err); | 1114 | noblock, &err); |
1115 | else | 1115 | else |
1116 | skb = sock_alloc_send_skb(sk, len, noblock, &err); | 1116 | skb = sock_alloc_send_skb(sk, len, noblock, &err); |
1117 | if (!skb) { | 1117 | if (!skb) |
1118 | err = -ENOMEM; | ||
1119 | goto out; | 1118 | goto out; |
1120 | } | ||
1121 | if (iucv->transport == AF_IUCV_TRANS_HIPER) | 1119 | if (iucv->transport == AF_IUCV_TRANS_HIPER) |
1122 | skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); | 1120 | skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); |
1123 | if (memcpy_from_msg(skb_put(skb, len), msg, len)) { | 1121 | if (memcpy_from_msg(skb_put(skb, len), msg, len)) { |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 895348e44c7d..a29a504492af 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1871,6 +1871,7 @@ static int __init l2tp_init(void) | |||
1871 | l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0); | 1871 | l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0); |
1872 | if (!l2tp_wq) { | 1872 | if (!l2tp_wq) { |
1873 | pr_err("alloc_workqueue failed\n"); | 1873 | pr_err("alloc_workqueue failed\n"); |
1874 | unregister_pernet_device(&l2tp_net_ops); | ||
1874 | rc = -ENOMEM; | 1875 | rc = -ENOMEM; |
1875 | goto out; | 1876 | goto out; |
1876 | } | 1877 | } |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index a48bad468880..7702978a4c99 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c | |||
@@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h) | |||
49 | container_of(h, struct tid_ampdu_rx, rcu_head); | 49 | container_of(h, struct tid_ampdu_rx, rcu_head); |
50 | int i; | 50 | int i; |
51 | 51 | ||
52 | del_timer_sync(&tid_rx->reorder_timer); | ||
53 | |||
54 | for (i = 0; i < tid_rx->buf_size; i++) | 52 | for (i = 0; i < tid_rx->buf_size; i++) |
55 | __skb_queue_purge(&tid_rx->reorder_buf[i]); | 53 | __skb_queue_purge(&tid_rx->reorder_buf[i]); |
56 | kfree(tid_rx->reorder_buf); | 54 | kfree(tid_rx->reorder_buf); |
@@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | |||
93 | 91 | ||
94 | del_timer_sync(&tid_rx->session_timer); | 92 | del_timer_sync(&tid_rx->session_timer); |
95 | 93 | ||
94 | /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */ | ||
95 | spin_lock_bh(&tid_rx->reorder_lock); | ||
96 | tid_rx->removed = true; | ||
97 | spin_unlock_bh(&tid_rx->reorder_lock); | ||
98 | del_timer_sync(&tid_rx->reorder_timer); | ||
99 | |||
96 | call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); | 100 | call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); |
97 | } | 101 | } |
98 | 102 | ||
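Together with the rx.c hunk further down, this change implements a common teardown pattern: mark the session removed under the same lock the timer path takes, then del_timer_sync(), so the reorder-release path can no longer re-arm the timer after it has been cancelled. A minimal pthread-based sketch of the idea; the lock and flag are the point, the timer itself is faked as a boolean.

/* sketch: prevent a timer callback from re-arming after teardown */
#include <pthread.h>
#include <stdbool.h>

struct session {
	pthread_mutex_t lock;      /* plays the role of reorder_lock */
	bool removed;              /* set once teardown has started */
	bool timer_armed;          /* stand-in for the real timer state */
};

/* called from the data path: only re-arm while the session is still live */
static void maybe_rearm(struct session *s)
{
	pthread_mutex_lock(&s->lock);
	if (!s->removed)
		s->timer_armed = true;     /* mod_timer(...) in the real code */
	pthread_mutex_unlock(&s->lock);
}

/* called from teardown: set the flag first, then cancel, so no arming can race in */
static void stop_session(struct session *s)
{
	pthread_mutex_lock(&s->lock);
	s->removed = true;
	pthread_mutex_unlock(&s->lock);

	s->timer_armed = false;            /* del_timer_sync(...) in the real code */
}

int main(void)
{
	struct session s = { PTHREAD_MUTEX_INITIALIZER, false, false };

	maybe_rearm(&s);    /* arms: session still live */
	stop_session(&s);
	maybe_rearm(&s);    /* no-op: removed flag blocks re-arming */
	return s.timer_armed ? 1 : 0;
}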
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index ff0d2db09df9..5bcd4e5589d3 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
@@ -1508,6 +1508,8 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) | |||
1508 | if (ieee80211_chanctx_refcount(local, ctx) == 0) | 1508 | if (ieee80211_chanctx_refcount(local, ctx) == 0) |
1509 | ieee80211_free_chanctx(local, ctx); | 1509 | ieee80211_free_chanctx(local, ctx); |
1510 | 1510 | ||
1511 | sdata->radar_required = false; | ||
1512 | |||
1511 | /* Unreserving may ready an in-place reservation. */ | 1513 | /* Unreserving may ready an in-place reservation. */ |
1512 | if (use_reserved_switch) | 1514 | if (use_reserved_switch) |
1513 | ieee80211_vif_use_reserved_switch(local); | 1515 | ieee80211_vif_use_reserved_switch(local); |
@@ -1566,6 +1568,9 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, | |||
1566 | ieee80211_recalc_smps_chanctx(local, ctx); | 1568 | ieee80211_recalc_smps_chanctx(local, ctx); |
1567 | ieee80211_recalc_radar_chanctx(local, ctx); | 1569 | ieee80211_recalc_radar_chanctx(local, ctx); |
1568 | out: | 1570 | out: |
1571 | if (ret) | ||
1572 | sdata->radar_required = false; | ||
1573 | |||
1569 | mutex_unlock(&local->chanctx_mtx); | 1574 | mutex_unlock(&local->chanctx_mtx); |
1570 | return ret; | 1575 | return ret; |
1571 | } | 1576 | } |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 3afe36824703..8d53d65bd2ab 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -58,13 +58,24 @@ struct ieee80211_local; | |||
58 | #define IEEE80211_UNSET_POWER_LEVEL INT_MIN | 58 | #define IEEE80211_UNSET_POWER_LEVEL INT_MIN |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * Some APs experience problems when working with U-APSD. Decrease the | 61 | * Some APs experience problems when working with U-APSD. Decreasing the |
62 | * probability of that happening by using legacy mode for all ACs but VO. | 62 | * probability of that happening by using legacy mode for all ACs but VO isn't |
63 | * The AP that caused us trouble was a Cisco 4410N. It ignores our | 63 | * enough. |
64 | * setting, and always treats non-VO ACs as legacy. | 64 | * |
65 | * Cisco 4410N originally forced us to enable VO by default only because it | ||
66 | * treated non-VO ACs as legacy. | ||
67 | * | ||
68 | * However some APs (notably Netgear R7000) silently reclassify packets to | ||
69 | * different ACs. Since u-APSD ACs require trigger frames for frame retrieval | ||
70 | * clients would never see some frames (e.g. ARP responses) or would fetch them | ||
71 | * accidentally after a long time. | ||
72 | * | ||
73 | * It makes little sense to enable u-APSD queues by default because it needs | ||
74 | * userspace applications to be aware of it to actually take advantage of the | ||
75 | * possible additional powersavings. Implicitly depending on driver autotrigger | ||
76 | * frame support doesn't make much sense. | ||
65 | */ | 77 | */ |
66 | #define IEEE80211_DEFAULT_UAPSD_QUEUES \ | 78 | #define IEEE80211_DEFAULT_UAPSD_QUEUES 0 |
67 | IEEE80211_WMM_IE_STA_QOSINFO_AC_VO | ||
68 | 79 | ||
69 | #define IEEE80211_DEFAULT_MAX_SP_LEN \ | 80 | #define IEEE80211_DEFAULT_MAX_SP_LEN \ |
70 | IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL | 81 | IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL |
@@ -453,6 +464,7 @@ struct ieee80211_if_managed { | |||
453 | unsigned int flags; | 464 | unsigned int flags; |
454 | 465 | ||
455 | bool csa_waiting_bcn; | 466 | bool csa_waiting_bcn; |
467 | bool csa_ignored_same_chan; | ||
456 | 468 | ||
457 | bool beacon_crc_valid; | 469 | bool beacon_crc_valid; |
458 | u32 beacon_crc; | 470 | u32 beacon_crc; |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 10ac6324c1d0..142f66aece18 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1150,6 +1150,17 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1150 | return; | 1150 | return; |
1151 | } | 1151 | } |
1152 | 1152 | ||
1153 | if (cfg80211_chandef_identical(&csa_ie.chandef, | ||
1154 | &sdata->vif.bss_conf.chandef)) { | ||
1155 | if (ifmgd->csa_ignored_same_chan) | ||
1156 | return; | ||
1157 | sdata_info(sdata, | ||
1158 | "AP %pM tries to chanswitch to same channel, ignore\n", | ||
1159 | ifmgd->associated->bssid); | ||
1160 | ifmgd->csa_ignored_same_chan = true; | ||
1161 | return; | ||
1162 | } | ||
1163 | |||
1153 | mutex_lock(&local->mtx); | 1164 | mutex_lock(&local->mtx); |
1154 | mutex_lock(&local->chanctx_mtx); | 1165 | mutex_lock(&local->chanctx_mtx); |
1155 | conf = rcu_dereference_protected(sdata->vif.chanctx_conf, | 1166 | conf = rcu_dereference_protected(sdata->vif.chanctx_conf, |
@@ -1210,6 +1221,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1210 | sdata->vif.csa_active = true; | 1221 | sdata->vif.csa_active = true; |
1211 | sdata->csa_chandef = csa_ie.chandef; | 1222 | sdata->csa_chandef = csa_ie.chandef; |
1212 | sdata->csa_block_tx = csa_ie.mode; | 1223 | sdata->csa_block_tx = csa_ie.mode; |
1224 | ifmgd->csa_ignored_same_chan = false; | ||
1213 | 1225 | ||
1214 | if (sdata->csa_block_tx) | 1226 | if (sdata->csa_block_tx) |
1215 | ieee80211_stop_vif_queues(local, sdata, | 1227 | ieee80211_stop_vif_queues(local, sdata, |
@@ -2090,6 +2102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
2090 | 2102 | ||
2091 | sdata->vif.csa_active = false; | 2103 | sdata->vif.csa_active = false; |
2092 | ifmgd->csa_waiting_bcn = false; | 2104 | ifmgd->csa_waiting_bcn = false; |
2105 | ifmgd->csa_ignored_same_chan = false; | ||
2093 | if (sdata->csa_block_tx) { | 2106 | if (sdata->csa_block_tx) { |
2094 | ieee80211_wake_vif_queues(local, sdata, | 2107 | ieee80211_wake_vif_queues(local, sdata, |
2095 | IEEE80211_QUEUE_STOP_REASON_CSA); | 2108 | IEEE80211_QUEUE_STOP_REASON_CSA); |
@@ -3204,7 +3217,8 @@ static const u64 care_about_ies = | |||
3204 | (1ULL << WLAN_EID_CHANNEL_SWITCH) | | 3217 | (1ULL << WLAN_EID_CHANNEL_SWITCH) | |
3205 | (1ULL << WLAN_EID_PWR_CONSTRAINT) | | 3218 | (1ULL << WLAN_EID_PWR_CONSTRAINT) | |
3206 | (1ULL << WLAN_EID_HT_CAPABILITY) | | 3219 | (1ULL << WLAN_EID_HT_CAPABILITY) | |
3207 | (1ULL << WLAN_EID_HT_OPERATION); | 3220 | (1ULL << WLAN_EID_HT_OPERATION) | |
3221 | (1ULL << WLAN_EID_EXT_CHANSWITCH_ANN); | ||
3208 | 3222 | ||
3209 | static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | 3223 | static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, |
3210 | struct ieee80211_mgmt *mgmt, size_t len, | 3224 | struct ieee80211_mgmt *mgmt, size_t len, |
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 7c86a002df95..ef6e8a6c4253 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c | |||
@@ -373,7 +373,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, | |||
373 | rate++; | 373 | rate++; |
374 | mi->sample_deferred++; | 374 | mi->sample_deferred++; |
375 | } else { | 375 | } else { |
376 | if (!msr->sample_limit != 0) | 376 | if (!msr->sample_limit) |
377 | return; | 377 | return; |
378 | 378 | ||
379 | mi->sample_packets++; | 379 | mi->sample_packets++; |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 1101563357ea..1eb730bf8752 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -873,9 +873,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, | |||
873 | 873 | ||
874 | set_release_timer: | 874 | set_release_timer: |
875 | 875 | ||
876 | mod_timer(&tid_agg_rx->reorder_timer, | 876 | if (!tid_agg_rx->removed) |
877 | tid_agg_rx->reorder_time[j] + 1 + | 877 | mod_timer(&tid_agg_rx->reorder_timer, |
878 | HT_RX_REORDER_BUF_TIMEOUT); | 878 | tid_agg_rx->reorder_time[j] + 1 + |
879 | HT_RX_REORDER_BUF_TIMEOUT); | ||
879 | } else { | 880 | } else { |
880 | del_timer(&tid_agg_rx->reorder_timer); | 881 | del_timer(&tid_agg_rx->reorder_timer); |
881 | } | 882 | } |
@@ -2214,6 +2215,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
2214 | hdr = (struct ieee80211_hdr *) skb->data; | 2215 | hdr = (struct ieee80211_hdr *) skb->data; |
2215 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | 2216 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); |
2216 | 2217 | ||
2218 | if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) | ||
2219 | return RX_DROP_MONITOR; | ||
2220 | |||
2217 | /* frame is in RMC, don't forward */ | 2221 | /* frame is in RMC, don't forward */ |
2218 | if (ieee80211_is_data(hdr->frame_control) && | 2222 | if (ieee80211_is_data(hdr->frame_control) && |
2219 | is_multicast_ether_addr(hdr->addr1) && | 2223 | is_multicast_ether_addr(hdr->addr1) && |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 925e68fe64c7..fb0fc1302a58 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -175,6 +175,7 @@ struct tid_ampdu_tx { | |||
175 | * @reorder_lock: serializes access to reorder buffer, see below. | 175 | * @reorder_lock: serializes access to reorder buffer, see below. |
176 | * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and | 176 | * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and |
177 | * and ssn. | 177 | * and ssn. |
178 | * @removed: this session is removed (but might have been found due to RCU) | ||
178 | * | 179 | * |
179 | * This structure's lifetime is managed by RCU, assignments to | 180 | * This structure's lifetime is managed by RCU, assignments to |
180 | * the array holding it must hold the aggregation mutex. | 181 | * the array holding it must hold the aggregation mutex. |
@@ -199,6 +200,7 @@ struct tid_ampdu_rx { | |||
199 | u16 timeout; | 200 | u16 timeout; |
200 | u8 dialog_token; | 201 | u8 dialog_token; |
201 | bool auto_seq; | 202 | bool auto_seq; |
203 | bool removed; | ||
202 | }; | 204 | }; |
203 | 205 | ||
204 | /** | 206 | /** |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 88a18ffe2975..07bd8db00af8 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -566,6 +566,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx) | |||
566 | if (tx->sdata->control_port_no_encrypt) | 566 | if (tx->sdata->control_port_no_encrypt) |
567 | info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; | 567 | info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; |
568 | info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; | 568 | info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; |
569 | info->flags |= IEEE80211_TX_CTL_USE_MINRATE; | ||
569 | } | 570 | } |
570 | 571 | ||
571 | return TX_CONTINUE; | 572 | return TX_CONTINUE; |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 8428f4a95479..747bdcf72e92 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -3178,7 +3178,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, | |||
3178 | wdev_iter = &sdata_iter->wdev; | 3178 | wdev_iter = &sdata_iter->wdev; |
3179 | 3179 | ||
3180 | if (sdata_iter == sdata || | 3180 | if (sdata_iter == sdata || |
3181 | rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL || | 3181 | !ieee80211_sdata_running(sdata_iter) || |
3182 | local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) | 3182 | local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) |
3183 | continue; | 3183 | continue; |
3184 | 3184 | ||
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index e55759056361..ed99448671c3 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -3402,7 +3402,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) | |||
3402 | if (udest.af == 0) | 3402 | if (udest.af == 0) |
3403 | udest.af = svc->af; | 3403 | udest.af = svc->af; |
3404 | 3404 | ||
3405 | if (udest.af != svc->af) { | 3405 | if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) { |
3406 | /* The synchronization protocol is incompatible | 3406 | /* The synchronization protocol is incompatible |
3407 | * with mixed family services | 3407 | * with mixed family services |
3408 | */ | 3408 | */ |
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index c47ffd7a0a70..d93ceeb3ef04 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -896,6 +896,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, | |||
896 | IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); | 896 | IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); |
897 | return; | 897 | return; |
898 | } | 898 | } |
899 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) | ||
900 | kfree(param->pe_data); | ||
899 | } | 901 | } |
900 | 902 | ||
901 | if (opt) | 903 | if (opt) |
@@ -1169,6 +1171,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) | |||
1169 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) | 1171 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) |
1170 | ); | 1172 | ); |
1171 | #endif | 1173 | #endif |
1174 | ip_vs_pe_put(param.pe); | ||
1172 | return 0; | 1175 | return 0; |
1173 | /* Error exit */ | 1176 | /* Error exit */ |
1174 | out: | 1177 | out: |
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 0d8448f19dfe..675d12c69e32 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -212,6 +212,30 @@ void nf_log_packet(struct net *net, | |||
212 | } | 212 | } |
213 | EXPORT_SYMBOL(nf_log_packet); | 213 | EXPORT_SYMBOL(nf_log_packet); |
214 | 214 | ||
215 | void nf_log_trace(struct net *net, | ||
216 | u_int8_t pf, | ||
217 | unsigned int hooknum, | ||
218 | const struct sk_buff *skb, | ||
219 | const struct net_device *in, | ||
220 | const struct net_device *out, | ||
221 | const struct nf_loginfo *loginfo, const char *fmt, ...) | ||
222 | { | ||
223 | va_list args; | ||
224 | char prefix[NF_LOG_PREFIXLEN]; | ||
225 | const struct nf_logger *logger; | ||
226 | |||
227 | rcu_read_lock(); | ||
228 | logger = rcu_dereference(net->nf.nf_loggers[pf]); | ||
229 | if (logger) { | ||
230 | va_start(args, fmt); | ||
231 | vsnprintf(prefix, sizeof(prefix), fmt, args); | ||
232 | va_end(args); | ||
233 | logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); | ||
234 | } | ||
235 | rcu_read_unlock(); | ||
236 | } | ||
237 | EXPORT_SYMBOL(nf_log_trace); | ||
238 | |||
215 | #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) | 239 | #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) |
216 | 240 | ||
217 | struct nf_log_buf { | 241 | struct nf_log_buf { |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 199fd0f27b0e..ac1a9528dbf2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -227,7 +227,7 @@ nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) | |||
227 | 227 | ||
228 | static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) | 228 | static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) |
229 | { | 229 | { |
230 | rule->genmask = 0; | 230 | rule->genmask &= ~(1 << gencursor_next(net)); |
231 | } | 231 | } |
232 | 232 | ||
233 | static int | 233 | static int |
@@ -1225,7 +1225,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
1225 | 1225 | ||
1226 | if (nla[NFTA_CHAIN_POLICY]) { | 1226 | if (nla[NFTA_CHAIN_POLICY]) { |
1227 | if ((chain != NULL && | 1227 | if ((chain != NULL && |
1228 | !(chain->flags & NFT_BASE_CHAIN)) || | 1228 | !(chain->flags & NFT_BASE_CHAIN))) |
1229 | return -EOPNOTSUPP; | ||
1230 | |||
1231 | if (chain == NULL && | ||
1229 | nla[NFTA_CHAIN_HOOK] == NULL) | 1232 | nla[NFTA_CHAIN_HOOK] == NULL) |
1230 | return -EOPNOTSUPP; | 1233 | return -EOPNOTSUPP; |
1231 | 1234 | ||
@@ -1711,9 +1714,12 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, | |||
1711 | } | 1714 | } |
1712 | nla_nest_end(skb, list); | 1715 | nla_nest_end(skb, list); |
1713 | 1716 | ||
1714 | if (rule->ulen && | 1717 | if (rule->udata) { |
1715 | nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule))) | 1718 | struct nft_userdata *udata = nft_userdata(rule); |
1716 | goto nla_put_failure; | 1719 | if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1, |
1720 | udata->data) < 0) | ||
1721 | goto nla_put_failure; | ||
1722 | } | ||
1717 | 1723 | ||
1718 | nlmsg_end(skb, nlh); | 1724 | nlmsg_end(skb, nlh); |
1719 | return 0; | 1725 | return 0; |
@@ -1896,11 +1902,12 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1896 | struct nft_table *table; | 1902 | struct nft_table *table; |
1897 | struct nft_chain *chain; | 1903 | struct nft_chain *chain; |
1898 | struct nft_rule *rule, *old_rule = NULL; | 1904 | struct nft_rule *rule, *old_rule = NULL; |
1905 | struct nft_userdata *udata; | ||
1899 | struct nft_trans *trans = NULL; | 1906 | struct nft_trans *trans = NULL; |
1900 | struct nft_expr *expr; | 1907 | struct nft_expr *expr; |
1901 | struct nft_ctx ctx; | 1908 | struct nft_ctx ctx; |
1902 | struct nlattr *tmp; | 1909 | struct nlattr *tmp; |
1903 | unsigned int size, i, n, ulen = 0; | 1910 | unsigned int size, i, n, ulen = 0, usize = 0; |
1904 | int err, rem; | 1911 | int err, rem; |
1905 | bool create; | 1912 | bool create; |
1906 | u64 handle, pos_handle; | 1913 | u64 handle, pos_handle; |
@@ -1968,12 +1975,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1968 | n++; | 1975 | n++; |
1969 | } | 1976 | } |
1970 | } | 1977 | } |
1978 | /* Check for overflow of dlen field */ | ||
1979 | err = -EFBIG; | ||
1980 | if (size >= 1 << 12) | ||
1981 | goto err1; | ||
1971 | 1982 | ||
1972 | if (nla[NFTA_RULE_USERDATA]) | 1983 | if (nla[NFTA_RULE_USERDATA]) { |
1973 | ulen = nla_len(nla[NFTA_RULE_USERDATA]); | 1984 | ulen = nla_len(nla[NFTA_RULE_USERDATA]); |
1985 | if (ulen > 0) | ||
1986 | usize = sizeof(struct nft_userdata) + ulen; | ||
1987 | } | ||
1974 | 1988 | ||
1975 | err = -ENOMEM; | 1989 | err = -ENOMEM; |
1976 | rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL); | 1990 | rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL); |
1977 | if (rule == NULL) | 1991 | if (rule == NULL) |
1978 | goto err1; | 1992 | goto err1; |
1979 | 1993 | ||
@@ -1981,10 +1995,13 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1981 | 1995 | ||
1982 | rule->handle = handle; | 1996 | rule->handle = handle; |
1983 | rule->dlen = size; | 1997 | rule->dlen = size; |
1984 | rule->ulen = ulen; | 1998 | rule->udata = ulen ? 1 : 0; |
1985 | 1999 | ||
1986 | if (ulen) | 2000 | if (ulen) { |
1987 | nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen); | 2001 | udata = nft_userdata(rule); |
2002 | udata->len = ulen - 1; | ||
2003 | nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen); | ||
2004 | } | ||
1988 | 2005 | ||
1989 | expr = nft_expr_first(rule); | 2006 | expr = nft_expr_first(rule); |
1990 | for (i = 0; i < n; i++) { | 2007 | for (i = 0; i < n; i++) { |
@@ -2031,12 +2048,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
2031 | 2048 | ||
2032 | err3: | 2049 | err3: |
2033 | list_del_rcu(&rule->list); | 2050 | list_del_rcu(&rule->list); |
2034 | if (trans) { | ||
2035 | list_del_rcu(&nft_trans_rule(trans)->list); | ||
2036 | nft_rule_clear(net, nft_trans_rule(trans)); | ||
2037 | nft_trans_destroy(trans); | ||
2038 | chain->use++; | ||
2039 | } | ||
2040 | err2: | 2051 | err2: |
2041 | nf_tables_rule_destroy(&ctx, rule); | 2052 | nf_tables_rule_destroy(&ctx, rule); |
2042 | err1: | 2053 | err1: |
@@ -3612,12 +3623,11 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
3612 | &te->elem, | 3623 | &te->elem, |
3613 | NFT_MSG_DELSETELEM, 0); | 3624 | NFT_MSG_DELSETELEM, 0); |
3614 | te->set->ops->get(te->set, &te->elem); | 3625 | te->set->ops->get(te->set, &te->elem); |
3615 | te->set->ops->remove(te->set, &te->elem); | ||
3616 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); | 3626 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); |
3617 | if (te->elem.flags & NFT_SET_MAP) { | 3627 | if (te->set->flags & NFT_SET_MAP && |
3618 | nft_data_uninit(&te->elem.data, | 3628 | !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END)) |
3619 | te->set->dtype); | 3629 | nft_data_uninit(&te->elem.data, te->set->dtype); |
3620 | } | 3630 | te->set->ops->remove(te->set, &te->elem); |
3621 | nft_trans_destroy(trans); | 3631 | nft_trans_destroy(trans); |
3622 | break; | 3632 | break; |
3623 | } | 3633 | } |
@@ -3658,7 +3668,7 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
3658 | { | 3668 | { |
3659 | struct net *net = sock_net(skb->sk); | 3669 | struct net *net = sock_net(skb->sk); |
3660 | struct nft_trans *trans, *next; | 3670 | struct nft_trans *trans, *next; |
3661 | struct nft_set *set; | 3671 | struct nft_trans_elem *te; |
3662 | 3672 | ||
3663 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { | 3673 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { |
3664 | switch (trans->msg_type) { | 3674 | switch (trans->msg_type) { |
@@ -3719,9 +3729,13 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
3719 | break; | 3729 | break; |
3720 | case NFT_MSG_NEWSETELEM: | 3730 | case NFT_MSG_NEWSETELEM: |
3721 | nft_trans_elem_set(trans)->nelems--; | 3731 | nft_trans_elem_set(trans)->nelems--; |
3722 | set = nft_trans_elem_set(trans); | 3732 | te = (struct nft_trans_elem *)trans->data; |
3723 | set->ops->get(set, &nft_trans_elem(trans)); | 3733 | te->set->ops->get(te->set, &te->elem); |
3724 | set->ops->remove(set, &nft_trans_elem(trans)); | 3734 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); |
3735 | if (te->set->flags & NFT_SET_MAP && | ||
3736 | !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END)) | ||
3737 | nft_data_uninit(&te->elem.data, te->set->dtype); | ||
3738 | te->set->ops->remove(te->set, &te->elem); | ||
3725 | nft_trans_destroy(trans); | 3739 | nft_trans_destroy(trans); |
3726 | break; | 3740 | break; |
3727 | case NFT_MSG_DELSETELEM: | 3741 | case NFT_MSG_DELSETELEM: |
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 3b90eb2b2c55..2d298dccb6dd 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c | |||
@@ -94,10 +94,10 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt, | |||
94 | { | 94 | { |
95 | struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); | 95 | struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); |
96 | 96 | ||
97 | nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, | 97 | nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, |
98 | pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", | 98 | pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", |
99 | chain->table->name, chain->name, comments[type], | 99 | chain->table->name, chain->name, comments[type], |
100 | rulenum); | 100 | rulenum); |
101 | } | 101 | } |
102 | 102 | ||
103 | unsigned int | 103 | unsigned int |
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index a5599fc51a6f..54330fb5efaf 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c | |||
@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, | |||
77 | if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) | 77 | if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) |
78 | return -EINVAL; | 78 | return -EINVAL; |
79 | 79 | ||
80 | /* Not all fields are initialized so first zero the tuple */ | ||
81 | memset(tuple, 0, sizeof(struct nf_conntrack_tuple)); | ||
82 | |||
80 | tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); | 83 | tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); |
81 | tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); | 84 | tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); |
82 | 85 | ||
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index c598f74063a1..65f3e2b6be44 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -123,7 +123,7 @@ static void | |||
123 | nft_target_set_tgchk_param(struct xt_tgchk_param *par, | 123 | nft_target_set_tgchk_param(struct xt_tgchk_param *par, |
124 | const struct nft_ctx *ctx, | 124 | const struct nft_ctx *ctx, |
125 | struct xt_target *target, void *info, | 125 | struct xt_target *target, void *info, |
126 | union nft_entry *entry, u8 proto, bool inv) | 126 | union nft_entry *entry, u16 proto, bool inv) |
127 | { | 127 | { |
128 | par->net = ctx->net; | 128 | par->net = ctx->net; |
129 | par->table = ctx->table->name; | 129 | par->table = ctx->table->name; |
@@ -133,11 +133,14 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
133 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; | 133 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; |
134 | break; | 134 | break; |
135 | case AF_INET6: | 135 | case AF_INET6: |
136 | if (proto) | ||
137 | entry->e6.ipv6.flags |= IP6T_F_PROTO; | ||
138 | |||
136 | entry->e6.ipv6.proto = proto; | 139 | entry->e6.ipv6.proto = proto; |
137 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 140 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
138 | break; | 141 | break; |
139 | case NFPROTO_BRIDGE: | 142 | case NFPROTO_BRIDGE: |
140 | entry->ebt.ethproto = proto; | 143 | entry->ebt.ethproto = (__force __be16)proto; |
141 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; | 144 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; |
142 | break; | 145 | break; |
143 | } | 146 | } |
@@ -171,7 +174,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] | |||
171 | [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, | 174 | [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, |
172 | }; | 175 | }; |
173 | 176 | ||
174 | static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv) | 177 | static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv) |
175 | { | 178 | { |
176 | struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; | 179 | struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; |
177 | u32 flags; | 180 | u32 flags; |
@@ -203,7 +206,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
203 | struct xt_target *target = expr->ops->data; | 206 | struct xt_target *target = expr->ops->data; |
204 | struct xt_tgchk_param par; | 207 | struct xt_tgchk_param par; |
205 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); | 208 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); |
206 | u8 proto = 0; | 209 | u16 proto = 0; |
207 | bool inv = false; | 210 | bool inv = false; |
208 | union nft_entry e = {}; | 211 | union nft_entry e = {}; |
209 | int ret; | 212 | int ret; |
@@ -334,7 +337,7 @@ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = { | |||
334 | static void | 337 | static void |
335 | nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | 338 | nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, |
336 | struct xt_match *match, void *info, | 339 | struct xt_match *match, void *info, |
337 | union nft_entry *entry, u8 proto, bool inv) | 340 | union nft_entry *entry, u16 proto, bool inv) |
338 | { | 341 | { |
339 | par->net = ctx->net; | 342 | par->net = ctx->net; |
340 | par->table = ctx->table->name; | 343 | par->table = ctx->table->name; |
@@ -344,11 +347,14 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
344 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; | 347 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; |
345 | break; | 348 | break; |
346 | case AF_INET6: | 349 | case AF_INET6: |
350 | if (proto) | ||
351 | entry->e6.ipv6.flags |= IP6T_F_PROTO; | ||
352 | |||
347 | entry->e6.ipv6.proto = proto; | 353 | entry->e6.ipv6.proto = proto; |
348 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 354 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
349 | break; | 355 | break; |
350 | case NFPROTO_BRIDGE: | 356 | case NFPROTO_BRIDGE: |
351 | entry->ebt.ethproto = proto; | 357 | entry->ebt.ethproto = (__force __be16)proto; |
352 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; | 358 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; |
353 | break; | 359 | break; |
354 | } | 360 | } |
@@ -385,7 +391,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
385 | struct xt_match *match = expr->ops->data; | 391 | struct xt_match *match = expr->ops->data; |
386 | struct xt_mtchk_param par; | 392 | struct xt_mtchk_param par; |
387 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); | 393 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); |
388 | u8 proto = 0; | 394 | u16 proto = 0; |
389 | bool inv = false; | 395 | bool inv = false; |
390 | union nft_entry e = {}; | 396 | union nft_entry e = {}; |
391 | int ret; | 397 | int ret; |
@@ -625,8 +631,12 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
625 | struct xt_match *match = nft_match->ops.data; | 631 | struct xt_match *match = nft_match->ops.data; |
626 | 632 | ||
627 | if (strcmp(match->name, mt_name) == 0 && | 633 | if (strcmp(match->name, mt_name) == 0 && |
628 | match->revision == rev && match->family == family) | 634 | match->revision == rev && match->family == family) { |
635 | if (!try_module_get(match->me)) | ||
636 | return ERR_PTR(-ENOENT); | ||
637 | |||
629 | return &nft_match->ops; | 638 | return &nft_match->ops; |
639 | } | ||
630 | } | 640 | } |
631 | 641 | ||
632 | match = xt_request_find_match(family, mt_name, rev); | 642 | match = xt_request_find_match(family, mt_name, rev); |
@@ -695,8 +705,12 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
695 | struct xt_target *target = nft_target->ops.data; | 705 | struct xt_target *target = nft_target->ops.data; |
696 | 706 | ||
697 | if (strcmp(target->name, tg_name) == 0 && | 707 | if (strcmp(target->name, tg_name) == 0 && |
698 | target->revision == rev && target->family == family) | 708 | target->revision == rev && target->family == family) { |
709 | if (!try_module_get(target->me)) | ||
710 | return ERR_PTR(-ENOENT); | ||
711 | |||
699 | return &nft_target->ops; | 712 | return &nft_target->ops; |
713 | } | ||
700 | } | 714 | } |
701 | 715 | ||
702 | target = xt_request_find_target(family, tg_name, rev); | 716 | target = xt_request_find_target(family, tg_name, rev); |
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 61e6c407476a..37c15e674884 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c | |||
@@ -153,6 +153,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, | |||
153 | iter->err = err; | 153 | iter->err = err; |
154 | goto out; | 154 | goto out; |
155 | } | 155 | } |
156 | |||
157 | continue; | ||
156 | } | 158 | } |
157 | 159 | ||
158 | if (iter->count < iter->skip) | 160 | if (iter->count < iter->skip) |
@@ -192,8 +194,6 @@ static int nft_hash_init(const struct nft_set *set, | |||
192 | .key_offset = offsetof(struct nft_hash_elem, key), | 194 | .key_offset = offsetof(struct nft_hash_elem, key), |
193 | .key_len = set->klen, | 195 | .key_len = set->klen, |
194 | .hashfn = jhash, | 196 | .hashfn = jhash, |
195 | .grow_decision = rht_grow_above_75, | ||
196 | .shrink_decision = rht_shrink_below_30, | ||
197 | }; | 197 | }; |
198 | 198 | ||
199 | return rhashtable_init(priv, ¶ms); | 199 | return rhashtable_init(priv, ¶ms); |
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index ef8a926752a9..50e1e5aaf4ce 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
@@ -513,8 +513,8 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par) | |||
513 | { | 513 | { |
514 | const struct ip6t_ip6 *i = par->entryinfo; | 514 | const struct ip6t_ip6 *i = par->entryinfo; |
515 | 515 | ||
516 | if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) | 516 | if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && |
517 | && !(i->flags & IP6T_INV_PROTO)) | 517 | !(i->invflags & IP6T_INV_PROTO)) |
518 | return 0; | 518 | return 0; |
519 | 519 | ||
520 | pr_info("Can be used only in combination with " | 520 | pr_info("Can be used only in combination with " |
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 30dbe34915ae..45e1b30e4fb2 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
@@ -378,12 +378,11 @@ static int recent_mt_check(const struct xt_mtchk_param *par, | |||
378 | mutex_lock(&recent_mutex); | 378 | mutex_lock(&recent_mutex); |
379 | t = recent_table_lookup(recent_net, info->name); | 379 | t = recent_table_lookup(recent_net, info->name); |
380 | if (t != NULL) { | 380 | if (t != NULL) { |
381 | if (info->hit_count > t->nstamps_max_mask) { | 381 | if (nstamp_mask > t->nstamps_max_mask) { |
382 | pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n", | 382 | spin_lock_bh(&recent_lock); |
383 | info->hit_count, t->nstamps_max_mask + 1, | 383 | recent_table_flush(t); |
384 | info->name); | 384 | t->nstamps_max_mask = nstamp_mask; |
385 | ret = -EINVAL; | 385 | spin_unlock_bh(&recent_lock); |
386 | goto out; | ||
387 | } | 386 | } |
388 | 387 | ||
389 | t->refcnt++; | 388 | t->refcnt++; |
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 1ba67931eb1b..13332dbf291d 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
@@ -243,12 +243,13 @@ static int | |||
243 | extract_icmp6_fields(const struct sk_buff *skb, | 243 | extract_icmp6_fields(const struct sk_buff *skb, |
244 | unsigned int outside_hdrlen, | 244 | unsigned int outside_hdrlen, |
245 | int *protocol, | 245 | int *protocol, |
246 | struct in6_addr **raddr, | 246 | const struct in6_addr **raddr, |
247 | struct in6_addr **laddr, | 247 | const struct in6_addr **laddr, |
248 | __be16 *rport, | 248 | __be16 *rport, |
249 | __be16 *lport) | 249 | __be16 *lport, |
250 | struct ipv6hdr *ipv6_var) | ||
250 | { | 251 | { |
251 | struct ipv6hdr *inside_iph, _inside_iph; | 252 | const struct ipv6hdr *inside_iph; |
252 | struct icmp6hdr *icmph, _icmph; | 253 | struct icmp6hdr *icmph, _icmph; |
253 | __be16 *ports, _ports[2]; | 254 | __be16 *ports, _ports[2]; |
254 | u8 inside_nexthdr; | 255 | u8 inside_nexthdr; |
@@ -263,12 +264,14 @@ extract_icmp6_fields(const struct sk_buff *skb, | |||
263 | if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK) | 264 | if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK) |
264 | return 1; | 265 | return 1; |
265 | 266 | ||
266 | inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph); | 267 | inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), |
268 | sizeof(*ipv6_var), ipv6_var); | ||
267 | if (inside_iph == NULL) | 269 | if (inside_iph == NULL) |
268 | return 1; | 270 | return 1; |
269 | inside_nexthdr = inside_iph->nexthdr; | 271 | inside_nexthdr = inside_iph->nexthdr; |
270 | 272 | ||
271 | inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), | 273 | inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + |
274 | sizeof(*ipv6_var), | ||
272 | &inside_nexthdr, &inside_fragoff); | 275 | &inside_nexthdr, &inside_fragoff); |
273 | if (inside_hdrlen < 0) | 276 | if (inside_hdrlen < 0) |
274 | return 1; /* hjm: Packet has no/incomplete transport layer headers. */ | 277 | return 1; /* hjm: Packet has no/incomplete transport layer headers. */ |
@@ -315,10 +318,10 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol, | |||
315 | static bool | 318 | static bool |
316 | socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) | 319 | socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) |
317 | { | 320 | { |
318 | struct ipv6hdr *iph = ipv6_hdr(skb); | 321 | struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb); |
319 | struct udphdr _hdr, *hp = NULL; | 322 | struct udphdr _hdr, *hp = NULL; |
320 | struct sock *sk = skb->sk; | 323 | struct sock *sk = skb->sk; |
321 | struct in6_addr *daddr = NULL, *saddr = NULL; | 324 | const struct in6_addr *daddr = NULL, *saddr = NULL; |
322 | __be16 uninitialized_var(dport), uninitialized_var(sport); | 325 | __be16 uninitialized_var(dport), uninitialized_var(sport); |
323 | int thoff = 0, uninitialized_var(tproto); | 326 | int thoff = 0, uninitialized_var(tproto); |
324 | const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; | 327 | const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; |
@@ -342,7 +345,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) | |||
342 | 345 | ||
343 | } else if (tproto == IPPROTO_ICMPV6) { | 346 | } else if (tproto == IPPROTO_ICMPV6) { |
344 | if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, | 347 | if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, |
345 | &sport, &dport)) | 348 | &sport, &dport, &ipv6_var)) |
346 | return false; | 349 | return false; |
347 | } else { | 350 | } else { |
348 | return false; | 351 | return false; |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2702673f0f23..05919bf3f670 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -3126,8 +3126,6 @@ static int __init netlink_proto_init(void) | |||
3126 | .key_len = sizeof(u32), /* portid */ | 3126 | .key_len = sizeof(u32), /* portid */ |
3127 | .hashfn = jhash, | 3127 | .hashfn = jhash, |
3128 | .max_shift = 16, /* 64K */ | 3128 | .max_shift = 16, /* 64K */ |
3129 | .grow_decision = rht_grow_above_75, | ||
3130 | .shrink_decision = rht_shrink_below_30, | ||
3131 | }; | 3129 | }; |
3132 | 3130 | ||
3133 | if (err != 0) | 3131 | if (err != 0) |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index ae5e77cdc0ca..5bae7243c577 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -2194,14 +2194,55 @@ static int __net_init ovs_init_net(struct net *net) | |||
2194 | return 0; | 2194 | return 0; |
2195 | } | 2195 | } |
2196 | 2196 | ||
2197 | static void __net_exit ovs_exit_net(struct net *net) | 2197 | static void __net_exit list_vports_from_net(struct net *net, struct net *dnet, |
2198 | struct list_head *head) | ||
2198 | { | 2199 | { |
2199 | struct datapath *dp, *dp_next; | ||
2200 | struct ovs_net *ovs_net = net_generic(net, ovs_net_id); | 2200 | struct ovs_net *ovs_net = net_generic(net, ovs_net_id); |
2201 | struct datapath *dp; | ||
2202 | |||
2203 | list_for_each_entry(dp, &ovs_net->dps, list_node) { | ||
2204 | int i; | ||
2205 | |||
2206 | for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { | ||
2207 | struct vport *vport; | ||
2208 | |||
2209 | hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) { | ||
2210 | struct netdev_vport *netdev_vport; | ||
2211 | |||
2212 | if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL) | ||
2213 | continue; | ||
2214 | |||
2215 | netdev_vport = netdev_vport_priv(vport); | ||
2216 | if (dev_net(netdev_vport->dev) == dnet) | ||
2217 | list_add(&vport->detach_list, head); | ||
2218 | } | ||
2219 | } | ||
2220 | } | ||
2221 | } | ||
2222 | |||
2223 | static void __net_exit ovs_exit_net(struct net *dnet) | ||
2224 | { | ||
2225 | struct datapath *dp, *dp_next; | ||
2226 | struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id); | ||
2227 | struct vport *vport, *vport_next; | ||
2228 | struct net *net; | ||
2229 | LIST_HEAD(head); | ||
2201 | 2230 | ||
2202 | ovs_lock(); | 2231 | ovs_lock(); |
2203 | list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) | 2232 | list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) |
2204 | __dp_destroy(dp); | 2233 | __dp_destroy(dp); |
2234 | |||
2235 | rtnl_lock(); | ||
2236 | for_each_net(net) | ||
2237 | list_vports_from_net(net, dnet, &head); | ||
2238 | rtnl_unlock(); | ||
2239 | |||
2240 | /* Detach all vports from given namespace. */ | ||
2241 | list_for_each_entry_safe(vport, vport_next, &head, detach_list) { | ||
2242 | list_del(&vport->detach_list); | ||
2243 | ovs_dp_detach_port(vport); | ||
2244 | } | ||
2245 | |||
2205 | ovs_unlock(); | 2246 | ovs_unlock(); |
2206 | 2247 | ||
2207 | cancel_work_sync(&ovs_net->dp_notify_work); | 2248 | cancel_work_sync(&ovs_net->dp_notify_work); |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 216f20b90aa5..22b18c145c92 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
@@ -2253,14 +2253,20 @@ static int masked_set_action_to_set_action_attr(const struct nlattr *a, | |||
2253 | struct sk_buff *skb) | 2253 | struct sk_buff *skb) |
2254 | { | 2254 | { |
2255 | const struct nlattr *ovs_key = nla_data(a); | 2255 | const struct nlattr *ovs_key = nla_data(a); |
2256 | struct nlattr *nla; | ||
2256 | size_t key_len = nla_len(ovs_key) / 2; | 2257 | size_t key_len = nla_len(ovs_key) / 2; |
2257 | 2258 | ||
2258 | /* Revert the conversion we did from a non-masked set action to | 2259 | /* Revert the conversion we did from a non-masked set action to |
2259 | * masked set action. | 2260 | * masked set action. |
2260 | */ | 2261 | */ |
2261 | if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key)) | 2262 | nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET); |
2263 | if (!nla) | ||
2262 | return -EMSGSIZE; | 2264 | return -EMSGSIZE; |
2263 | 2265 | ||
2266 | if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key))) | ||
2267 | return -EMSGSIZE; | ||
2268 | |||
2269 | nla_nest_end(skb, nla); | ||
2264 | return 0; | 2270 | return 0; |
2265 | } | 2271 | } |
2266 | 2272 | ||
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index ec2954ffc690..067a3fff1d2c 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport) | |||
274 | ASSERT_OVSL(); | 274 | ASSERT_OVSL(); |
275 | 275 | ||
276 | hlist_del_rcu(&vport->hash_node); | 276 | hlist_del_rcu(&vport->hash_node); |
277 | |||
278 | vport->ops->destroy(vport); | ||
279 | |||
280 | module_put(vport->ops->owner); | 277 | module_put(vport->ops->owner); |
278 | vport->ops->destroy(vport); | ||
281 | } | 279 | } |
282 | 280 | ||
283 | /** | 281 | /** |
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index f8ae295fb001..bc85331a6c60 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h | |||
@@ -103,6 +103,7 @@ struct vport_portids { | |||
103 | * @ops: Class structure. | 103 | * @ops: Class structure. |
104 | * @percpu_stats: Points to per-CPU statistics used and maintained by vport | 104 | * @percpu_stats: Points to per-CPU statistics used and maintained by vport |
105 | * @err_stats: Points to error statistics used and maintained by vport | 105 | * @err_stats: Points to error statistics used and maintained by vport |
106 | * @detach_list: list used for detaching vport in net-exit call. | ||
106 | */ | 107 | */ |
107 | struct vport { | 108 | struct vport { |
108 | struct rcu_head rcu; | 109 | struct rcu_head rcu; |
@@ -117,6 +118,7 @@ struct vport { | |||
117 | struct pcpu_sw_netstats __percpu *percpu_stats; | 118 | struct pcpu_sw_netstats __percpu *percpu_stats; |
118 | 119 | ||
119 | struct vport_err_stats err_stats; | 120 | struct vport_err_stats err_stats; |
121 | struct list_head detach_list; | ||
120 | }; | 122 | }; |
121 | 123 | ||
122 | /** | 124 | /** |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 9c28cec1a083..f8db7064d81c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -698,6 +698,10 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data) | |||
698 | 698 | ||
699 | if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { | 699 | if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { |
700 | if (!frozen) { | 700 | if (!frozen) { |
701 | if (!BLOCK_NUM_PKTS(pbd)) { | ||
702 | /* An empty block. Just refresh the timer. */ | ||
703 | goto refresh_timer; | ||
704 | } | ||
701 | prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); | 705 | prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); |
702 | if (!prb_dispatch_next_block(pkc, po)) | 706 | if (!prb_dispatch_next_block(pkc, po)) |
703 | goto refresh_timer; | 707 | goto refresh_timer; |
@@ -798,7 +802,11 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1, | |||
798 | h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; | 802 | h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; |
799 | h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; | 803 | h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; |
800 | } else { | 804 | } else { |
801 | /* Ok, we tmo'd - so get the current time */ | 805 | /* Ok, we tmo'd - so get the current time. |
806 | * | ||
807 | * It shouldn't really happen as we don't close empty | ||
808 | * blocks. See prb_retire_rx_blk_timer_expired(). | ||
809 | */ | ||
802 | struct timespec ts; | 810 | struct timespec ts; |
803 | getnstimeofday(&ts); | 811 | getnstimeofday(&ts); |
804 | h1->ts_last_pkt.ts_sec = ts.tv_sec; | 812 | h1->ts_last_pkt.ts_sec = ts.tv_sec; |
@@ -1349,14 +1357,14 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, | |||
1349 | return 0; | 1357 | return 0; |
1350 | } | 1358 | } |
1351 | 1359 | ||
1360 | if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { | ||
1361 | skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET); | ||
1362 | if (!skb) | ||
1363 | return 0; | ||
1364 | } | ||
1352 | switch (f->type) { | 1365 | switch (f->type) { |
1353 | case PACKET_FANOUT_HASH: | 1366 | case PACKET_FANOUT_HASH: |
1354 | default: | 1367 | default: |
1355 | if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { | ||
1356 | skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET); | ||
1357 | if (!skb) | ||
1358 | return 0; | ||
1359 | } | ||
1360 | idx = fanout_demux_hash(f, skb, num); | 1368 | idx = fanout_demux_hash(f, skb, num); |
1361 | break; | 1369 | break; |
1362 | case PACKET_FANOUT_LB: | 1370 | case PACKET_FANOUT_LB: |
@@ -3115,11 +3123,18 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
3115 | return 0; | 3123 | return 0; |
3116 | } | 3124 | } |
3117 | 3125 | ||
3118 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) | 3126 | static void packet_dev_mclist_delete(struct net_device *dev, |
3127 | struct packet_mclist **mlp) | ||
3119 | { | 3128 | { |
3120 | for ( ; i; i = i->next) { | 3129 | struct packet_mclist *ml; |
3121 | if (i->ifindex == dev->ifindex) | 3130 | |
3122 | packet_dev_mc(dev, i, what); | 3131 | while ((ml = *mlp) != NULL) { |
3132 | if (ml->ifindex == dev->ifindex) { | ||
3133 | packet_dev_mc(dev, ml, -1); | ||
3134 | *mlp = ml->next; | ||
3135 | kfree(ml); | ||
3136 | } else | ||
3137 | mlp = &ml->next; | ||
3123 | } | 3138 | } |
3124 | } | 3139 | } |
3125 | 3140 | ||
@@ -3196,12 +3211,11 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) | |||
3196 | packet_dev_mc(dev, ml, -1); | 3211 | packet_dev_mc(dev, ml, -1); |
3197 | kfree(ml); | 3212 | kfree(ml); |
3198 | } | 3213 | } |
3199 | rtnl_unlock(); | 3214 | break; |
3200 | return 0; | ||
3201 | } | 3215 | } |
3202 | } | 3216 | } |
3203 | rtnl_unlock(); | 3217 | rtnl_unlock(); |
3204 | return -EADDRNOTAVAIL; | 3218 | return 0; |
3205 | } | 3219 | } |
3206 | 3220 | ||
3207 | static void packet_flush_mclist(struct sock *sk) | 3221 | static void packet_flush_mclist(struct sock *sk) |
@@ -3551,7 +3565,7 @@ static int packet_notifier(struct notifier_block *this, | |||
3551 | switch (msg) { | 3565 | switch (msg) { |
3552 | case NETDEV_UNREGISTER: | 3566 | case NETDEV_UNREGISTER: |
3553 | if (po->mclist) | 3567 | if (po->mclist) |
3554 | packet_dev_mclist(dev, po->mclist, -1); | 3568 | packet_dev_mclist_delete(dev, &po->mclist); |
3555 | /* fallthrough */ | 3569 | /* fallthrough */ |
3556 | 3570 | ||
3557 | case NETDEV_DOWN: | 3571 | case NETDEV_DOWN: |
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index a817705ce2d0..dba8d0864f18 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c | |||
@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, | |||
88 | int *unpinned); | 88 | int *unpinned); |
89 | static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); | 89 | static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); |
90 | 90 | ||
91 | static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) | 91 | static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst, |
92 | struct rds_iw_device **rds_iwdev, | ||
93 | struct rdma_cm_id **cm_id) | ||
92 | { | 94 | { |
93 | struct rds_iw_device *iwdev; | 95 | struct rds_iw_device *iwdev; |
94 | struct rds_iw_cm_id *i_cm_id; | 96 | struct rds_iw_cm_id *i_cm_id; |
@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd | |||
112 | src_addr->sin_port, | 114 | src_addr->sin_port, |
113 | dst_addr->sin_addr.s_addr, | 115 | dst_addr->sin_addr.s_addr, |
114 | dst_addr->sin_port, | 116 | dst_addr->sin_port, |
115 | rs->rs_bound_addr, | 117 | src->sin_addr.s_addr, |
116 | rs->rs_bound_port, | 118 | src->sin_port, |
117 | rs->rs_conn_addr, | 119 | dst->sin_addr.s_addr, |
118 | rs->rs_conn_port); | 120 | dst->sin_port); |
119 | #ifdef WORKING_TUPLE_DETECTION | 121 | #ifdef WORKING_TUPLE_DETECTION |
120 | if (src_addr->sin_addr.s_addr == rs->rs_bound_addr && | 122 | if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr && |
121 | src_addr->sin_port == rs->rs_bound_port && | 123 | src_addr->sin_port == src->sin_port && |
122 | dst_addr->sin_addr.s_addr == rs->rs_conn_addr && | 124 | dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr && |
123 | dst_addr->sin_port == rs->rs_conn_port) { | 125 | dst_addr->sin_port == dst->sin_port) { |
124 | #else | 126 | #else |
125 | /* FIXME - needs to compare the local and remote | 127 | /* FIXME - needs to compare the local and remote |
126 | * ipaddr/port tuple, but the ipaddr is the only | 128 | * ipaddr/port tuple, but the ipaddr is the only |
@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd | |||
128 | * zero'ed. It doesn't appear to be properly populated | 130 | * zero'ed. It doesn't appear to be properly populated |
129 | * during connection setup... | 131 | * during connection setup... |
130 | */ | 132 | */ |
131 | if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) { | 133 | if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) { |
132 | #endif | 134 | #endif |
133 | spin_unlock_irq(&iwdev->spinlock); | 135 | spin_unlock_irq(&iwdev->spinlock); |
134 | *rds_iwdev = iwdev; | 136 | *rds_iwdev = iwdev; |
@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i | |||
180 | { | 182 | { |
181 | struct sockaddr_in *src_addr, *dst_addr; | 183 | struct sockaddr_in *src_addr, *dst_addr; |
182 | struct rds_iw_device *rds_iwdev_old; | 184 | struct rds_iw_device *rds_iwdev_old; |
183 | struct rds_sock rs; | ||
184 | struct rdma_cm_id *pcm_id; | 185 | struct rdma_cm_id *pcm_id; |
185 | int rc; | 186 | int rc; |
186 | 187 | ||
187 | src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; | 188 | src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; |
188 | dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; | 189 | dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; |
189 | 190 | ||
190 | rs.rs_bound_addr = src_addr->sin_addr.s_addr; | 191 | rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id); |
191 | rs.rs_bound_port = src_addr->sin_port; | ||
192 | rs.rs_conn_addr = dst_addr->sin_addr.s_addr; | ||
193 | rs.rs_conn_port = dst_addr->sin_port; | ||
194 | |||
195 | rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id); | ||
196 | if (rc) | 192 | if (rc) |
197 | rds_iw_remove_cm_id(rds_iwdev, cm_id); | 193 | rds_iw_remove_cm_id(rds_iwdev, cm_id); |
198 | 194 | ||
@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents, | |||
598 | struct rds_iw_device *rds_iwdev; | 594 | struct rds_iw_device *rds_iwdev; |
599 | struct rds_iw_mr *ibmr = NULL; | 595 | struct rds_iw_mr *ibmr = NULL; |
600 | struct rdma_cm_id *cm_id; | 596 | struct rdma_cm_id *cm_id; |
597 | struct sockaddr_in src = { | ||
598 | .sin_addr.s_addr = rs->rs_bound_addr, | ||
599 | .sin_port = rs->rs_bound_port, | ||
600 | }; | ||
601 | struct sockaddr_in dst = { | ||
602 | .sin_addr.s_addr = rs->rs_conn_addr, | ||
603 | .sin_port = rs->rs_conn_port, | ||
604 | }; | ||
601 | int ret; | 605 | int ret; |
602 | 606 | ||
603 | ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id); | 607 | ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id); |
604 | if (ret || !cm_id) { | 608 | if (ret || !cm_id) { |
605 | ret = -ENODEV; | 609 | ret = -ENODEV; |
606 | goto out; | 610 | goto out; |
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index c6be17a959a6..e0547f521f20 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c | |||
@@ -218,7 +218,8 @@ static void rxrpc_resend(struct rxrpc_call *call) | |||
218 | struct rxrpc_header *hdr; | 218 | struct rxrpc_header *hdr; |
219 | struct sk_buff *txb; | 219 | struct sk_buff *txb; |
220 | unsigned long *p_txb, resend_at; | 220 | unsigned long *p_txb, resend_at; |
221 | int loop, stop; | 221 | bool stop; |
222 | int loop; | ||
222 | u8 resend; | 223 | u8 resend; |
223 | 224 | ||
224 | _enter("{%d,%d,%d,%d},", | 225 | _enter("{%d,%d,%d,%d},", |
@@ -226,7 +227,7 @@ static void rxrpc_resend(struct rxrpc_call *call) | |||
226 | atomic_read(&call->sequence), | 227 | atomic_read(&call->sequence), |
227 | CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); | 228 | CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); |
228 | 229 | ||
229 | stop = 0; | 230 | stop = false; |
230 | resend = 0; | 231 | resend = 0; |
231 | resend_at = 0; | 232 | resend_at = 0; |
232 | 233 | ||
@@ -255,11 +256,11 @@ static void rxrpc_resend(struct rxrpc_call *call) | |||
255 | _proto("Tx DATA %%%u { #%d }", | 256 | _proto("Tx DATA %%%u { #%d }", |
256 | ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); | 257 | ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); |
257 | if (rxrpc_send_packet(call->conn->trans, txb) < 0) { | 258 | if (rxrpc_send_packet(call->conn->trans, txb) < 0) { |
258 | stop = 0; | 259 | stop = true; |
259 | sp->resend_at = jiffies + 3; | 260 | sp->resend_at = jiffies + 3; |
260 | } else { | 261 | } else { |
261 | sp->resend_at = | 262 | sp->resend_at = |
262 | jiffies + rxrpc_resend_timeout * HZ; | 263 | jiffies + rxrpc_resend_timeout; |
263 | } | 264 | } |
264 | } | 265 | } |
265 | 266 | ||
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c index 5394b6be46ec..0610efa83d72 100644 --- a/net/rxrpc/ar-error.c +++ b/net/rxrpc/ar-error.c | |||
@@ -42,7 +42,8 @@ void rxrpc_UDP_error_report(struct sock *sk) | |||
42 | _leave("UDP socket errqueue empty"); | 42 | _leave("UDP socket errqueue empty"); |
43 | return; | 43 | return; |
44 | } | 44 | } |
45 | if (!skb->len) { | 45 | serr = SKB_EXT_ERR(skb); |
46 | if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { | ||
46 | _leave("UDP empty message"); | 47 | _leave("UDP empty message"); |
47 | kfree_skb(skb); | 48 | kfree_skb(skb); |
48 | return; | 49 | return; |
@@ -50,7 +51,6 @@ void rxrpc_UDP_error_report(struct sock *sk) | |||
50 | 51 | ||
51 | rxrpc_new_skb(skb); | 52 | rxrpc_new_skb(skb); |
52 | 53 | ||
53 | serr = SKB_EXT_ERR(skb); | ||
54 | addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); | 54 | addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); |
55 | port = serr->port; | 55 | port = serr->port; |
56 | 56 | ||
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index 4575485ad1b4..19a560626dc4 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c | |||
@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
87 | if (!skb) { | 87 | if (!skb) { |
88 | /* nothing remains on the queue */ | 88 | /* nothing remains on the queue */ |
89 | if (copied && | 89 | if (copied && |
90 | (msg->msg_flags & MSG_PEEK || timeo == 0)) | 90 | (flags & MSG_PEEK || timeo == 0)) |
91 | goto out; | 91 | goto out; |
92 | 92 | ||
93 | /* wait for a message to turn up */ | 93 | /* wait for a message to turn up */ |
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 82c5d7fc1988..5f6288fa3f12 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c | |||
@@ -25,21 +25,41 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a, | |||
25 | struct tcf_result *res) | 25 | struct tcf_result *res) |
26 | { | 26 | { |
27 | struct tcf_bpf *b = a->priv; | 27 | struct tcf_bpf *b = a->priv; |
28 | int action; | 28 | int action, filter_res; |
29 | int filter_res; | ||
30 | 29 | ||
31 | spin_lock(&b->tcf_lock); | 30 | spin_lock(&b->tcf_lock); |
31 | |||
32 | b->tcf_tm.lastuse = jiffies; | 32 | b->tcf_tm.lastuse = jiffies; |
33 | bstats_update(&b->tcf_bstats, skb); | 33 | bstats_update(&b->tcf_bstats, skb); |
34 | action = b->tcf_action; | ||
35 | 34 | ||
36 | filter_res = BPF_PROG_RUN(b->filter, skb); | 35 | filter_res = BPF_PROG_RUN(b->filter, skb); |
37 | if (filter_res == 0) { | 36 | |
38 | /* Return code 0 from the BPF program | 37 | /* A BPF program may overwrite the default action opcode. |
39 | * is being interpreted as a drop here. | 38 | * Similarly as in cls_bpf, if filter_res == -1 we use the |
40 | */ | 39 | * default action specified from tc. |
41 | action = TC_ACT_SHOT; | 40 | * |
41 | * In case a different well-known TC_ACT opcode has been | ||
42 | * returned, it will overwrite the default one. | ||
43 | * | ||
44 | * For everything else that is unkown, TC_ACT_UNSPEC is | ||
45 | * returned. | ||
46 | */ | ||
47 | switch (filter_res) { | ||
48 | case TC_ACT_PIPE: | ||
49 | case TC_ACT_RECLASSIFY: | ||
50 | case TC_ACT_OK: | ||
51 | action = filter_res; | ||
52 | break; | ||
53 | case TC_ACT_SHOT: | ||
54 | action = filter_res; | ||
42 | b->tcf_qstats.drops++; | 55 | b->tcf_qstats.drops++; |
56 | break; | ||
57 | case TC_ACT_UNSPEC: | ||
58 | action = b->tcf_action; | ||
59 | break; | ||
60 | default: | ||
61 | action = TC_ACT_UNSPEC; | ||
62 | break; | ||
43 | } | 63 | } |
44 | 64 | ||
45 | spin_unlock(&b->tcf_lock); | 65 | spin_unlock(&b->tcf_lock); |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 09487afbfd51..95fdf4e40051 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -78,8 +78,11 @@ struct tc_u_hnode { | |||
78 | struct tc_u_common *tp_c; | 78 | struct tc_u_common *tp_c; |
79 | int refcnt; | 79 | int refcnt; |
80 | unsigned int divisor; | 80 | unsigned int divisor; |
81 | struct tc_u_knode __rcu *ht[1]; | ||
82 | struct rcu_head rcu; | 81 | struct rcu_head rcu; |
82 | /* The 'ht' field MUST be the last field in structure to allow for | ||
83 | * more entries allocated at end of structure. | ||
84 | */ | ||
85 | struct tc_u_knode __rcu *ht[1]; | ||
83 | }; | 86 | }; |
84 | 87 | ||
85 | struct tc_u_common { | 88 | struct tc_u_common { |
diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 6742200b1307..fbb7ebfc58c6 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c | |||
@@ -228,6 +228,7 @@ static int tcf_em_validate(struct tcf_proto *tp, | |||
228 | * to replay the request. | 228 | * to replay the request. |
229 | */ | 229 | */ |
230 | module_put(em->ops->owner); | 230 | module_put(em->ops->owner); |
231 | em->ops = NULL; | ||
231 | err = -EAGAIN; | 232 | err = -EAGAIN; |
232 | } | 233 | } |
233 | #endif | 234 | #endif |
diff --git a/net/socket.c b/net/socket.c index bbedbfcb42c2..245330ca0015 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -1702,6 +1702,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, | |||
1702 | 1702 | ||
1703 | if (len > INT_MAX) | 1703 | if (len > INT_MAX) |
1704 | len = INT_MAX; | 1704 | len = INT_MAX; |
1705 | if (unlikely(!access_ok(VERIFY_READ, buff, len))) | ||
1706 | return -EFAULT; | ||
1705 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1707 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
1706 | if (!sock) | 1708 | if (!sock) |
1707 | goto out; | 1709 | goto out; |
@@ -1760,6 +1762,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, | |||
1760 | 1762 | ||
1761 | if (size > INT_MAX) | 1763 | if (size > INT_MAX) |
1762 | size = INT_MAX; | 1764 | size = INT_MAX; |
1765 | if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size))) | ||
1766 | return -EFAULT; | ||
1763 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1767 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
1764 | if (!sock) | 1768 | if (!sock) |
1765 | goto out; | 1769 | goto out; |
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c index abbb7dcd1689..59eeed43eda2 100644 --- a/net/sunrpc/auth_gss/gss_rpc_upcall.c +++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c | |||
@@ -217,6 +217,8 @@ static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg) | |||
217 | 217 | ||
218 | for (i = 0; i < arg->npages && arg->pages[i]; i++) | 218 | for (i = 0; i < arg->npages && arg->pages[i]; i++) |
219 | __free_page(arg->pages[i]); | 219 | __free_page(arg->pages[i]); |
220 | |||
221 | kfree(arg->pages); | ||
220 | } | 222 | } |
221 | 223 | ||
222 | static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg) | 224 | static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg) |
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 224a82f24d3c..1095be9c80ab 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -463,6 +463,8 @@ static int rsc_parse(struct cache_detail *cd, | |||
463 | /* number of additional gid's */ | 463 | /* number of additional gid's */ |
464 | if (get_int(&mesg, &N)) | 464 | if (get_int(&mesg, &N)) |
465 | goto out; | 465 | goto out; |
466 | if (N < 0 || N > NGROUPS_MAX) | ||
467 | goto out; | ||
466 | status = -ENOMEM; | 468 | status = -ENOMEM; |
467 | rsci.cred.cr_group_info = groups_alloc(N); | 469 | rsci.cred.cr_group_info = groups_alloc(N); |
468 | if (rsci.cred.cr_group_info == NULL) | 470 | if (rsci.cred.cr_group_info == NULL) |
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 33fb105d4352..5199bb1a017e 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -921,7 +921,7 @@ static unsigned int cache_poll(struct file *filp, poll_table *wait, | |||
921 | poll_wait(filp, &queue_wait, wait); | 921 | poll_wait(filp, &queue_wait, wait); |
922 | 922 | ||
923 | /* alway allow write */ | 923 | /* alway allow write */ |
924 | mask = POLL_OUT | POLLWRNORM; | 924 | mask = POLLOUT | POLLWRNORM; |
925 | 925 | ||
926 | if (!rp) | 926 | if (!rp) |
927 | return mask; | 927 | return mask; |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 612aa73bbc60..e6ce1517367f 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -303,9 +303,7 @@ static int rpc_client_register(struct rpc_clnt *clnt, | |||
303 | struct super_block *pipefs_sb; | 303 | struct super_block *pipefs_sb; |
304 | int err; | 304 | int err; |
305 | 305 | ||
306 | err = rpc_clnt_debugfs_register(clnt); | 306 | rpc_clnt_debugfs_register(clnt); |
307 | if (err) | ||
308 | return err; | ||
309 | 307 | ||
310 | pipefs_sb = rpc_get_sb_net(net); | 308 | pipefs_sb = rpc_get_sb_net(net); |
311 | if (pipefs_sb) { | 309 | if (pipefs_sb) { |
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index e811f390f9f6..82962f7e6e88 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c | |||
@@ -129,48 +129,52 @@ static const struct file_operations tasks_fops = { | |||
129 | .release = tasks_release, | 129 | .release = tasks_release, |
130 | }; | 130 | }; |
131 | 131 | ||
132 | int | 132 | void |
133 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | 133 | rpc_clnt_debugfs_register(struct rpc_clnt *clnt) |
134 | { | 134 | { |
135 | int len, err; | 135 | int len; |
136 | char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */ | 136 | char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */ |
137 | struct rpc_xprt *xprt; | ||
137 | 138 | ||
138 | /* Already registered? */ | 139 | /* Already registered? */ |
139 | if (clnt->cl_debugfs) | 140 | if (clnt->cl_debugfs || !rpc_clnt_dir) |
140 | return 0; | 141 | return; |
141 | 142 | ||
142 | len = snprintf(name, sizeof(name), "%x", clnt->cl_clid); | 143 | len = snprintf(name, sizeof(name), "%x", clnt->cl_clid); |
143 | if (len >= sizeof(name)) | 144 | if (len >= sizeof(name)) |
144 | return -EINVAL; | 145 | return; |
145 | 146 | ||
146 | /* make the per-client dir */ | 147 | /* make the per-client dir */ |
147 | clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir); | 148 | clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir); |
148 | if (!clnt->cl_debugfs) | 149 | if (!clnt->cl_debugfs) |
149 | return -ENOMEM; | 150 | return; |
150 | 151 | ||
151 | /* make tasks file */ | 152 | /* make tasks file */ |
152 | err = -ENOMEM; | ||
153 | if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs, | 153 | if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs, |
154 | clnt, &tasks_fops)) | 154 | clnt, &tasks_fops)) |
155 | goto out_err; | 155 | goto out_err; |
156 | 156 | ||
157 | err = -EINVAL; | ||
158 | rcu_read_lock(); | 157 | rcu_read_lock(); |
158 | xprt = rcu_dereference(clnt->cl_xprt); | ||
159 | /* no "debugfs" dentry? Don't bother with the symlink. */ | ||
160 | if (!xprt->debugfs) { | ||
161 | rcu_read_unlock(); | ||
162 | return; | ||
163 | } | ||
159 | len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", | 164 | len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", |
160 | rcu_dereference(clnt->cl_xprt)->debugfs->d_name.name); | 165 | xprt->debugfs->d_name.name); |
161 | rcu_read_unlock(); | 166 | rcu_read_unlock(); |
167 | |||
162 | if (len >= sizeof(name)) | 168 | if (len >= sizeof(name)) |
163 | goto out_err; | 169 | goto out_err; |
164 | 170 | ||
165 | err = -ENOMEM; | ||
166 | if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name)) | 171 | if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name)) |
167 | goto out_err; | 172 | goto out_err; |
168 | 173 | ||
169 | return 0; | 174 | return; |
170 | out_err: | 175 | out_err: |
171 | debugfs_remove_recursive(clnt->cl_debugfs); | 176 | debugfs_remove_recursive(clnt->cl_debugfs); |
172 | clnt->cl_debugfs = NULL; | 177 | clnt->cl_debugfs = NULL; |
173 | return err; | ||
174 | } | 178 | } |
175 | 179 | ||
176 | void | 180 | void |
@@ -226,33 +230,33 @@ static const struct file_operations xprt_info_fops = { | |||
226 | .release = xprt_info_release, | 230 | .release = xprt_info_release, |
227 | }; | 231 | }; |
228 | 232 | ||
229 | int | 233 | void |
230 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) | 234 | rpc_xprt_debugfs_register(struct rpc_xprt *xprt) |
231 | { | 235 | { |
232 | int len, id; | 236 | int len, id; |
233 | static atomic_t cur_id; | 237 | static atomic_t cur_id; |
234 | char name[9]; /* 8 hex digits + NULL term */ | 238 | char name[9]; /* 8 hex digits + NULL term */ |
235 | 239 | ||
240 | if (!rpc_xprt_dir) | ||
241 | return; | ||
242 | |||
236 | id = (unsigned int)atomic_inc_return(&cur_id); | 243 | id = (unsigned int)atomic_inc_return(&cur_id); |
237 | 244 | ||
238 | len = snprintf(name, sizeof(name), "%x", id); | 245 | len = snprintf(name, sizeof(name), "%x", id); |
239 | if (len >= sizeof(name)) | 246 | if (len >= sizeof(name)) |
240 | return -EINVAL; | 247 | return; |
241 | 248 | ||
242 | /* make the per-client dir */ | 249 | /* make the per-client dir */ |
243 | xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir); | 250 | xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir); |
244 | if (!xprt->debugfs) | 251 | if (!xprt->debugfs) |
245 | return -ENOMEM; | 252 | return; |
246 | 253 | ||
247 | /* make tasks file */ | 254 | /* make tasks file */ |
248 | if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs, | 255 | if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs, |
249 | xprt, &xprt_info_fops)) { | 256 | xprt, &xprt_info_fops)) { |
250 | debugfs_remove_recursive(xprt->debugfs); | 257 | debugfs_remove_recursive(xprt->debugfs); |
251 | xprt->debugfs = NULL; | 258 | xprt->debugfs = NULL; |
252 | return -ENOMEM; | ||
253 | } | 259 | } |
254 | |||
255 | return 0; | ||
256 | } | 260 | } |
257 | 261 | ||
258 | void | 262 | void |
@@ -266,14 +270,17 @@ void __exit | |||
266 | sunrpc_debugfs_exit(void) | 270 | sunrpc_debugfs_exit(void) |
267 | { | 271 | { |
268 | debugfs_remove_recursive(topdir); | 272 | debugfs_remove_recursive(topdir); |
273 | topdir = NULL; | ||
274 | rpc_clnt_dir = NULL; | ||
275 | rpc_xprt_dir = NULL; | ||
269 | } | 276 | } |
270 | 277 | ||
271 | int __init | 278 | void __init |
272 | sunrpc_debugfs_init(void) | 279 | sunrpc_debugfs_init(void) |
273 | { | 280 | { |
274 | topdir = debugfs_create_dir("sunrpc", NULL); | 281 | topdir = debugfs_create_dir("sunrpc", NULL); |
275 | if (!topdir) | 282 | if (!topdir) |
276 | goto out; | 283 | return; |
277 | 284 | ||
278 | rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir); | 285 | rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir); |
279 | if (!rpc_clnt_dir) | 286 | if (!rpc_clnt_dir) |
@@ -283,10 +290,9 @@ sunrpc_debugfs_init(void) | |||
283 | if (!rpc_xprt_dir) | 290 | if (!rpc_xprt_dir) |
284 | goto out_remove; | 291 | goto out_remove; |
285 | 292 | ||
286 | return 0; | 293 | return; |
287 | out_remove: | 294 | out_remove: |
288 | debugfs_remove_recursive(topdir); | 295 | debugfs_remove_recursive(topdir); |
289 | topdir = NULL; | 296 | topdir = NULL; |
290 | out: | 297 | rpc_clnt_dir = NULL; |
291 | return -ENOMEM; | ||
292 | } | 298 | } |
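The debugfs rework above turns registration into a best-effort operation: the helpers return void, bail out quietly when the parent directory was never created, and undo their own partial setup instead of propagating an error to callers. A minimal userspace sketch of that pattern follows; all names (dbg_dir, top_dir, client_debug_register) are hypothetical and not the kernel debugfs API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a debugfs-style directory handle. */
struct dbg_dir { char name[32]; };

static struct dbg_dir *top_dir;	/* NULL if the facility never came up */

/* Best-effort: a failure here must never fail the caller. */
static void client_debug_register(const char *id, struct dbg_dir **slot)
{
	if (*slot || !top_dir)		/* already registered, or no facility */
		return;

	*slot = calloc(1, sizeof(**slot));
	if (!*slot)			/* give up silently, leave *slot NULL */
		return;

	if (snprintf((*slot)->name, sizeof((*slot)->name), "%s/%s",
		     top_dir->name, id) >= (int)sizeof((*slot)->name)) {
		free(*slot);		/* partial setup: undo and forget */
		*slot = NULL;
	}
}

int main(void)
{
	struct dbg_dir root = { "debug" }, *client = NULL;

	top_dir = &root;
	client_debug_register("rpc_clnt", &client);
	printf("registered: %s\n", client ? client->name : "(skipped)");
	free(client);
	return 0;
}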
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index e37fbed87956..ee5d3d253102 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -98,10 +98,7 @@ init_sunrpc(void) | |||
98 | if (err) | 98 | if (err) |
99 | goto out4; | 99 | goto out4; |
100 | 100 | ||
101 | err = sunrpc_debugfs_init(); | 101 | sunrpc_debugfs_init(); |
102 | if (err) | ||
103 | goto out5; | ||
104 | |||
105 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 102 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
106 | rpc_register_sysctl(); | 103 | rpc_register_sysctl(); |
107 | #endif | 104 | #endif |
@@ -109,8 +106,6 @@ init_sunrpc(void) | |||
109 | init_socket_xprt(); /* clnt sock transport */ | 106 | init_socket_xprt(); /* clnt sock transport */ |
110 | return 0; | 107 | return 0; |
111 | 108 | ||
112 | out5: | ||
113 | unregister_rpc_pipefs(); | ||
114 | out4: | 109 | out4: |
115 | unregister_pernet_subsys(&sunrpc_net_ops); | 110 | unregister_pernet_subsys(&sunrpc_net_ops); |
116 | out3: | 111 | out3: |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index e3015aede0d9..9949722d99ce 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -1331,7 +1331,6 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net) | |||
1331 | */ | 1331 | */ |
1332 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) | 1332 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args) |
1333 | { | 1333 | { |
1334 | int err; | ||
1335 | struct rpc_xprt *xprt; | 1334 | struct rpc_xprt *xprt; |
1336 | struct xprt_class *t; | 1335 | struct xprt_class *t; |
1337 | 1336 | ||
@@ -1372,11 +1371,7 @@ found: | |||
1372 | return ERR_PTR(-ENOMEM); | 1371 | return ERR_PTR(-ENOMEM); |
1373 | } | 1372 | } |
1374 | 1373 | ||
1375 | err = rpc_xprt_debugfs_register(xprt); | 1374 | rpc_xprt_debugfs_register(xprt); |
1376 | if (err) { | ||
1377 | xprt_destroy(xprt); | ||
1378 | return ERR_PTR(err); | ||
1379 | } | ||
1380 | 1375 | ||
1381 | dprintk("RPC: created transport %p with %u slots\n", xprt, | 1376 | dprintk("RPC: created transport %p with %u slots\n", xprt, |
1382 | xprt->max_reqs); | 1377 | xprt->max_reqs); |
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 7e9acd9361c5..91ffde82fa0c 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
@@ -738,8 +738,9 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep) | |||
738 | struct rpc_xprt *xprt = rep->rr_xprt; | 738 | struct rpc_xprt *xprt = rep->rr_xprt; |
739 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); | 739 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); |
740 | __be32 *iptr; | 740 | __be32 *iptr; |
741 | int credits, rdmalen, status; | 741 | int rdmalen, status; |
742 | unsigned long cwnd; | 742 | unsigned long cwnd; |
743 | u32 credits; | ||
743 | 744 | ||
744 | /* Check status. If bad, signal disconnect and return rep to pool */ | 745 | /* Check status. If bad, signal disconnect and return rep to pool */ |
745 | if (rep->rr_len == ~0U) { | 746 | if (rep->rr_len == ~0U) { |
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index d1b70397c60f..0a16fb6f0885 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -285,7 +285,7 @@ rpcr_to_rdmar(struct rpc_rqst *rqst) | |||
285 | */ | 285 | */ |
286 | struct rpcrdma_buffer { | 286 | struct rpcrdma_buffer { |
287 | spinlock_t rb_lock; /* protects indexes */ | 287 | spinlock_t rb_lock; /* protects indexes */ |
288 | int rb_max_requests;/* client max requests */ | 288 | u32 rb_max_requests;/* client max requests */ |
289 | struct list_head rb_mws; /* optional memory windows/fmrs/frmrs */ | 289 | struct list_head rb_mws; /* optional memory windows/fmrs/frmrs */ |
290 | struct list_head rb_all; | 290 | struct list_head rb_all; |
291 | int rb_send_index; | 291 | int rb_send_index; |
diff --git a/net/tipc/core.c b/net/tipc/core.c index 935205e6bcfe..be1c9fa60b09 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c | |||
@@ -152,11 +152,11 @@ out_netlink: | |||
152 | static void __exit tipc_exit(void) | 152 | static void __exit tipc_exit(void) |
153 | { | 153 | { |
154 | tipc_bearer_cleanup(); | 154 | tipc_bearer_cleanup(); |
155 | unregister_pernet_subsys(&tipc_net_ops); | ||
155 | tipc_netlink_stop(); | 156 | tipc_netlink_stop(); |
156 | tipc_netlink_compat_stop(); | 157 | tipc_netlink_compat_stop(); |
157 | tipc_socket_stop(); | 158 | tipc_socket_stop(); |
158 | tipc_unregister_sysctl(); | 159 | tipc_unregister_sysctl(); |
159 | unregister_pernet_subsys(&tipc_net_ops); | ||
160 | 160 | ||
161 | pr_info("Deactivated\n"); | 161 | pr_info("Deactivated\n"); |
162 | } | 162 | } |
diff --git a/net/tipc/link.c b/net/tipc/link.c index a4cf364316de..14f09b3cb87c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -464,10 +464,11 @@ void tipc_link_reset(struct tipc_link *l_ptr) | |||
464 | /* Clean up all queues, except inputq: */ | 464 | /* Clean up all queues, except inputq: */ |
465 | __skb_queue_purge(&l_ptr->outqueue); | 465 | __skb_queue_purge(&l_ptr->outqueue); |
466 | __skb_queue_purge(&l_ptr->deferred_queue); | 466 | __skb_queue_purge(&l_ptr->deferred_queue); |
467 | skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq); | 467 | if (!owner->inputq) |
468 | if (!skb_queue_empty(&l_ptr->inputq)) | 468 | owner->inputq = &l_ptr->inputq; |
469 | skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq); | ||
470 | if (!skb_queue_empty(owner->inputq)) | ||
469 | owner->action_flags |= TIPC_MSG_EVT; | 471 | owner->action_flags |= TIPC_MSG_EVT; |
470 | owner->inputq = &l_ptr->inputq; | ||
471 | l_ptr->next_out = NULL; | 472 | l_ptr->next_out = NULL; |
472 | l_ptr->unacked_window = 0; | 473 | l_ptr->unacked_window = 0; |
473 | l_ptr->checkpoint = 1; | 474 | l_ptr->checkpoint = 1; |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index f73e975af80b..b4d4467d0bb0 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -2364,8 +2364,6 @@ int tipc_sk_rht_init(struct net *net) | |||
2364 | .hashfn = jhash, | 2364 | .hashfn = jhash, |
2365 | .max_shift = 20, /* 1M */ | 2365 | .max_shift = 20, /* 1M */ |
2366 | .min_shift = 8, /* 256 */ | 2366 | .min_shift = 8, /* 256 */ |
2367 | .grow_decision = rht_grow_above_75, | ||
2368 | .shrink_decision = rht_shrink_below_30, | ||
2369 | }; | 2367 | }; |
2370 | 2368 | ||
2371 | return rhashtable_init(&tn->sk_rht, &rht_params); | 2369 | return rhashtable_init(&tn->sk_rht, &rht_params); |
diff --git a/net/wireless/core.c b/net/wireless/core.c index 3af0ecf1cc16..2a0bbd22854b 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -1199,6 +1199,7 @@ out_fail_wq: | |||
1199 | regulatory_exit(); | 1199 | regulatory_exit(); |
1200 | out_fail_reg: | 1200 | out_fail_reg: |
1201 | debugfs_remove(ieee80211_debugfs_dir); | 1201 | debugfs_remove(ieee80211_debugfs_dir); |
1202 | nl80211_exit(); | ||
1202 | out_fail_nl80211: | 1203 | out_fail_nl80211: |
1203 | unregister_netdevice_notifier(&cfg80211_netdev_notifier); | 1204 | unregister_netdevice_notifier(&cfg80211_netdev_notifier); |
1204 | out_fail_notifier: | 1205 | out_fail_notifier: |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d78fd8b54515..b6f84f6a2a09 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -2654,10 +2654,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
2654 | return err; | 2654 | return err; |
2655 | } | 2655 | } |
2656 | 2656 | ||
2657 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
2658 | if (!msg) | ||
2659 | return -ENOMEM; | ||
2660 | |||
2661 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? | 2657 | err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? |
2662 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, | 2658 | info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, |
2663 | &flags); | 2659 | &flags); |
@@ -2666,6 +2662,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) | |||
2666 | !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) | 2662 | !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) |
2667 | return -EOPNOTSUPP; | 2663 | return -EOPNOTSUPP; |
2668 | 2664 | ||
2665 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
2666 | if (!msg) | ||
2667 | return -ENOMEM; | ||
2668 | |||
2669 | wdev = rdev_add_virtual_intf(rdev, | 2669 | wdev = rdev_add_virtual_intf(rdev, |
2670 | nla_data(info->attrs[NL80211_ATTR_IFNAME]), | 2670 | nla_data(info->attrs[NL80211_ATTR_IFNAME]), |
2671 | type, err ? NULL : &flags, ¶ms); | 2671 | type, err ? NULL : &flags, ¶ms); |
@@ -4400,6 +4400,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
4400 | if (parse_station_flags(info, dev->ieee80211_ptr->iftype, ¶ms)) | 4400 | if (parse_station_flags(info, dev->ieee80211_ptr->iftype, ¶ms)) |
4401 | return -EINVAL; | 4401 | return -EINVAL; |
4402 | 4402 | ||
4403 | /* HT/VHT requires QoS, but if we don't have that, just ignore HT/VHT | ||
4404 | * as userspace might just pass through the capabilities from the IEs | ||
4405 | * directly, rather than enforcing this restriction and returning an | ||
4406 | * error in this case. | ||
4407 | */ | ||
4408 | if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) { | ||
4409 | params.ht_capa = NULL; | ||
4410 | params.vht_capa = NULL; | ||
4411 | } | ||
4412 | |||
4403 | /* When you run into this, adjust the code below for the new flag */ | 4413 | /* When you run into this, adjust the code below for the new flag */ |
4404 | BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); | 4414 | BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); |
4405 | 4415 | ||
@@ -12528,9 +12538,7 @@ static int cfg80211_net_detect_results(struct sk_buff *msg, | |||
12528 | } | 12538 | } |
12529 | 12539 | ||
12530 | for (j = 0; j < match->n_channels; j++) { | 12540 | for (j = 0; j < match->n_channels; j++) { |
12531 | if (nla_put_u32(msg, | 12541 | if (nla_put_u32(msg, j, match->channels[j])) { |
12532 | NL80211_ATTR_WIPHY_FREQ, | ||
12533 | match->channels[j])) { | ||
12534 | nla_nest_cancel(msg, nl_freqs); | 12542 | nla_nest_cancel(msg, nl_freqs); |
12535 | nla_nest_cancel(msg, nl_match); | 12543 | nla_nest_cancel(msg, nl_match); |
12536 | goto out; | 12544 | goto out; |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index b586d0dcb09e..48dfc7b4e981 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -228,7 +228,7 @@ static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work); | |||
228 | 228 | ||
229 | /* We keep a static world regulatory domain in case of the absence of CRDA */ | 229 | /* We keep a static world regulatory domain in case of the absence of CRDA */ |
230 | static const struct ieee80211_regdomain world_regdom = { | 230 | static const struct ieee80211_regdomain world_regdom = { |
231 | .n_reg_rules = 6, | 231 | .n_reg_rules = 8, |
232 | .alpha2 = "00", | 232 | .alpha2 = "00", |
233 | .reg_rules = { | 233 | .reg_rules = { |
234 | /* IEEE 802.11b/g, channels 1..11 */ | 234 | /* IEEE 802.11b/g, channels 1..11 */ |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index cee479bc655c..638af0655aaf 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -2269,11 +2269,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
2269 | * have the xfrm_state's. We need to wait for KM to | 2269 | * have the xfrm_state's. We need to wait for KM to |
2270 | * negotiate new SA's or bail out with error.*/ | 2270 | * negotiate new SA's or bail out with error.*/ |
2271 | if (net->xfrm.sysctl_larval_drop) { | 2271 | if (net->xfrm.sysctl_larval_drop) { |
2272 | dst_release(dst); | ||
2273 | xfrm_pols_put(pols, drop_pols); | ||
2274 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | 2272 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); |
2275 | 2273 | err = -EREMOTE; | |
2276 | return ERR_PTR(-EREMOTE); | 2274 | goto error; |
2277 | } | 2275 | } |
2278 | 2276 | ||
2279 | err = -EAGAIN; | 2277 | err = -EAGAIN; |
@@ -2324,7 +2322,8 @@ nopol: | |||
2324 | error: | 2322 | error: |
2325 | dst_release(dst); | 2323 | dst_release(dst); |
2326 | dropdst: | 2324 | dropdst: |
2327 | dst_release(dst_orig); | 2325 | if (!(flags & XFRM_LOOKUP_KEEP_DST_REF)) |
2326 | dst_release(dst_orig); | ||
2328 | xfrm_pols_put(pols, drop_pols); | 2327 | xfrm_pols_put(pols, drop_pols); |
2329 | return ERR_PTR(err); | 2328 | return ERR_PTR(err); |
2330 | } | 2329 | } |
@@ -2338,7 +2337,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, | |||
2338 | struct sock *sk, int flags) | 2337 | struct sock *sk, int flags) |
2339 | { | 2338 | { |
2340 | struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, | 2339 | struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, |
2341 | flags | XFRM_LOOKUP_QUEUE); | 2340 | flags | XFRM_LOOKUP_QUEUE | |
2341 | XFRM_LOOKUP_KEEP_DST_REF); | ||
2342 | 2342 | ||
2343 | if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) | 2343 | if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) |
2344 | return make_blackhole(net, dst_orig->ops->family, dst_orig); | 2344 | return make_blackhole(net, dst_orig->ops->family, dst_orig); |
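The XFRM_LOOKUP_KEEP_DST_REF change encodes a reference-ownership rule: on the error path the callee drops the caller's dst reference only when the caller did not ask to keep it. A small refcount sketch of the same convention, with hypothetical names (obj, lookup, LOOKUP_KEEP_REF), not the xfrm API:

#include <stdio.h>

#define LOOKUP_KEEP_REF 0x1	/* caller keeps its own reference on failure */

struct obj { int refs; };

static void obj_put(struct obj *o) { if (o) o->refs--; }

/* Returns 0 on success, -1 on failure.  On failure the caller's reference
 * is consumed unless LOOKUP_KEEP_REF was passed. */
static int lookup(struct obj *o, int flags)
{
	int failed = 1;			/* pretend the lookup always fails */

	if (failed) {
		if (!(flags & LOOKUP_KEEP_REF))
			obj_put(o);	/* consume the caller's reference */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct obj a = { .refs = 1 }, b = { .refs = 1 };

	lookup(&a, 0);			/* reference consumed: refs drops to 0 */
	lookup(&b, LOOKUP_KEEP_REF);	/* caller still owns its reference */
	printf("a.refs=%d b.refs=%d\n", a.refs, b.refs);
	return 0;
}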
diff --git a/scripts/gdb/linux/__init__.py b/scripts/gdb/linux/__init__.py new file mode 100644 index 000000000000..4680fb176337 --- /dev/null +++ b/scripts/gdb/linux/__init__.py | |||
@@ -0,0 +1 @@ | |||
# nothing to do for the initialization of this package | |||
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 1684bcc78b34..5fde34326dcf 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, | |||
152 | goto out; | 152 | goto out; |
153 | 153 | ||
154 | /* No partial writes. */ | 154 | /* No partial writes. */ |
155 | length = EINVAL; | 155 | length = -EINVAL; |
156 | if (*ppos != 0) | 156 | if (*ppos != 0) |
157 | goto out; | 157 | goto out; |
158 | 158 | ||
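The selinuxfs one-liner restores the convention that write handlers signal failure with a negative errno; plain EINVAL would have been returned as a positive byte count and treated as success. A trivial standalone illustration (handle_write is a made-up name):

#include <errno.h>
#include <stdio.h>

/* A write-style handler: returns bytes consumed, or a negative errno. */
static long handle_write(long pos, long count)
{
	if (pos != 0)
		return -EINVAL;		/* no partial writes */
	return count;			/* pretend everything was consumed */
}

int main(void)
{
	printf("%ld %ld\n", handle_write(0, 16), handle_write(4, 16));
	return 0;
}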
diff --git a/sound/core/control.c b/sound/core/control.c index 35324a8e83c8..eeb691d1911f 100644 --- a/sound/core/control.c +++ b/sound/core/control.c | |||
@@ -1170,6 +1170,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, | |||
1170 | 1170 | ||
1171 | if (info->count < 1) | 1171 | if (info->count < 1) |
1172 | return -EINVAL; | 1172 | return -EINVAL; |
1173 | if (!*info->id.name) | ||
1174 | return -EINVAL; | ||
1175 | if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name)) | ||
1176 | return -EINVAL; | ||
1173 | access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : | 1177 | access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : |
1174 | (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| | 1178 | (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| |
1175 | SNDRV_CTL_ELEM_ACCESS_INACTIVE| | 1179 | SNDRV_CTL_ELEM_ACCESS_INACTIVE| |
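The snd_ctl_elem_add() checks reject an empty control name and one that fills the fixed-size field with no room for a terminating NUL, using strnlen() so an unterminated buffer is never over-read. An equivalent standalone check, with NAME_LEN standing in for the fixed name field size:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

#define NAME_LEN 8			/* stand-in for the fixed name field */

/* Accept only a non-empty, NUL-terminated name that fits the field. */
static int name_is_valid(const char name[NAME_LEN])
{
	if (!*name)
		return 0;		/* empty */
	if (strnlen(name, NAME_LEN) >= NAME_LEN)
		return 0;		/* unterminated or too long */
	return 1;
}

int main(void)
{
	char ok[NAME_LEN]   = "Master";
	char bad[NAME_LEN]  = { 'V', 'o', 'l', 'u', 'm', 'e', 'X', 'Y' }; /* no NUL */
	char none[NAME_LEN] = "";

	printf("%d %d %d\n", name_is_valid(ok), name_is_valid(bad),
	       name_is_valid(none));
	return 0;
}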
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index b03a638b420c..279e24f61305 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -1552,6 +1552,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state) | |||
1552 | if (! snd_pcm_playback_empty(substream)) { | 1552 | if (! snd_pcm_playback_empty(substream)) { |
1553 | snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING); | 1553 | snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING); |
1554 | snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING); | 1554 | snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING); |
1555 | } else { | ||
1556 | runtime->status->state = SNDRV_PCM_STATE_SETUP; | ||
1555 | } | 1557 | } |
1556 | break; | 1558 | break; |
1557 | case SNDRV_PCM_STATE_RUNNING: | 1559 | case SNDRV_PCM_STATE_RUNNING: |
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c index f62780ed64ad..7821b07415a7 100644 --- a/sound/drivers/opl3/opl3_midi.c +++ b/sound/drivers/opl3/opl3_midi.c | |||
@@ -105,6 +105,8 @@ static void snd_opl3_calc_pitch(unsigned char *fnum, unsigned char *blocknum, | |||
105 | int pitchbend = chan->midi_pitchbend; | 105 | int pitchbend = chan->midi_pitchbend; |
106 | int segment; | 106 | int segment; |
107 | 107 | ||
108 | if (pitchbend < -0x2000) | ||
109 | pitchbend = -0x2000; | ||
108 | if (pitchbend > 0x1FFF) | 110 | if (pitchbend > 0x1FFF) |
109 | pitchbend = 0x1FFF; | 111 | pitchbend = 0x1FFF; |
110 | 112 | ||
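The opl3 fix clamps the pitch-bend value on the low side as well as the high side, keeping a signed 14-bit value within [-0x2000, 0x1FFF] before it is used further. The same clamp in isolation:

#include <stdio.h>

/* Clamp a MIDI pitch-bend value to the signed 14-bit range. */
static int clamp_pitchbend(int pb)
{
	if (pb < -0x2000)
		pb = -0x2000;
	if (pb > 0x1FFF)
		pb = 0x1FFF;
	return pb;
}

int main(void)
{
	printf("%d %d %d\n", clamp_pitchbend(-0x3000),
	       clamp_pitchbend(0), clamp_pitchbend(0x2345));
	return 0;
}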
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c index 0d580186ef1a..5cc356db5351 100644 --- a/sound/firewire/amdtp.c +++ b/sound/firewire/amdtp.c | |||
@@ -33,7 +33,7 @@ | |||
33 | */ | 33 | */ |
34 | #define MAX_MIDI_RX_BLOCKS 8 | 34 | #define MAX_MIDI_RX_BLOCKS 8 |
35 | 35 | ||
36 | #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */ | 36 | #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */ |
37 | 37 | ||
38 | /* isochronous header parameters */ | 38 | /* isochronous header parameters */ |
39 | #define ISO_DATA_LENGTH_SHIFT 16 | 39 | #define ISO_DATA_LENGTH_SHIFT 16 |
@@ -78,7 +78,7 @@ static void pcm_period_tasklet(unsigned long data); | |||
78 | int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, | 78 | int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, |
79 | enum amdtp_stream_direction dir, enum cip_flags flags) | 79 | enum amdtp_stream_direction dir, enum cip_flags flags) |
80 | { | 80 | { |
81 | s->unit = fw_unit_get(unit); | 81 | s->unit = unit; |
82 | s->direction = dir; | 82 | s->direction = dir; |
83 | s->flags = flags; | 83 | s->flags = flags; |
84 | s->context = ERR_PTR(-1); | 84 | s->context = ERR_PTR(-1); |
@@ -102,7 +102,6 @@ void amdtp_stream_destroy(struct amdtp_stream *s) | |||
102 | { | 102 | { |
103 | WARN_ON(amdtp_stream_running(s)); | 103 | WARN_ON(amdtp_stream_running(s)); |
104 | mutex_destroy(&s->mutex); | 104 | mutex_destroy(&s->mutex); |
105 | fw_unit_put(s->unit); | ||
106 | } | 105 | } |
107 | EXPORT_SYMBOL(amdtp_stream_destroy); | 106 | EXPORT_SYMBOL(amdtp_stream_destroy); |
108 | 107 | ||
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c index fc19c99654aa..611b7dae7ee5 100644 --- a/sound/firewire/bebob/bebob.c +++ b/sound/firewire/bebob/bebob.c | |||
@@ -116,11 +116,22 @@ end: | |||
116 | return err; | 116 | return err; |
117 | } | 117 | } |
118 | 118 | ||
119 | /* | ||
120 | * This module releases the FireWire unit data after all ALSA character devices | ||
121 | * are released by applications. This is for releasing stream data or finishing | ||
122 | * transactions safely. Thus, when returning from .remove(), this module still | ||
123 | * keeps references to the unit. | ||
124 | */ | ||
119 | static void | 125 | static void |
120 | bebob_card_free(struct snd_card *card) | 126 | bebob_card_free(struct snd_card *card) |
121 | { | 127 | { |
122 | struct snd_bebob *bebob = card->private_data; | 128 | struct snd_bebob *bebob = card->private_data; |
123 | 129 | ||
130 | snd_bebob_stream_destroy_duplex(bebob); | ||
131 | fw_unit_put(bebob->unit); | ||
132 | |||
133 | kfree(bebob->maudio_special_quirk); | ||
134 | |||
124 | if (bebob->card_index >= 0) { | 135 | if (bebob->card_index >= 0) { |
125 | mutex_lock(&devices_mutex); | 136 | mutex_lock(&devices_mutex); |
126 | clear_bit(bebob->card_index, devices_used); | 137 | clear_bit(bebob->card_index, devices_used); |
@@ -205,7 +216,7 @@ bebob_probe(struct fw_unit *unit, | |||
205 | card->private_free = bebob_card_free; | 216 | card->private_free = bebob_card_free; |
206 | 217 | ||
207 | bebob->card = card; | 218 | bebob->card = card; |
208 | bebob->unit = unit; | 219 | bebob->unit = fw_unit_get(unit); |
209 | bebob->spec = spec; | 220 | bebob->spec = spec; |
210 | mutex_init(&bebob->mutex); | 221 | mutex_init(&bebob->mutex); |
211 | spin_lock_init(&bebob->lock); | 222 | spin_lock_init(&bebob->lock); |
@@ -306,10 +317,11 @@ static void bebob_remove(struct fw_unit *unit) | |||
306 | if (bebob == NULL) | 317 | if (bebob == NULL) |
307 | return; | 318 | return; |
308 | 319 | ||
309 | kfree(bebob->maudio_special_quirk); | 320 | /* Awake bus-reset waiters. */ |
321 | if (!completion_done(&bebob->bus_reset)) | ||
322 | complete_all(&bebob->bus_reset); | ||
310 | 323 | ||
311 | snd_bebob_stream_destroy_duplex(bebob); | 324 | /* No need to wait for releasing card object in this context. */ |
312 | snd_card_disconnect(bebob->card); | ||
313 | snd_card_free_when_closed(bebob->card); | 325 | snd_card_free_when_closed(bebob->card); |
314 | } | 326 | } |
315 | 327 | ||
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c index a422aaa3bb0c..9ee25a63f684 100644 --- a/sound/firewire/bebob/bebob_maudio.c +++ b/sound/firewire/bebob/bebob_maudio.c | |||
@@ -96,10 +96,10 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) | |||
96 | struct fw_device *device = fw_parent_device(unit); | 96 | struct fw_device *device = fw_parent_device(unit); |
97 | int err, rcode; | 97 | int err, rcode; |
98 | u64 date; | 98 | u64 date; |
99 | __be32 cues[3] = { | 99 | __le32 cues[3] = { |
100 | MAUDIO_BOOTLOADER_CUE1, | 100 | cpu_to_le32(MAUDIO_BOOTLOADER_CUE1), |
101 | MAUDIO_BOOTLOADER_CUE2, | 101 | cpu_to_le32(MAUDIO_BOOTLOADER_CUE2), |
102 | MAUDIO_BOOTLOADER_CUE3 | 102 | cpu_to_le32(MAUDIO_BOOTLOADER_CUE3) |
103 | }; | 103 | }; |
104 | 104 | ||
105 | /* check date of software used to build */ | 105 | /* check date of software used to build */ |
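The bebob_maudio change types the bootloader cues as __le32 and converts them with cpu_to_le32(), so the words sent to the device are little-endian on any host. A host-independent way to lay out such a buffer in plain C; the cue values below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Store a 32-bit value into a buffer in little-endian byte order,
 * independent of the host's native endianness. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

int main(void)
{
	const uint32_t cues[3] = { 0x01000000, 0x00001101, 0x00000000 };
	uint8_t wire[sizeof(cues)];
	unsigned int i;

	for (i = 0; i < 3; i++)
		put_le32(wire + 4 * i, cues[i]);

	for (i = 0; i < sizeof(wire); i++)
		printf("%02x ", wire[i]);
	printf("\n");
	return 0;
}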
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c index 0ebcabfdc7ce..98e4fc8121a1 100644 --- a/sound/firewire/bebob/bebob_stream.c +++ b/sound/firewire/bebob/bebob_stream.c | |||
@@ -410,8 +410,6 @@ break_both_connections(struct snd_bebob *bebob) | |||
410 | static void | 410 | static void |
411 | destroy_both_connections(struct snd_bebob *bebob) | 411 | destroy_both_connections(struct snd_bebob *bebob) |
412 | { | 412 | { |
413 | break_both_connections(bebob); | ||
414 | |||
415 | cmp_connection_destroy(&bebob->in_conn); | 413 | cmp_connection_destroy(&bebob->in_conn); |
416 | cmp_connection_destroy(&bebob->out_conn); | 414 | cmp_connection_destroy(&bebob->out_conn); |
417 | } | 415 | } |
@@ -712,22 +710,16 @@ void snd_bebob_stream_update_duplex(struct snd_bebob *bebob) | |||
712 | mutex_unlock(&bebob->mutex); | 710 | mutex_unlock(&bebob->mutex); |
713 | } | 711 | } |
714 | 712 | ||
713 | /* | ||
714 | * This function should be called before starting streams or after stopping | ||
715 | * streams. | ||
716 | */ | ||
715 | void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob) | 717 | void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob) |
716 | { | 718 | { |
717 | mutex_lock(&bebob->mutex); | ||
718 | |||
719 | amdtp_stream_pcm_abort(&bebob->rx_stream); | ||
720 | amdtp_stream_pcm_abort(&bebob->tx_stream); | ||
721 | |||
722 | amdtp_stream_stop(&bebob->rx_stream); | ||
723 | amdtp_stream_stop(&bebob->tx_stream); | ||
724 | |||
725 | amdtp_stream_destroy(&bebob->rx_stream); | 719 | amdtp_stream_destroy(&bebob->rx_stream); |
726 | amdtp_stream_destroy(&bebob->tx_stream); | 720 | amdtp_stream_destroy(&bebob->tx_stream); |
727 | 721 | ||
728 | destroy_both_connections(bebob); | 722 | destroy_both_connections(bebob); |
729 | |||
730 | mutex_unlock(&bebob->mutex); | ||
731 | } | 723 | } |
732 | 724 | ||
733 | /* | 725 | /* |
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c index fa9cf761b610..07dbd01d7a6b 100644 --- a/sound/firewire/dice/dice-stream.c +++ b/sound/firewire/dice/dice-stream.c | |||
@@ -311,14 +311,21 @@ end: | |||
311 | return err; | 311 | return err; |
312 | } | 312 | } |
313 | 313 | ||
314 | /* | ||
315 | * This function should be called before starting streams or after stopping | ||
316 | * streams. | ||
317 | */ | ||
314 | static void destroy_stream(struct snd_dice *dice, struct amdtp_stream *stream) | 318 | static void destroy_stream(struct snd_dice *dice, struct amdtp_stream *stream) |
315 | { | 319 | { |
316 | amdtp_stream_destroy(stream); | 320 | struct fw_iso_resources *resources; |
317 | 321 | ||
318 | if (stream == &dice->tx_stream) | 322 | if (stream == &dice->tx_stream) |
319 | fw_iso_resources_destroy(&dice->tx_resources); | 323 | resources = &dice->tx_resources; |
320 | else | 324 | else |
321 | fw_iso_resources_destroy(&dice->rx_resources); | 325 | resources = &dice->rx_resources; |
326 | |||
327 | amdtp_stream_destroy(stream); | ||
328 | fw_iso_resources_destroy(resources); | ||
322 | } | 329 | } |
323 | 330 | ||
324 | int snd_dice_stream_init_duplex(struct snd_dice *dice) | 331 | int snd_dice_stream_init_duplex(struct snd_dice *dice) |
@@ -332,6 +339,8 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice) | |||
332 | goto end; | 339 | goto end; |
333 | 340 | ||
334 | err = init_stream(dice, &dice->rx_stream); | 341 | err = init_stream(dice, &dice->rx_stream); |
342 | if (err < 0) | ||
343 | destroy_stream(dice, &dice->tx_stream); | ||
335 | end: | 344 | end: |
336 | return err; | 345 | return err; |
337 | } | 346 | } |
@@ -340,10 +349,7 @@ void snd_dice_stream_destroy_duplex(struct snd_dice *dice) | |||
340 | { | 349 | { |
341 | snd_dice_transaction_clear_enable(dice); | 350 | snd_dice_transaction_clear_enable(dice); |
342 | 351 | ||
343 | stop_stream(dice, &dice->tx_stream); | ||
344 | destroy_stream(dice, &dice->tx_stream); | 352 | destroy_stream(dice, &dice->tx_stream); |
345 | |||
346 | stop_stream(dice, &dice->rx_stream); | ||
347 | destroy_stream(dice, &dice->rx_stream); | 353 | destroy_stream(dice, &dice->rx_stream); |
348 | 354 | ||
349 | dice->substreams_counter = 0; | 355 | dice->substreams_counter = 0; |
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c index 90d8f40ff727..70a111d7f428 100644 --- a/sound/firewire/dice/dice.c +++ b/sound/firewire/dice/dice.c | |||
@@ -226,11 +226,20 @@ static void dice_card_strings(struct snd_dice *dice) | |||
226 | strcpy(card->mixername, "DICE"); | 226 | strcpy(card->mixername, "DICE"); |
227 | } | 227 | } |
228 | 228 | ||
229 | /* | ||
230 | * This module releases the FireWire unit data after all ALSA character devices | ||
231 | * are released by applications. This is for releasing stream data or finishing | ||
232 | * transactions safely. Thus, when returning from .remove(), this module still | ||
233 | * keeps references to the unit. | ||
234 | */ | ||
229 | static void dice_card_free(struct snd_card *card) | 235 | static void dice_card_free(struct snd_card *card) |
230 | { | 236 | { |
231 | struct snd_dice *dice = card->private_data; | 237 | struct snd_dice *dice = card->private_data; |
232 | 238 | ||
239 | snd_dice_stream_destroy_duplex(dice); | ||
233 | snd_dice_transaction_destroy(dice); | 240 | snd_dice_transaction_destroy(dice); |
241 | fw_unit_put(dice->unit); | ||
242 | |||
234 | mutex_destroy(&dice->mutex); | 243 | mutex_destroy(&dice->mutex); |
235 | } | 244 | } |
236 | 245 | ||
@@ -251,7 +260,7 @@ static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) | |||
251 | 260 | ||
252 | dice = card->private_data; | 261 | dice = card->private_data; |
253 | dice->card = card; | 262 | dice->card = card; |
254 | dice->unit = unit; | 263 | dice->unit = fw_unit_get(unit); |
255 | card->private_free = dice_card_free; | 264 | card->private_free = dice_card_free; |
256 | 265 | ||
257 | spin_lock_init(&dice->lock); | 266 | spin_lock_init(&dice->lock); |
@@ -305,10 +314,7 @@ static void dice_remove(struct fw_unit *unit) | |||
305 | { | 314 | { |
306 | struct snd_dice *dice = dev_get_drvdata(&unit->device); | 315 | struct snd_dice *dice = dev_get_drvdata(&unit->device); |
307 | 316 | ||
308 | snd_card_disconnect(dice->card); | 317 | /* No need to wait for releasing card object in this context. */ |
309 | |||
310 | snd_dice_stream_destroy_duplex(dice); | ||
311 | |||
312 | snd_card_free_when_closed(dice->card); | 318 | snd_card_free_when_closed(dice->card); |
313 | } | 319 | } |
314 | 320 | ||
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index 3e2ed8e82cbc..2682e7e3e5c9 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c | |||
@@ -173,11 +173,23 @@ end: | |||
173 | return err; | 173 | return err; |
174 | } | 174 | } |
175 | 175 | ||
176 | /* | ||
177 | * This module releases the FireWire unit data after all ALSA character devices | ||
178 | * are released by applications. This is for releasing stream data or finishing | ||
179 | * transactions safely. Thus, when returning from .remove(), this module still | ||
180 | * keeps references to the unit. | ||
181 | */ | ||
176 | static void | 182 | static void |
177 | efw_card_free(struct snd_card *card) | 183 | efw_card_free(struct snd_card *card) |
178 | { | 184 | { |
179 | struct snd_efw *efw = card->private_data; | 185 | struct snd_efw *efw = card->private_data; |
180 | 186 | ||
187 | snd_efw_stream_destroy_duplex(efw); | ||
188 | snd_efw_transaction_remove_instance(efw); | ||
189 | fw_unit_put(efw->unit); | ||
190 | |||
191 | kfree(efw->resp_buf); | ||
192 | |||
181 | if (efw->card_index >= 0) { | 193 | if (efw->card_index >= 0) { |
182 | mutex_lock(&devices_mutex); | 194 | mutex_lock(&devices_mutex); |
183 | clear_bit(efw->card_index, devices_used); | 195 | clear_bit(efw->card_index, devices_used); |
@@ -185,7 +197,6 @@ efw_card_free(struct snd_card *card) | |||
185 | } | 197 | } |
186 | 198 | ||
187 | mutex_destroy(&efw->mutex); | 199 | mutex_destroy(&efw->mutex); |
188 | kfree(efw->resp_buf); | ||
189 | } | 200 | } |
190 | 201 | ||
191 | static int | 202 | static int |
@@ -218,7 +229,7 @@ efw_probe(struct fw_unit *unit, | |||
218 | card->private_free = efw_card_free; | 229 | card->private_free = efw_card_free; |
219 | 230 | ||
220 | efw->card = card; | 231 | efw->card = card; |
221 | efw->unit = unit; | 232 | efw->unit = fw_unit_get(unit); |
222 | mutex_init(&efw->mutex); | 233 | mutex_init(&efw->mutex); |
223 | spin_lock_init(&efw->lock); | 234 | spin_lock_init(&efw->lock); |
224 | init_waitqueue_head(&efw->hwdep_wait); | 235 | init_waitqueue_head(&efw->hwdep_wait); |
@@ -289,10 +300,7 @@ static void efw_remove(struct fw_unit *unit) | |||
289 | { | 300 | { |
290 | struct snd_efw *efw = dev_get_drvdata(&unit->device); | 301 | struct snd_efw *efw = dev_get_drvdata(&unit->device); |
291 | 302 | ||
292 | snd_efw_stream_destroy_duplex(efw); | 303 | /* No need to wait for releasing card object in this context. */ |
293 | snd_efw_transaction_remove_instance(efw); | ||
294 | |||
295 | snd_card_disconnect(efw->card); | ||
296 | snd_card_free_when_closed(efw->card); | 304 | snd_card_free_when_closed(efw->card); |
297 | } | 305 | } |
298 | 306 | ||
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c index 4f440e163667..c55db1bddc80 100644 --- a/sound/firewire/fireworks/fireworks_stream.c +++ b/sound/firewire/fireworks/fireworks_stream.c | |||
@@ -100,17 +100,22 @@ end: | |||
100 | return err; | 100 | return err; |
101 | } | 101 | } |
102 | 102 | ||
103 | /* | ||
104 | * This function should be called before starting streams or after stopping | ||
105 | * streams. | ||
106 | */ | ||
103 | static void | 107 | static void |
104 | destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream) | 108 | destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream) |
105 | { | 109 | { |
106 | stop_stream(efw, stream); | 110 | struct cmp_connection *conn; |
107 | |||
108 | amdtp_stream_destroy(stream); | ||
109 | 111 | ||
110 | if (stream == &efw->tx_stream) | 112 | if (stream == &efw->tx_stream) |
111 | cmp_connection_destroy(&efw->out_conn); | 113 | conn = &efw->out_conn; |
112 | else | 114 | else |
113 | cmp_connection_destroy(&efw->in_conn); | 115 | conn = &efw->in_conn; |
116 | |||
117 | amdtp_stream_destroy(stream); | ||
118 | cmp_connection_destroy(conn); | ||
114 | } | 119 | } |
115 | 120 | ||
116 | static int | 121 | static int |
@@ -319,12 +324,8 @@ void snd_efw_stream_update_duplex(struct snd_efw *efw) | |||
319 | 324 | ||
320 | void snd_efw_stream_destroy_duplex(struct snd_efw *efw) | 325 | void snd_efw_stream_destroy_duplex(struct snd_efw *efw) |
321 | { | 326 | { |
322 | mutex_lock(&efw->mutex); | ||
323 | |||
324 | destroy_stream(efw, &efw->rx_stream); | 327 | destroy_stream(efw, &efw->rx_stream); |
325 | destroy_stream(efw, &efw->tx_stream); | 328 | destroy_stream(efw, &efw->tx_stream); |
326 | |||
327 | mutex_unlock(&efw->mutex); | ||
328 | } | 329 | } |
329 | 330 | ||
330 | void snd_efw_stream_lock_changed(struct snd_efw *efw) | 331 | void snd_efw_stream_lock_changed(struct snd_efw *efw) |
diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c index 5f17b77ee152..f0e4d502d604 100644 --- a/sound/firewire/iso-resources.c +++ b/sound/firewire/iso-resources.c | |||
@@ -26,7 +26,7 @@ | |||
26 | int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit) | 26 | int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit) |
27 | { | 27 | { |
28 | r->channels_mask = ~0uLL; | 28 | r->channels_mask = ~0uLL; |
29 | r->unit = fw_unit_get(unit); | 29 | r->unit = unit; |
30 | mutex_init(&r->mutex); | 30 | mutex_init(&r->mutex); |
31 | r->allocated = false; | 31 | r->allocated = false; |
32 | 32 | ||
@@ -42,7 +42,6 @@ void fw_iso_resources_destroy(struct fw_iso_resources *r) | |||
42 | { | 42 | { |
43 | WARN_ON(r->allocated); | 43 | WARN_ON(r->allocated); |
44 | mutex_destroy(&r->mutex); | 44 | mutex_destroy(&r->mutex); |
45 | fw_unit_put(r->unit); | ||
46 | } | 45 | } |
47 | EXPORT_SYMBOL(fw_iso_resources_destroy); | 46 | EXPORT_SYMBOL(fw_iso_resources_destroy); |
48 | 47 | ||
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c index bda845afb470..e6757cd85724 100644 --- a/sound/firewire/oxfw/oxfw-stream.c +++ b/sound/firewire/oxfw/oxfw-stream.c | |||
@@ -171,9 +171,10 @@ static int start_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream, | |||
171 | } | 171 | } |
172 | 172 | ||
173 | /* Wait first packet */ | 173 | /* Wait first packet */ |
174 | err = amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT); | 174 | if (!amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT)) { |
175 | if (err < 0) | ||
176 | stop_stream(oxfw, stream); | 175 | stop_stream(oxfw, stream); |
176 | err = -ETIMEDOUT; | ||
177 | } | ||
177 | end: | 178 | end: |
178 | return err; | 179 | return err; |
179 | } | 180 | } |
@@ -337,6 +338,10 @@ void snd_oxfw_stream_stop_simplex(struct snd_oxfw *oxfw, | |||
337 | stop_stream(oxfw, stream); | 338 | stop_stream(oxfw, stream); |
338 | } | 339 | } |
339 | 340 | ||
341 | /* | ||
342 | * This function should be called before starting the stream or after stopping | ||
343 | * the stream. | ||
344 | */ | ||
340 | void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw, | 345 | void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw, |
341 | struct amdtp_stream *stream) | 346 | struct amdtp_stream *stream) |
342 | { | 347 | { |
@@ -347,8 +352,6 @@ void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw, | |||
347 | else | 352 | else |
348 | conn = &oxfw->in_conn; | 353 | conn = &oxfw->in_conn; |
349 | 354 | ||
350 | stop_stream(oxfw, stream); | ||
351 | |||
352 | amdtp_stream_destroy(stream); | 355 | amdtp_stream_destroy(stream); |
353 | cmp_connection_destroy(conn); | 356 | cmp_connection_destroy(conn); |
354 | } | 357 | } |
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 60e5cad0531a..8c6ce019f437 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c | |||
@@ -104,11 +104,23 @@ end: | |||
104 | return err; | 104 | return err; |
105 | } | 105 | } |
106 | 106 | ||
107 | /* | ||
108 | * This module releases the FireWire unit data after all ALSA character devices | ||
109 | * are released by applications. This is for releasing stream data or finishing | ||
110 | * transactions safely. Thus, when returning from .remove(), this module still | ||
111 | * keeps references to the unit. | ||
112 | */ | ||
107 | static void oxfw_card_free(struct snd_card *card) | 113 | static void oxfw_card_free(struct snd_card *card) |
108 | { | 114 | { |
109 | struct snd_oxfw *oxfw = card->private_data; | 115 | struct snd_oxfw *oxfw = card->private_data; |
110 | unsigned int i; | 116 | unsigned int i; |
111 | 117 | ||
118 | snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); | ||
119 | if (oxfw->has_output) | ||
120 | snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); | ||
121 | |||
122 | fw_unit_put(oxfw->unit); | ||
123 | |||
112 | for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) { | 124 | for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) { |
113 | kfree(oxfw->tx_stream_formats[i]); | 125 | kfree(oxfw->tx_stream_formats[i]); |
114 | kfree(oxfw->rx_stream_formats[i]); | 126 | kfree(oxfw->rx_stream_formats[i]); |
@@ -136,7 +148,7 @@ static int oxfw_probe(struct fw_unit *unit, | |||
136 | oxfw = card->private_data; | 148 | oxfw = card->private_data; |
137 | oxfw->card = card; | 149 | oxfw->card = card; |
138 | mutex_init(&oxfw->mutex); | 150 | mutex_init(&oxfw->mutex); |
139 | oxfw->unit = unit; | 151 | oxfw->unit = fw_unit_get(unit); |
140 | oxfw->device_info = (const struct device_info *)id->driver_data; | 152 | oxfw->device_info = (const struct device_info *)id->driver_data; |
141 | spin_lock_init(&oxfw->lock); | 153 | spin_lock_init(&oxfw->lock); |
142 | init_waitqueue_head(&oxfw->hwdep_wait); | 154 | init_waitqueue_head(&oxfw->hwdep_wait); |
@@ -212,12 +224,7 @@ static void oxfw_remove(struct fw_unit *unit) | |||
212 | { | 224 | { |
213 | struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device); | 225 | struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device); |
214 | 226 | ||
215 | snd_card_disconnect(oxfw->card); | 227 | /* No need to wait for releasing card object in this context. */ |
216 | |||
217 | snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); | ||
218 | if (oxfw->has_output) | ||
219 | snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); | ||
220 | |||
221 | snd_card_free_when_closed(oxfw->card); | 228 | snd_card_free_when_closed(oxfw->card); |
222 | } | 229 | } |
223 | 230 | ||
diff --git a/sound/isa/msnd/msnd_pinnacle_mixer.c b/sound/isa/msnd/msnd_pinnacle_mixer.c index 17e49a071af4..b408540798c1 100644 --- a/sound/isa/msnd/msnd_pinnacle_mixer.c +++ b/sound/isa/msnd/msnd_pinnacle_mixer.c | |||
@@ -306,11 +306,12 @@ int snd_msndmix_new(struct snd_card *card) | |||
306 | spin_lock_init(&chip->mixer_lock); | 306 | spin_lock_init(&chip->mixer_lock); |
307 | strcpy(card->mixername, "MSND Pinnacle Mixer"); | 307 | strcpy(card->mixername, "MSND Pinnacle Mixer"); |
308 | 308 | ||
309 | for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++) | 309 | for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++) { |
310 | err = snd_ctl_add(card, | 310 | err = snd_ctl_add(card, |
311 | snd_ctl_new1(snd_msnd_controls + idx, chip)); | 311 | snd_ctl_new1(snd_msnd_controls + idx, chip)); |
312 | if (err < 0) | 312 | if (err < 0) |
313 | return err; | 313 | return err; |
314 | } | ||
314 | 315 | ||
315 | return 0; | 316 | return 0; |
316 | } | 317 | } |
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index dfcb5e929f9f..17c2637d842c 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c | |||
@@ -961,7 +961,6 @@ static int azx_alloc_cmd_io(struct azx *chip) | |||
961 | dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n"); | 961 | dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n"); |
962 | return err; | 962 | return err; |
963 | } | 963 | } |
964 | EXPORT_SYMBOL_GPL(azx_alloc_cmd_io); | ||
965 | 964 | ||
966 | static void azx_init_cmd_io(struct azx *chip) | 965 | static void azx_init_cmd_io(struct azx *chip) |
967 | { | 966 | { |
@@ -1026,7 +1025,6 @@ static void azx_init_cmd_io(struct azx *chip) | |||
1026 | azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN); | 1025 | azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN); |
1027 | spin_unlock_irq(&chip->reg_lock); | 1026 | spin_unlock_irq(&chip->reg_lock); |
1028 | } | 1027 | } |
1029 | EXPORT_SYMBOL_GPL(azx_init_cmd_io); | ||
1030 | 1028 | ||
1031 | static void azx_free_cmd_io(struct azx *chip) | 1029 | static void azx_free_cmd_io(struct azx *chip) |
1032 | { | 1030 | { |
@@ -1036,7 +1034,6 @@ static void azx_free_cmd_io(struct azx *chip) | |||
1036 | azx_writeb(chip, CORBCTL, 0); | 1034 | azx_writeb(chip, CORBCTL, 0); |
1037 | spin_unlock_irq(&chip->reg_lock); | 1035 | spin_unlock_irq(&chip->reg_lock); |
1038 | } | 1036 | } |
1039 | EXPORT_SYMBOL_GPL(azx_free_cmd_io); | ||
1040 | 1037 | ||
1041 | static unsigned int azx_command_addr(u32 cmd) | 1038 | static unsigned int azx_command_addr(u32 cmd) |
1042 | { | 1039 | { |
@@ -1167,7 +1164,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, | |||
1167 | } | 1164 | } |
1168 | } | 1165 | } |
1169 | 1166 | ||
1170 | if (!bus->no_response_fallback) | 1167 | if (bus->no_response_fallback) |
1171 | return -1; | 1168 | return -1; |
1172 | 1169 | ||
1173 | if (!chip->polling_mode && chip->poll_count < 2) { | 1170 | if (!chip->polling_mode && chip->poll_count < 2) { |
@@ -1316,7 +1313,6 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val) | |||
1316 | else | 1313 | else |
1317 | return azx_corb_send_cmd(bus, val); | 1314 | return azx_corb_send_cmd(bus, val); |
1318 | } | 1315 | } |
1319 | EXPORT_SYMBOL_GPL(azx_send_cmd); | ||
1320 | 1316 | ||
1321 | /* get a response */ | 1317 | /* get a response */ |
1322 | static unsigned int azx_get_response(struct hda_bus *bus, | 1318 | static unsigned int azx_get_response(struct hda_bus *bus, |
@@ -1330,7 +1326,6 @@ static unsigned int azx_get_response(struct hda_bus *bus, | |||
1330 | else | 1326 | else |
1331 | return azx_rirb_get_response(bus, addr); | 1327 | return azx_rirb_get_response(bus, addr); |
1332 | } | 1328 | } |
1333 | EXPORT_SYMBOL_GPL(azx_get_response); | ||
1334 | 1329 | ||
1335 | #ifdef CONFIG_SND_HDA_DSP_LOADER | 1330 | #ifdef CONFIG_SND_HDA_DSP_LOADER |
1336 | /* | 1331 | /* |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index b680b4ec6331..8ec5289f8e05 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -687,12 +687,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, | |||
687 | return val; | 687 | return val; |
688 | } | 688 | } |
689 | 689 | ||
690 | /* is this a stereo widget or a stereo-to-mono mix? */ | ||
691 | static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir) | ||
692 | { | ||
693 | unsigned int wcaps = get_wcaps(codec, nid); | ||
694 | hda_nid_t conn; | ||
695 | |||
696 | if (wcaps & AC_WCAP_STEREO) | ||
697 | return true; | ||
698 | if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX) | ||
699 | return false; | ||
700 | if (snd_hda_get_num_conns(codec, nid) != 1) | ||
701 | return false; | ||
702 | if (snd_hda_get_connections(codec, nid, &conn, 1) < 0) | ||
703 | return false; | ||
704 | return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO); | ||
705 | } | ||
706 | |||
690 | /* initialize the amp value (only at the first time) */ | 707 | /* initialize the amp value (only at the first time) */ |
691 | static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) | 708 | static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) |
692 | { | 709 | { |
693 | unsigned int caps = query_amp_caps(codec, nid, dir); | 710 | unsigned int caps = query_amp_caps(codec, nid, dir); |
694 | int val = get_amp_val_to_activate(codec, nid, dir, caps, false); | 711 | int val = get_amp_val_to_activate(codec, nid, dir, caps, false); |
695 | snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); | 712 | |
713 | if (is_stereo_amps(codec, nid, dir)) | ||
714 | snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); | ||
715 | else | ||
716 | snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val); | ||
717 | } | ||
718 | |||
719 | /* update the amp, doing in stereo or mono depending on NID */ | ||
720 | static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, | ||
721 | unsigned int mask, unsigned int val) | ||
722 | { | ||
723 | if (is_stereo_amps(codec, nid, dir)) | ||
724 | return snd_hda_codec_amp_stereo(codec, nid, dir, idx, | ||
725 | mask, val); | ||
726 | else | ||
727 | return snd_hda_codec_amp_update(codec, nid, 0, dir, idx, | ||
728 | mask, val); | ||
696 | } | 729 | } |
697 | 730 | ||
698 | /* calculate amp value mask we can modify; | 731 | /* calculate amp value mask we can modify; |
@@ -732,7 +765,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir, | |||
732 | return; | 765 | return; |
733 | 766 | ||
734 | val &= mask; | 767 | val &= mask; |
735 | snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val); | 768 | update_amp(codec, nid, dir, idx, mask, val); |
736 | } | 769 | } |
737 | 770 | ||
738 | static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, | 771 | static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, |
@@ -4424,13 +4457,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix) | |||
4424 | has_amp = nid_has_mute(codec, mix, HDA_INPUT); | 4457 | has_amp = nid_has_mute(codec, mix, HDA_INPUT); |
4425 | for (i = 0; i < nums; i++) { | 4458 | for (i = 0; i < nums; i++) { |
4426 | if (has_amp) | 4459 | if (has_amp) |
4427 | snd_hda_codec_amp_stereo(codec, mix, | 4460 | update_amp(codec, mix, HDA_INPUT, i, |
4428 | HDA_INPUT, i, | 4461 | 0xff, HDA_AMP_MUTE); |
4429 | 0xff, HDA_AMP_MUTE); | ||
4430 | else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) | 4462 | else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) |
4431 | snd_hda_codec_amp_stereo(codec, conn[i], | 4463 | update_amp(codec, conn[i], HDA_OUTPUT, 0, |
4432 | HDA_OUTPUT, 0, | 4464 | 0xff, HDA_AMP_MUTE); |
4433 | 0xff, HDA_AMP_MUTE); | ||
4434 | } | 4465 | } |
4435 | } | 4466 | } |
4436 | 4467 | ||
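The hda_generic helpers above decide per widget whether an amp update touches one channel or two, treating a mono mixer fed by a single stereo input as stereo. A stripped-down sketch of that predicate-driven dispatch; the structure and field names are hypothetical, not the HDA driver's:

#include <stdbool.h>
#include <stdio.h>

struct widget {
	bool stereo_caps;		/* widget itself advertises two channels */
	bool mono_mix_of_stereo;	/* mono mixer with one stereo input */
	int  amp[2];			/* left/right amplifier values */
};

static bool is_stereo_amps(const struct widget *w)
{
	return w->stereo_caps || w->mono_mix_of_stereo;
}

/* Update both channels for stereo widgets, only channel 0 otherwise. */
static void update_amp(struct widget *w, int val)
{
	w->amp[0] = val;
	if (is_stereo_amps(w))
		w->amp[1] = val;
}

int main(void)
{
	struct widget mono = { false, false, { 0, 0 } };
	struct widget mix  = { false, true,  { 0, 0 } };

	update_amp(&mono, 7);
	update_amp(&mix, 7);
	printf("mono: %d/%d  mix: %d/%d\n",
	       mono.amp[0], mono.amp[1], mix.amp[0], mix.amp[1]);
	return 0;
}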
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 36d2f20db7a4..a8a1e14272a1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -1966,7 +1966,7 @@ static const struct pci_device_id azx_ids[] = { | |||
1966 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM }, | 1966 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM }, |
1967 | /* Panther Point */ | 1967 | /* Panther Point */ |
1968 | { PCI_DEVICE(0x8086, 0x1e20), | 1968 | { PCI_DEVICE(0x8086, 0x1e20), |
1969 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, | 1969 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM }, |
1970 | /* Lynx Point */ | 1970 | /* Lynx Point */ |
1971 | { PCI_DEVICE(0x8086, 0x8c20), | 1971 | { PCI_DEVICE(0x8086, 0x8c20), |
1972 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, | 1972 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = { | |||
1989 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, | 1989 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
1990 | /* Sunrise Point */ | 1990 | /* Sunrise Point */ |
1991 | { PCI_DEVICE(0x8086, 0xa170), | 1991 | { PCI_DEVICE(0x8086, 0xa170), |
1992 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, | 1992 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
1993 | /* Sunrise Point-LP */ | 1993 | /* Sunrise Point-LP */ |
1994 | { PCI_DEVICE(0x8086, 0x9d70), | 1994 | { PCI_DEVICE(0x8086, 0x9d70), |
1995 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | 1995 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c index ce5a6da83419..05e19f78b4cb 100644 --- a/sound/pci/hda/hda_proc.c +++ b/sound/pci/hda/hda_proc.c | |||
@@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer, | |||
134 | (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT); | 134 | (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT); |
135 | } | 135 | } |
136 | 136 | ||
137 | /* is this a stereo widget or a stereo-to-mono mix? */ | ||
138 | static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, | ||
139 | int dir, unsigned int wcaps, int indices) | ||
140 | { | ||
141 | hda_nid_t conn; | ||
142 | |||
143 | if (wcaps & AC_WCAP_STEREO) | ||
144 | return true; | ||
145 | /* check for a stereo-to-mono mix; it must be: | ||
146 | * only a single connection, only for input, and only a mixer widget | ||
147 | */ | ||
148 | if (indices != 1 || dir != HDA_INPUT || | ||
149 | get_wcaps_type(wcaps) != AC_WID_AUD_MIX) | ||
150 | return false; | ||
151 | |||
152 | if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0) | ||
153 | return false; | ||
154 | /* the connection source is a stereo? */ | ||
155 | wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP); | ||
156 | return !!(wcaps & AC_WCAP_STEREO); | ||
157 | } | ||
158 | |||
137 | static void print_amp_vals(struct snd_info_buffer *buffer, | 159 | static void print_amp_vals(struct snd_info_buffer *buffer, |
138 | struct hda_codec *codec, hda_nid_t nid, | 160 | struct hda_codec *codec, hda_nid_t nid, |
139 | int dir, int stereo, int indices) | 161 | int dir, unsigned int wcaps, int indices) |
140 | { | 162 | { |
141 | unsigned int val; | 163 | unsigned int val; |
164 | bool stereo; | ||
142 | int i; | 165 | int i; |
143 | 166 | ||
167 | stereo = is_stereo_amps(codec, nid, dir, wcaps, indices); | ||
168 | |||
144 | dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT; | 169 | dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT; |
145 | for (i = 0; i < indices; i++) { | 170 | for (i = 0; i < indices; i++) { |
146 | snd_iprintf(buffer, " ["); | 171 | snd_iprintf(buffer, " ["); |
@@ -757,12 +782,10 @@ static void print_codec_info(struct snd_info_entry *entry, | |||
757 | (codec->single_adc_amp && | 782 | (codec->single_adc_amp && |
758 | wid_type == AC_WID_AUD_IN)) | 783 | wid_type == AC_WID_AUD_IN)) |
759 | print_amp_vals(buffer, codec, nid, HDA_INPUT, | 784 | print_amp_vals(buffer, codec, nid, HDA_INPUT, |
760 | wid_caps & AC_WCAP_STEREO, | 785 | wid_caps, 1); |
761 | 1); | ||
762 | else | 786 | else |
763 | print_amp_vals(buffer, codec, nid, HDA_INPUT, | 787 | print_amp_vals(buffer, codec, nid, HDA_INPUT, |
764 | wid_caps & AC_WCAP_STEREO, | 788 | wid_caps, conn_len); |
765 | conn_len); | ||
766 | } | 789 | } |
767 | if (wid_caps & AC_WCAP_OUT_AMP) { | 790 | if (wid_caps & AC_WCAP_OUT_AMP) { |
768 | snd_iprintf(buffer, " Amp-Out caps: "); | 791 | snd_iprintf(buffer, " Amp-Out caps: "); |
@@ -771,11 +794,10 @@ static void print_codec_info(struct snd_info_entry *entry, | |||
771 | if (wid_type == AC_WID_PIN && | 794 | if (wid_type == AC_WID_PIN && |
772 | codec->pin_amp_workaround) | 795 | codec->pin_amp_workaround) |
773 | print_amp_vals(buffer, codec, nid, HDA_OUTPUT, | 796 | print_amp_vals(buffer, codec, nid, HDA_OUTPUT, |
774 | wid_caps & AC_WCAP_STEREO, | 797 | wid_caps, conn_len); |
775 | conn_len); | ||
776 | else | 798 | else |
777 | print_amp_vals(buffer, codec, nid, HDA_OUTPUT, | 799 | print_amp_vals(buffer, codec, nid, HDA_OUTPUT, |
778 | wid_caps & AC_WCAP_STEREO, 1); | 800 | wid_caps, 1); |
779 | } | 801 | } |
780 | 802 | ||
781 | switch (wid_type) { | 803 | switch (wid_type) { |
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 1589c9bcce3e..dd2b3d92071f 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -393,6 +393,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { | |||
393 | SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), | 393 | SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), |
394 | SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), | 394 | SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), |
395 | SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), | 395 | SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), |
396 | SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81), | ||
396 | SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), | 397 | SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), |
397 | SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), | 398 | SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), |
398 | {} /* terminator */ | 399 | {} /* terminator */ |
@@ -584,6 +585,7 @@ static int patch_cs420x(struct hda_codec *codec) | |||
584 | return -ENOMEM; | 585 | return -ENOMEM; |
585 | 586 | ||
586 | spec->gen.automute_hook = cs_automute; | 587 | spec->gen.automute_hook = cs_automute; |
588 | codec->single_adc_amp = 1; | ||
587 | 589 | ||
588 | snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, | 590 | snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, |
589 | cs420x_fixups); | 591 | cs420x_fixups); |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index fd3ed18670e9..da67ea8645a6 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -223,6 +223,7 @@ enum { | |||
223 | CXT_PINCFG_LENOVO_TP410, | 223 | CXT_PINCFG_LENOVO_TP410, |
224 | CXT_PINCFG_LEMOTE_A1004, | 224 | CXT_PINCFG_LEMOTE_A1004, |
225 | CXT_PINCFG_LEMOTE_A1205, | 225 | CXT_PINCFG_LEMOTE_A1205, |
226 | CXT_PINCFG_COMPAQ_CQ60, | ||
226 | CXT_FIXUP_STEREO_DMIC, | 227 | CXT_FIXUP_STEREO_DMIC, |
227 | CXT_FIXUP_INC_MIC_BOOST, | 228 | CXT_FIXUP_INC_MIC_BOOST, |
228 | CXT_FIXUP_HEADPHONE_MIC_PIN, | 229 | CXT_FIXUP_HEADPHONE_MIC_PIN, |
@@ -660,6 +661,15 @@ static const struct hda_fixup cxt_fixups[] = { | |||
660 | .type = HDA_FIXUP_PINS, | 661 | .type = HDA_FIXUP_PINS, |
661 | .v.pins = cxt_pincfg_lemote, | 662 | .v.pins = cxt_pincfg_lemote, |
662 | }, | 663 | }, |
664 | [CXT_PINCFG_COMPAQ_CQ60] = { | ||
665 | .type = HDA_FIXUP_PINS, | ||
666 | .v.pins = (const struct hda_pintbl[]) { | ||
667 | /* 0x17 was falsely set up as a mic, it should be 0x1d */ | ||
668 | { 0x17, 0x400001f0 }, | ||
669 | { 0x1d, 0x97a70120 }, | ||
670 | { } | ||
671 | } | ||
672 | }, | ||
663 | [CXT_FIXUP_STEREO_DMIC] = { | 673 | [CXT_FIXUP_STEREO_DMIC] = { |
664 | .type = HDA_FIXUP_FUNC, | 674 | .type = HDA_FIXUP_FUNC, |
665 | .v.func = cxt_fixup_stereo_dmic, | 675 | .v.func = cxt_fixup_stereo_dmic, |
@@ -769,6 +779,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = { | |||
769 | }; | 779 | }; |
770 | 780 | ||
771 | static const struct snd_pci_quirk cxt5051_fixups[] = { | 781 | static const struct snd_pci_quirk cxt5051_fixups[] = { |
782 | SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60), | ||
772 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), | 783 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), |
773 | {} | 784 | {} |
774 | }; | 785 | }; |
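The CQ60 entry above is an HDA_FIXUP_PINS fixup: it overrides the BIOS-provided "configuration default" of two pin widgets, marking NID 0x17 as unused (0x400001f0) and NID 0x1d as a built-in mic (0x97a70120). Each 32-bit value packs connectivity, default device, association and sequence fields as defined by the HD Audio specification. A small stand-alone decoder for those two values follows, for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Decode a few fields of an HDA "configuration default" pin value,
 * using the bit layout from the HD Audio specification. */
static void decode_pincfg(uint32_t cfg)
{
    unsigned conn  = (cfg >> 30) & 0x3;  /* 0 jack, 1 none, 2 fixed, 3 both */
    unsigned dev   = (cfg >> 20) & 0xf;  /* 0xa is "Mic In" */
    unsigned assoc = (cfg >> 4) & 0xf;
    unsigned seq   = cfg & 0xf;

    printf("0x%08x: connectivity=%u device=0x%x assoc=%u seq=%u\n",
           (unsigned)cfg, conn, dev, assoc, seq);
}

int main(void)
{
    decode_pincfg(0x400001f0); /* connectivity=1: pin marked unused */
    decode_pincfg(0x97a70120); /* connectivity=2, device=0xa: built-in mic */
    return 0;
}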
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b2b24a8b3dac..f9d12c0a7e5a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on) | |||
396 | { | 396 | { |
397 | /* We currently only handle front, HP */ | 397 | /* We currently only handle front, HP */ |
398 | static hda_nid_t pins[] = { | 398 | static hda_nid_t pins[] = { |
399 | 0x0f, 0x10, 0x14, 0x15, 0 | 399 | 0x0f, 0x10, 0x14, 0x15, 0x17, 0 |
400 | }; | 400 | }; |
401 | hda_nid_t *p; | 401 | hda_nid_t *p; |
402 | for (p = pins; *p; p++) | 402 | for (p = pins; *p; p++) |
@@ -2912,6 +2912,8 @@ static void alc283_init(struct hda_codec *codec) | |||
2912 | 2912 | ||
2913 | if (!hp_pin) | 2913 | if (!hp_pin) |
2914 | return; | 2914 | return; |
2915 | |||
2916 | msleep(30); | ||
2915 | hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); | 2917 | hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); |
2916 | 2918 | ||
2917 | /* Index 0x43 Direct Drive HP AMP LPM Control 1 */ | 2919 | /* Index 0x43 Direct Drive HP AMP LPM Control 1 */ |
@@ -3607,6 +3609,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) | |||
3607 | 3609 | ||
3608 | switch (codec->vendor_id) { | 3610 | switch (codec->vendor_id) { |
3609 | case 0x10ec0255: | 3611 | case 0x10ec0255: |
3612 | case 0x10ec0256: | ||
3610 | alc_process_coef_fw(codec, coef0255); | 3613 | alc_process_coef_fw(codec, coef0255); |
3611 | break; | 3614 | break; |
3612 | case 0x10ec0233: | 3615 | case 0x10ec0233: |
@@ -3662,6 +3665,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, | |||
3662 | 3665 | ||
3663 | switch (codec->vendor_id) { | 3666 | switch (codec->vendor_id) { |
3664 | case 0x10ec0255: | 3667 | case 0x10ec0255: |
3668 | case 0x10ec0256: | ||
3665 | alc_write_coef_idx(codec, 0x45, 0xc489); | 3669 | alc_write_coef_idx(codec, 0x45, 0xc489); |
3666 | snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); | 3670 | snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); |
3667 | alc_process_coef_fw(codec, coef0255); | 3671 | alc_process_coef_fw(codec, coef0255); |
@@ -3731,6 +3735,7 @@ static void alc_headset_mode_default(struct hda_codec *codec) | |||
3731 | 3735 | ||
3732 | switch (codec->vendor_id) { | 3736 | switch (codec->vendor_id) { |
3733 | case 0x10ec0255: | 3737 | case 0x10ec0255: |
3738 | case 0x10ec0256: | ||
3734 | alc_process_coef_fw(codec, coef0255); | 3739 | alc_process_coef_fw(codec, coef0255); |
3735 | break; | 3740 | break; |
3736 | case 0x10ec0233: | 3741 | case 0x10ec0233: |
@@ -3785,6 +3790,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) | |||
3785 | 3790 | ||
3786 | switch (codec->vendor_id) { | 3791 | switch (codec->vendor_id) { |
3787 | case 0x10ec0255: | 3792 | case 0x10ec0255: |
3793 | case 0x10ec0256: | ||
3788 | alc_process_coef_fw(codec, coef0255); | 3794 | alc_process_coef_fw(codec, coef0255); |
3789 | break; | 3795 | break; |
3790 | case 0x10ec0233: | 3796 | case 0x10ec0233: |
@@ -3839,6 +3845,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) | |||
3839 | 3845 | ||
3840 | switch (codec->vendor_id) { | 3846 | switch (codec->vendor_id) { |
3841 | case 0x10ec0255: | 3847 | case 0x10ec0255: |
3848 | case 0x10ec0256: | ||
3842 | alc_process_coef_fw(codec, coef0255); | 3849 | alc_process_coef_fw(codec, coef0255); |
3843 | break; | 3850 | break; |
3844 | case 0x10ec0233: | 3851 | case 0x10ec0233: |
@@ -3884,6 +3891,7 @@ static void alc_determine_headset_type(struct hda_codec *codec) | |||
3884 | 3891 | ||
3885 | switch (codec->vendor_id) { | 3892 | switch (codec->vendor_id) { |
3886 | case 0x10ec0255: | 3893 | case 0x10ec0255: |
3894 | case 0x10ec0256: | ||
3887 | alc_process_coef_fw(codec, coef0255); | 3895 | alc_process_coef_fw(codec, coef0255); |
3888 | msleep(300); | 3896 | msleep(300); |
3889 | val = alc_read_coef_idx(codec, 0x46); | 3897 | val = alc_read_coef_idx(codec, 0x46); |
@@ -4364,6 +4372,7 @@ enum { | |||
4364 | ALC269_FIXUP_QUANTA_MUTE, | 4372 | ALC269_FIXUP_QUANTA_MUTE, |
4365 | ALC269_FIXUP_LIFEBOOK, | 4373 | ALC269_FIXUP_LIFEBOOK, |
4366 | ALC269_FIXUP_LIFEBOOK_EXTMIC, | 4374 | ALC269_FIXUP_LIFEBOOK_EXTMIC, |
4375 | ALC269_FIXUP_LIFEBOOK_HP_PIN, | ||
4367 | ALC269_FIXUP_AMIC, | 4376 | ALC269_FIXUP_AMIC, |
4368 | ALC269_FIXUP_DMIC, | 4377 | ALC269_FIXUP_DMIC, |
4369 | ALC269VB_FIXUP_AMIC, | 4378 | ALC269VB_FIXUP_AMIC, |
@@ -4517,6 +4526,13 @@ static const struct hda_fixup alc269_fixups[] = { | |||
4517 | { } | 4526 | { } |
4518 | }, | 4527 | }, |
4519 | }, | 4528 | }, |
4529 | [ALC269_FIXUP_LIFEBOOK_HP_PIN] = { | ||
4530 | .type = HDA_FIXUP_PINS, | ||
4531 | .v.pins = (const struct hda_pintbl[]) { | ||
4532 | { 0x21, 0x0221102f }, /* HP out */ | ||
4533 | { } | ||
4534 | }, | ||
4535 | }, | ||
4520 | [ALC269_FIXUP_AMIC] = { | 4536 | [ALC269_FIXUP_AMIC] = { |
4521 | .type = HDA_FIXUP_PINS, | 4537 | .type = HDA_FIXUP_PINS, |
4522 | .v.pins = (const struct hda_pintbl[]) { | 4538 | .v.pins = (const struct hda_pintbl[]) { |
@@ -5010,6 +5026,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5010 | SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), | 5026 | SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), |
5011 | SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), | 5027 | SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), |
5012 | SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), | 5028 | SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), |
5029 | SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), | ||
5013 | SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), | 5030 | SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), |
5014 | SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), | 5031 | SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), |
5015 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC), | 5032 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC), |
@@ -5036,6 +5053,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5036 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), | 5053 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), |
5037 | SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), | 5054 | SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), |
5038 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5055 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
5056 | SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), | ||
5039 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5057 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
5040 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), | 5058 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), |
5041 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), | 5059 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), |
@@ -5209,6 +5227,23 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
5209 | {0x17, 0x40000000}, | 5227 | {0x17, 0x40000000}, |
5210 | {0x1d, 0x40700001}, | 5228 | {0x1d, 0x40700001}, |
5211 | {0x21, 0x02211040}), | 5229 | {0x21, 0x02211040}), |
5230 | SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
5231 | ALC255_STANDARD_PINS, | ||
5232 | {0x12, 0x90a60170}, | ||
5233 | {0x14, 0x90170140}, | ||
5234 | {0x17, 0x40000000}, | ||
5235 | {0x1d, 0x40700001}, | ||
5236 | {0x21, 0x02211050}), | ||
5237 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
5238 | {0x12, 0x90a60140}, | ||
5239 | {0x13, 0x40000000}, | ||
5240 | {0x14, 0x90170110}, | ||
5241 | {0x19, 0x411111f0}, | ||
5242 | {0x1a, 0x411111f0}, | ||
5243 | {0x1b, 0x411111f0}, | ||
5244 | {0x1d, 0x40700001}, | ||
5245 | {0x1e, 0x411111f0}, | ||
5246 | {0x21, 0x02211020}), | ||
5212 | SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, | 5247 | SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, |
5213 | {0x12, 0x90a60130}, | 5248 | {0x12, 0x90a60130}, |
5214 | {0x13, 0x40000000}, | 5249 | {0x13, 0x40000000}, |
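The SND_HDA_PIN_QUIRK entries added above match a board not only by codec and PCI subsystem vendor but by its complete set of pin default values, so boards that share a subsystem ID but are wired differently can still receive different fixups. The sketch below models that matching step in stand-alone C; struct pin, pins_match and fake_read are illustrative names, not the kernel's implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A board matches only if every listed NID reads back the expected
 * pin default value. */
struct pin {
    uint8_t  nid;
    uint32_t cfg;
};

static bool pins_match(const struct pin *want, size_t n,
                       uint32_t (*read_cfg)(uint8_t nid))
{
    size_t i;

    for (i = 0; i < n; i++)
        if (read_cfg(want[i].nid) != want[i].cfg)
            return false;
    return true;
}

/* Fake codec read-back, for demonstration purposes only. */
static uint32_t fake_read(uint8_t nid)
{
    return nid == 0x12 ? 0x90a60140 : 0x02211020;
}

int main(void)
{
    const struct pin dell_like[] = {
        { 0x12, 0x90a60140 },
        { 0x21, 0x02211020 },
    };

    printf("matched: %d\n", pins_match(dell_like, 2, fake_read));
    return 0;
}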
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 6d36c5b78805..87eff3173ce9 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -79,6 +79,7 @@ enum { | |||
79 | STAC_ALIENWARE_M17X, | 79 | STAC_ALIENWARE_M17X, |
80 | STAC_92HD89XX_HP_FRONT_JACK, | 80 | STAC_92HD89XX_HP_FRONT_JACK, |
81 | STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK, | 81 | STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK, |
82 | STAC_92HD73XX_ASUS_MOBO, | ||
82 | STAC_92HD73XX_MODELS | 83 | STAC_92HD73XX_MODELS |
83 | }; | 84 | }; |
84 | 85 | ||
@@ -1911,7 +1912,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = { | |||
1911 | [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = { | 1912 | [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = { |
1912 | .type = HDA_FIXUP_PINS, | 1913 | .type = HDA_FIXUP_PINS, |
1913 | .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs, | 1914 | .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs, |
1914 | } | 1915 | }, |
1916 | [STAC_92HD73XX_ASUS_MOBO] = { | ||
1917 | .type = HDA_FIXUP_PINS, | ||
1918 | .v.pins = (const struct hda_pintbl[]) { | ||
1919 | /* enable 5.1 and SPDIF out */ | ||
1920 | { 0x0c, 0x01014411 }, | ||
1921 | { 0x0d, 0x01014410 }, | ||
1922 | { 0x0e, 0x01014412 }, | ||
1923 | { 0x22, 0x014b1180 }, | ||
1924 | { } | ||
1925 | } | ||
1926 | }, | ||
1915 | }; | 1927 | }; |
1916 | 1928 | ||
1917 | static const struct hda_model_fixup stac92hd73xx_models[] = { | 1929 | static const struct hda_model_fixup stac92hd73xx_models[] = { |
@@ -1923,6 +1935,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = { | |||
1923 | { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" }, | 1935 | { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" }, |
1924 | { .id = STAC_DELL_EQ, .name = "dell-eq" }, | 1936 | { .id = STAC_DELL_EQ, .name = "dell-eq" }, |
1925 | { .id = STAC_ALIENWARE_M17X, .name = "alienware" }, | 1937 | { .id = STAC_ALIENWARE_M17X, .name = "alienware" }, |
1938 | { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" }, | ||
1926 | {} | 1939 | {} |
1927 | }; | 1940 | }; |
1928 | 1941 | ||
@@ -1975,6 +1988,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = { | |||
1975 | "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK), | 1988 | "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK), |
1976 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17, | 1989 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17, |
1977 | "unknown HP", STAC_92HD89XX_HP_FRONT_JACK), | 1990 | "unknown HP", STAC_92HD89XX_HP_FRONT_JACK), |
1991 | SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10", | ||
1992 | STAC_92HD73XX_ASUS_MOBO), | ||
1978 | {} /* terminator */ | 1993 | {} /* terminator */ |
1979 | }; | 1994 | }; |
1980 | 1995 | ||
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c index f5ad214663f9..8de836165cf2 100644 --- a/sound/soc/atmel/sam9g20_wm8731.c +++ b/sound/soc/atmel/sam9g20_wm8731.c | |||
@@ -46,8 +46,6 @@ | |||
46 | #include <sound/pcm_params.h> | 46 | #include <sound/pcm_params.h> |
47 | #include <sound/soc.h> | 47 | #include <sound/soc.h> |
48 | 48 | ||
49 | #include <asm/mach-types.h> | ||
50 | |||
51 | #include "../codecs/wm8731.h" | 49 | #include "../codecs/wm8731.h" |
52 | #include "atmel-pcm.h" | 50 | #include "atmel-pcm.h" |
53 | #include "atmel_ssc_dai.h" | 51 | #include "atmel_ssc_dai.h" |
@@ -171,9 +169,7 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev) | |||
171 | int ret; | 169 | int ret; |
172 | 170 | ||
173 | if (!np) { | 171 | if (!np) { |
174 | if (!(machine_is_at91sam9g20ek() || | 172 | return -ENODEV; |
175 | machine_is_at91sam9g20ek_2mmc())) | ||
176 | return -ENODEV; | ||
177 | } | 173 | } |
178 | 174 | ||
179 | ret = atmel_ssc_set_audio(0); | 175 | ret = atmel_ssc_set_audio(0); |
@@ -210,39 +206,37 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev) | |||
210 | card->dev = &pdev->dev; | 206 | card->dev = &pdev->dev; |
211 | 207 | ||
212 | /* Parse device node info */ | 208 | /* Parse device node info */ |
213 | if (np) { | 209 | ret = snd_soc_of_parse_card_name(card, "atmel,model"); |
214 | ret = snd_soc_of_parse_card_name(card, "atmel,model"); | 210 | if (ret) |
215 | if (ret) | 211 | goto err; |
216 | goto err; | 212 | |
217 | 213 | ret = snd_soc_of_parse_audio_routing(card, | |
218 | ret = snd_soc_of_parse_audio_routing(card, | 214 | "atmel,audio-routing"); |
219 | "atmel,audio-routing"); | 215 | if (ret) |
220 | if (ret) | 216 | goto err; |
221 | goto err; | 217 | |
222 | 218 | /* Parse codec info */ | |
223 | /* Parse codec info */ | 219 | at91sam9g20ek_dai.codec_name = NULL; |
224 | at91sam9g20ek_dai.codec_name = NULL; | 220 | codec_np = of_parse_phandle(np, "atmel,audio-codec", 0); |
225 | codec_np = of_parse_phandle(np, "atmel,audio-codec", 0); | 221 | if (!codec_np) { |
226 | if (!codec_np) { | 222 | dev_err(&pdev->dev, "codec info missing\n"); |
227 | dev_err(&pdev->dev, "codec info missing\n"); | 223 | return -EINVAL; |
228 | return -EINVAL; | 224 | } |
229 | } | 225 | at91sam9g20ek_dai.codec_of_node = codec_np; |
230 | at91sam9g20ek_dai.codec_of_node = codec_np; | 226 | |
231 | 227 | /* Parse dai and platform info */ | |
232 | /* Parse dai and platform info */ | 228 | at91sam9g20ek_dai.cpu_dai_name = NULL; |
233 | at91sam9g20ek_dai.cpu_dai_name = NULL; | 229 | at91sam9g20ek_dai.platform_name = NULL; |
234 | at91sam9g20ek_dai.platform_name = NULL; | 230 | cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0); |
235 | cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0); | 231 | if (!cpu_np) { |
236 | if (!cpu_np) { | 232 | dev_err(&pdev->dev, "dai and pcm info missing\n"); |
237 | dev_err(&pdev->dev, "dai and pcm info missing\n"); | 233 | return -EINVAL; |
238 | return -EINVAL; | ||
239 | } | ||
240 | at91sam9g20ek_dai.cpu_of_node = cpu_np; | ||
241 | at91sam9g20ek_dai.platform_of_node = cpu_np; | ||
242 | |||
243 | of_node_put(codec_np); | ||
244 | of_node_put(cpu_np); | ||
245 | } | 234 | } |
235 | at91sam9g20ek_dai.cpu_of_node = cpu_np; | ||
236 | at91sam9g20ek_dai.platform_of_node = cpu_np; | ||
237 | |||
238 | of_node_put(codec_np); | ||
239 | of_node_put(cpu_np); | ||
246 | 240 | ||
247 | ret = snd_soc_register_card(card); | 241 | ret = snd_soc_register_card(card); |
248 | if (ret) { | 242 | if (ret) { |
diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig index 7b7fbcd49e5e..c7cd60f009e9 100644 --- a/sound/soc/cirrus/Kconfig +++ b/sound/soc/cirrus/Kconfig | |||
@@ -16,7 +16,7 @@ config SND_EP93XX_SOC_AC97 | |||
16 | 16 | ||
17 | config SND_EP93XX_SOC_SNAPPERCL15 | 17 | config SND_EP93XX_SOC_SNAPPERCL15 |
18 | tristate "SoC Audio support for Bluewater Systems Snapper CL15 module" | 18 | tristate "SoC Audio support for Bluewater Systems Snapper CL15 module" |
19 | depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 | 19 | depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 && I2C |
20 | select SND_EP93XX_SOC_I2S | 20 | select SND_EP93XX_SOC_I2S |
21 | select SND_SOC_TLV320AIC23_I2C | 21 | select SND_SOC_TLV320AIC23_I2C |
22 | help | 22 | help |
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 064e6c18e109..ea9f0e31f9d4 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig | |||
@@ -69,7 +69,7 @@ config SND_SOC_ALL_CODECS | |||
69 | select SND_SOC_MAX98088 if I2C | 69 | select SND_SOC_MAX98088 if I2C |
70 | select SND_SOC_MAX98090 if I2C | 70 | select SND_SOC_MAX98090 if I2C |
71 | select SND_SOC_MAX98095 if I2C | 71 | select SND_SOC_MAX98095 if I2C |
72 | select SND_SOC_MAX98357A | 72 | select SND_SOC_MAX98357A if GPIOLIB |
73 | select SND_SOC_MAX9850 if I2C | 73 | select SND_SOC_MAX9850 if I2C |
74 | select SND_SOC_MAX9768 if I2C | 74 | select SND_SOC_MAX9768 if I2C |
75 | select SND_SOC_MAX9877 if I2C | 75 | select SND_SOC_MAX9877 if I2C |
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c index b67480f1b1aa..4373ada95648 100644 --- a/sound/soc/codecs/adav80x.c +++ b/sound/soc/codecs/adav80x.c | |||
@@ -317,7 +317,7 @@ static int adav80x_put_deemph(struct snd_kcontrol *kcontrol, | |||
317 | { | 317 | { |
318 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 318 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
319 | struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); | 319 | struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); |
320 | unsigned int deemph = ucontrol->value.enumerated.item[0]; | 320 | unsigned int deemph = ucontrol->value.integer.value[0]; |
321 | 321 | ||
322 | if (deemph > 1) | 322 | if (deemph > 1) |
323 | return -EINVAL; | 323 | return -EINVAL; |
@@ -333,7 +333,7 @@ static int adav80x_get_deemph(struct snd_kcontrol *kcontrol, | |||
333 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 333 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
334 | struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); | 334 | struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); |
335 | 335 | ||
336 | ucontrol->value.enumerated.item[0] = adav80x->deemph; | 336 | ucontrol->value.integer.value[0] = adav80x->deemph; |
337 | return 0; | 337 | return 0; |
338 | }; | 338 | }; |
339 | 339 | ||
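This and the following codec patches switch boolean "De-emphasis" (and similar switch-type) controls from ucontrol->value.enumerated.item[] to ucontrol->value.integer.value[]. The two members alias the same union but have different element types (unsigned int vs long), so on 64-bit big-endian systems a boolean value written through one member reads back wrong through the other. A simplified stand-alone model of that aliasing follows; the union here is a stand-in, not the real snd_ctl_elem_value layout.

#include <stdio.h>

/* Simplified stand-in for the control value union: boolean/integer
 * controls carry long values, enumerated controls carry unsigned int
 * item indices.  Reading the wrong member aliases the same bytes. */
union ctl_value {
    struct { long value[2]; } integer;
    struct { unsigned int item[2]; } enumerated;
};

int main(void)
{
    union ctl_value v = { .integer = { .value = { 1, 0 } } };

    /* On 64-bit big-endian machines the enumerated read prints 0,
     * not 1, which is why a boolean control must be accessed via
     * .integer.value. */
    printf("as enumerated item: %u\n", v.enumerated.item[0]);
    printf("as integer value:   %ld\n", v.integer.value[0]);
    return 0;
}

On little-endian machines the two reads happen to agree, which is why the mismatch only shows up on some architectures.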
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c index 70861c7b1631..81b54a270bd8 100644 --- a/sound/soc/codecs/ak4641.c +++ b/sound/soc/codecs/ak4641.c | |||
@@ -76,7 +76,7 @@ static int ak4641_put_deemph(struct snd_kcontrol *kcontrol, | |||
76 | { | 76 | { |
77 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 77 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
78 | struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); | 78 | struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); |
79 | int deemph = ucontrol->value.enumerated.item[0]; | 79 | int deemph = ucontrol->value.integer.value[0]; |
80 | 80 | ||
81 | if (deemph > 1) | 81 | if (deemph > 1) |
82 | return -EINVAL; | 82 | return -EINVAL; |
@@ -92,7 +92,7 @@ static int ak4641_get_deemph(struct snd_kcontrol *kcontrol, | |||
92 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 92 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
93 | struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); | 93 | struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); |
94 | 94 | ||
95 | ucontrol->value.enumerated.item[0] = ak4641->deemph; | 95 | ucontrol->value.integer.value[0] = ak4641->deemph; |
96 | return 0; | 96 | return 0; |
97 | }; | 97 | }; |
98 | 98 | ||
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c index 632e89f793a7..2a58b1dccd2f 100644 --- a/sound/soc/codecs/ak4671.c +++ b/sound/soc/codecs/ak4671.c | |||
@@ -343,25 +343,25 @@ static const struct snd_soc_dapm_widget ak4671_dapm_widgets[] = { | |||
343 | }; | 343 | }; |
344 | 344 | ||
345 | static const struct snd_soc_dapm_route ak4671_intercon[] = { | 345 | static const struct snd_soc_dapm_route ak4671_intercon[] = { |
346 | {"DAC Left", "NULL", "PMPLL"}, | 346 | {"DAC Left", NULL, "PMPLL"}, |
347 | {"DAC Right", "NULL", "PMPLL"}, | 347 | {"DAC Right", NULL, "PMPLL"}, |
348 | {"ADC Left", "NULL", "PMPLL"}, | 348 | {"ADC Left", NULL, "PMPLL"}, |
349 | {"ADC Right", "NULL", "PMPLL"}, | 349 | {"ADC Right", NULL, "PMPLL"}, |
350 | 350 | ||
351 | /* Outputs */ | 351 | /* Outputs */ |
352 | {"LOUT1", "NULL", "LOUT1 Mixer"}, | 352 | {"LOUT1", NULL, "LOUT1 Mixer"}, |
353 | {"ROUT1", "NULL", "ROUT1 Mixer"}, | 353 | {"ROUT1", NULL, "ROUT1 Mixer"}, |
354 | {"LOUT2", "NULL", "LOUT2 Mix Amp"}, | 354 | {"LOUT2", NULL, "LOUT2 Mix Amp"}, |
355 | {"ROUT2", "NULL", "ROUT2 Mix Amp"}, | 355 | {"ROUT2", NULL, "ROUT2 Mix Amp"}, |
356 | {"LOUT3", "NULL", "LOUT3 Mixer"}, | 356 | {"LOUT3", NULL, "LOUT3 Mixer"}, |
357 | {"ROUT3", "NULL", "ROUT3 Mixer"}, | 357 | {"ROUT3", NULL, "ROUT3 Mixer"}, |
358 | 358 | ||
359 | {"LOUT1 Mixer", "DACL", "DAC Left"}, | 359 | {"LOUT1 Mixer", "DACL", "DAC Left"}, |
360 | {"ROUT1 Mixer", "DACR", "DAC Right"}, | 360 | {"ROUT1 Mixer", "DACR", "DAC Right"}, |
361 | {"LOUT2 Mixer", "DACHL", "DAC Left"}, | 361 | {"LOUT2 Mixer", "DACHL", "DAC Left"}, |
362 | {"ROUT2 Mixer", "DACHR", "DAC Right"}, | 362 | {"ROUT2 Mixer", "DACHR", "DAC Right"}, |
363 | {"LOUT2 Mix Amp", "NULL", "LOUT2 Mixer"}, | 363 | {"LOUT2 Mix Amp", NULL, "LOUT2 Mixer"}, |
364 | {"ROUT2 Mix Amp", "NULL", "ROUT2 Mixer"}, | 364 | {"ROUT2 Mix Amp", NULL, "ROUT2 Mixer"}, |
365 | {"LOUT3 Mixer", "DACSL", "DAC Left"}, | 365 | {"LOUT3 Mixer", "DACSL", "DAC Left"}, |
366 | {"ROUT3 Mixer", "DACSR", "DAC Right"}, | 366 | {"ROUT3 Mixer", "DACSR", "DAC Right"}, |
367 | 367 | ||
@@ -381,18 +381,18 @@ static const struct snd_soc_dapm_route ak4671_intercon[] = { | |||
381 | {"LIN2", NULL, "Mic Bias"}, | 381 | {"LIN2", NULL, "Mic Bias"}, |
382 | {"RIN2", NULL, "Mic Bias"}, | 382 | {"RIN2", NULL, "Mic Bias"}, |
383 | 383 | ||
384 | {"ADC Left", "NULL", "LIN MUX"}, | 384 | {"ADC Left", NULL, "LIN MUX"}, |
385 | {"ADC Right", "NULL", "RIN MUX"}, | 385 | {"ADC Right", NULL, "RIN MUX"}, |
386 | 386 | ||
387 | /* Analog Loops */ | 387 | /* Analog Loops */ |
388 | {"LIN1 Mixing Circuit", "NULL", "LIN1"}, | 388 | {"LIN1 Mixing Circuit", NULL, "LIN1"}, |
389 | {"RIN1 Mixing Circuit", "NULL", "RIN1"}, | 389 | {"RIN1 Mixing Circuit", NULL, "RIN1"}, |
390 | {"LIN2 Mixing Circuit", "NULL", "LIN2"}, | 390 | {"LIN2 Mixing Circuit", NULL, "LIN2"}, |
391 | {"RIN2 Mixing Circuit", "NULL", "RIN2"}, | 391 | {"RIN2 Mixing Circuit", NULL, "RIN2"}, |
392 | {"LIN3 Mixing Circuit", "NULL", "LIN3"}, | 392 | {"LIN3 Mixing Circuit", NULL, "LIN3"}, |
393 | {"RIN3 Mixing Circuit", "NULL", "RIN3"}, | 393 | {"RIN3 Mixing Circuit", NULL, "RIN3"}, |
394 | {"LIN4 Mixing Circuit", "NULL", "LIN4"}, | 394 | {"LIN4 Mixing Circuit", NULL, "LIN4"}, |
395 | {"RIN4 Mixing Circuit", "NULL", "RIN4"}, | 395 | {"RIN4 Mixing Circuit", NULL, "RIN4"}, |
396 | 396 | ||
397 | {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"}, | 397 | {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"}, |
398 | {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"}, | 398 | {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"}, |
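In a DAPM route the middle field names a kcontrol on the sink widget; a direct, unconditional connection must use a NULL pointer there. The literal string "NULL" used before this patch makes DAPM search for a control actually named "NULL", which does not exist, so the connection is not made. A tiny stand-alone illustration of the difference; struct route and route_is_direct are illustrative, not the ASoC API.

#include <stdio.h>

/* Toy model of a DAPM-style route: sink, optional control name,
 * source.  A NULL control means a direct path; the string "NULL"
 * would instead name a (nonexistent) control. */
struct route {
    const char *sink;
    const char *control;   /* NULL => direct connection */
    const char *source;
};

static int route_is_direct(const struct route *r)
{
    return r->control == NULL;
}

int main(void)
{
    struct route good = { "DAC Left", NULL,   "PMPLL" };
    struct route bad  = { "DAC Left", "NULL", "PMPLL" };

    printf("good direct=%d bad direct=%d\n",
           route_is_direct(&good), route_is_direct(&bad));
    return 0;
}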
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c index 79a4efcb894c..7d3a6accaf9a 100644 --- a/sound/soc/codecs/cs4271.c +++ b/sound/soc/codecs/cs4271.c | |||
@@ -286,7 +286,7 @@ static int cs4271_get_deemph(struct snd_kcontrol *kcontrol, | |||
286 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 286 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
287 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); | 287 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); |
288 | 288 | ||
289 | ucontrol->value.enumerated.item[0] = cs4271->deemph; | 289 | ucontrol->value.integer.value[0] = cs4271->deemph; |
290 | return 0; | 290 | return 0; |
291 | } | 291 | } |
292 | 292 | ||
@@ -296,7 +296,7 @@ static int cs4271_put_deemph(struct snd_kcontrol *kcontrol, | |||
296 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 296 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
297 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); | 297 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); |
298 | 298 | ||
299 | cs4271->deemph = ucontrol->value.enumerated.item[0]; | 299 | cs4271->deemph = ucontrol->value.integer.value[0]; |
300 | return cs4271_set_deemph(codec); | 300 | return cs4271_set_deemph(codec); |
301 | } | 301 | } |
302 | 302 | ||
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c index ffe96175a8a5..911c26c705fc 100644 --- a/sound/soc/codecs/da732x.c +++ b/sound/soc/codecs/da732x.c | |||
@@ -876,11 +876,11 @@ static const struct snd_soc_dapm_widget da732x_dapm_widgets[] = { | |||
876 | 876 | ||
877 | static const struct snd_soc_dapm_route da732x_dapm_routes[] = { | 877 | static const struct snd_soc_dapm_route da732x_dapm_routes[] = { |
878 | /* Inputs */ | 878 | /* Inputs */ |
879 | {"AUX1L PGA", "NULL", "AUX1L"}, | 879 | {"AUX1L PGA", NULL, "AUX1L"}, |
880 | {"AUX1R PGA", "NULL", "AUX1R"}, | 880 | {"AUX1R PGA", NULL, "AUX1R"}, |
881 | {"MIC1 PGA", NULL, "MIC1"}, | 881 | {"MIC1 PGA", NULL, "MIC1"}, |
882 | {"MIC2 PGA", "NULL", "MIC2"}, | 882 | {"MIC2 PGA", NULL, "MIC2"}, |
883 | {"MIC3 PGA", "NULL", "MIC3"}, | 883 | {"MIC3 PGA", NULL, "MIC3"}, |
884 | 884 | ||
885 | /* Capture Path */ | 885 | /* Capture Path */ |
886 | {"ADC1 Left MUX", "MIC1", "MIC1 PGA"}, | 886 | {"ADC1 Left MUX", "MIC1", "MIC1 PGA"}, |
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index f27325155ace..c5f35a07e8e4 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c | |||
@@ -120,7 +120,7 @@ static int es8328_get_deemph(struct snd_kcontrol *kcontrol, | |||
120 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 120 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
121 | struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); | 121 | struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); |
122 | 122 | ||
123 | ucontrol->value.enumerated.item[0] = es8328->deemph; | 123 | ucontrol->value.integer.value[0] = es8328->deemph; |
124 | return 0; | 124 | return 0; |
125 | } | 125 | } |
126 | 126 | ||
@@ -129,7 +129,7 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol, | |||
129 | { | 129 | { |
130 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 130 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
131 | struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); | 131 | struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); |
132 | int deemph = ucontrol->value.enumerated.item[0]; | 132 | int deemph = ucontrol->value.integer.value[0]; |
133 | int ret; | 133 | int ret; |
134 | 134 | ||
135 | if (deemph > 1) | 135 | if (deemph > 1) |
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c index 1806333ea29e..e9e6efbc21dd 100644 --- a/sound/soc/codecs/max98357a.c +++ b/sound/soc/codecs/max98357a.c | |||
@@ -12,9 +12,19 @@ | |||
12 | * max98357a.c -- MAX98357A ALSA SoC Codec driver | 12 | * max98357a.c -- MAX98357A ALSA SoC Codec driver |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | 15 | #include <linux/device.h> |
16 | #include <linux/err.h> | ||
16 | #include <linux/gpio.h> | 17 | #include <linux/gpio.h> |
18 | #include <linux/gpio/consumer.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/mod_devicetable.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/of.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <sound/pcm.h> | ||
17 | #include <sound/soc.h> | 25 | #include <sound/soc.h> |
26 | #include <sound/soc-dai.h> | ||
27 | #include <sound/soc-dapm.h> | ||
18 | 28 | ||
19 | #define DRV_NAME "max98357a" | 29 | #define DRV_NAME "max98357a" |
20 | 30 | ||
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c index a722a023c262..477e13d30971 100644 --- a/sound/soc/codecs/pcm1681.c +++ b/sound/soc/codecs/pcm1681.c | |||
@@ -118,7 +118,7 @@ static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol, | |||
118 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 118 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
119 | struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); | 119 | struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); |
120 | 120 | ||
121 | ucontrol->value.enumerated.item[0] = priv->deemph; | 121 | ucontrol->value.integer.value[0] = priv->deemph; |
122 | 122 | ||
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
@@ -129,7 +129,7 @@ static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol, | |||
129 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 129 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
130 | struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); | 130 | struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); |
131 | 131 | ||
132 | priv->deemph = ucontrol->value.enumerated.item[0]; | 132 | priv->deemph = ucontrol->value.integer.value[0]; |
133 | 133 | ||
134 | return pcm1681_set_deemph(codec); | 134 | return pcm1681_set_deemph(codec); |
135 | } | 135 | } |
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c index 9974f201a08f..474cae82a874 100644 --- a/sound/soc/codecs/pcm512x.c +++ b/sound/soc/codecs/pcm512x.c | |||
@@ -1156,25 +1156,6 @@ static int pcm512x_hw_params(struct snd_pcm_substream *substream, | |||
1156 | ret, pcm512x->pll_out); | 1156 | ret, pcm512x->pll_out); |
1157 | return ret; | 1157 | return ret; |
1158 | } | 1158 | } |
1159 | |||
1160 | gpio = PCM512x_G1OE << (4 - 1); | ||
1161 | ret = regmap_update_bits(pcm512x->regmap, PCM512x_GPIO_EN, | ||
1162 | gpio, gpio); | ||
1163 | if (ret != 0) { | ||
1164 | dev_err(codec->dev, "Failed to enable gpio %d: %d\n", | ||
1165 | 4, ret); | ||
1166 | return ret; | ||
1167 | } | ||
1168 | |||
1169 | gpio = PCM512x_GPIO_OUTPUT_1 + 4 - 1; | ||
1170 | ret = regmap_update_bits(pcm512x->regmap, gpio, | ||
1171 | PCM512x_GxSL, PCM512x_GxSL_PLLLK); | ||
1172 | if (ret != 0) { | ||
1173 | dev_err(codec->dev, | ||
1174 | "Failed to output pll lock on %d: %d\n", | ||
1175 | ret, 4); | ||
1176 | return ret; | ||
1177 | } | ||
1178 | } | 1159 | } |
1179 | 1160 | ||
1180 | ret = regmap_update_bits(pcm512x->regmap, PCM512x_SYNCHRONIZE, | 1161 | ret = regmap_update_bits(pcm512x->regmap, PCM512x_SYNCHRONIZE, |
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index f374840a5a7c..9b541e52da8c 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c | |||
@@ -1198,7 +1198,7 @@ static struct dmi_system_id dmi_dell_dino[] = { | |||
1198 | .ident = "Dell Dino", | 1198 | .ident = "Dell Dino", |
1199 | .matches = { | 1199 | .matches = { |
1200 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | 1200 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
1201 | DMI_MATCH(DMI_BOARD_NAME, "0144P8") | 1201 | DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343") |
1202 | } | 1202 | } |
1203 | }, | 1203 | }, |
1204 | { } | 1204 | { } |
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c index e1a4a45c57e2..fd102613d20d 100644 --- a/sound/soc/codecs/rt5670.c +++ b/sound/soc/codecs/rt5670.c | |||
@@ -225,7 +225,6 @@ static bool rt5670_volatile_register(struct device *dev, unsigned int reg) | |||
225 | case RT5670_ADC_EQ_CTRL1: | 225 | case RT5670_ADC_EQ_CTRL1: |
226 | case RT5670_EQ_CTRL1: | 226 | case RT5670_EQ_CTRL1: |
227 | case RT5670_ALC_CTRL_1: | 227 | case RT5670_ALC_CTRL_1: |
228 | case RT5670_IRQ_CTRL1: | ||
229 | case RT5670_IRQ_CTRL2: | 228 | case RT5670_IRQ_CTRL2: |
230 | case RT5670_INT_IRQ_ST: | 229 | case RT5670_INT_IRQ_ST: |
231 | case RT5670_IL_CMD: | 230 | case RT5670_IL_CMD: |
@@ -2703,6 +2702,12 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, | |||
2703 | 2702 | ||
2704 | regmap_write(rt5670->regmap, RT5670_RESET, 0); | 2703 | regmap_write(rt5670->regmap, RT5670_RESET, 0); |
2705 | 2704 | ||
2705 | regmap_read(rt5670->regmap, RT5670_VENDOR_ID, &val); | ||
2706 | if (val >= 4) | ||
2707 | regmap_write(rt5670->regmap, RT5670_GPIO_CTRL3, 0x0980); | ||
2708 | else | ||
2709 | regmap_write(rt5670->regmap, RT5670_GPIO_CTRL3, 0x0d00); | ||
2710 | |||
2706 | ret = regmap_register_patch(rt5670->regmap, init_list, | 2711 | ret = regmap_register_patch(rt5670->regmap, init_list, |
2707 | ARRAY_SIZE(init_list)); | 2712 | ARRAY_SIZE(init_list)); |
2708 | if (ret != 0) | 2713 | if (ret != 0) |
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index 5d0bb8748dd1..fb9c20eace3f 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c | |||
@@ -3284,8 +3284,8 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = { | |||
3284 | { "IB45 Bypass Mux", "Bypass", "IB45 Mux" }, | 3284 | { "IB45 Bypass Mux", "Bypass", "IB45 Mux" }, |
3285 | { "IB45 Bypass Mux", "Pass SRC", "IB45 Mux" }, | 3285 | { "IB45 Bypass Mux", "Pass SRC", "IB45 Mux" }, |
3286 | 3286 | ||
3287 | { "IB6 Mux", "IF1 DAC 6", "IF1 DAC6" }, | 3287 | { "IB6 Mux", "IF1 DAC 6", "IF1 DAC6 Mux" }, |
3288 | { "IB6 Mux", "IF2 DAC 6", "IF2 DAC6" }, | 3288 | { "IB6 Mux", "IF2 DAC 6", "IF2 DAC6 Mux" }, |
3289 | { "IB6 Mux", "SLB DAC 6", "SLB DAC6" }, | 3289 | { "IB6 Mux", "SLB DAC 6", "SLB DAC6" }, |
3290 | { "IB6 Mux", "STO4 ADC MIX L", "Stereo4 ADC MIXL" }, | 3290 | { "IB6 Mux", "STO4 ADC MIX L", "Stereo4 ADC MIXL" }, |
3291 | { "IB6 Mux", "IF4 DAC L", "IF4 DAC L" }, | 3291 | { "IB6 Mux", "IF4 DAC L", "IF4 DAC L" }, |
@@ -3293,8 +3293,8 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = { | |||
3293 | { "IB6 Mux", "STO2 ADC MIX L", "Stereo2 ADC MIXL" }, | 3293 | { "IB6 Mux", "STO2 ADC MIX L", "Stereo2 ADC MIXL" }, |
3294 | { "IB6 Mux", "STO3 ADC MIX L", "Stereo3 ADC MIXL" }, | 3294 | { "IB6 Mux", "STO3 ADC MIX L", "Stereo3 ADC MIXL" }, |
3295 | 3295 | ||
3296 | { "IB7 Mux", "IF1 DAC 7", "IF1 DAC7" }, | 3296 | { "IB7 Mux", "IF1 DAC 7", "IF1 DAC7 Mux" }, |
3297 | { "IB7 Mux", "IF2 DAC 7", "IF2 DAC7" }, | 3297 | { "IB7 Mux", "IF2 DAC 7", "IF2 DAC7 Mux" }, |
3298 | { "IB7 Mux", "SLB DAC 7", "SLB DAC7" }, | 3298 | { "IB7 Mux", "SLB DAC 7", "SLB DAC7" }, |
3299 | { "IB7 Mux", "STO4 ADC MIX R", "Stereo4 ADC MIXR" }, | 3299 | { "IB7 Mux", "STO4 ADC MIX R", "Stereo4 ADC MIXR" }, |
3300 | { "IB7 Mux", "IF4 DAC R", "IF4 DAC R" }, | 3300 | { "IB7 Mux", "IF4 DAC R", "IF4 DAC R" }, |
@@ -3635,15 +3635,15 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = { | |||
3635 | { "DAC1 FS", NULL, "DAC1 MIXL" }, | 3635 | { "DAC1 FS", NULL, "DAC1 MIXL" }, |
3636 | { "DAC1 FS", NULL, "DAC1 MIXR" }, | 3636 | { "DAC1 FS", NULL, "DAC1 MIXR" }, |
3637 | 3637 | ||
3638 | { "DAC2 L Mux", "IF1 DAC 2", "IF1 DAC2" }, | 3638 | { "DAC2 L Mux", "IF1 DAC 2", "IF1 DAC2 Mux" }, |
3639 | { "DAC2 L Mux", "IF2 DAC 2", "IF2 DAC2" }, | 3639 | { "DAC2 L Mux", "IF2 DAC 2", "IF2 DAC2 Mux" }, |
3640 | { "DAC2 L Mux", "IF3 DAC L", "IF3 DAC L" }, | 3640 | { "DAC2 L Mux", "IF3 DAC L", "IF3 DAC L" }, |
3641 | { "DAC2 L Mux", "IF4 DAC L", "IF4 DAC L" }, | 3641 | { "DAC2 L Mux", "IF4 DAC L", "IF4 DAC L" }, |
3642 | { "DAC2 L Mux", "SLB DAC 2", "SLB DAC2" }, | 3642 | { "DAC2 L Mux", "SLB DAC 2", "SLB DAC2" }, |
3643 | { "DAC2 L Mux", "OB 2", "OutBound2" }, | 3643 | { "DAC2 L Mux", "OB 2", "OutBound2" }, |
3644 | 3644 | ||
3645 | { "DAC2 R Mux", "IF1 DAC 3", "IF1 DAC3" }, | 3645 | { "DAC2 R Mux", "IF1 DAC 3", "IF1 DAC3 Mux" }, |
3646 | { "DAC2 R Mux", "IF2 DAC 3", "IF2 DAC3" }, | 3646 | { "DAC2 R Mux", "IF2 DAC 3", "IF2 DAC3 Mux" }, |
3647 | { "DAC2 R Mux", "IF3 DAC R", "IF3 DAC R" }, | 3647 | { "DAC2 R Mux", "IF3 DAC R", "IF3 DAC R" }, |
3648 | { "DAC2 R Mux", "IF4 DAC R", "IF4 DAC R" }, | 3648 | { "DAC2 R Mux", "IF4 DAC R", "IF4 DAC R" }, |
3649 | { "DAC2 R Mux", "SLB DAC 3", "SLB DAC3" }, | 3649 | { "DAC2 R Mux", "SLB DAC 3", "SLB DAC3" }, |
@@ -3651,29 +3651,29 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = { | |||
3651 | { "DAC2 R Mux", "Haptic Generator", "Haptic Generator" }, | 3651 | { "DAC2 R Mux", "Haptic Generator", "Haptic Generator" }, |
3652 | { "DAC2 R Mux", "VAD ADC", "VAD ADC Mux" }, | 3652 | { "DAC2 R Mux", "VAD ADC", "VAD ADC Mux" }, |
3653 | 3653 | ||
3654 | { "DAC3 L Mux", "IF1 DAC 4", "IF1 DAC4" }, | 3654 | { "DAC3 L Mux", "IF1 DAC 4", "IF1 DAC4 Mux" }, |
3655 | { "DAC3 L Mux", "IF2 DAC 4", "IF2 DAC4" }, | 3655 | { "DAC3 L Mux", "IF2 DAC 4", "IF2 DAC4 Mux" }, |
3656 | { "DAC3 L Mux", "IF3 DAC L", "IF3 DAC L" }, | 3656 | { "DAC3 L Mux", "IF3 DAC L", "IF3 DAC L" }, |
3657 | { "DAC3 L Mux", "IF4 DAC L", "IF4 DAC L" }, | 3657 | { "DAC3 L Mux", "IF4 DAC L", "IF4 DAC L" }, |
3658 | { "DAC3 L Mux", "SLB DAC 4", "SLB DAC4" }, | 3658 | { "DAC3 L Mux", "SLB DAC 4", "SLB DAC4" }, |
3659 | { "DAC3 L Mux", "OB 4", "OutBound4" }, | 3659 | { "DAC3 L Mux", "OB 4", "OutBound4" }, |
3660 | 3660 | ||
3661 | { "DAC3 R Mux", "IF1 DAC 5", "IF1 DAC4" }, | 3661 | { "DAC3 R Mux", "IF1 DAC 5", "IF1 DAC5 Mux" }, |
3662 | { "DAC3 R Mux", "IF2 DAC 5", "IF2 DAC4" }, | 3662 | { "DAC3 R Mux", "IF2 DAC 5", "IF2 DAC5 Mux" }, |
3663 | { "DAC3 R Mux", "IF3 DAC R", "IF3 DAC R" }, | 3663 | { "DAC3 R Mux", "IF3 DAC R", "IF3 DAC R" }, |
3664 | { "DAC3 R Mux", "IF4 DAC R", "IF4 DAC R" }, | 3664 | { "DAC3 R Mux", "IF4 DAC R", "IF4 DAC R" }, |
3665 | { "DAC3 R Mux", "SLB DAC 5", "SLB DAC5" }, | 3665 | { "DAC3 R Mux", "SLB DAC 5", "SLB DAC5" }, |
3666 | { "DAC3 R Mux", "OB 5", "OutBound5" }, | 3666 | { "DAC3 R Mux", "OB 5", "OutBound5" }, |
3667 | 3667 | ||
3668 | { "DAC4 L Mux", "IF1 DAC 6", "IF1 DAC6" }, | 3668 | { "DAC4 L Mux", "IF1 DAC 6", "IF1 DAC6 Mux" }, |
3669 | { "DAC4 L Mux", "IF2 DAC 6", "IF2 DAC6" }, | 3669 | { "DAC4 L Mux", "IF2 DAC 6", "IF2 DAC6 Mux" }, |
3670 | { "DAC4 L Mux", "IF3 DAC L", "IF3 DAC L" }, | 3670 | { "DAC4 L Mux", "IF3 DAC L", "IF3 DAC L" }, |
3671 | { "DAC4 L Mux", "IF4 DAC L", "IF4 DAC L" }, | 3671 | { "DAC4 L Mux", "IF4 DAC L", "IF4 DAC L" }, |
3672 | { "DAC4 L Mux", "SLB DAC 6", "SLB DAC6" }, | 3672 | { "DAC4 L Mux", "SLB DAC 6", "SLB DAC6" }, |
3673 | { "DAC4 L Mux", "OB 6", "OutBound6" }, | 3673 | { "DAC4 L Mux", "OB 6", "OutBound6" }, |
3674 | 3674 | ||
3675 | { "DAC4 R Mux", "IF1 DAC 7", "IF1 DAC7" }, | 3675 | { "DAC4 R Mux", "IF1 DAC 7", "IF1 DAC7 Mux" }, |
3676 | { "DAC4 R Mux", "IF2 DAC 7", "IF2 DAC7" }, | 3676 | { "DAC4 R Mux", "IF2 DAC 7", "IF2 DAC7 Mux" }, |
3677 | { "DAC4 R Mux", "IF3 DAC R", "IF3 DAC R" }, | 3677 | { "DAC4 R Mux", "IF3 DAC R", "IF3 DAC R" }, |
3678 | { "DAC4 R Mux", "IF4 DAC R", "IF4 DAC R" }, | 3678 | { "DAC4 R Mux", "IF4 DAC R", "IF4 DAC R" }, |
3679 | { "DAC4 R Mux", "SLB DAC 7", "SLB DAC7" }, | 3679 | { "DAC4 R Mux", "SLB DAC 7", "SLB DAC7" }, |
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index e182e6569bbd..3593a1496056 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c | |||
@@ -1151,13 +1151,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec) | |||
1151 | /* Enable VDDC charge pump */ | 1151 | /* Enable VDDC charge pump */ |
1152 | ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP; | 1152 | ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP; |
1153 | } else if (vddio >= 3100 && vdda >= 3100) { | 1153 | } else if (vddio >= 3100 && vdda >= 3100) { |
1154 | /* | 1154 | ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP; |
1155 | * if vddio and vddd > 3.1v, | ||
1156 | * charge pump should be clean before set ana_pwr | ||
1157 | */ | ||
1158 | snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, | ||
1159 | SGTL5000_VDDC_CHRGPMP_POWERUP, 0); | ||
1160 | |||
1161 | /* VDDC use VDDIO rail */ | 1155 | /* VDDC use VDDIO rail */ |
1162 | lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; | 1156 | lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; |
1163 | lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << | 1157 | lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << |
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c index 47b257e41809..82095d6cd070 100644 --- a/sound/soc/codecs/sn95031.c +++ b/sound/soc/codecs/sn95031.c | |||
@@ -538,8 +538,8 @@ static const struct snd_soc_dapm_route sn95031_audio_map[] = { | |||
538 | /* speaker map */ | 538 | /* speaker map */ |
539 | { "IHFOUTL", NULL, "Speaker Rail"}, | 539 | { "IHFOUTL", NULL, "Speaker Rail"}, |
540 | { "IHFOUTR", NULL, "Speaker Rail"}, | 540 | { "IHFOUTR", NULL, "Speaker Rail"}, |
541 | { "IHFOUTL", "NULL", "Speaker Left Playback"}, | 541 | { "IHFOUTL", NULL, "Speaker Left Playback"}, |
542 | { "IHFOUTR", "NULL", "Speaker Right Playback"}, | 542 | { "IHFOUTR", NULL, "Speaker Right Playback"}, |
543 | { "Speaker Left Playback", NULL, "Speaker Left Filter"}, | 543 | { "Speaker Left Playback", NULL, "Speaker Left Filter"}, |
544 | { "Speaker Right Playback", NULL, "Speaker Right Filter"}, | 544 | { "Speaker Right Playback", NULL, "Speaker Right Filter"}, |
545 | { "Speaker Left Filter", NULL, "IHFDAC Left"}, | 545 | { "Speaker Left Filter", NULL, "IHFDAC Left"}, |
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c index 3a1343fa109b..007a0e3bc273 100644 --- a/sound/soc/codecs/sta32x.c +++ b/sound/soc/codecs/sta32x.c | |||
@@ -106,13 +106,11 @@ static const struct reg_default sta32x_regs[] = { | |||
106 | }; | 106 | }; |
107 | 107 | ||
108 | static const struct regmap_range sta32x_write_regs_range[] = { | 108 | static const struct regmap_range sta32x_write_regs_range[] = { |
109 | regmap_reg_range(STA32X_CONFA, STA32X_AUTO2), | 109 | regmap_reg_range(STA32X_CONFA, STA32X_FDRC2), |
110 | regmap_reg_range(STA32X_C1CFG, STA32X_FDRC2), | ||
111 | }; | 110 | }; |
112 | 111 | ||
113 | static const struct regmap_range sta32x_read_regs_range[] = { | 112 | static const struct regmap_range sta32x_read_regs_range[] = { |
114 | regmap_reg_range(STA32X_CONFA, STA32X_AUTO2), | 113 | regmap_reg_range(STA32X_CONFA, STA32X_FDRC2), |
115 | regmap_reg_range(STA32X_C1CFG, STA32X_FDRC2), | ||
116 | }; | 114 | }; |
117 | 115 | ||
118 | static const struct regmap_range sta32x_volatile_regs_range[] = { | 116 | static const struct regmap_range sta32x_volatile_regs_range[] = { |
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c index 249ef5c4c762..32942bed34b1 100644 --- a/sound/soc/codecs/tas5086.c +++ b/sound/soc/codecs/tas5086.c | |||
@@ -281,7 +281,7 @@ static int tas5086_get_deemph(struct snd_kcontrol *kcontrol, | |||
281 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 281 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
282 | struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); | 282 | struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); |
283 | 283 | ||
284 | ucontrol->value.enumerated.item[0] = priv->deemph; | 284 | ucontrol->value.integer.value[0] = priv->deemph; |
285 | 285 | ||
286 | return 0; | 286 | return 0; |
287 | } | 287 | } |
@@ -292,7 +292,7 @@ static int tas5086_put_deemph(struct snd_kcontrol *kcontrol, | |||
292 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 292 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
293 | struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); | 293 | struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); |
294 | 294 | ||
295 | priv->deemph = ucontrol->value.enumerated.item[0]; | 295 | priv->deemph = ucontrol->value.integer.value[0]; |
296 | 296 | ||
297 | return tas5086_set_deemph(codec); | 297 | return tas5086_set_deemph(codec); |
298 | } | 298 | } |
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index 8d9de49a5052..21d5402e343f 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c | |||
@@ -610,7 +610,7 @@ static int wm2000_anc_mode_get(struct snd_kcontrol *kcontrol, | |||
610 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 610 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
611 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); | 611 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); |
612 | 612 | ||
613 | ucontrol->value.enumerated.item[0] = wm2000->anc_active; | 613 | ucontrol->value.integer.value[0] = wm2000->anc_active; |
614 | 614 | ||
615 | return 0; | 615 | return 0; |
616 | } | 616 | } |
@@ -620,7 +620,7 @@ static int wm2000_anc_mode_put(struct snd_kcontrol *kcontrol, | |||
620 | { | 620 | { |
621 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 621 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
622 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); | 622 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); |
623 | int anc_active = ucontrol->value.enumerated.item[0]; | 623 | int anc_active = ucontrol->value.integer.value[0]; |
624 | int ret; | 624 | int ret; |
625 | 625 | ||
626 | if (anc_active > 1) | 626 | if (anc_active > 1) |
@@ -643,7 +643,7 @@ static int wm2000_speaker_get(struct snd_kcontrol *kcontrol, | |||
643 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 643 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
644 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); | 644 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); |
645 | 645 | ||
646 | ucontrol->value.enumerated.item[0] = wm2000->spk_ena; | 646 | ucontrol->value.integer.value[0] = wm2000->spk_ena; |
647 | 647 | ||
648 | return 0; | 648 | return 0; |
649 | } | 649 | } |
@@ -653,7 +653,7 @@ static int wm2000_speaker_put(struct snd_kcontrol *kcontrol, | |||
653 | { | 653 | { |
654 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 654 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
655 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); | 655 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); |
656 | int val = ucontrol->value.enumerated.item[0]; | 656 | int val = ucontrol->value.integer.value[0]; |
657 | int ret; | 657 | int ret; |
658 | 658 | ||
659 | if (val > 1) | 659 | if (val > 1) |
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c index 098c143f44d6..c6d10533e2bd 100644 --- a/sound/soc/codecs/wm8731.c +++ b/sound/soc/codecs/wm8731.c | |||
@@ -125,7 +125,7 @@ static int wm8731_get_deemph(struct snd_kcontrol *kcontrol, | |||
125 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 125 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
126 | struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); | 126 | struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); |
127 | 127 | ||
128 | ucontrol->value.enumerated.item[0] = wm8731->deemph; | 128 | ucontrol->value.integer.value[0] = wm8731->deemph; |
129 | 129 | ||
130 | return 0; | 130 | return 0; |
131 | } | 131 | } |
@@ -135,7 +135,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol, | |||
135 | { | 135 | { |
136 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 136 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
137 | struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); | 137 | struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); |
138 | int deemph = ucontrol->value.enumerated.item[0]; | 138 | int deemph = ucontrol->value.integer.value[0]; |
139 | int ret = 0; | 139 | int ret = 0; |
140 | 140 | ||
141 | if (deemph > 1) | 141 | if (deemph > 1) |
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index dde462c082be..04b04f8e147c 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c | |||
@@ -442,7 +442,7 @@ static int wm8903_get_deemph(struct snd_kcontrol *kcontrol, | |||
442 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 442 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
443 | struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); | 443 | struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); |
444 | 444 | ||
445 | ucontrol->value.enumerated.item[0] = wm8903->deemph; | 445 | ucontrol->value.integer.value[0] = wm8903->deemph; |
446 | 446 | ||
447 | return 0; | 447 | return 0; |
448 | } | 448 | } |
@@ -452,7 +452,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol, | |||
452 | { | 452 | { |
453 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 453 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
454 | struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); | 454 | struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); |
455 | int deemph = ucontrol->value.enumerated.item[0]; | 455 | int deemph = ucontrol->value.integer.value[0]; |
456 | int ret = 0; | 456 | int ret = 0; |
457 | 457 | ||
458 | if (deemph > 1) | 458 | if (deemph > 1) |
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index d3b3f57668cc..215e93c1ddf0 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c | |||
@@ -525,7 +525,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol, | |||
525 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 525 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
526 | struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); | 526 | struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); |
527 | 527 | ||
528 | ucontrol->value.enumerated.item[0] = wm8904->deemph; | 528 | ucontrol->value.integer.value[0] = wm8904->deemph; |
529 | return 0; | 529 | return 0; |
530 | } | 530 | } |
531 | 531 | ||
@@ -534,7 +534,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol, | |||
534 | { | 534 | { |
535 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 535 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
536 | struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); | 536 | struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); |
537 | int deemph = ucontrol->value.enumerated.item[0]; | 537 | int deemph = ucontrol->value.integer.value[0]; |
538 | 538 | ||
539 | if (deemph > 1) | 539 | if (deemph > 1) |
540 | return -EINVAL; | 540 | return -EINVAL; |
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c index 1ab2d462afad..00bec915d652 100644 --- a/sound/soc/codecs/wm8955.c +++ b/sound/soc/codecs/wm8955.c | |||
@@ -393,7 +393,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol, | |||
393 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 393 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
394 | struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); | 394 | struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); |
395 | 395 | ||
396 | ucontrol->value.enumerated.item[0] = wm8955->deemph; | 396 | ucontrol->value.integer.value[0] = wm8955->deemph; |
397 | return 0; | 397 | return 0; |
398 | } | 398 | } |
399 | 399 | ||
@@ -402,7 +402,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol, | |||
402 | { | 402 | { |
403 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 403 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
404 | struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); | 404 | struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); |
405 | int deemph = ucontrol->value.enumerated.item[0]; | 405 | int deemph = ucontrol->value.integer.value[0]; |
406 | 406 | ||
407 | if (deemph > 1) | 407 | if (deemph > 1) |
408 | return -EINVAL; | 408 | return -EINVAL; |
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index cf8fecf97f2c..3035d9856415 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c | |||
@@ -184,7 +184,7 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol, | |||
184 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 184 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
185 | struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); | 185 | struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); |
186 | 186 | ||
187 | ucontrol->value.enumerated.item[0] = wm8960->deemph; | 187 | ucontrol->value.integer.value[0] = wm8960->deemph; |
188 | return 0; | 188 | return 0; |
189 | } | 189 | } |
190 | 190 | ||
@@ -193,7 +193,7 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol, | |||
193 | { | 193 | { |
194 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 194 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
195 | struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); | 195 | struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); |
196 | int deemph = ucontrol->value.enumerated.item[0]; | 196 | int deemph = ucontrol->value.integer.value[0]; |
197 | 197 | ||
198 | if (deemph > 1) | 198 | if (deemph > 1) |
199 | return -EINVAL; | 199 | return -EINVAL; |
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index 9517571e820d..98c9525bd751 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c | |||
@@ -180,7 +180,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol, | |||
180 | struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); | 180 | struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); |
181 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); | 181 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); |
182 | struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); | 182 | struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); |
183 | unsigned int val = ucontrol->value.enumerated.item[0]; | 183 | unsigned int val = ucontrol->value.integer.value[0]; |
184 | struct soc_mixer_control *mc = | 184 | struct soc_mixer_control *mc = |
185 | (struct soc_mixer_control *)kcontrol->private_value; | 185 | (struct soc_mixer_control *)kcontrol->private_value; |
186 | unsigned int mixer, mask, shift, old; | 186 | unsigned int mixer, mask, shift, old; |
@@ -193,7 +193,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol, | |||
193 | 193 | ||
194 | mutex_lock(&wm9712->lock); | 194 | mutex_lock(&wm9712->lock); |
195 | old = wm9712->hp_mixer[mixer]; | 195 | old = wm9712->hp_mixer[mixer]; |
196 | if (ucontrol->value.enumerated.item[0]) | 196 | if (ucontrol->value.integer.value[0]) |
197 | wm9712->hp_mixer[mixer] |= mask; | 197 | wm9712->hp_mixer[mixer] |= mask; |
198 | else | 198 | else |
199 | wm9712->hp_mixer[mixer] &= ~mask; | 199 | wm9712->hp_mixer[mixer] &= ~mask; |
@@ -231,7 +231,7 @@ static int wm9712_hp_mixer_get(struct snd_kcontrol *kcontrol, | |||
231 | mixer = mc->shift >> 8; | 231 | mixer = mc->shift >> 8; |
232 | shift = mc->shift & 0xff; | 232 | shift = mc->shift & 0xff; |
233 | 233 | ||
234 | ucontrol->value.enumerated.item[0] = | 234 | ucontrol->value.integer.value[0] = |
235 | (wm9712->hp_mixer[mixer] >> shift) & 1; | 235 | (wm9712->hp_mixer[mixer] >> shift) & 1; |
236 | 236 | ||
237 | return 0; | 237 | return 0; |
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c index 68222917b396..79552953e1bd 100644 --- a/sound/soc/codecs/wm9713.c +++ b/sound/soc/codecs/wm9713.c | |||
@@ -255,7 +255,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol, | |||
255 | struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); | 255 | struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); |
256 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); | 256 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); |
257 | struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); | 257 | struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); |
258 | unsigned int val = ucontrol->value.enumerated.item[0]; | 258 | unsigned int val = ucontrol->value.integer.value[0]; |
259 | struct soc_mixer_control *mc = | 259 | struct soc_mixer_control *mc = |
260 | (struct soc_mixer_control *)kcontrol->private_value; | 260 | (struct soc_mixer_control *)kcontrol->private_value; |
261 | unsigned int mixer, mask, shift, old; | 261 | unsigned int mixer, mask, shift, old; |
@@ -268,7 +268,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol, | |||
268 | 268 | ||
269 | mutex_lock(&wm9713->lock); | 269 | mutex_lock(&wm9713->lock); |
270 | old = wm9713->hp_mixer[mixer]; | 270 | old = wm9713->hp_mixer[mixer]; |
271 | if (ucontrol->value.enumerated.item[0]) | 271 | if (ucontrol->value.integer.value[0]) |
272 | wm9713->hp_mixer[mixer] |= mask; | 272 | wm9713->hp_mixer[mixer] |= mask; |
273 | else | 273 | else |
274 | wm9713->hp_mixer[mixer] &= ~mask; | 274 | wm9713->hp_mixer[mixer] &= ~mask; |
@@ -306,7 +306,7 @@ static int wm9713_hp_mixer_get(struct snd_kcontrol *kcontrol, | |||
306 | mixer = mc->shift >> 8; | 306 | mixer = mc->shift >> 8; |
307 | shift = mc->shift & 0xff; | 307 | shift = mc->shift & 0xff; |
308 | 308 | ||
309 | ucontrol->value.enumerated.item[0] = | 309 | ucontrol->value.integer.value[0] = |
310 | (wm9713->hp_mixer[mixer] >> shift) & 1; | 310 | (wm9713->hp_mixer[mixer] >> shift) & 1; |
311 | 311 | ||
312 | return 0; | 312 | return 0; |
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c index 75870c0ea2c9..91eb3aef7f02 100644 --- a/sound/soc/fsl/fsl_spdif.c +++ b/sound/soc/fsl/fsl_spdif.c | |||
@@ -1049,7 +1049,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv, | |||
1049 | enum spdif_txrate index, bool round) | 1049 | enum spdif_txrate index, bool round) |
1050 | { | 1050 | { |
1051 | const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; | 1051 | const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; |
1052 | bool is_sysclk = clk == spdif_priv->sysclk; | 1052 | bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk); |
1053 | u64 rate_ideal, rate_actual, sub; | 1053 | u64 rate_ideal, rate_actual, sub; |
1054 | u32 sysclk_dfmin, sysclk_dfmax; | 1054 | u32 sysclk_dfmin, sysclk_dfmax; |
1055 | u32 txclk_df, sysclk_df, arate; | 1055 | u32 txclk_df, sysclk_df, arate; |
@@ -1143,7 +1143,7 @@ static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv, | |||
1143 | spdif_priv->txclk_src[index], rate[index]); | 1143 | spdif_priv->txclk_src[index], rate[index]); |
1144 | dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n", | 1144 | dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n", |
1145 | spdif_priv->txclk_df[index], rate[index]); | 1145 | spdif_priv->txclk_df[index], rate[index]); |
1146 | if (spdif_priv->txclk[index] == spdif_priv->sysclk) | 1146 | if (clk_is_match(spdif_priv->txclk[index], spdif_priv->sysclk)) |
1147 | dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n", | 1147 | dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n", |
1148 | spdif_priv->sysclk_df[index], rate[index]); | 1148 | spdif_priv->sysclk_df[index], rate[index]); |
1149 | dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n", | 1149 | dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n", |
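Both fsl_spdif hunks replace a raw pointer comparison of struct clk handles with clk_is_match(). With the common clock framework, separate clk_get() calls may hand back distinct struct clk cookies for the same underlying hardware clock, so pointer equality can give a false negative; clk_is_match() compares the clocks themselves. A small sketch of the pattern (function name illustrative):

    #include <linux/clk.h>

    /* Sketch: txclk and sysclk may be different handles to the same clock,
     * so compare them with clk_is_match() rather than with '=='. */
    static bool txclk_is_sysclk(struct clk *txclk, struct clk *sysclk)
    {
            return clk_is_match(txclk, sysclk);
    }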
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 2595611e8a6d..6b0c8f717ec2 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c | |||
@@ -603,17 +603,20 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, | |||
603 | factor = (div2 + 1) * (7 * psr + 1) * 2; | 603 | factor = (div2 + 1) * (7 * psr + 1) * 2; |
604 | 604 | ||
605 | for (i = 0; i < 255; i++) { | 605 | for (i = 0; i < 255; i++) { |
606 | /* The bclk rate must be smaller than 1/5 sysclk rate */ | 606 | tmprate = freq * factor * (i + 1); |
607 | if (factor * (i + 1) < 5) | ||
608 | continue; | ||
609 | |||
610 | tmprate = freq * factor * (i + 2); | ||
611 | 607 | ||
612 | if (baudclk_is_used) | 608 | if (baudclk_is_used) |
613 | clkrate = clk_get_rate(ssi_private->baudclk); | 609 | clkrate = clk_get_rate(ssi_private->baudclk); |
614 | else | 610 | else |
615 | clkrate = clk_round_rate(ssi_private->baudclk, tmprate); | 611 | clkrate = clk_round_rate(ssi_private->baudclk, tmprate); |
616 | 612 | ||
613 | /* | ||
614 | * Hardware limitation: The bclk rate must never | ||
615 | * be greater than 1/5 of the IPG clock rate | ||
616 | */ | ||
617 | if (clkrate * 5 > clk_get_rate(ssi_private->clk)) | ||
618 | continue; | ||
619 | |||
617 | clkrate /= factor; | 620 | clkrate /= factor; |
618 | afreq = clkrate / (i + 1); | 621 | afreq = clkrate / (i + 1); |
619 | 622 | ||
@@ -1224,7 +1227,7 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev, | |||
1224 | ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0; | 1227 | ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0; |
1225 | ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0; | 1228 | ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0; |
1226 | 1229 | ||
1227 | ret = !of_property_read_u32_array(np, "dmas", dmas, 4); | 1230 | ret = of_property_read_u32_array(np, "dmas", dmas, 4); |
1228 | if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) { | 1231 | if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) { |
1229 | ssi_private->use_dual_fifo = true; | 1232 | ssi_private->use_dual_fifo = true; |
1230 | /* When using dual fifo mode, we need to keep watermark | 1233 | /* When using dual fifo mode, we need to keep watermark |
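The second fsl_ssi hunk drops the '!' in front of of_property_read_u32_array(): the helper returns 0 on success and a negative errno on failure, so negating it meant the subsequent '!ret' test was satisfied only when the read had failed and dmas[] had not been populated. The intended pattern, sketched with the property name and array size from the hunk:

    u32 dmas[4];
    int ret;

    /* of_property_read_u32_array() returns 0 on success, negative errno on error */
    ret = of_property_read_u32_array(np, "dmas", dmas, 4);
    if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL)
            ssi_private->use_dual_fifo = true;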
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index f7c6734bd5da..fb550b5869d2 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c | |||
@@ -372,6 +372,11 @@ static int asoc_simple_card_dai_link_of(struct device_node *node, | |||
372 | strlen(dai_link->cpu_dai_name) + | 372 | strlen(dai_link->cpu_dai_name) + |
373 | strlen(dai_link->codec_dai_name) + 2, | 373 | strlen(dai_link->codec_dai_name) + 2, |
374 | GFP_KERNEL); | 374 | GFP_KERNEL); |
375 | if (!name) { | ||
376 | ret = -ENOMEM; | ||
377 | goto dai_link_of_err; | ||
378 | } | ||
379 | |||
375 | sprintf(name, "%s-%s", dai_link->cpu_dai_name, | 380 | sprintf(name, "%s-%s", dai_link->cpu_dai_name, |
376 | dai_link->codec_dai_name); | 381 | dai_link->codec_dai_name); |
377 | dai_link->name = dai_link->stream_name = name; | 382 | dai_link->name = dai_link->stream_name = name; |
diff --git a/sound/soc/intel/sst-atom-controls.h b/sound/soc/intel/sst-atom-controls.h index dfebfdd5eb2a..daecc58f28af 100644 --- a/sound/soc/intel/sst-atom-controls.h +++ b/sound/soc/intel/sst-atom-controls.h | |||
@@ -150,7 +150,7 @@ enum sst_cmd_type { | |||
150 | 150 | ||
151 | enum sst_task { | 151 | enum sst_task { |
152 | SST_TASK_SBA = 1, | 152 | SST_TASK_SBA = 1, |
153 | SST_TASK_MMX, | 153 | SST_TASK_MMX = 3, |
154 | }; | 154 | }; |
155 | 155 | ||
156 | enum sst_type { | 156 | enum sst_type { |
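The sst-atom-controls.h hunk pins SST_TASK_MMX to 3 explicitly. Without an initializer a C enumerator takes the previous value plus one, so SST_TASK_MMX would have been 2, which evidently does not match the task id the firmware expects (the value 3 is taken from the hunk). A tiny illustration of the numbering rule:

    /* Sketch: implicit vs. explicit enumerator values */
    enum sst_task_example {
            SST_TASK_SBA = 1,
            SST_TASK_MMX = 3,   /* without "= 3" this would default to 2 */
    };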
diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c index c42ffae5fe9f..402b728c0a06 100644 --- a/sound/soc/intel/sst-haswell-dsp.c +++ b/sound/soc/intel/sst-haswell-dsp.c | |||
@@ -207,9 +207,6 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw) | |||
207 | module = (void *)module + sizeof(*module) + module->mod_size; | 207 | module = (void *)module + sizeof(*module) + module->mod_size; |
208 | } | 208 | } |
209 | 209 | ||
210 | /* allocate scratch mem regions */ | ||
211 | sst_block_alloc_scratch(dsp); | ||
212 | |||
213 | return 0; | 210 | return 0; |
214 | } | 211 | } |
215 | 212 | ||
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c index 394af5684c05..863a9ca34b8e 100644 --- a/sound/soc/intel/sst-haswell-ipc.c +++ b/sound/soc/intel/sst-haswell-ipc.c | |||
@@ -1732,6 +1732,7 @@ static void sst_hsw_drop_all(struct sst_hsw *hsw) | |||
1732 | int sst_hsw_dsp_load(struct sst_hsw *hsw) | 1732 | int sst_hsw_dsp_load(struct sst_hsw *hsw) |
1733 | { | 1733 | { |
1734 | struct sst_dsp *dsp = hsw->dsp; | 1734 | struct sst_dsp *dsp = hsw->dsp; |
1735 | struct sst_fw *sst_fw, *t; | ||
1735 | int ret; | 1736 | int ret; |
1736 | 1737 | ||
1737 | dev_dbg(hsw->dev, "loading audio DSP...."); | 1738 | dev_dbg(hsw->dev, "loading audio DSP...."); |
@@ -1748,12 +1749,17 @@ int sst_hsw_dsp_load(struct sst_hsw *hsw) | |||
1748 | return ret; | 1749 | return ret; |
1749 | } | 1750 | } |
1750 | 1751 | ||
1751 | ret = sst_fw_reload(hsw->sst_fw); | 1752 | list_for_each_entry_safe_reverse(sst_fw, t, &dsp->fw_list, list) { |
1752 | if (ret < 0) { | 1753 | ret = sst_fw_reload(sst_fw); |
1753 | dev_err(hsw->dev, "error: SST FW reload failed\n"); | 1754 | if (ret < 0) { |
1754 | sst_dsp_dma_put_channel(dsp); | 1755 | dev_err(hsw->dev, "error: SST FW reload failed\n"); |
1755 | return -ENOMEM; | 1756 | sst_dsp_dma_put_channel(dsp); |
1757 | return -ENOMEM; | ||
1758 | } | ||
1756 | } | 1759 | } |
1760 | ret = sst_block_alloc_scratch(hsw->dsp); | ||
1761 | if (ret < 0) | ||
1762 | return -EINVAL; | ||
1757 | 1763 | ||
1758 | sst_dsp_dma_put_channel(dsp); | 1764 | sst_dsp_dma_put_channel(dsp); |
1759 | return 0; | 1765 | return 0; |
@@ -1809,12 +1815,17 @@ int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw) | |||
1809 | 1815 | ||
1810 | int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw) | 1816 | int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw) |
1811 | { | 1817 | { |
1812 | sst_fw_unload(hsw->sst_fw); | 1818 | struct sst_fw *sst_fw, *t; |
1813 | sst_block_free_scratch(hsw->dsp); | 1819 | struct sst_dsp *dsp = hsw->dsp; |
1820 | |||
1821 | list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) { | ||
1822 | sst_fw_unload(sst_fw); | ||
1823 | } | ||
1824 | sst_block_free_scratch(dsp); | ||
1814 | 1825 | ||
1815 | hsw->boot_complete = false; | 1826 | hsw->boot_complete = false; |
1816 | 1827 | ||
1817 | sst_dsp_sleep(hsw->dsp); | 1828 | sst_dsp_sleep(dsp); |
1818 | 1829 | ||
1819 | return 0; | 1830 | return 0; |
1820 | } | 1831 | } |
@@ -1943,6 +1954,11 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata) | |||
1943 | goto fw_err; | 1954 | goto fw_err; |
1944 | } | 1955 | } |
1945 | 1956 | ||
1957 | /* allocate scratch mem regions */ | ||
1958 | ret = sst_block_alloc_scratch(hsw->dsp); | ||
1959 | if (ret < 0) | ||
1960 | goto boot_err; | ||
1961 | |||
1946 | /* wait for DSP boot completion */ | 1962 | /* wait for DSP boot completion */ |
1947 | sst_dsp_boot(hsw->dsp); | 1963 | sst_dsp_boot(hsw->dsp); |
1948 | ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete, | 1964 | ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete, |
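The sst-haswell changes stop reloading a single cached firmware image and instead walk the DSP's fw_list, and they move the scratch-block allocation out of firmware image parsing and into the DSP init and load paths. The _safe list iterators are used because reloading or unloading an image may alter the list while it is being traversed. A sketch of the unload loop, using the structures named in the hunks:

    struct sst_fw *sst_fw, *t;

    /* _safe iteration: 't' caches the next node so the current entry may be
     * removed from dsp->fw_list inside the loop body. */
    list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list)
            sst_fw_unload(sst_fw);
    sst_block_free_scratch(dsp);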
diff --git a/sound/soc/intel/sst/sst.c b/sound/soc/intel/sst/sst.c index 8a8d56a146e7..11c578651c1c 100644 --- a/sound/soc/intel/sst/sst.c +++ b/sound/soc/intel/sst/sst.c | |||
@@ -350,7 +350,9 @@ static inline void sst_save_shim64(struct intel_sst_drv *ctx, | |||
350 | 350 | ||
351 | spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags); | 351 | spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags); |
352 | 352 | ||
353 | shim_regs->imrx = sst_shim_read64(shim, SST_IMRX), | 353 | shim_regs->imrx = sst_shim_read64(shim, SST_IMRX); |
354 | shim_regs->csr = sst_shim_read64(shim, SST_CSR); | ||
355 | |||
354 | 356 | ||
355 | spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags); | 357 | spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags); |
356 | } | 358 | } |
@@ -367,6 +369,7 @@ static inline void sst_restore_shim64(struct intel_sst_drv *ctx, | |||
367 | */ | 369 | */ |
368 | spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags); | 370 | spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags); |
369 | sst_shim_write64(shim, SST_IMRX, shim_regs->imrx), | 371 | sst_shim_write64(shim, SST_IMRX, shim_regs->imrx), |
372 | sst_shim_write64(shim, SST_CSR, shim_regs->csr), | ||
370 | spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags); | 373 | spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags); |
371 | } | 374 | } |
372 | 375 | ||
@@ -379,6 +382,10 @@ void sst_configure_runtime_pm(struct intel_sst_drv *ctx) | |||
379 | * initially active. So change the state to active before | 382 | * initially active. So change the state to active before |
380 | * enabling the pm | 383 | * enabling the pm |
381 | */ | 384 | */ |
385 | |||
386 | if (!acpi_disabled) | ||
387 | pm_runtime_set_active(ctx->dev); | ||
388 | |||
382 | pm_runtime_enable(ctx->dev); | 389 | pm_runtime_enable(ctx->dev); |
383 | 390 | ||
384 | if (acpi_disabled) | 391 | if (acpi_disabled) |
@@ -409,6 +416,7 @@ static int intel_sst_runtime_suspend(struct device *dev) | |||
409 | synchronize_irq(ctx->irq_num); | 416 | synchronize_irq(ctx->irq_num); |
410 | flush_workqueue(ctx->post_msg_wq); | 417 | flush_workqueue(ctx->post_msg_wq); |
411 | 418 | ||
419 | ctx->ops->reset(ctx); | ||
412 | /* save the shim registers because PMC doesn't save state */ | 420 | /* save the shim registers because PMC doesn't save state */ |
413 | sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64); | 421 | sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64); |
414 | 422 | ||
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index def7d8260c4e..d19483081f9b 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c | |||
@@ -579,7 +579,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev) | |||
579 | if (PTR_ERR(priv->extclk) == -EPROBE_DEFER) | 579 | if (PTR_ERR(priv->extclk) == -EPROBE_DEFER) |
580 | return -EPROBE_DEFER; | 580 | return -EPROBE_DEFER; |
581 | } else { | 581 | } else { |
582 | if (priv->extclk == priv->clk) { | 582 | if (clk_is_match(priv->extclk, priv->clk)) { |
583 | devm_clk_put(&pdev->dev, priv->extclk); | 583 | devm_clk_put(&pdev->dev, priv->extclk); |
584 | priv->extclk = ERR_PTR(-EINVAL); | 584 | priv->extclk = ERR_PTR(-EINVAL); |
585 | } else { | 585 | } else { |
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c index ccfb41c22e53..f7eb42aa3f38 100644 --- a/sound/soc/omap/omap-hdmi-audio.c +++ b/sound/soc/omap/omap-hdmi-audio.c | |||
@@ -352,6 +352,9 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev) | |||
352 | return ret; | 352 | return ret; |
353 | 353 | ||
354 | card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); | 354 | card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); |
355 | if (!card) | ||
356 | return -ENOMEM; | ||
357 | |||
355 | card->name = devm_kasprintf(dev, GFP_KERNEL, | 358 | card->name = devm_kasprintf(dev, GFP_KERNEL, |
356 | "HDMI %s", dev_name(ad->dssdev)); | 359 | "HDMI %s", dev_name(ad->dssdev)); |
357 | card->owner = THIS_MODULE; | 360 | card->owner = THIS_MODULE; |
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index c7eb9dd67f60..fd99d89de6a8 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c | |||
@@ -530,8 +530,19 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai, | |||
530 | 530 | ||
531 | case OMAP_MCBSP_SYSCLK_CLKX_EXT: | 531 | case OMAP_MCBSP_SYSCLK_CLKX_EXT: |
532 | regs->srgr2 |= CLKSM; | 532 | regs->srgr2 |= CLKSM; |
533 | regs->pcr0 |= SCLKME; | ||
534 | /* | ||
535 | * If McBSP is master while the CLKX/CLKR pin drives the SRG, | ||
536 | * disable output on those pins. This allows injecting the | ||
537 | * reference clock through CLKX/CLKR. For this to work, | ||

538 | * set_dai_sysclk() _needs_ to be called after set_dai_fmt(). | ||
539 | */ | ||
540 | regs->pcr0 &= ~CLKXM; | ||
541 | break; | ||
533 | case OMAP_MCBSP_SYSCLK_CLKR_EXT: | 542 | case OMAP_MCBSP_SYSCLK_CLKR_EXT: |
534 | regs->pcr0 |= SCLKME; | 543 | regs->pcr0 |= SCLKME; |
544 | /* Disable output on CLKR pin in master mode */ | ||
545 | regs->pcr0 &= ~CLKRM; | ||
535 | break; | 546 | break; |
536 | default: | 547 | default: |
537 | err = -ENODEV; | 548 | err = -ENODEV; |
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c index f4b05bc23e4b..1343ecbf0bd5 100644 --- a/sound/soc/omap/omap-pcm.c +++ b/sound/soc/omap/omap-pcm.c | |||
@@ -201,7 +201,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd) | |||
201 | struct snd_pcm *pcm = rtd->pcm; | 201 | struct snd_pcm *pcm = rtd->pcm; |
202 | int ret; | 202 | int ret; |
203 | 203 | ||
204 | ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64)); | 204 | ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32)); |
205 | if (ret) | 205 | if (ret) |
206 | return ret; | 206 | return ret; |
207 | 207 | ||
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig index 3cebf6ca03df..0632a36852c8 100644 --- a/sound/soc/samsung/Kconfig +++ b/sound/soc/samsung/Kconfig | |||
@@ -174,7 +174,7 @@ config SND_SOC_SMDK_WM8994_PCM | |||
174 | 174 | ||
175 | config SND_SOC_SPEYSIDE | 175 | config SND_SOC_SPEYSIDE |
176 | tristate "Audio support for Wolfson Speyside" | 176 | tristate "Audio support for Wolfson Speyside" |
177 | depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 | 177 | depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && I2C && SPI_MASTER |
178 | select SND_SAMSUNG_I2S | 178 | select SND_SAMSUNG_I2S |
179 | select SND_SOC_WM8996 | 179 | select SND_SOC_WM8996 |
180 | select SND_SOC_WM9081 | 180 | select SND_SOC_WM9081 |
@@ -189,7 +189,7 @@ config SND_SOC_TOBERMORY | |||
189 | 189 | ||
190 | config SND_SOC_BELLS | 190 | config SND_SOC_BELLS |
191 | tristate "Audio support for Wolfson Bells" | 191 | tristate "Audio support for Wolfson Bells" |
192 | depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA | 192 | depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA && I2C && SPI_MASTER |
193 | select SND_SAMSUNG_I2S | 193 | select SND_SAMSUNG_I2S |
194 | select SND_SOC_WM5102 | 194 | select SND_SOC_WM5102 |
195 | select SND_SOC_WM5110 | 195 | select SND_SOC_WM5110 |
@@ -206,7 +206,7 @@ config SND_SOC_LOWLAND | |||
206 | 206 | ||
207 | config SND_SOC_LITTLEMILL | 207 | config SND_SOC_LITTLEMILL |
208 | tristate "Audio support for Wolfson Littlemill" | 208 | tristate "Audio support for Wolfson Littlemill" |
209 | depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 | 209 | depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && I2C |
210 | select SND_SAMSUNG_I2S | 210 | select SND_SAMSUNG_I2S |
211 | select MFD_WM8994 | 211 | select MFD_WM8994 |
212 | select SND_SOC_WM8994 | 212 | select SND_SOC_WM8994 |
@@ -223,7 +223,7 @@ config SND_SOC_SNOW | |||
223 | 223 | ||
224 | config SND_SOC_ODROIDX2 | 224 | config SND_SOC_ODROIDX2 |
225 | tristate "Audio support for Odroid-X2 and Odroid-U3" | 225 | tristate "Audio support for Odroid-X2 and Odroid-U3" |
226 | depends on SND_SOC_SAMSUNG | 226 | depends on SND_SOC_SAMSUNG && I2C |
227 | select SND_SOC_MAX98090 | 227 | select SND_SOC_MAX98090 |
228 | select SND_SAMSUNG_I2S | 228 | select SND_SAMSUNG_I2S |
229 | help | 229 | help |
@@ -231,6 +231,6 @@ config SND_SOC_ODROIDX2 | |||
231 | 231 | ||
232 | config SND_SOC_ARNDALE_RT5631_ALC5631 | 232 | config SND_SOC_ARNDALE_RT5631_ALC5631 |
233 | tristate "Audio support for RT5631(ALC5631) on Arndale Board" | 233 | tristate "Audio support for RT5631(ALC5631) on Arndale Board" |
234 | depends on SND_SOC_SAMSUNG | 234 | depends on SND_SOC_SAMSUNG && I2C |
235 | select SND_SAMSUNG_I2S | 235 | select SND_SAMSUNG_I2S |
236 | select SND_SOC_RT5631 | 236 | select SND_SOC_RT5631 |
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 1b53605f7154..110577c52317 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c | |||
@@ -1252,6 +1252,8 @@ static int rsnd_probe(struct platform_device *pdev) | |||
1252 | goto exit_snd_probe; | 1252 | goto exit_snd_probe; |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | dev_set_drvdata(dev, priv); | ||
1256 | |||
1255 | /* | 1257 | /* |
1256 | * asoc register | 1258 | * asoc register |
1257 | */ | 1259 | */ |
@@ -1268,8 +1270,6 @@ static int rsnd_probe(struct platform_device *pdev) | |||
1268 | goto exit_snd_soc; | 1270 | goto exit_snd_soc; |
1269 | } | 1271 | } |
1270 | 1272 | ||
1271 | dev_set_drvdata(dev, priv); | ||
1272 | |||
1273 | pm_runtime_enable(dev); | 1273 | pm_runtime_enable(dev); |
1274 | 1274 | ||
1275 | dev_info(dev, "probed\n"); | 1275 | dev_info(dev, "probed\n"); |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 30579ca5bacb..e5c990889dcc 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -347,6 +347,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf, | |||
347 | if (!buf) | 347 | if (!buf) |
348 | return -ENOMEM; | 348 | return -ENOMEM; |
349 | 349 | ||
350 | mutex_lock(&client_mutex); | ||
351 | |||
350 | list_for_each_entry(codec, &codec_list, list) { | 352 | list_for_each_entry(codec, &codec_list, list) { |
351 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", | 353 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", |
352 | codec->component.name); | 354 | codec->component.name); |
@@ -358,6 +360,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf, | |||
358 | } | 360 | } |
359 | } | 361 | } |
360 | 362 | ||
363 | mutex_unlock(&client_mutex); | ||
364 | |||
361 | if (ret >= 0) | 365 | if (ret >= 0) |
362 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); | 366 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); |
363 | 367 | ||
@@ -382,6 +386,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf, | |||
382 | if (!buf) | 386 | if (!buf) |
383 | return -ENOMEM; | 387 | return -ENOMEM; |
384 | 388 | ||
389 | mutex_lock(&client_mutex); | ||
390 | |||
385 | list_for_each_entry(component, &component_list, list) { | 391 | list_for_each_entry(component, &component_list, list) { |
386 | list_for_each_entry(dai, &component->dai_list, list) { | 392 | list_for_each_entry(dai, &component->dai_list, list) { |
387 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", | 393 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", |
@@ -395,6 +401,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf, | |||
395 | } | 401 | } |
396 | } | 402 | } |
397 | 403 | ||
404 | mutex_unlock(&client_mutex); | ||
405 | |||
398 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); | 406 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); |
399 | 407 | ||
400 | kfree(buf); | 408 | kfree(buf); |
@@ -418,6 +426,8 @@ static ssize_t platform_list_read_file(struct file *file, | |||
418 | if (!buf) | 426 | if (!buf) |
419 | return -ENOMEM; | 427 | return -ENOMEM; |
420 | 428 | ||
429 | mutex_lock(&client_mutex); | ||
430 | |||
421 | list_for_each_entry(platform, &platform_list, list) { | 431 | list_for_each_entry(platform, &platform_list, list) { |
422 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", | 432 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", |
423 | platform->component.name); | 433 | platform->component.name); |
@@ -429,6 +439,8 @@ static ssize_t platform_list_read_file(struct file *file, | |||
429 | } | 439 | } |
430 | } | 440 | } |
431 | 441 | ||
442 | mutex_unlock(&client_mutex); | ||
443 | |||
432 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); | 444 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); |
433 | 445 | ||
434 | kfree(buf); | 446 | kfree(buf); |
@@ -836,6 +848,8 @@ static struct snd_soc_component *soc_find_component( | |||
836 | { | 848 | { |
837 | struct snd_soc_component *component; | 849 | struct snd_soc_component *component; |
838 | 850 | ||
851 | lockdep_assert_held(&client_mutex); | ||
852 | |||
839 | list_for_each_entry(component, &component_list, list) { | 853 | list_for_each_entry(component, &component_list, list) { |
840 | if (of_node) { | 854 | if (of_node) { |
841 | if (component->dev->of_node == of_node) | 855 | if (component->dev->of_node == of_node) |
@@ -854,6 +868,8 @@ static struct snd_soc_dai *snd_soc_find_dai( | |||
854 | struct snd_soc_component *component; | 868 | struct snd_soc_component *component; |
855 | struct snd_soc_dai *dai; | 869 | struct snd_soc_dai *dai; |
856 | 870 | ||
871 | lockdep_assert_held(&client_mutex); | ||
872 | |||
857 | /* Find CPU DAI from registered DAIs*/ | 873 | /* Find CPU DAI from registered DAIs*/ |
858 | list_for_each_entry(component, &component_list, list) { | 874 | list_for_each_entry(component, &component_list, list) { |
859 | if (dlc->of_node && component->dev->of_node != dlc->of_node) | 875 | if (dlc->of_node && component->dev->of_node != dlc->of_node) |
@@ -1508,6 +1524,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card) | |||
1508 | struct snd_soc_codec *codec; | 1524 | struct snd_soc_codec *codec; |
1509 | int ret, i, order; | 1525 | int ret, i, order; |
1510 | 1526 | ||
1527 | mutex_lock(&client_mutex); | ||
1511 | mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT); | 1528 | mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT); |
1512 | 1529 | ||
1513 | /* bind DAIs */ | 1530 | /* bind DAIs */ |
@@ -1662,6 +1679,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card) | |||
1662 | card->instantiated = 1; | 1679 | card->instantiated = 1; |
1663 | snd_soc_dapm_sync(&card->dapm); | 1680 | snd_soc_dapm_sync(&card->dapm); |
1664 | mutex_unlock(&card->mutex); | 1681 | mutex_unlock(&card->mutex); |
1682 | mutex_unlock(&client_mutex); | ||
1665 | 1683 | ||
1666 | return 0; | 1684 | return 0; |
1667 | 1685 | ||
@@ -1680,6 +1698,7 @@ card_probe_error: | |||
1680 | 1698 | ||
1681 | base_error: | 1699 | base_error: |
1682 | mutex_unlock(&card->mutex); | 1700 | mutex_unlock(&card->mutex); |
1701 | mutex_unlock(&client_mutex); | ||
1683 | 1702 | ||
1684 | return ret; | 1703 | return ret; |
1685 | } | 1704 | } |
@@ -2713,13 +2732,6 @@ static void snd_soc_component_del_unlocked(struct snd_soc_component *component) | |||
2713 | list_del(&component->list); | 2732 | list_del(&component->list); |
2714 | } | 2733 | } |
2715 | 2734 | ||
2716 | static void snd_soc_component_del(struct snd_soc_component *component) | ||
2717 | { | ||
2718 | mutex_lock(&client_mutex); | ||
2719 | snd_soc_component_del_unlocked(component); | ||
2720 | mutex_unlock(&client_mutex); | ||
2721 | } | ||
2722 | |||
2723 | int snd_soc_register_component(struct device *dev, | 2735 | int snd_soc_register_component(struct device *dev, |
2724 | const struct snd_soc_component_driver *cmpnt_drv, | 2736 | const struct snd_soc_component_driver *cmpnt_drv, |
2725 | struct snd_soc_dai_driver *dai_drv, | 2737 | struct snd_soc_dai_driver *dai_drv, |
@@ -2767,14 +2779,17 @@ void snd_soc_unregister_component(struct device *dev) | |||
2767 | { | 2779 | { |
2768 | struct snd_soc_component *cmpnt; | 2780 | struct snd_soc_component *cmpnt; |
2769 | 2781 | ||
2782 | mutex_lock(&client_mutex); | ||
2770 | list_for_each_entry(cmpnt, &component_list, list) { | 2783 | list_for_each_entry(cmpnt, &component_list, list) { |
2771 | if (dev == cmpnt->dev && cmpnt->registered_as_component) | 2784 | if (dev == cmpnt->dev && cmpnt->registered_as_component) |
2772 | goto found; | 2785 | goto found; |
2773 | } | 2786 | } |
2787 | mutex_unlock(&client_mutex); | ||
2774 | return; | 2788 | return; |
2775 | 2789 | ||
2776 | found: | 2790 | found: |
2777 | snd_soc_component_del(cmpnt); | 2791 | snd_soc_component_del_unlocked(cmpnt); |
2792 | mutex_unlock(&client_mutex); | ||
2778 | snd_soc_component_cleanup(cmpnt); | 2793 | snd_soc_component_cleanup(cmpnt); |
2779 | kfree(cmpnt); | 2794 | kfree(cmpnt); |
2780 | } | 2795 | } |
@@ -2882,10 +2897,14 @@ struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev) | |||
2882 | { | 2897 | { |
2883 | struct snd_soc_platform *platform; | 2898 | struct snd_soc_platform *platform; |
2884 | 2899 | ||
2900 | mutex_lock(&client_mutex); | ||
2885 | list_for_each_entry(platform, &platform_list, list) { | 2901 | list_for_each_entry(platform, &platform_list, list) { |
2886 | if (dev == platform->dev) | 2902 | if (dev == platform->dev) { |
2903 | mutex_unlock(&client_mutex); | ||
2887 | return platform; | 2904 | return platform; |
2905 | } | ||
2888 | } | 2906 | } |
2907 | mutex_unlock(&client_mutex); | ||
2889 | 2908 | ||
2890 | return NULL; | 2909 | return NULL; |
2891 | } | 2910 | } |
@@ -3090,15 +3109,15 @@ void snd_soc_unregister_codec(struct device *dev) | |||
3090 | { | 3109 | { |
3091 | struct snd_soc_codec *codec; | 3110 | struct snd_soc_codec *codec; |
3092 | 3111 | ||
3112 | mutex_lock(&client_mutex); | ||
3093 | list_for_each_entry(codec, &codec_list, list) { | 3113 | list_for_each_entry(codec, &codec_list, list) { |
3094 | if (dev == codec->dev) | 3114 | if (dev == codec->dev) |
3095 | goto found; | 3115 | goto found; |
3096 | } | 3116 | } |
3117 | mutex_unlock(&client_mutex); | ||
3097 | return; | 3118 | return; |
3098 | 3119 | ||
3099 | found: | 3120 | found: |
3100 | |||
3101 | mutex_lock(&client_mutex); | ||
3102 | list_del(&codec->list); | 3121 | list_del(&codec->list); |
3103 | snd_soc_component_del_unlocked(&codec->component); | 3122 | snd_soc_component_del_unlocked(&codec->component); |
3104 | mutex_unlock(&client_mutex); | 3123 | mutex_unlock(&client_mutex); |
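The soc-core.c changes take client_mutex around every walk of the codec, component and platform lists (the debugfs readers, the lookup helpers, card instantiation and the unregister paths) and add lockdep_assert_held() to the internal finders so an unlocked caller is flagged when lockdep is enabled. A sketch of the pattern; the helper name and the looked-up device name are illustrative:

    /* Sketch: list walks over client-registered objects are only valid
     * while client_mutex is held; the assert documents and enforces that. */
    static struct snd_soc_component *find_component_locked(const char *name)
    {
            struct snd_soc_component *component;

            lockdep_assert_held(&client_mutex);
            list_for_each_entry(component, &component_list, list)
                    if (!strcmp(component->name, name))
                            return component;
            return NULL;
    }

    /* callers do: */
    mutex_lock(&client_mutex);
    component = find_component_locked("wm8960.1-001a");   /* name illustrative */
    mutex_unlock(&client_mutex);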
diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c index 05dee690f487..97ed593f6010 100644 --- a/sound/usb/line6/playback.c +++ b/sound/usb/line6/playback.c | |||
@@ -39,7 +39,7 @@ static void change_volume(struct urb *urb_out, int volume[], | |||
39 | for (; p < buf_end; ++p) { | 39 | for (; p < buf_end; ++p) { |
40 | short pv = le16_to_cpu(*p); | 40 | short pv = le16_to_cpu(*p); |
41 | int val = (pv * volume[chn & 1]) >> 8; | 41 | int val = (pv * volume[chn & 1]) >> 8; |
42 | pv = clamp(val, 0x7fff, -0x8000); | 42 | pv = clamp(val, -0x8000, 0x7fff); |
43 | *p = cpu_to_le16(pv); | 43 | *p = cpu_to_le16(pv); |
44 | ++chn; | 44 | ++chn; |
45 | } | 45 | } |
@@ -54,7 +54,7 @@ static void change_volume(struct urb *urb_out, int volume[], | |||
54 | 54 | ||
55 | val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16); | 55 | val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16); |
56 | val = (val * volume[chn & 1]) >> 8; | 56 | val = (val * volume[chn & 1]) >> 8; |
57 | val = clamp(val, 0x7fffff, -0x800000); | 57 | val = clamp(val, -0x800000, 0x7fffff); |
58 | p[0] = val; | 58 | p[0] = val; |
59 | p[1] = val >> 8; | 59 | p[1] = val >> 8; |
60 | p[2] = val >> 16; | 60 | p[2] = val >> 16; |
@@ -126,7 +126,7 @@ static void add_monitor_signal(struct urb *urb_out, unsigned char *signal, | |||
126 | short pov = le16_to_cpu(*po); | 126 | short pov = le16_to_cpu(*po); |
127 | short piv = le16_to_cpu(*pi); | 127 | short piv = le16_to_cpu(*pi); |
128 | int val = pov + ((piv * volume) >> 8); | 128 | int val = pov + ((piv * volume) >> 8); |
129 | pov = clamp(val, 0x7fff, -0x8000); | 129 | pov = clamp(val, -0x8000, 0x7fff); |
130 | *po = cpu_to_le16(pov); | 130 | *po = cpu_to_le16(pov); |
131 | } | 131 | } |
132 | } | 132 | } |
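The three line6 hunks fix the argument order of clamp(): the macro is clamp(val, lo, hi), expanding to min(max(val, lo), hi), so passing the high bound first and the low bound second produces nonsense. A worked sketch for the 16-bit case:

    /* clamp(val, lo, hi) == min(max(val, lo), hi) */
    int val = 40000;                          /* overflowed 16-bit sample */
    short pv;

    pv = clamp(val, -0x8000, 0x7fff);         /* correct: pv == 0x7fff */
    /* clamp(val, 0x7fff, -0x8000) would instead yield -0x8000 for any input */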
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index dc9df007d3e3..337c317ead6f 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c | |||
@@ -192,6 +192,7 @@ static const struct rc_config { | |||
192 | { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */ | 192 | { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */ |
193 | { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */ | 193 | { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */ |
194 | { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ | 194 | { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ |
195 | { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ | ||
195 | { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ | 196 | { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ |
196 | }; | 197 | }; |
197 | 198 | ||
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 67d476548dcf..07f984d5f516 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h | |||
@@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"), | |||
1773 | } | 1773 | } |
1774 | } | 1774 | } |
1775 | }, | 1775 | }, |
1776 | { | ||
1777 | USB_DEVICE(0x0582, 0x0159), | ||
1778 | .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { | ||
1779 | /* .vendor_name = "Roland", */ | ||
1780 | /* .product_name = "UA-22", */ | ||
1781 | .ifnum = QUIRK_ANY_INTERFACE, | ||
1782 | .type = QUIRK_COMPOSITE, | ||
1783 | .data = (const struct snd_usb_audio_quirk[]) { | ||
1784 | { | ||
1785 | .ifnum = 0, | ||
1786 | .type = QUIRK_AUDIO_STANDARD_INTERFACE | ||
1787 | }, | ||
1788 | { | ||
1789 | .ifnum = 1, | ||
1790 | .type = QUIRK_AUDIO_STANDARD_INTERFACE | ||
1791 | }, | ||
1792 | { | ||
1793 | .ifnum = 2, | ||
1794 | .type = QUIRK_MIDI_FIXED_ENDPOINT, | ||
1795 | .data = & (const struct snd_usb_midi_endpoint_info) { | ||
1796 | .out_cables = 0x0001, | ||
1797 | .in_cables = 0x0001 | ||
1798 | } | ||
1799 | }, | ||
1800 | { | ||
1801 | .ifnum = -1 | ||
1802 | } | ||
1803 | } | ||
1804 | } | ||
1805 | }, | ||
1776 | /* this catches most recent vendor-specific Roland devices */ | 1806 | /* this catches most recent vendor-specific Roland devices */ |
1777 | { | 1807 | { |
1778 | .match_flags = USB_DEVICE_ID_MATCH_VENDOR | | 1808 | .match_flags = USB_DEVICE_ID_MATCH_VENDOR | |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 753a47de8459..9a28365126f9 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -1113,8 +1113,13 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, | |||
1113 | 1113 | ||
1114 | bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) | 1114 | bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) |
1115 | { | 1115 | { |
1116 | /* MS Lifecam HD-5000 doesn't support reading the sample rate. */ | 1116 | /* devices which do not support reading the sample rate. */ |
1117 | return chip->usb_id == USB_ID(0x045E, 0x076D); | 1117 | switch (chip->usb_id) { |
1118 | case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ | ||
1119 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ | ||
1120 | return true; | ||
1121 | } | ||
1122 | return false; | ||
1118 | } | 1123 | } |
1119 | 1124 | ||
1120 | /* Marantz/Denon USB DACs need a vendor cmd to switch | 1125 | /* Marantz/Denon USB DACs need a vendor cmd to switch |
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c index 6c14afe8c1b1..db1d3a29d97f 100644 --- a/tools/perf/bench/mem-memcpy.c +++ b/tools/perf/bench/mem-memcpy.c | |||
@@ -289,7 +289,7 @@ static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault) | |||
289 | memcpy_t fn = r->fn.memcpy; | 289 | memcpy_t fn = r->fn.memcpy; |
290 | int i; | 290 | int i; |
291 | 291 | ||
292 | memcpy_alloc_mem(&src, &dst, len); | 292 | memcpy_alloc_mem(&dst, &src, len); |
293 | 293 | ||
294 | if (prefault) | 294 | if (prefault) |
295 | fn(dst, src, len); | 295 | fn(dst, src, len); |
@@ -312,7 +312,7 @@ static double do_memcpy_gettimeofday(const struct routine *r, size_t len, | |||
312 | void *src = NULL, *dst = NULL; | 312 | void *src = NULL, *dst = NULL; |
313 | int i; | 313 | int i; |
314 | 314 | ||
315 | memcpy_alloc_mem(&src, &dst, len); | 315 | memcpy_alloc_mem(&dst, &src, len); |
316 | 316 | ||
317 | if (prefault) | 317 | if (prefault) |
318 | fn(dst, src, len); | 318 | fn(dst, src, len); |
diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch index ff95a68741d1..ac8721ffa6c8 100644 --- a/tools/perf/config/Makefile.arch +++ b/tools/perf/config/Makefile.arch | |||
@@ -21,6 +21,10 @@ ifeq ($(RAW_ARCH),x86_64) | |||
21 | endif | 21 | endif |
22 | endif | 22 | endif |
23 | 23 | ||
24 | ifeq ($(RAW_ARCH),sparc64) | ||
25 | ARCH ?= sparc | ||
26 | endif | ||
27 | |||
24 | ARCH ?= $(RAW_ARCH) | 28 | ARCH ?= $(RAW_ARCH) |
25 | 29 | ||
26 | LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) | 30 | LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) |
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile index 42ac05aaf8ac..b32ff3372514 100644 --- a/tools/perf/config/feature-checks/Makefile +++ b/tools/perf/config/feature-checks/Makefile | |||
@@ -49,7 +49,7 @@ test-hello.bin: | |||
49 | $(BUILD) | 49 | $(BUILD) |
50 | 50 | ||
51 | test-pthread-attr-setaffinity-np.bin: | 51 | test-pthread-attr-setaffinity-np.bin: |
52 | $(BUILD) -Werror -lpthread | 52 | $(BUILD) -D_GNU_SOURCE -Werror -lpthread |
53 | 53 | ||
54 | test-stackprotector-all.bin: | 54 | test-stackprotector-all.bin: |
55 | $(BUILD) -Werror -fstack-protector-all | 55 | $(BUILD) -Werror -fstack-protector-all |
diff --git a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c index 0a0d3ecb4e8a..2b81b72eca23 100644 --- a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c +++ b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c | |||
@@ -5,10 +5,11 @@ int main(void) | |||
5 | { | 5 | { |
6 | int ret = 0; | 6 | int ret = 0; |
7 | pthread_attr_t thread_attr; | 7 | pthread_attr_t thread_attr; |
8 | cpu_set_t cs; | ||
8 | 9 | ||
9 | pthread_attr_init(&thread_attr); | 10 | pthread_attr_init(&thread_attr); |
10 | /* don't care about exact args, just the API itself in libpthread */ | 11 | /* don't care about exact args, just the API itself in libpthread */ |
11 | ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL); | 12 | ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs); |
12 | 13 | ||
13 | return ret; | 14 | return ret; |
14 | } | 15 | } |
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 61bf9128e1f2..9d9db3b296dd 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c | |||
@@ -30,6 +30,8 @@ static int disasm_line__parse(char *line, char **namep, char **rawp); | |||
30 | 30 | ||
31 | static void ins__delete(struct ins_operands *ops) | 31 | static void ins__delete(struct ins_operands *ops) |
32 | { | 32 | { |
33 | if (ops == NULL) | ||
34 | return; | ||
33 | zfree(&ops->source.raw); | 35 | zfree(&ops->source.raw); |
34 | zfree(&ops->source.name); | 36 | zfree(&ops->source.name); |
35 | zfree(&ops->target.raw); | 37 | zfree(&ops->target.raw); |
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c index 47b78b3f0325..6da965bdbc2c 100644 --- a/tools/perf/util/cloexec.c +++ b/tools/perf/util/cloexec.c | |||
@@ -25,6 +25,10 @@ static int perf_flag_probe(void) | |||
25 | if (cpu < 0) | 25 | if (cpu < 0) |
26 | cpu = 0; | 26 | cpu = 0; |
27 | 27 | ||
28 | /* | ||
29 | * Using -1 for the pid is a workaround to avoid gratuitous jump label | ||
30 | * changes. | ||
31 | */ | ||
28 | while (1) { | 32 | while (1) { |
29 | /* check cloexec flag */ | 33 | /* check cloexec flag */ |
30 | fd = sys_perf_event_open(&attr, pid, cpu, -1, | 34 | fd = sys_perf_event_open(&attr, pid, cpu, -1, |
@@ -47,16 +51,24 @@ static int perf_flag_probe(void) | |||
47 | err, strerror_r(err, sbuf, sizeof(sbuf))); | 51 | err, strerror_r(err, sbuf, sizeof(sbuf))); |
48 | 52 | ||
49 | /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */ | 53 | /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */ |
50 | fd = sys_perf_event_open(&attr, pid, cpu, -1, 0); | 54 | while (1) { |
55 | fd = sys_perf_event_open(&attr, pid, cpu, -1, 0); | ||
56 | if (fd < 0 && pid == -1 && errno == EACCES) { | ||
57 | pid = 0; | ||
58 | continue; | ||
59 | } | ||
60 | break; | ||
61 | } | ||
51 | err = errno; | 62 | err = errno; |
52 | 63 | ||
64 | if (fd >= 0) | ||
65 | close(fd); | ||
66 | |||
53 | if (WARN_ONCE(fd < 0 && err != EBUSY, | 67 | if (WARN_ONCE(fd < 0 && err != EBUSY, |
54 | "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n", | 68 | "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n", |
55 | err, strerror_r(err, sbuf, sizeof(sbuf)))) | 69 | err, strerror_r(err, sbuf, sizeof(sbuf)))) |
56 | return -1; | 70 | return -1; |
57 | 71 | ||
58 | close(fd); | ||
59 | |||
60 | return 0; | 72 | return 0; |
61 | } | 73 | } |
62 | 74 | ||
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index c94a9e03ecf1..e99a67632831 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
@@ -28,7 +28,7 @@ struct perf_mmap { | |||
28 | int mask; | 28 | int mask; |
29 | int refcnt; | 29 | int refcnt; |
30 | unsigned int prev; | 30 | unsigned int prev; |
31 | char event_copy[PERF_SAMPLE_MAX_SIZE]; | 31 | char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8))); |
32 | }; | 32 | }; |
33 | 33 | ||
34 | struct perf_evlist { | 34 | struct perf_evlist { |
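The evlist.h change forces 8-byte alignment of event_copy: the buffer is later cast to perf event records containing u64 fields, and on architectures that fault on unaligned accesses a misaligned u64 load traps. A sketch of the idiom, with a stand-in for the real size constant:

    #define EXAMPLE_MAX_SIZE 65536   /* stand-in for PERF_SAMPLE_MAX_SIZE */

    /* Sketch: a char buffer that gets cast to structs with u64 members must
     * itself be 8-byte aligned on strict-alignment architectures. */
    struct mmap_example {
            unsigned int prev;
            char event_copy[EXAMPLE_MAX_SIZE] __attribute__((aligned(8)));
    };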
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index b24f9d8727a8..33b7a2aef713 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c | |||
@@ -11,6 +11,11 @@ | |||
11 | #include <symbol/kallsyms.h> | 11 | #include <symbol/kallsyms.h> |
12 | #include "debug.h" | 12 | #include "debug.h" |
13 | 13 | ||
14 | #ifndef EM_AARCH64 | ||
15 | #define EM_AARCH64 183 /* ARM 64 bit */ | ||
16 | #endif | ||
17 | |||
18 | |||
14 | #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT | 19 | #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT |
15 | extern char *cplus_demangle(const char *, int); | 20 | extern char *cplus_demangle(const char *, int); |
16 | 21 | ||
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index 3ed7c0476d48..2e2ba2efa0d9 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile | |||
@@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c | |||
209 | 209 | ||
210 | $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ) | 210 | $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ) |
211 | $(ECHO) " CC " $@ | 211 | $(ECHO) " CC " $@ |
212 | $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@ | 212 | $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@ |
213 | $(QUIET) $(STRIPCMD) $@ | 213 | $(QUIET) $(STRIPCMD) $@ |
214 | 214 | ||
215 | $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC) | 215 | $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC) |
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 4e511221a0c1..0db571340edb 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
@@ -22,6 +22,14 @@ TARGETS += vm | |||
22 | TARGETS_HOTPLUG = cpu-hotplug | 22 | TARGETS_HOTPLUG = cpu-hotplug |
23 | TARGETS_HOTPLUG += memory-hotplug | 23 | TARGETS_HOTPLUG += memory-hotplug |
24 | 24 | ||
25 | # Clear LDFLAGS and MAKEFLAGS if called from main | ||
26 | # Makefile to avoid test build failures when test | ||
27 | # Makefile doesn't have explicit build rules. | ||
28 | ifeq (1,$(MAKELEVEL)) | ||
29 | undefine LDFLAGS | ||
30 | override MAKEFLAGS = | ||
31 | endif | ||
32 | |||
25 | all: | 33 | all: |
26 | for TARGET in $(TARGETS); do \ | 34 | for TARGET in $(TARGETS); do \ |
27 | make -C $$TARGET; \ | 35 | make -C $$TARGET; \ |
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c index e238c9559caf..8d5d1d2ee7c1 100644 --- a/tools/testing/selftests/exec/execveat.c +++ b/tools/testing/selftests/exec/execveat.c | |||
@@ -30,7 +30,7 @@ static int execveat_(int fd, const char *path, char **argv, char **envp, | |||
30 | #ifdef __NR_execveat | 30 | #ifdef __NR_execveat |
31 | return syscall(__NR_execveat, fd, path, argv, envp, flags); | 31 | return syscall(__NR_execveat, fd, path, argv, envp, flags); |
32 | #else | 32 | #else |
33 | errno = -ENOSYS; | 33 | errno = ENOSYS; |
34 | return -1; | 34 | return -1; |
35 | #endif | 35 | #endif |
36 | } | 36 | } |
@@ -234,6 +234,14 @@ static int run_tests(void) | |||
234 | int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC); | 234 | int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC); |
235 | int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC); | 235 | int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC); |
236 | 236 | ||
237 | /* Check if we have execveat at all, and bail early if not */ | ||
238 | errno = 0; | ||
239 | execveat_(-1, NULL, NULL, NULL, 0); | ||
240 | if (errno == ENOSYS) { | ||
241 | printf("[FAIL] ENOSYS calling execveat - no kernel support?\n"); | ||
242 | return 1; | ||
243 | } | ||
244 | |||
237 | /* Change file position to confirm it doesn't affect anything */ | 245 | /* Change file position to confirm it doesn't affect anything */ |
238 | lseek(fd, 10, SEEK_SET); | 246 | lseek(fd, 10, SEEK_SET); |
239 | 247 | ||
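The execveat selftest fixes assign a positive ENOSYS to errno (errno values are positive; the negative form is the kernel-internal return convention) and probe for the syscall once up front so the suite fails fast on kernels without execveat. A sketch of the errno convention for a raw syscall wrapper (function name illustrative):

    /* Sketch: library convention is "return -1 and set errno to a positive
     * error code"; negative errno values are only used inside the kernel. */
    static int execveat_stub(void)
    {
            errno = ENOSYS;   /* not -ENOSYS */
            return -1;
    }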
diff --git a/tools/thermal/tmon/.gitignore b/tools/thermal/tmon/.gitignore new file mode 100644 index 000000000000..06e96be65276 --- /dev/null +++ b/tools/thermal/tmon/.gitignore | |||
@@ -0,0 +1 @@ | |||
/tmon | |||
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile index e775adcbd29f..0788621c8d76 100644 --- a/tools/thermal/tmon/Makefile +++ b/tools/thermal/tmon/Makefile | |||
@@ -2,8 +2,8 @@ VERSION = 1.0 | |||
2 | 2 | ||
3 | BINDIR=usr/bin | 3 | BINDIR=usr/bin |
4 | WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int | 4 | WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int |
5 | CFLAGS= -O1 ${WARNFLAGS} -fstack-protector | 5 | CFLAGS+= -O1 ${WARNFLAGS} -fstack-protector |
6 | CC=gcc | 6 | CC=$(CROSS_COMPILE)gcc |
7 | 7 | ||
8 | CFLAGS+=-D VERSION=\"$(VERSION)\" | 8 | CFLAGS+=-D VERSION=\"$(VERSION)\" |
9 | LDFLAGS+= | 9 | LDFLAGS+= |
@@ -16,12 +16,21 @@ INSTALL_CONFIGFILE=install -m 644 -p | |||
16 | CONFIG_FILE= | 16 | CONFIG_FILE= |
17 | CONFIG_PATH= | 17 | CONFIG_PATH= |
18 | 18 | ||
19 | # Static builds might require -ltinfo, for instance | ||
20 | ifneq ($(findstring -static, $(LDFLAGS)),) | ||
21 | STATIC := --static | ||
22 | endif | ||
23 | |||
24 | TMON_LIBS=-lm -lpthread | ||
25 | TMON_LIBS += $(shell pkg-config --libs $(STATIC) panelw ncursesw 2> /dev/null || \ | ||
26 | pkg-config --libs $(STATIC) panel ncurses 2> /dev/null || \ | ||
27 | echo -lpanel -lncurses) | ||
19 | 28 | ||
20 | OBJS = tmon.o tui.o sysfs.o pid.o | 29 | OBJS = tmon.o tui.o sysfs.o pid.o |
21 | OBJS += | 30 | OBJS += |
22 | 31 | ||
23 | tmon: $(OBJS) Makefile tmon.h | 32 | tmon: $(OBJS) Makefile tmon.h |
24 | $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread | 33 | $(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) -o $(TARGET) $(TMON_LIBS) |
25 | 34 | ||
26 | valgrind: tmon | 35 | valgrind: tmon |
27 | sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null | 36 | sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null |
diff --git a/tools/thermal/tmon/tmon.8 b/tools/thermal/tmon/tmon.8 index 0be727cb9892..02d5179803aa 100644 --- a/tools/thermal/tmon/tmon.8 +++ b/tools/thermal/tmon/tmon.8 | |||
@@ -55,6 +55,8 @@ The \fB-l --log\fP option write data to /var/tmp/tmon.log | |||
55 | .PP | 55 | .PP |
56 | The \fB-t --time-interval\fP option sets the polling interval in seconds | 56 | The \fB-t --time-interval\fP option sets the polling interval in seconds |
57 | .PP | 57 | .PP |
58 | The \fB-T --target-temp\fP option sets the initial target temperature | ||
59 | .PP | ||
58 | The \fB-v --version\fP option shows the version of \fBtmon \fP | 60 | The \fB-v --version\fP option shows the version of \fBtmon \fP |
59 | .PP | 61 | .PP |
60 | The \fB-z --zone\fP option sets the target thermal zone instance to be controlled | 62 | The \fB-z --zone\fP option sets the target thermal zone instance to be controlled |
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c index 09b7c3218334..9aa19652e8e8 100644 --- a/tools/thermal/tmon/tmon.c +++ b/tools/thermal/tmon/tmon.c | |||
@@ -64,6 +64,7 @@ void usage() | |||
64 | printf(" -h, --help show this help message\n"); | 64 | printf(" -h, --help show this help message\n"); |
65 | printf(" -l, --log log data to /var/tmp/tmon.log\n"); | 65 | printf(" -l, --log log data to /var/tmp/tmon.log\n"); |
66 | printf(" -t, --time-interval sampling time interval, > 1 sec.\n"); | 66 | printf(" -t, --time-interval sampling time interval, > 1 sec.\n"); |
67 | printf(" -T, --target-temp initial target temperature\n"); | ||
67 | printf(" -v, --version show version\n"); | 68 | printf(" -v, --version show version\n"); |
68 | printf(" -z, --zone target thermal zone id\n"); | 69 | printf(" -z, --zone target thermal zone id\n"); |
69 | 70 | ||
@@ -219,6 +220,7 @@ static struct option opts[] = { | |||
219 | { "control", 1, NULL, 'c' }, | 220 | { "control", 1, NULL, 'c' }, |
220 | { "daemon", 0, NULL, 'd' }, | 221 | { "daemon", 0, NULL, 'd' }, |
221 | { "time-interval", 1, NULL, 't' }, | 222 | { "time-interval", 1, NULL, 't' }, |
223 | { "target-temp", 1, NULL, 'T' }, | ||
222 | { "log", 0, NULL, 'l' }, | 224 | { "log", 0, NULL, 'l' }, |
223 | { "help", 0, NULL, 'h' }, | 225 | { "help", 0, NULL, 'h' }, |
224 | { "version", 0, NULL, 'v' }, | 226 | { "version", 0, NULL, 'v' }, |
@@ -231,7 +233,7 @@ int main(int argc, char **argv) | |||
231 | { | 233 | { |
232 | int err = 0; | 234 | int err = 0; |
233 | int id2 = 0, c; | 235 | int id2 = 0, c; |
234 | double yk = 0.0; /* controller output */ | 236 | double yk = 0.0, temp; /* controller output */ |
235 | int target_tz_index; | 237 | int target_tz_index; |
236 | 238 | ||
237 | if (geteuid() != 0) { | 239 | if (geteuid() != 0) { |
@@ -239,7 +241,7 @@ int main(int argc, char **argv) | |||
239 | exit(EXIT_FAILURE); | 241 | exit(EXIT_FAILURE); |
240 | } | 242 | } |
241 | 243 | ||
242 | while ((c = getopt_long(argc, argv, "c:dlht:vgz:", opts, &id2)) != -1) { | 244 | while ((c = getopt_long(argc, argv, "c:dlht:T:vgz:", opts, &id2)) != -1) { |
243 | switch (c) { | 245 | switch (c) { |
244 | case 'c': | 246 | case 'c': |
245 | no_control = 0; | 247 | no_control = 0; |
@@ -254,6 +256,14 @@ int main(int argc, char **argv) | |||
254 | if (ticktime < 1) | 256 | if (ticktime < 1) |
255 | ticktime = 1; | 257 | ticktime = 1; |
256 | break; | 258 | break; |
259 | case 'T': | ||
260 | temp = strtod(optarg, NULL); | ||
261 | if (temp < 0) { | ||
262 | fprintf(stderr, "error: temperature must be positive\n"); | ||
263 | return 1; | ||
264 | } | ||
265 | target_temp_user = temp; | ||
266 | break; | ||
257 | case 'l': | 267 | case 'l': |
258 | printf("Logging data to /var/tmp/tmon.log\n"); | 268 | printf("Logging data to /var/tmp/tmon.log\n"); |
259 | logging = 1; | 269 | logging = 1; |
diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c index 89f8ef0e15c8..b5d1c6b22dd3 100644 --- a/tools/thermal/tmon/tui.c +++ b/tools/thermal/tmon/tui.c | |||
@@ -30,6 +30,18 @@ | |||
30 | 30 | ||
31 | #include "tmon.h" | 31 | #include "tmon.h" |
32 | 32 | ||
33 | #define min(x, y) ({ \ | ||
34 | typeof(x) _min1 = (x); \ | ||
35 | typeof(y) _min2 = (y); \ | ||
36 | (void) (&_min1 == &_min2); \ | ||
37 | _min1 < _min2 ? _min1 : _min2; }) | ||
38 | |||
39 | #define max(x, y) ({ \ | ||
40 | typeof(x) _max1 = (x); \ | ||
41 | typeof(y) _max2 = (y); \ | ||
42 | (void) (&_max1 == &_max2); \ | ||
43 | _max1 > _max2 ? _max1 : _max2; }) | ||
44 | |||
33 | static PANEL *data_panel; | 45 | static PANEL *data_panel; |
34 | static PANEL *dialogue_panel; | 46 | static PANEL *dialogue_panel; |
35 | static PANEL *top; | 47 | static PANEL *top; |
@@ -98,6 +110,18 @@ void write_status_bar(int x, char *line) | |||
98 | wrefresh(status_bar_window); | 110 | wrefresh(status_bar_window); |
99 | } | 111 | } |
100 | 112 | ||
113 | /* wrap at 5 */ | ||
114 | #define DIAG_DEV_ROWS 5 | ||
115 | /* | ||
116 | * list cooling devices + "set temp" entry; wraps after 5 rows, if they fit | ||
117 | */ | ||
118 | static int diag_dev_rows(void) | ||
119 | { | ||
120 | int entries = ptdata.nr_cooling_dev + 1; | ||
121 | int rows = max(DIAG_DEV_ROWS, (entries + 1) / 2); | ||
122 | return min(rows, entries); | ||
123 | } | ||
124 | |||
101 | void setup_windows(void) | 125 | void setup_windows(void) |
102 | { | 126 | { |
103 | int y_begin = 1; | 127 | int y_begin = 1; |
@@ -122,7 +146,7 @@ void setup_windows(void) | |||
122 | * dialogue window is a pop-up, when needed it lays on top of cdev win | 146 | * dialogue window is a pop-up, when needed it lays on top of cdev win |
123 | */ | 147 | */ |
124 | 148 | ||
125 | dialogue_window = subwin(stdscr, ptdata.nr_cooling_dev+5, maxx-50, | 149 | dialogue_window = subwin(stdscr, diag_dev_rows() + 5, maxx-50, |
126 | DIAG_Y, DIAG_X); | 150 | DIAG_Y, DIAG_X); |
127 | 151 | ||
128 | thermal_data_window = subwin(stdscr, ptdata.nr_tz_sensor * | 152 | thermal_data_window = subwin(stdscr, ptdata.nr_tz_sensor * |
@@ -258,21 +282,26 @@ void show_cooling_device(void) | |||
258 | } | 282 | } |
259 | 283 | ||
260 | const char DIAG_TITLE[] = "[ TUNABLES ]"; | 284 | const char DIAG_TITLE[] = "[ TUNABLES ]"; |
261 | #define DIAG_DEV_ROWS 5 | ||
262 | void show_dialogue(void) | 285 | void show_dialogue(void) |
263 | { | 286 | { |
264 | int j, x = 0, y = 0; | 287 | int j, x = 0, y = 0; |
288 | int rows, cols; | ||
265 | WINDOW *w = dialogue_window; | 289 | WINDOW *w = dialogue_window; |
266 | 290 | ||
267 | if (tui_disabled || !w) | 291 | if (tui_disabled || !w) |
268 | return; | 292 | return; |
269 | 293 | ||
294 | getmaxyx(w, rows, cols); | ||
295 | |||
296 | /* Silence compiler 'unused' warnings */ | ||
297 | (void)cols; | ||
298 | |||
270 | werase(w); | 299 | werase(w); |
271 | box(w, 0, 0); | 300 | box(w, 0, 0); |
272 | mvwprintw(w, 0, maxx/4, DIAG_TITLE); | 301 | mvwprintw(w, 0, maxx/4, DIAG_TITLE); |
273 | /* list all the available tunables */ | 302 | /* list all the available tunables */ |
274 | for (j = 0; j <= ptdata.nr_cooling_dev; j++) { | 303 | for (j = 0; j <= ptdata.nr_cooling_dev; j++) { |
275 | y = j % DIAG_DEV_ROWS; | 304 | y = j % diag_dev_rows(); |
276 | if (y == 0 && j != 0) | 305 | if (y == 0 && j != 0) |
277 | x += 20; | 306 | x += 20; |
278 | if (j == ptdata.nr_cooling_dev) | 307 | if (j == ptdata.nr_cooling_dev) |
@@ -283,12 +312,10 @@ void show_dialogue(void) | |||
283 | ptdata.cdi[j].type, ptdata.cdi[j].instance); | 312 | ptdata.cdi[j].type, ptdata.cdi[j].instance); |
284 | } | 313 | } |
285 | wattron(w, A_BOLD); | 314 | wattron(w, A_BOLD); |
286 | mvwprintw(w, DIAG_DEV_ROWS+1, 1, "Enter Choice [A-Z]?"); | 315 | mvwprintw(w, diag_dev_rows()+1, 1, "Enter Choice [A-Z]?"); |
287 | wattroff(w, A_BOLD); | 316 | wattroff(w, A_BOLD); |
288 | /* y size of dialogue win is nr cdev + 5, so print legend | 317 | /* print legend at the bottom line */ |
289 | * at the bottom line | 318 | mvwprintw(w, rows - 2, 1, |
290 | */ | ||
291 | mvwprintw(w, ptdata.nr_cooling_dev+3, 1, | ||
292 | "Legend: A=Active, P=Passive, C=Critical"); | 319 | "Legend: A=Active, P=Passive, C=Critical"); |
293 | 320 | ||
294 | wrefresh(dialogue_window); | 321 | wrefresh(dialogue_window); |
@@ -437,7 +464,7 @@ static void handle_input_choice(int ch) | |||
437 | snprintf(buf, sizeof(buf), "New Value for %.10s-%2d: ", | 464 | snprintf(buf, sizeof(buf), "New Value for %.10s-%2d: ", |
438 | ptdata.cdi[cdev_id].type, | 465 | ptdata.cdi[cdev_id].type, |
439 | ptdata.cdi[cdev_id].instance); | 466 | ptdata.cdi[cdev_id].instance); |
440 | write_dialogue_win(buf, DIAG_DEV_ROWS+2, 2); | 467 | write_dialogue_win(buf, diag_dev_rows() + 2, 2); |
441 | handle_input_val(cdev_id); | 468 | handle_input_val(cdev_id); |
442 | } else { | 469 | } else { |
443 | snprintf(buf, sizeof(buf), "Invalid selection %d", ch); | 470 | snprintf(buf, sizeof(buf), "Invalid selection %d", ch); |
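The tui.c changes replace the fixed DIAG_DEV_ROWS layout with diag_dev_rows(), which sizes the dialogue by the number of cooling devices plus the extra "set temp" entry: at least 5 rows, half the entries (rounded up) once the list wraps into a second column, and never more rows than entries. For example, 7 cooling devices give 8 entries and max(5, 4) = 5 rows, while 12 devices give 13 entries and max(5, 7) = 7 rows. The min()/max() helpers added at the top of the file mirror the kernel's type-checked statement-expression macros; a usage sketch:

    /* Sketch: the min/max macros added above evaluate each argument once and
     * warn at compile time if the two arguments have different types. */
    int entries = ptdata.nr_cooling_dev + 1;          /* cooling devs + "set temp" */
    int rows = max(DIAG_DEV_ROWS, (entries + 1) / 2); /* wrap into two columns */
    rows = min(rows, entries);                        /* never taller than the list */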
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c index a0a7b5d1a070..f9b9c7c51372 100644 --- a/virt/kvm/arm/vgic-v2.c +++ b/virt/kvm/arm/vgic-v2.c | |||
@@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, | |||
72 | { | 72 | { |
73 | if (!(lr_desc.state & LR_STATE_MASK)) | 73 | if (!(lr_desc.state & LR_STATE_MASK)) |
74 | vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); | 74 | vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); |
75 | else | ||
76 | vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr); | ||
75 | } | 77 | } |
76 | 78 | ||
77 | static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) | 79 | static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) |
@@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) | |||
84 | return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; | 86 | return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; |
85 | } | 87 | } |
86 | 88 | ||
89 | static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu) | ||
90 | { | ||
91 | vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0; | ||
92 | } | ||
93 | |||
87 | static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) | 94 | static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) |
88 | { | 95 | { |
89 | u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; | 96 | u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; |
@@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = { | |||
148 | .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, | 155 | .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, |
149 | .get_elrsr = vgic_v2_get_elrsr, | 156 | .get_elrsr = vgic_v2_get_elrsr, |
150 | .get_eisr = vgic_v2_get_eisr, | 157 | .get_eisr = vgic_v2_get_eisr, |
158 | .clear_eisr = vgic_v2_clear_eisr, | ||
151 | .get_interrupt_status = vgic_v2_get_interrupt_status, | 159 | .get_interrupt_status = vgic_v2_get_interrupt_status, |
152 | .enable_underflow = vgic_v2_enable_underflow, | 160 | .enable_underflow = vgic_v2_enable_underflow, |
153 | .disable_underflow = vgic_v2_disable_underflow, | 161 | .disable_underflow = vgic_v2_disable_underflow, |
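Note: the vgic-v2 change makes the LR/ELRSR bookkeeping symmetric: the sync helper now clears the per-LR bit again when a list register goes back into use, instead of only ever setting it when the LR drains. Stripped of the kvm/vgic types, the idiom is just the following minimal sketch; the caller derives lr_is_empty from the LR's pending/active state.

#include <stdint.h>

/* Keep the "empty list register" bitmap consistent with the LR state. */
static void sync_lr_elrsr_sketch(uint64_t *elrsr, int lr, int lr_is_empty)
{
	if (lr_is_empty)
		*elrsr |= 1ULL << lr;		/* LR drained: mark it reusable */
	else
		*elrsr &= ~(1ULL << lr);	/* LR (re)populated: drop the bit */
}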
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 3a62d8a9a2c6..dff06021e748 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c | |||
@@ -104,6 +104,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, | |||
104 | { | 104 | { |
105 | if (!(lr_desc.state & LR_STATE_MASK)) | 105 | if (!(lr_desc.state & LR_STATE_MASK)) |
106 | vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); | 106 | vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); |
107 | else | ||
108 | vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr); | ||
107 | } | 109 | } |
108 | 110 | ||
109 | static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) | 111 | static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) |
@@ -116,6 +118,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu) | |||
116 | return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; | 118 | return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; |
117 | } | 119 | } |
118 | 120 | ||
121 | static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu) | ||
122 | { | ||
123 | vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0; | ||
124 | } | ||
125 | |||
119 | static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) | 126 | static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) |
120 | { | 127 | { |
121 | u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; | 128 | u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; |
@@ -192,6 +199,7 @@ static const struct vgic_ops vgic_v3_ops = { | |||
192 | .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, | 199 | .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, |
193 | .get_elrsr = vgic_v3_get_elrsr, | 200 | .get_elrsr = vgic_v3_get_elrsr, |
194 | .get_eisr = vgic_v3_get_eisr, | 201 | .get_eisr = vgic_v3_get_eisr, |
202 | .clear_eisr = vgic_v3_clear_eisr, | ||
195 | .get_interrupt_status = vgic_v3_get_interrupt_status, | 203 | .get_interrupt_status = vgic_v3_get_interrupt_status, |
196 | .enable_underflow = vgic_v3_enable_underflow, | 204 | .enable_underflow = vgic_v3_enable_underflow, |
197 | .disable_underflow = vgic_v3_disable_underflow, | 205 | .disable_underflow = vgic_v3_disable_underflow, |
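Note: the GICv3 backend gets the same two-line elrsr fix, and both backends grow a clear_eisr hook wired into their vgic_ops tables. Adding a hook this way always touches three places: the backend function, the shared ops struct, and each backend's table. A compact stand-alone illustration of that pattern follows; the names are simplified and are not the kernel's definitions.

#include <stdint.h>

struct vcpu_sketch { uint64_t eisr; };

static uint64_t v2_get_eisr(const struct vcpu_sketch *v) { return v->eisr; }
static void v2_clear_eisr(struct vcpu_sketch *v) { v->eisr = 0; }

/* One shared hook table per backend (v2 and v3 in the real code). */
struct vgic_ops_sketch {
	uint64_t (*get_eisr)(const struct vcpu_sketch *v);
	void (*clear_eisr)(struct vcpu_sketch *v);
};

static const struct vgic_ops_sketch v2_ops_sketch = {
	.get_eisr   = v2_get_eisr,
	.clear_eisr = v2_clear_eisr,
};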
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 0cc6ab6005a0..c9f60f524588 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
@@ -883,6 +883,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) | |||
883 | return vgic_ops->get_eisr(vcpu); | 883 | return vgic_ops->get_eisr(vcpu); |
884 | } | 884 | } |
885 | 885 | ||
886 | static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu) | ||
887 | { | ||
888 | vgic_ops->clear_eisr(vcpu); | ||
889 | } | ||
890 | |||
886 | static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) | 891 | static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) |
887 | { | 892 | { |
888 | return vgic_ops->get_interrupt_status(vcpu); | 893 | return vgic_ops->get_interrupt_status(vcpu); |
@@ -922,6 +927,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) | |||
922 | vgic_set_lr(vcpu, lr_nr, vlr); | 927 | vgic_set_lr(vcpu, lr_nr, vlr); |
923 | clear_bit(lr_nr, vgic_cpu->lr_used); | 928 | clear_bit(lr_nr, vgic_cpu->lr_used); |
924 | vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; | 929 | vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; |
930 | vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); | ||
925 | } | 931 | } |
926 | 932 | ||
927 | /* | 933 | /* |
@@ -978,6 +984,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
978 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); | 984 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); |
979 | vlr.state |= LR_STATE_PENDING; | 985 | vlr.state |= LR_STATE_PENDING; |
980 | vgic_set_lr(vcpu, lr, vlr); | 986 | vgic_set_lr(vcpu, lr, vlr); |
987 | vgic_sync_lr_elrsr(vcpu, lr, vlr); | ||
981 | return true; | 988 | return true; |
982 | } | 989 | } |
983 | } | 990 | } |
@@ -999,6 +1006,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
999 | vlr.state |= LR_EOI_INT; | 1006 | vlr.state |= LR_EOI_INT; |
1000 | 1007 | ||
1001 | vgic_set_lr(vcpu, lr, vlr); | 1008 | vgic_set_lr(vcpu, lr, vlr); |
1009 | vgic_sync_lr_elrsr(vcpu, lr, vlr); | ||
1002 | 1010 | ||
1003 | return true; | 1011 | return true; |
1004 | } | 1012 | } |
@@ -1136,6 +1144,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1136 | if (status & INT_STATUS_UNDERFLOW) | 1144 | if (status & INT_STATUS_UNDERFLOW) |
1137 | vgic_disable_underflow(vcpu); | 1145 | vgic_disable_underflow(vcpu); |
1138 | 1146 | ||
1147 | /* | ||
1148 | * In the next iterations of the vcpu loop, if we sync the vgic state | ||
1149 | * after flushing it, but before entering the guest (this happens for | ||
1150 | * pending signals and vmid rollovers), then make sure we don't pick | ||
1151 | * up any old maintenance interrupts here. | ||
1152 | */ | ||
1153 | vgic_clear_eisr(vcpu); | ||
1154 | |||
1139 | return level_pending; | 1155 | return level_pending; |
1140 | } | 1156 | } |
1141 | 1157 | ||
@@ -1583,8 +1599,10 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) | |||
1583 | * emulation. So check this here again. KVM_CREATE_DEVICE does | 1599 | * emulation. So check this here again. KVM_CREATE_DEVICE does |
1584 | * the proper checks already. | 1600 | * the proper checks already. |
1585 | */ | 1601 | */ |
1586 | if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) | 1602 | if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) { |
1587 | return -ENODEV; | 1603 | ret = -ENODEV; |
1604 | goto out; | ||
1605 | } | ||
1588 | 1606 | ||
1589 | /* | 1607 | /* |
1590 | * Any time a vcpu is run, vcpu_load is called which tries to grab the | 1608 | * Any time a vcpu is run, vcpu_load is called which tries to grab the |
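Note: besides wiring up vgic_clear_eisr() and calling vgic_sync_lr_elrsr() after every vgic_set_lr(), the kvm_vgic_create() hunk turns a bare "return -ENODEV" into "ret = -ENODEV; goto out;", so the failure leaves through the function's common exit label and undoes whatever was set up before the check (the surrounding context suggests a lock is already held at this point). The shape of that fix, as a self-contained user-space sketch with purely illustrative names:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every failure path after the lock is taken must go through "out:". */
static int create_device_sketch(int type, int can_emulate_v2)
{
	int ret = 0;

	pthread_mutex_lock(&dev_lock);

	if (type == 2 && !can_emulate_v2) {
		ret = -ENODEV;	/* was: return -ENODEV; -- would leak the lock */
		goto out;
	}

	/* ... device setup ... */

out:
	pthread_mutex_unlock(&dev_lock);
	return ret;
}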
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a1093700f3a4..cc6a25d95fbf 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -471,7 +471,7 @@ static struct kvm *kvm_create_vm(unsigned long type) | |||
471 | BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); | 471 | BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); |
472 | 472 | ||
473 | r = -ENOMEM; | 473 | r = -ENOMEM; |
474 | kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); | 474 | kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots)); |
475 | if (!kvm->memslots) | 475 | if (!kvm->memslots) |
476 | goto out_err_no_srcu; | 476 | goto out_err_no_srcu; |
477 | 477 | ||
@@ -522,7 +522,7 @@ out_err_no_srcu: | |||
522 | out_err_no_disable: | 522 | out_err_no_disable: |
523 | for (i = 0; i < KVM_NR_BUSES; i++) | 523 | for (i = 0; i < KVM_NR_BUSES; i++) |
524 | kfree(kvm->buses[i]); | 524 | kfree(kvm->buses[i]); |
525 | kfree(kvm->memslots); | 525 | kvfree(kvm->memslots); |
526 | kvm_arch_free_vm(kvm); | 526 | kvm_arch_free_vm(kvm); |
527 | return ERR_PTR(r); | 527 | return ERR_PTR(r); |
528 | } | 528 | } |
@@ -578,7 +578,7 @@ static void kvm_free_physmem(struct kvm *kvm) | |||
578 | kvm_for_each_memslot(memslot, slots) | 578 | kvm_for_each_memslot(memslot, slots) |
579 | kvm_free_physmem_slot(kvm, memslot, NULL); | 579 | kvm_free_physmem_slot(kvm, memslot, NULL); |
580 | 580 | ||
581 | kfree(kvm->memslots); | 581 | kvfree(kvm->memslots); |
582 | } | 582 | } |
583 | 583 | ||
584 | static void kvm_destroy_devices(struct kvm *kvm) | 584 | static void kvm_destroy_devices(struct kvm *kvm) |
@@ -871,10 +871,10 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
871 | goto out_free; | 871 | goto out_free; |
872 | } | 872 | } |
873 | 873 | ||
874 | slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), | 874 | slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); |
875 | GFP_KERNEL); | ||
876 | if (!slots) | 875 | if (!slots) |
877 | goto out_free; | 876 | goto out_free; |
877 | memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); | ||
878 | 878 | ||
879 | if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { | 879 | if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { |
880 | slot = id_to_memslot(slots, mem->slot); | 880 | slot = id_to_memslot(slots, mem->slot); |
@@ -917,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
917 | kvm_arch_commit_memory_region(kvm, mem, &old, change); | 917 | kvm_arch_commit_memory_region(kvm, mem, &old, change); |
918 | 918 | ||
919 | kvm_free_physmem_slot(kvm, &old, &new); | 919 | kvm_free_physmem_slot(kvm, &old, &new); |
920 | kfree(old_memslots); | 920 | kvfree(old_memslots); |
921 | 921 | ||
922 | /* | 922 | /* |
923 | * IOMMU mapping: New slots need to be mapped. Old slots need to be | 923 | * IOMMU mapping: New slots need to be mapped. Old slots need to be |
@@ -936,7 +936,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
936 | return 0; | 936 | return 0; |
937 | 937 | ||
938 | out_slots: | 938 | out_slots: |
939 | kfree(slots); | 939 | kvfree(slots); |
940 | out_free: | 940 | out_free: |
941 | kvm_free_physmem_slot(kvm, &new, &old); | 941 | kvm_free_physmem_slot(kvm, &new, &old); |
942 | out: | 942 | out: |
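Note: the kvm_main.c hunks switch struct kvm_memslots from plain kzalloc()/kmemdup()/kfree() to kvm_kvzalloc() and kvfree(): the structure can be large enough that a physically contiguous allocation may fail, so the allocation may come from vmalloc instead, and the matching free has to cope with either origin (which is also why the kmemdup() becomes an allocation plus an explicit memcpy()). One common way to implement that allocate/free pairing, simplified and not the kernel's exact implementation:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Try the contiguous allocator first, fall back to vmalloc for big objects. */
static void *kvzalloc_sketch(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/* Free with whichever allocator actually provided the memory. */
static void kvfree_sketch(const void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}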
@@ -2492,6 +2492,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) | |||
2492 | case KVM_CAP_SIGNAL_MSI: | 2492 | case KVM_CAP_SIGNAL_MSI: |
2493 | #endif | 2493 | #endif |
2494 | #ifdef CONFIG_HAVE_KVM_IRQFD | 2494 | #ifdef CONFIG_HAVE_KVM_IRQFD |
2495 | case KVM_CAP_IRQFD: | ||
2495 | case KVM_CAP_IRQFD_RESAMPLE: | 2496 | case KVM_CAP_IRQFD_RESAMPLE: |
2496 | #endif | 2497 | #endif |
2497 | case KVM_CAP_CHECK_EXTENSION_VM: | 2498 | case KVM_CAP_CHECK_EXTENSION_VM: |