372 files changed, 6281 insertions, 2615 deletions
diff --git a/Documentation/devicetree/bindings/spi/efm32-spi.txt b/Documentation/devicetree/bindings/spi/efm32-spi.txt
new file mode 100644
index 000000000000..a590ca51be75
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/efm32-spi.txt
@@ -0,0 +1,34 @@
+* Energy Micro EFM32 SPI
+
+Required properties:
+- #address-cells: see spi-bus.txt
+- #size-cells: see spi-bus.txt
+- compatible: should be "efm32,spi"
+- reg: Offset and length of the register set for the controller
+- interrupts: pair specifying rx and tx irq
+- clocks: phandle to the spi clock
+- cs-gpios: see spi-bus.txt
+- location: Value to write to the ROUTE register's LOCATION bitfield to configure the pinmux for the device, see datasheet for values.
+
+Example:
+
+spi1: spi@0x4000c400 { /* USART1 */
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "efm32,spi";
+	reg = <0x4000c400 0x400>;
+	interrupts = <15 16>;
+	clocks = <&cmu 20>;
+	cs-gpios = <&gpio 51 1>; // D3
+	location = <1>;
+	status = "ok";
+
+	ks8851@0 {
+		compatible = "ks8851";
+		spi-max-frequency = <6000000>;
+		reg = <0>;
+		interrupt-parent = <&boardfpga>;
+		interrupts = <4>;
+		status = "ok";
+	};
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt
index 296015e3c632..800dafe5b01b 100644
--- a/Documentation/devicetree/bindings/spi/spi-bus.txt
+++ b/Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -55,6 +55,16 @@ contain the following properties.
 			chip select active high
 - spi-3wire       - (optional) Empty property indicating device requires
 			3-wire mode.
+- spi-tx-bus-width - (optional) The bus width(number of data wires) that
+                      used for MOSI. Defaults to 1 if not present.
+- spi-rx-bus-width - (optional) The bus width(number of data wires) that
+                      used for MISO. Defaults to 1 if not present.
+
+Some SPI controllers and devices support Dual and Quad SPI transfer mode.
+It allows data in SPI system transfered in 2 wires(DUAL) or 4 wires(QUAD).
+Now the value that spi-tx-bus-width and spi-rx-bus-width can receive is
+only 1(SINGLE), 2(DUAL) and 4(QUAD).
+Dual/Quad mode is not allowed when 3-wire mode is used.
 
 If a gpio chipselect is used for the SPI slave the gpio number will be passed
 via the cs_gpio
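Note (illustration only, not part of the patch): a driver consuming the new per-slave properties would typically read them from the slave's device node and fall back to single-wire mode when they are absent. A minimal C sketch, assuming only the standard of_property_read_u32() helper; the spi_get_bus_width() name is hypothetical:

	#include <linux/of.h>

	/* Hypothetical helper: read "spi-tx-bus-width" or "spi-rx-bus-width"
	 * from a slave node. Per the binding text above, only 1 (SINGLE),
	 * 2 (DUAL) and 4 (QUAD) are valid, and a missing property means 1. */
	static int spi_get_bus_width(struct device_node *slave, const char *prop)
	{
		u32 width = 1;		/* default when the property is absent */

		of_property_read_u32(slave, prop, &width);

		switch (width) {
		case 1:
		case 2:
		case 4:
			return width;
		default:
			return -EINVAL;	/* value outside SINGLE/DUAL/QUAD */
		}
	}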
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
new file mode 100644
index 000000000000..a1fb3035a42b
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
@@ -0,0 +1,42 @@
+ARM Freescale DSPI controller
+
+Required properties:
+- compatible : "fsl,vf610-dspi"
+- reg : Offset and length of the register set for the device
+- interrupts : Should contain SPI controller interrupt
+- clocks: from common clock binding: handle to dspi clock.
+- clock-names: from common clock binding: Shall be "dspi".
+- pinctrl-0: pin control group to be used for this controller.
+- pinctrl-names: must contain a "default" entry.
+- spi-num-chipselects : the number of the chipselect signals.
+- bus-num : the slave chip chipselect signal number.
+Example:
+
+dspi0@4002c000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,vf610-dspi";
+	reg = <0x4002c000 0x1000>;
+	interrupts = <0 67 0x04>;
+	clocks = <&clks VF610_CLK_DSPI0>;
+	clock-names = "dspi";
+	spi-num-chipselects = <5>;
+	bus-num = <0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_dspi0_1>;
+	status = "okay";
+
+	sflash: at26df081a@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "atmel,at26df081a";
+		spi-max-frequency = <16000000>;
+		spi-cpol;
+		spi-cpha;
+		reg = <0>;
+		linux,modalias = "m25p80";
+		modal = "at26df081a";
+	};
+};
+
+
diff --git a/Documentation/devicetree/bindings/spi/ti_qspi.txt b/Documentation/devicetree/bindings/spi/ti_qspi.txt
new file mode 100644
index 000000000000..1f9641ade0b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/ti_qspi.txt
@@ -0,0 +1,22 @@
+TI QSPI controller.
+
+Required properties:
+- compatible : should be "ti,dra7xxx-qspi" or "ti,am4372-qspi".
+- reg: Should contain QSPI registers location and length.
+- #address-cells, #size-cells : Must be present if the device has sub-nodes
+- ti,hwmods: Name of the hwmod associated to the QSPI
+
+Recommended properties:
+- spi-max-frequency: Definition as per
+                     Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+qspi: qspi@4b300000 {
+	compatible = "ti,dra7xxx-qspi";
+	reg = <0x4b300000 0x100>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	spi-max-frequency = <25000000>;
+	ti,hwmods = "qspi";
+};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 15356aca938c..7f9d4f53882c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2953,7 +2953,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			improve throughput, but will also increase the
 			amount of memory reserved for use by the client.
 
-	swapaccount[=0|1]
+	swapaccount=[0|1]
 			[KNL] Enable accounting of swap in memory resource
 			controller if no parameter or 1 is given or disable
 			it if 0 is given (See Documentation/cgroups/memory.txt)
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index 2331eb214146..f21edb983413 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -215,7 +215,7 @@ So for example arch/.../mach-*/board-*.c files might have code like:
 	/* if your mach-* infrastructure doesn't support kernels that can
 	 * run on multiple boards, pdata wouldn't benefit from "__init".
 	 */
-	static struct mysoc_spi_data __initdata pdata = { ... };
+	static struct mysoc_spi_data pdata __initdata = { ... };
 
 	static __init board_init(void)
 	{
diff --git a/MAINTAINERS b/MAINTAINERS
index 7cacc88dc79c..8197fbd70a3e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5581,9 +5581,9 @@ S:	Maintained
 F:	drivers/media/tuners/mxl5007t.*
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:	Andrew Gallatin <gallatin@myri.com>
+M:	Hyong-Youb Kim <hykim@myri.com>
 L:	netdev@vger.kernel.org
-W:	http://www.myri.com/scs/download-Myri10GE.html
+W:	https://www.myricom.com/support/downloads/myri10ge.html
 S:	Supported
 F:	drivers/net/ethernet/myricom/myri10ge/
 
@@ -5884,7 +5884,7 @@ F:	drivers/i2c/busses/i2c-omap.c
 F:	include/linux/i2c-omap.h
 
 OMAP DEVICE TREE SUPPORT
-M:	Benoît Cousson <b-cousson@ti.com>
+M:	Benoît Cousson <bcousson@baylibre.com>
 M:	Tony Lindgren <tony@atomide.com>
 L:	linux-omap@vger.kernel.org
 L:	devicetree@vger.kernel.org
@@ -5964,14 +5964,14 @@ S:	Maintained
 F:	drivers/char/hw_random/omap-rng.c
 
 OMAP HWMOD SUPPORT
-M:	Benoît Cousson <b-cousson@ti.com>
+M:	Benoît Cousson <bcousson@baylibre.com>
 M:	Paul Walmsley <paul@pwsan.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	arch/arm/mach-omap2/omap_hwmod.*
 
 OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
-M:	Benoît Cousson <b-cousson@ti.com>
+M:	Benoît Cousson <bcousson@baylibre.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -7366,7 +7366,6 @@ F:	drivers/net/ethernet/sfc/
 
 SGI GRU DRIVER
 M:	Dimitri Sivanich <sivanich@sgi.com>
-M:	Robin Holt <holt@sgi.com>
 S:	Maintained
 F:	drivers/misc/sgi-gru/
 
@@ -7386,7 +7385,8 @@ S:	Maintained for 2.6.
 F:	Documentation/sgi-visws.txt
 
 SGI XP/XPC/XPNET DRIVER
-M:	Robin Holt <holt@sgi.com>
+M:	Cliff Whickman <cpw@sgi.com>
+M:	Robin Holt <robinmholt@gmail.com>
 S:	Maintained
 F:	drivers/misc/sgi-xp/
 
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Linux for Workgroups
 
 # *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4a..1feb169274fe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
 	help
 	  Architecture has the first two arguments of clone(2) swapped.
 
+config CLONE_BACKWARDS3
+	bool
+	help
+	  Architecture has tls passed as the 3rd argument of clone(2),
+	  not the 5th one.
+
 config ODD_RT_SIGACTION
 	bool
 	help
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d477..9c548c7cf001 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
 	ld.a	r2,[r0,4]
 	sub	r12,r6,r7
 	bic	r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
 	and	r7,r12,r4
 	breq	r7,0,.Loop ; For speed, we want this branch to be unaligned.
 	b	.Lfound_char ; Likewise this one.
+#else
+	and	r12,r12,r4
+	breq	r12,0,.Loop ; For speed, we want this branch to be unaligned.
+	lsr_s	r12,r12,7
+	bic	r2,r7,r6
+	b.d	.Lfound_char_b
+	and_s	r2,r2,r12
+#endif
 ; /* We require this code address to be unaligned for speed... */
 .Laligned:
 	ld_s	r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
 	lsr	r7,r7,7
 
 	bic	r2,r7,r6
+.Lfound_char_b:
 	norm	r2,r2
 	sub_s	r0,r0,4
 	asr_s	r2,r2,3
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d59b70c6a6a0..3d77dbe406f4 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
 	compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
 
 	chosen {
-		bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+		bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
 	};
 
 	memory {
-		reg = <0x20000000 0x10000000>;
+		reg = <0x20000000 0x8000000>;
 	};
 
 	clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index b753855b2058..49e3c45818c2 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -94,8 +94,9 @@
 
 			usb0: ohci@00600000 {
 				status = "okay";
-				num-ports = <2>;
-				atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW
+				num-ports = <3>;
+				atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
+						   &pioD 19 GPIO_ACTIVE_LOW
 						   &pioD 20 GPIO_ACTIVE_LOW
 						  >;
 			};
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 365760b33a26..40e6fb280333 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -830,6 +830,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&gpio 24 0>; /* PD0 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index ed4b901b0227..37c93d3c4812 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -412,6 +412,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&gpio 170 0>; /* PV2 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index ab67c94db280..a3d0ebad78a1 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -588,6 +588,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 
 		vbus3_reg: regulator@3 {
@@ -598,6 +600,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 	};
 
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4..a252c0bfacf5 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
 {
 	return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..b07c09e5a0ac 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 		"	subs	%1, %0, %0, ror #16\n"
 		"	addeq	%0, %0, %4\n"
 		"	strexeq	%2, %0, [%3]"
-		: "=&r" (slock), "=&r" (contended), "=r" (res)
+		: "=&r" (slock), "=&r" (contended), "=&r" (res)
 		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 		: "cc");
 	} while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);
 
-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);
 
-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 /* read_can_lock - would read_trylock() succeed? */
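Note (illustration only, not taken from the patch): the point of the new do/while loops is that a failed strex does not mean the lock is held; the exclusive store can fail even on a free lock, for example when the exclusive monitor is cleared by an interrupt. A rough user-space C11 analogue of the patched arch_write_trylock() logic, using a weak compare-and-swap in place of ldrex/strex:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Only give up once the lock is actually observed as held; a spurious
	 * CAS failure on a free lock is retried, mirroring the while (res)
	 * loop above. The kernel version also issues smp_mb() on success. */
	static bool write_trylock_sketch(atomic_ulong *lock)
	{
		unsigned long expected;

		do {
			expected = 0;		/* "lock looks free" */
			if (atomic_compare_exchange_weak(lock, &expected, 0x80000000UL))
				return true;	/* acquired */
		} while (expected == 0);	/* failed although free: retry */

		return false;			/* genuinely contended */
	}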
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e721..0baf7f0d9394 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
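Note (illustration only): with the new prototype, a full-mm teardown is flagged by passing start = 0 and end = ~0UL, the only combination for which start | (end + 1) is zero. A tiny stand-alone check of that encoding, using the same expression as the patch:

	#include <assert.h>

	static int is_fullmm(unsigned long start, unsigned long end)
	{
		return !(start | (end + 1));	/* end + 1 wraps to 0 only for ~0UL */
	}

	int main(void)
	{
		assert(is_fullmm(0UL, ~0UL));		/* whole address space */
		assert(!is_fullmm(0x1000UL, 0x2000UL));	/* bounded range */
		return 0;
	}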
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d40d0ef389db..9cbe70c8b0ef 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
 	.endm
 
 	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 25442f451148..918875d96d5d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -84,17 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec)
 
 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-	void *base = (void *)0xffff0000;
-#else
 	void *base = vectors_page;
-#endif
 	unsigned offset = FIQ_OFFSET;
 
 	memcpy(base + offset, start, length);
+	if (!cache_is_vipt_nonaliasing())
+		flush_icache_range((unsigned long)base + offset, offset +
+				   length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-	if (!vectors_high())
-		flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..57221e349a7c 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/smp_plat.h>
 #include <asm/system_misc.h>
 
 extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image)
 	int i, err;
 
 	/*
+	 * Validate that if the current HW supports SMP, then the SW supports
+	 * and implements CPU hotplug for the current HW. If not, we won't be
+	 * able to kexec reliably, so fail the prepare operation.
+	 */
+	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+		return -EINVAL;
+
+	/*
 	 * No segment at default ATAGs address. try to locate
 	 * a dtb using magic.
 	 */
@@ -73,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
 	crash_save_cpu(&regs, smp_processor_id());
 	flush_cache_all();
 
+	set_cpu_online(smp_processor_id(), false);
 	atomic_dec(&waiting_for_crash_ipi);
 	while (1)
 		cpu_relax();
@@ -134,10 +144,13 @@ void machine_kexec(struct kimage *image)
 	unsigned long reboot_code_buffer_phys;
 	void *reboot_code_buffer;
 
-	if (num_online_cpus() > 1) {
-		pr_err("kexec: error: multiple CPUs still online\n");
-		return;
-	}
+	/*
+	 * This can only happen if machine_shutdown() failed to disable some
+	 * CPU, and that can only happen if the checks in
+	 * machine_kexec_prepare() were not correct. If this fails, we can't
+	 * reliably kexec anyway, so BUG_ON is appropriate.
+	 */
+	BUG_ON(num_online_cpus() > 1);
 
 	page_list = image->head & PAGE_MASK;
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533f..e186ee1e63f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*event_map)[config];
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -EINVAL;
+
+	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
+	if (is_software_event(event))
+		return 1;
+
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 536c85fe72a8..94f6b05f9e24 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr)
 {
 	return in_gate_area(NULL, addr);
 }
-#define is_gate_vma(vma)	((vma) = &gate_vma)
+#define is_gate_vma(vma)	((vma) == &gate_vma)
 #else
 #define is_gate_vma(vma)	0
 #endif
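Note (illustration only, not part of the patch): the one-character fix above matters because assignment is an expression in C, so the old macro always evaluated true and silently rewrote its argument. A small stand-alone demonstration with stub types (the struct below is a stand-in, not the kernel definition):

	#include <assert.h>

	struct vm_area_struct { int dummy; };
	static struct vm_area_struct gate_vma;

	#define is_gate_vma_buggy(vma)	((vma) = &gate_vma)	/* assignment: always "true" */
	#define is_gate_vma_fixed(vma)	((vma) == &gate_vma)	/* comparison */

	int main(void)
	{
		struct vm_area_struct other;
		struct vm_area_struct *vma = &other;

		assert(is_gate_vma_buggy(vma));	/* reports true... */
		assert(vma == &gate_vma);	/* ...and clobbered the pointer */

		vma = &other;
		assert(!is_gate_vma_fixed(vma));
		return 0;
	}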
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a..2dc19349eb19 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
 	return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	if (smp_ops.cpu_kill)
+		return 1;
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4a5199070430..db9cf692d4dd 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
 #define access_pmintenclr pm_fake
 
 /* Architected CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
  */
 static const struct coproc_reg cp15_regs[] = {
 	/* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
 			NULL, reset_unknown, c0_CSSELR },
 
 	/* TTBR0/TTBR1: swapped by interrupt.S. */
-	{ CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
-	{ CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+	{ CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+	{ CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
 
 	/* TTBCR: swapped by interrupt.S. */
 	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = {
 			NULL, reset_unknown, c6_IFAR },
 
 	/* PAR swapped by interrupt.S */
-	{ CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
 
 	/*
 	 * DC{C,I,CI}SW operations:
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
 			  | KVM_REG_ARM_OPC1_MASK))
 			return false;
 		params->is_64bit = true;
-		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+		/* CRm to CRn: see cp15_to_index for details */
+		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
 			       >> KVM_REG_ARM_CRM_SHIFT);
 		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
 			       >> KVM_REG_ARM_OPC1_SHIFT);
 		params->Op2 = 0;
-		params->CRn = 0;
+		params->CRm = 0;
 		return true;
 	default:
 		return false;
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
 	if (reg->is_64) {
 		val |= KVM_REG_SIZE_U64;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
-		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+		/*
+		 * CRn always denotes the primary coproc. reg. nr. for the
+		 * in-kernel representation, but the user space API uses the
+		 * CRm for the encoding, because it is modelled after the
+		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
+		 * B3-1445
+		 */
+		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
 	} else {
 		val |= KVM_REG_SIZE_U32;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index b7301d3e4799..0461d5c8d3de 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 		return -1;
 	if (i1->CRn != i2->CRn)
 		return i1->CRn - i2->CRn;
+	if (i1->is_64 != i2->is_64)
+		return i2->is_64 - i1->is_64;
 	if (i1->CRm != i2->CRm)
 		return i1->CRm - i2->CRm;
 	if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 
 #define CRn(_x)		.CRn = _x
 #define CRm(_x) 	.CRm = _x
+#define CRm64(_x)       .CRn = _x, .CRm = 0
 #define Op1(_x) 	.Op1 = _x
 #define Op2(_x) 	.Op2 = _x
 #define is64		.is_64 = true
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index 685063a6d0cf..cf93472b9dd6 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 
 /*
  * A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
  */
 static const struct coproc_reg a15_regs[] = {
 	/* MPIDR: we use VMPIDR for guest access. */
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index b8e06b7a2833..0c25d9487d53 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		      struct kvm_exit_mmio *mmio)
 {
-	unsigned long rt, len;
+	unsigned long rt;
+	int len;
 	bool is_write, sign_extend;
 
 	if (kvm_vcpu_dabt_isextabt(vcpu)) {
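Note (illustration only; the rationale is inferred, not stated in the patch): making len an int presumably keeps a negative error code returned by a decode helper visible to later "if (len < 0)" style checks, which an unsigned long would silently hide. A stand-alone sketch of that pitfall, using a made-up decode_len() helper:

	#include <stdio.h>

	/* Hypothetical stand-in for a helper that returns a size or a
	 * negative error code. */
	static int decode_len(int fail)
	{
		return fail ? -14 /* -EFAULT */ : 4;
	}

	int main(void)
	{
		unsigned long ulen = decode_len(1);	/* error wraps to a huge value */
		int len = decode_len(1);

		if (!(ulen < 0))			/* always true for unsigned */
			printf("unsigned len hides the error: %lu\n", ulen);
		if (len < 0)				/* error stays visible */
			printf("int len preserves the error: %d\n", len);
		return 0;
	}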
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index ca6bea4859b4..0988d9e04dd4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }
 
+static bool page_empty(void *ptr)
+{
+	struct page *ptr_page = virt_to_page(ptr);
+	return page_count(ptr_page) == 1;
+}
+
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
 	pmd_t *pmd_table = pmd_offset(pud, 0);
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
-static bool pmd_empty(pmd_t *pmd)
-{
-	struct page *pmd_page = virt_to_page(pmd);
-	return page_count(pmd_page) == 1;
-}
-
 static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 {
 	if (pte_present(*pte)) {
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 	}
 }
 
-static bool pte_empty(pte_t *pte)
-{
-	struct page *pte_page = virt_to_page(pte);
-	return page_count(pte_page) == 1;
-}
-
 static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 			unsigned long long start, u64 size)
 {
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long long addr = start, end = start + size;
-	u64 range;
+	u64 next;
 
 	while (addr < end) {
 		pgd = pgdp + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 		if (pud_none(*pud)) {
-			addr += PUD_SIZE;
+			addr = pud_addr_end(addr, end);
 			continue;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			addr += PMD_SIZE;
+			addr = pmd_addr_end(addr, end);
 			continue;
 		}
 
 		pte = pte_offset_kernel(pmd, addr);
 		clear_pte_entry(kvm, pte, addr);
-		range = PAGE_SIZE;
+		next = addr + PAGE_SIZE;
 
 		/* If we emptied the pte, walk back up the ladder */
-		if (pte_empty(pte)) {
+		if (page_empty(pte)) {
 			clear_pmd_entry(kvm, pmd, addr);
-			range = PMD_SIZE;
-			if (pmd_empty(pmd)) {
+			next = pmd_addr_end(addr, end);
+			if (page_empty(pmd) && !page_empty(pud)) {
 				clear_pud_entry(kvm, pud, addr);
-				range = PUD_SIZE;
+				next = pud_addr_end(addr, end);
 			}
 		}
 
-		addr += range;
+		addr = next;
 	}
 }
 
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 2abee6626aac..916e5a142917 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
 	CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk),
 	CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk),
 	CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
+	CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk),
+	CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
 	CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index dff4ddc5ef81..139e42da25f0 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW_SYNDROME,
+	.ecc_bits		= 4,
 	.bbt_options		= NAND_BBT_USE_FLASH,
 };
 
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a33686a6fbb2..fa4bfaf952d8 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
 	.parts		= davinci_evm_nandflash_partition,
 	.nr_parts	= ARRAY_SIZE(davinci_evm_nandflash_partition),
 	.ecc_mode	= NAND_ECC_HW,
+	.ecc_bits	= 1,
 	.bbt_options	= NAND_BBT_USE_FLASH,
 	.timing		= &davinci_evm_nandflash_timing,
 };
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index fbb8e5ab1dc1..0c005e876cac 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW,
+	.ecc_bits		= 1,
 	.options		= 0,
 };
 
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 2bc112adf565..808233b60e3d 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
 	.parts		= davinci_ntosd2_nandflash_partition,
 	.nr_parts	= ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
 	.ecc_mode	= NAND_ECC_HW,
+	.ecc_bits	= 1,
 	.bbt_options	= NAND_BBT_USE_FLASH,
 };
 
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index f6eeb87e4e95..827d15009a86 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data tusb_data = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
 	.mode		= MUSB_OTG,
-#else
-	.mode		= MUSB_HOST,
-#endif
 	.set_power	= tusb_set_power,
 	.min_power	= 25,	/* x2 = 50 mA drawn from VBUS as peripheral */
 	.power		= 100,	/* Max 100 mA VBUS for host mode */
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index d2ea68ea678a..7735105561d8 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = {
 
 static struct omap_musb_board_data musb_board_data = {
 	.interface_type		= MUSB_INTERFACE_ULPI,
-	.mode			= MUSB_PERIPHERAL,
+	.mode			= MUSB_OTG,
 	.power			= 0,
 };
 
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8c4de2708cf2..bc897231bd10 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
 	.mode		= MUSB_OTG,
-#else
-	.mode		= MUSB_HOST,
-#endif
+
 	/* .clock is set dynamically */
 	.config		= &musb_config,
 
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index db5c2cab8fda..cd2c88e7a8f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -809,15 +809,18 @@ config KUSER_HELPERS
 	  the CPU type fitted to the system. This permits binaries to be
 	  run on ARMv4 through to ARMv7 without modification.
 
+	  See Documentation/arm/kernel_user_helpers.txt for details.
+
 	  However, the fixed address nature of these helpers can be used
 	  by ROP (return orientated programming) authors when creating
 	  exploits.
 
 	  If all of the binaries and libraries which run on your platform
 	  are built specifically for your platform, and make no use of
-	  these helpers, then you can turn this option off. However,
-	  when such an binary or library is run, it will receive a SIGILL
-	  signal, which will terminate the program.
+	  these helpers, then you can turn this option off to hinder
+	  such exploits. However, in that case, if a binary or library
+	  relying on those helpers is run, it will receive a SIGILL signal,
+	  which will terminate the program.
 
 	  Say N here only if you are absolutely certain that you do not
 	  need these helpers; otherwise, the safe option is to say Y.
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c
index 3e5c4619caa5..50a3ea0037db 100644
--- a/arch/arm/plat-samsung/init.c
+++ b/arch/arm/plat-samsung/init.c
@@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode,
 
 	printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
 
-	if (cpu->map_io == NULL || cpu->init == NULL) {
+	if (cpu->init == NULL) {
 		printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
 		panic("Unsupported Samsung CPU");
 	}
 
-	cpu->map_io();
+	if (cpu->map_io)
+		cpu->map_io();
 }
 
 /* s3c24xx_init_clocks
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index c9770ba5c7df..8a6295c86209 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused)
 	per_cpu(xen_vcpu, cpu) = vcpup;
 
 	enable_percpu_irq(xen_events_irq, 0);
+	put_cpu();
 }
 
 static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c92de4163eba..b25763bc0ec4 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -42,14 +42,15 @@
 #define	TPIDR_EL1	18	/* Thread ID, Privileged */
 #define	AMAIR_EL1	19	/* Aux Memory Attribute Indirection Register */
 #define	CNTKCTL_EL1	20	/* Timer Control Register (EL1) */
+#define	PAR_EL1		21	/* Physical Address Register */
 /* 32bit specific registers. Keep them at the end of the range */
-#define	DACR32_EL2	21	/* Domain Access Control Register */
-#define	IFSR32_EL2	22	/* Instruction Fault Status Register */
-#define	FPEXC32_EL2	23	/* Floating-Point Exception Control Register */
-#define	DBGVCR32_EL2	24	/* Debug Vector Catch Register */
-#define	TEECR32_EL1	25	/* ThumbEE Configuration Register */
-#define	TEEHBR32_EL1	26	/* ThumbEE Handler Base Register */
-#define	NR_SYS_REGS	27
+#define	DACR32_EL2	22	/* Domain Access Control Register */
+#define	IFSR32_EL2	23	/* Instruction Fault Status Register */
+#define	FPEXC32_EL2	24	/* Floating-Point Exception Control Register */
+#define	DBGVCR32_EL2	25	/* Debug Vector Catch Register */
+#define	TEECR32_EL1	26	/* ThumbEE Configuration Register */
+#define	TEEHBR32_EL1	27	/* ThumbEE Handler Base Register */
+#define	NR_SYS_REGS	28
 
 /* 32bit mapping */
 #define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
@@ -69,6 +70,8 @@
 #define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
 #define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
 #define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
+#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
+#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
 #define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
 #define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
 #define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 644d73956864..0859a4ddd1e7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
 	/* Target CPU and feature flags */
-	u32 target;
+	int target;
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
 	/* Detect first run of a vcpu */
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb4b773..717031a762c2 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 9ba33c40cdf8..12e6ccb88691 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) | |||
107 | static int | 107 | static int |
108 | armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) | 108 | armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) |
109 | { | 109 | { |
110 | int mapping = (*event_map)[config]; | 110 | int mapping; |
111 | |||
112 | if (config >= PERF_COUNT_HW_MAX) | ||
113 | return -EINVAL; | ||
114 | |||
115 | mapping = (*event_map)[config]; | ||
111 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; | 116 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; |
112 | } | 117 | } |
113 | 118 | ||
@@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events, | |||
317 | struct hw_perf_event fake_event = event->hw; | 322 | struct hw_perf_event fake_event = event->hw; |
318 | struct pmu *leader_pmu = event->group_leader->pmu; | 323 | struct pmu *leader_pmu = event->group_leader->pmu; |
319 | 324 | ||
325 | if (is_software_event(event)) | ||
326 | return 1; | ||
327 | |||
320 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) | 328 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) |
321 | return 1; | 329 | return 1; |
322 | 330 | ||
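The armpmu_map_event() change above is a plain bounds check before an array lookup; a minimal standalone sketch of the same pattern (the table contents and the NUM_EVENTS/UNSUPPORTED names below are made up for illustration):

    #include <errno.h>

    #define NUM_EVENTS      10              /* stands in for PERF_COUNT_HW_MAX */
    #define UNSUPPORTED     -1              /* stands in for HW_OP_UNSUPPORTED */

    static const int event_map[NUM_EVENTS] = { 3, UNSUPPORTED, 7 };

    int map_event(unsigned long long config)
    {
            int mapping;

            if (config >= NUM_EVENTS)       /* validate before indexing the table */
                    return -EINVAL;

            mapping = event_map[config];
            return mapping == UNSUPPORTED ? -ENOENT : mapping;
    }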
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index ff985e3d8b72..1ac0bbbdddb2 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S | |||
@@ -214,6 +214,7 @@ __kvm_hyp_code_start: | |||
214 | mrs x21, tpidr_el1 | 214 | mrs x21, tpidr_el1 |
215 | mrs x22, amair_el1 | 215 | mrs x22, amair_el1 |
216 | mrs x23, cntkctl_el1 | 216 | mrs x23, cntkctl_el1 |
217 | mrs x24, par_el1 | ||
217 | 218 | ||
218 | stp x4, x5, [x3] | 219 | stp x4, x5, [x3] |
219 | stp x6, x7, [x3, #16] | 220 | stp x6, x7, [x3, #16] |
@@ -225,6 +226,7 @@ __kvm_hyp_code_start: | |||
225 | stp x18, x19, [x3, #112] | 226 | stp x18, x19, [x3, #112] |
226 | stp x20, x21, [x3, #128] | 227 | stp x20, x21, [x3, #128] |
227 | stp x22, x23, [x3, #144] | 228 | stp x22, x23, [x3, #144] |
229 | str x24, [x3, #160] | ||
228 | .endm | 230 | .endm |
229 | 231 | ||
230 | .macro restore_sysregs | 232 | .macro restore_sysregs |
@@ -243,6 +245,7 @@ __kvm_hyp_code_start: | |||
243 | ldp x18, x19, [x3, #112] | 245 | ldp x18, x19, [x3, #112] |
244 | ldp x20, x21, [x3, #128] | 246 | ldp x20, x21, [x3, #128] |
245 | ldp x22, x23, [x3, #144] | 247 | ldp x22, x23, [x3, #144] |
248 | ldr x24, [x3, #160] | ||
246 | 249 | ||
247 | msr vmpidr_el2, x4 | 250 | msr vmpidr_el2, x4 |
248 | msr csselr_el1, x5 | 251 | msr csselr_el1, x5 |
@@ -264,6 +267,7 @@ __kvm_hyp_code_start: | |||
264 | msr tpidr_el1, x21 | 267 | msr tpidr_el1, x21 |
265 | msr amair_el1, x22 | 268 | msr amair_el1, x22 |
266 | msr cntkctl_el1, x23 | 269 | msr cntkctl_el1, x23 |
270 | msr par_el1, x24 | ||
267 | .endm | 271 | .endm |
268 | 272 | ||
269 | .macro skip_32bit_state tmp, target | 273 | .macro skip_32bit_state tmp, target |
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run) | |||
600 | 604 | ||
601 | // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); | 605 | // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); |
602 | ENTRY(__kvm_tlb_flush_vmid_ipa) | 606 | ENTRY(__kvm_tlb_flush_vmid_ipa) |
607 | dsb ishst | ||
608 | |||
603 | kern_hyp_va x0 | 609 | kern_hyp_va x0 |
604 | ldr x2, [x0, #KVM_VTTBR] | 610 | ldr x2, [x0, #KVM_VTTBR] |
605 | msr vttbr_el2, x2 | 611 | msr vttbr_el2, x2 |
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa) | |||
621 | ENDPROC(__kvm_tlb_flush_vmid_ipa) | 627 | ENDPROC(__kvm_tlb_flush_vmid_ipa) |
622 | 628 | ||
623 | ENTRY(__kvm_flush_vm_context) | 629 | ENTRY(__kvm_flush_vm_context) |
630 | dsb ishst | ||
624 | tlbi alle1is | 631 | tlbi alle1is |
625 | ic ialluis | 632 | ic ialluis |
626 | dsb sy | 633 | dsb sy |
@@ -753,6 +760,10 @@ el1_trap: | |||
753 | */ | 760 | */ |
754 | tbnz x1, #7, 1f // S1PTW is set | 761 | tbnz x1, #7, 1f // S1PTW is set |
755 | 762 | ||
763 | /* Preserve PAR_EL1 */ | ||
764 | mrs x3, par_el1 | ||
765 | push x3, xzr | ||
766 | |||
756 | /* | 767 | /* |
757 | * Permission fault, HPFAR_EL2 is invalid. | 768 | * Permission fault, HPFAR_EL2 is invalid. |
758 | * Resolve the IPA the hard way using the guest VA. | 769 | * Resolve the IPA the hard way using the guest VA. |
@@ -766,6 +777,8 @@ el1_trap: | |||
766 | 777 | ||
767 | /* Read result */ | 778 | /* Read result */ |
768 | mrs x3, par_el1 | 779 | mrs x3, par_el1 |
780 | pop x0, xzr // Restore PAR_EL1 from the stack | ||
781 | msr par_el1, x0 | ||
769 | tbnz x3, #0, 3f // Bail out if we failed the translation | 782 | tbnz x3, #0, 3f // Bail out if we failed the translation |
770 | ubfx x3, x3, #12, #36 // Extract IPA | 783 | ubfx x3, x3, #12, #36 // Extract IPA |
771 | lsl x3, x3, #4 // and present it like HPFAR | 784 | lsl x3, x3, #4 // and present it like HPFAR |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 94923609753b..02e9d09e1d80 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
211 | /* FAR_EL1 */ | 211 | /* FAR_EL1 */ |
212 | { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), | 212 | { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), |
213 | NULL, reset_unknown, FAR_EL1 }, | 213 | NULL, reset_unknown, FAR_EL1 }, |
214 | /* PAR_EL1 */ | ||
215 | { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), | ||
216 | NULL, reset_unknown, PAR_EL1 }, | ||
214 | 217 | ||
215 | /* PMINTENSET_EL1 */ | 218 | /* PMINTENSET_EL1 */ |
216 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), | 219 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), |
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 33a97929d055..77d442ab28c8 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig | |||
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz" | |||
158 | endmenu | 158 | endmenu |
159 | 159 | ||
160 | source "init/Kconfig" | 160 | source "init/Kconfig" |
161 | source "kernel/Kconfig.freezer" | ||
161 | source "drivers/Kconfig" | 162 | source "drivers/Kconfig" |
162 | source "fs/Kconfig" | 163 | source "fs/Kconfig" |
163 | 164 | ||
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index ef3a9de01954..bc5efc7c3f3f 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h | |||
@@ -22,7 +22,7 @@ | |||
22 | * unmapping a portion of the virtual address space, these hooks are called according to | 22 | * unmapping a portion of the virtual address space, these hooks are called according to |
23 | * the following template: | 23 | * the following template: |
24 | * | 24 | * |
25 | * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM | 25 | * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM |
26 | * { | 26 | * { |
27 | * for each vma that needs a shootdown do { | 27 | * for each vma that needs a shootdown do { |
28 | * tlb_start_vma(tlb, vma); | 28 | * tlb_start_vma(tlb, vma); |
@@ -58,6 +58,7 @@ struct mmu_gather { | |||
58 | unsigned int max; | 58 | unsigned int max; |
59 | unsigned char fullmm; /* non-zero means full mm flush */ | 59 | unsigned char fullmm; /* non-zero means full mm flush */ |
60 | unsigned char need_flush; /* really unmapped some PTEs? */ | 60 | unsigned char need_flush; /* really unmapped some PTEs? */ |
61 | unsigned long start, end; | ||
61 | unsigned long start_addr; | 62 | unsigned long start_addr; |
62 | unsigned long end_addr; | 63 | unsigned long end_addr; |
63 | struct page **pages; | 64 | struct page **pages; |
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb) | |||
155 | 156 | ||
156 | 157 | ||
157 | static inline void | 158 | static inline void |
158 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 159 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
159 | { | 160 | { |
160 | tlb->mm = mm; | 161 | tlb->mm = mm; |
161 | tlb->max = ARRAY_SIZE(tlb->local); | 162 | tlb->max = ARRAY_SIZE(tlb->local); |
162 | tlb->pages = tlb->local; | 163 | tlb->pages = tlb->local; |
163 | tlb->nr = 0; | 164 | tlb->nr = 0; |
164 | tlb->fullmm = full_mm_flush; | 165 | tlb->fullmm = !(start | (end+1)); |
166 | tlb->start = start; | ||
167 | tlb->end = end; | ||
165 | tlb->start_addr = ~0UL; | 168 | tlb->start_addr = ~0UL; |
166 | } | 169 | } |
167 | 170 | ||
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c index 2291a7d69d49..fa277aecfb78 100644 --- a/arch/m68k/emu/natfeat.c +++ b/arch/m68k/emu/natfeat.c | |||
@@ -18,9 +18,11 @@ | |||
18 | #include <asm/machdep.h> | 18 | #include <asm/machdep.h> |
19 | #include <asm/natfeat.h> | 19 | #include <asm/natfeat.h> |
20 | 20 | ||
21 | extern long nf_get_id2(const char *feature_name); | ||
22 | |||
21 | asm("\n" | 23 | asm("\n" |
22 | " .global nf_get_id,nf_call\n" | 24 | " .global nf_get_id2,nf_call\n" |
23 | "nf_get_id:\n" | 25 | "nf_get_id2:\n" |
24 | " .short 0x7300\n" | 26 | " .short 0x7300\n" |
25 | " rts\n" | 27 | " rts\n" |
26 | "nf_call:\n" | 28 | "nf_call:\n" |
@@ -29,12 +31,25 @@ asm("\n" | |||
29 | "1: moveq.l #0,%d0\n" | 31 | "1: moveq.l #0,%d0\n" |
30 | " rts\n" | 32 | " rts\n" |
31 | " .section __ex_table,\"a\"\n" | 33 | " .section __ex_table,\"a\"\n" |
32 | " .long nf_get_id,1b\n" | 34 | " .long nf_get_id2,1b\n" |
33 | " .long nf_call,1b\n" | 35 | " .long nf_call,1b\n" |
34 | " .previous"); | 36 | " .previous"); |
35 | EXPORT_SYMBOL_GPL(nf_get_id); | ||
36 | EXPORT_SYMBOL_GPL(nf_call); | 37 | EXPORT_SYMBOL_GPL(nf_call); |
37 | 38 | ||
39 | long nf_get_id(const char *feature_name) | ||
40 | { | ||
41 | /* feature_name may be in vmalloc()ed memory, so make a copy */ | ||
42 | char name_copy[32]; | ||
43 | size_t n; | ||
44 | |||
45 | n = strlcpy(name_copy, feature_name, sizeof(name_copy)); | ||
46 | if (n >= sizeof(name_copy)) | ||
47 | return 0; | ||
48 | |||
49 | return nf_get_id2(name_copy); | ||
50 | } | ||
51 | EXPORT_SYMBOL_GPL(nf_get_id); | ||
52 | |||
38 | void nfprint(const char *fmt, ...) | 53 | void nfprint(const char *fmt, ...) |
39 | { | 54 | { |
40 | static char buf[256]; | 55 | static char buf[256]; |
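nf_get_id() now copies the caller's string into a small stack buffer before issuing the trap, because feature_name may sit in vmalloc()ed memory. A minimal userspace sketch of the same bounded-copy-or-bail pattern (nf_get_id2_stub() is a hypothetical stand-in; the kernel uses strlcpy(), which copies and reports the would-be length in one call):

    #include <stddef.h>
    #include <string.h>

    static long nf_get_id2_stub(const char *name)   /* stand-in for the real trap */
    {
            (void)name;
            return 42;
    }

    long nf_get_id_sketch(const char *feature_name)
    {
            char name_copy[32];
            size_t n = strlen(feature_name);

            if (n >= sizeof(name_copy))     /* would not fit: report "feature not found" */
                    return 0;

            memcpy(name_copy, feature_name, n + 1);         /* copy including the NUL */
            return nf_get_id2_stub(name_copy);
    }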
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h index 444ea8a09e9f..ef881cfbbca9 100644 --- a/arch/m68k/include/asm/div64.h +++ b/arch/m68k/include/asm/div64.h | |||
@@ -15,16 +15,17 @@ | |||
15 | unsigned long long n64; \ | 15 | unsigned long long n64; \ |
16 | } __n; \ | 16 | } __n; \ |
17 | unsigned long __rem, __upper; \ | 17 | unsigned long __rem, __upper; \ |
18 | unsigned long __base = (base); \ | ||
18 | \ | 19 | \ |
19 | __n.n64 = (n); \ | 20 | __n.n64 = (n); \ |
20 | if ((__upper = __n.n32[0])) { \ | 21 | if ((__upper = __n.n32[0])) { \ |
21 | asm ("divul.l %2,%1:%0" \ | 22 | asm ("divul.l %2,%1:%0" \ |
22 | : "=d" (__n.n32[0]), "=d" (__upper) \ | 23 | : "=d" (__n.n32[0]), "=d" (__upper) \ |
23 | : "d" (base), "0" (__n.n32[0])); \ | 24 | : "d" (__base), "0" (__n.n32[0])); \ |
24 | } \ | 25 | } \ |
25 | asm ("divu.l %2,%1:%0" \ | 26 | asm ("divu.l %2,%1:%0" \ |
26 | : "=d" (__n.n32[1]), "=d" (__rem) \ | 27 | : "=d" (__n.n32[1]), "=d" (__rem) \ |
27 | : "d" (base), "1" (__upper), "0" (__n.n32[1])); \ | 28 | : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \ |
28 | (n) = __n.n64; \ | 29 | (n) = __n.n64; \ |
29 | __rem; \ | 30 | __rem; \ |
30 | }) | 31 | }) |
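The div64.h fix evaluates the macro argument base exactly once into __base, so a side-effecting argument is not expanded twice by the two asm statements. A minimal sketch of the hazard and the capture-once idiom (the macro names are made up; the statement-expression form is a GNU C extension, as used throughout the kernel):

    #include <assert.h>

    static int calls;

    static unsigned long next_base(void)
    {
            calls++;                        /* side effect: count evaluations */
            return 10;
    }

    /* Hypothetical macros for illustration only. */
    #define DIV_TWICE(n, base)  ((n) / (base) + (n) % (base))   /* expands base twice */
    #define DIV_ONCE(n, base)   ({ unsigned long __b = (base); (n) / __b + (n) % __b; })

    int main(void)
    {
            calls = 0;
            (void)DIV_TWICE(100UL, next_base());
            assert(calls == 2);             /* argument evaluated twice */

            calls = 0;
            (void)DIV_ONCE(100UL, next_base());
            assert(calls == 1);             /* captured once, like __base in the fixed macro */
            return 0;
    }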
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index d22a4ecffff4..4fab52294d98 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -28,7 +28,7 @@ config MICROBLAZE | |||
28 | select GENERIC_CLOCKEVENTS | 28 | select GENERIC_CLOCKEVENTS |
29 | select GENERIC_IDLE_POLL_SETUP | 29 | select GENERIC_IDLE_POLL_SETUP |
30 | select MODULES_USE_ELF_RELA | 30 | select MODULES_USE_ELF_RELA |
31 | select CLONE_BACKWARDS | 31 | select CLONE_BACKWARDS3 |
32 | 32 | ||
33 | config SWAP | 33 | config SWAP |
34 | def_bool n | 34 | def_bool n |
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index e773659ccf9f..46048d24328c 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
803 | dec_insn.next_pc_inc; | 803 | dec_insn.next_pc_inc; |
804 | return 1; | 804 | return 1; |
805 | break; | 805 | break; |
806 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
807 | case lwc2_op: /* This is bbit0 on Octeon */ | ||
808 | if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) | ||
809 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
810 | else | ||
811 | *contpc = regs->cp0_epc + 8; | ||
812 | return 1; | ||
813 | case ldc2_op: /* This is bbit032 on Octeon */ | ||
814 | if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0) | ||
815 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
816 | else | ||
817 | *contpc = regs->cp0_epc + 8; | ||
818 | return 1; | ||
819 | case swc2_op: /* This is bbit1 on Octeon */ | ||
820 | if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) | ||
821 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
822 | else | ||
823 | *contpc = regs->cp0_epc + 8; | ||
824 | return 1; | ||
825 | case sdc2_op: /* This is bbit132 on Octeon */ | ||
826 | if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) | ||
827 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
828 | else | ||
829 | *contpc = regs->cp0_epc + 8; | ||
830 | return 1; | ||
831 | #endif | ||
806 | case cop0_op: | 832 | case cop0_op: |
807 | case cop1_op: | 833 | case cop1_op: |
808 | case cop2_op: | 834 | case cop2_op: |
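The Octeon bbit0/bbit1 cases added above all compute the same two possible continuation PCs; a small sketch of that arithmetic (the function name is illustrative only):

    /* The 16-bit signed immediate is a word offset relative to the instruction
     * following the branch (epc + 4); a not-taken branch resumes after the
     * delay slot (epc + 8). */
    unsigned long bbit_target(unsigned long epc, short simmediate, int taken)
    {
            if (taken)
                    return epc + 4 + ((long)simmediate << 2);
            return epc + 8;
    }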
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 99dbab1c59ac..d60bf98fa5cf 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig | |||
@@ -55,6 +55,7 @@ config GENERIC_CSUM | |||
55 | 55 | ||
56 | source "init/Kconfig" | 56 | source "init/Kconfig" |
57 | 57 | ||
58 | source "kernel/Kconfig.freezer" | ||
58 | 59 | ||
59 | menu "Processor type and features" | 60 | menu "Processor type and features" |
60 | 61 | ||
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index b75d7d686684..6d6d92b4ea11 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
@@ -32,6 +32,7 @@ struct mmu_gather { | |||
32 | struct mm_struct *mm; | 32 | struct mm_struct *mm; |
33 | struct mmu_table_batch *batch; | 33 | struct mmu_table_batch *batch; |
34 | unsigned int fullmm; | 34 | unsigned int fullmm; |
35 | unsigned long start, end; | ||
35 | }; | 36 | }; |
36 | 37 | ||
37 | struct mmu_table_batch { | 38 | struct mmu_table_batch { |
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table); | |||
48 | 49 | ||
49 | static inline void tlb_gather_mmu(struct mmu_gather *tlb, | 50 | static inline void tlb_gather_mmu(struct mmu_gather *tlb, |
50 | struct mm_struct *mm, | 51 | struct mm_struct *mm, |
51 | unsigned int full_mm_flush) | 52 | unsigned long start, |
53 | unsigned long end) | ||
52 | { | 54 | { |
53 | tlb->mm = mm; | 55 | tlb->mm = mm; |
54 | tlb->fullmm = full_mm_flush; | 56 | tlb->start = start; |
57 | tlb->end = end; | ||
58 | tlb->fullmm = !(start | (end+1)); | ||
55 | tlb->batch = NULL; | 59 | tlb->batch = NULL; |
56 | if (tlb->fullmm) | 60 | if (tlb->fullmm) |
57 | __tlb_flush_mm(mm); | 61 | __tlb_flush_mm(mm); |
diff --git a/arch/score/Kconfig b/arch/score/Kconfig index c8def8bc9020..5fc237581caf 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig | |||
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT | |||
87 | 87 | ||
88 | source "init/Kconfig" | 88 | source "init/Kconfig" |
89 | 89 | ||
90 | source "kernel/Kconfig.freezer" | ||
91 | |||
90 | config MMU | 92 | config MMU |
91 | def_bool y | 93 | def_bool y |
92 | 94 | ||
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index e61d43d9f689..362192ed12fe 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h | |||
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) | |||
36 | } | 36 | } |
37 | 37 | ||
38 | static inline void | 38 | static inline void |
39 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 39 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
40 | { | 40 | { |
41 | tlb->mm = mm; | 41 | tlb->mm = mm; |
42 | tlb->fullmm = full_mm_flush; | 42 | tlb->start = start; |
43 | tlb->end = end; | ||
44 | tlb->fullmm = !(start | (end+1)); | ||
43 | 45 | ||
44 | init_tlb_gather(tlb); | 46 | init_tlb_gather(tlb); |
45 | } | 47 | } |
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 4febacd1a8a1..29b0301c18aa 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h | |||
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) | |||
45 | } | 45 | } |
46 | 46 | ||
47 | static inline void | 47 | static inline void |
48 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 48 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
49 | { | 49 | { |
50 | tlb->mm = mm; | 50 | tlb->mm = mm; |
51 | tlb->fullmm = full_mm_flush; | 51 | tlb->start = start; |
52 | tlb->end = end; | ||
53 | tlb->fullmm = !(start | (end+1)); | ||
52 | 54 | ||
53 | init_tlb_gather(tlb); | 55 | init_tlb_gather(tlb); |
54 | } | 56 | } |
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 653668d140f9..4a8cb8d7cbd5 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h | |||
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params) | |||
35 | */ | 35 | */ |
36 | if (boot_params->sentinel) { | 36 | if (boot_params->sentinel) { |
37 | /* fields in boot_params are left uninitialized, clear them */ | 37 | /* fields in boot_params are left uninitialized, clear them */ |
38 | memset(&boot_params->olpc_ofw_header, 0, | 38 | memset(&boot_params->ext_ramdisk_image, 0, |
39 | (char *)&boot_params->efi_info - | 39 | (char *)&boot_params->efi_info - |
40 | (char *)&boot_params->olpc_ofw_header); | 40 | (char *)&boot_params->ext_ramdisk_image); |
41 | memset(&boot_params->kbd_status, 0, | 41 | memset(&boot_params->kbd_status, 0, |
42 | (char *)&boot_params->hdr - | 42 | (char *)&boot_params->hdr - |
43 | (char *)&boot_params->kbd_status); | 43 | (char *)&boot_params->kbd_status); |
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 50e5c58ced23..4c019179a57d 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h | |||
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table, | |||
59 | 59 | ||
60 | extern int __apply_microcode_amd(struct microcode_amd *mc_amd); | 60 | extern int __apply_microcode_amd(struct microcode_amd *mc_amd); |
61 | extern int apply_microcode_amd(int cpu); | 61 | extern int apply_microcode_amd(int cpu); |
62 | extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size); | 62 | extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); |
63 | 63 | ||
64 | #ifdef CONFIG_MICROCODE_AMD_EARLY | 64 | #ifdef CONFIG_MICROCODE_AMD_EARLY |
65 | #ifdef CONFIG_X86_32 | 65 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index f2b489cf1602..3bf2dd0cf61f 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h | |||
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) | 55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
59 | |||
60 | /* | ||
61 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and | ||
62 | * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset | ||
63 | * into this range. | ||
64 | */ | ||
65 | #define PTE_FILE_MAX_BITS 28 | ||
66 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) | ||
67 | #define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1) | ||
68 | #define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1) | ||
69 | #define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1) | ||
70 | #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) | ||
71 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) | ||
72 | #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) | ||
73 | |||
74 | #define pte_to_pgoff(pte) \ | ||
75 | ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ | ||
76 | & ((1U << PTE_FILE_BITS1) - 1))) \ | ||
77 | + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ | ||
78 | & ((1U << PTE_FILE_BITS2) - 1)) \ | ||
79 | << (PTE_FILE_BITS1)) \ | ||
80 | + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ | ||
81 | & ((1U << PTE_FILE_BITS3) - 1)) \ | ||
82 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | ||
83 | + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ | ||
84 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) | ||
85 | |||
86 | #define pgoff_to_pte(off) \ | ||
87 | ((pte_t) { .pte_low = \ | ||
88 | ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ | ||
89 | + ((((off) >> PTE_FILE_BITS1) \ | ||
90 | & ((1U << PTE_FILE_BITS2) - 1)) \ | ||
91 | << PTE_FILE_SHIFT2) \ | ||
92 | + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | ||
93 | & ((1U << PTE_FILE_BITS3) - 1)) \ | ||
94 | << PTE_FILE_SHIFT3) \ | ||
95 | + ((((off) >> \ | ||
96 | (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ | ||
97 | << PTE_FILE_SHIFT4) \ | ||
98 | + _PAGE_FILE }) | ||
99 | |||
100 | #else /* CONFIG_MEM_SOFT_DIRTY */ | ||
101 | |||
58 | /* | 102 | /* |
59 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, | 103 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, |
60 | * split up the 29 bits of offset into this range: | 104 | * split up the 29 bits of offset into this range. |
61 | */ | 105 | */ |
62 | #define PTE_FILE_MAX_BITS 29 | 106 | #define PTE_FILE_MAX_BITS 29 |
63 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) | 107 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) |
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
88 | << PTE_FILE_SHIFT3) \ | 132 | << PTE_FILE_SHIFT3) \ |
89 | + _PAGE_FILE }) | 133 | + _PAGE_FILE }) |
90 | 134 | ||
135 | #endif /* CONFIG_MEM_SOFT_DIRTY */ | ||
136 | |||
91 | /* Encode and de-code a swap entry */ | 137 | /* Encode and de-code a swap entry */ |
92 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE | 138 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE |
93 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) | 139 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) |
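The soft-dirty variant of pte_to_pgoff()/pgoff_to_pte() is the usual trick of splitting an offset into fields that dodge the reserved PTE bits and reassembling it on the way back. A standalone sketch with hypothetical bit positions (the real shifts come from the _PAGE_BIT_* constants above) showing that the encoding round-trips:

    #include <assert.h>

    /* Hypothetical field layout for illustration only: low 5 bits of the
     * offset land in PTE bits 1..5, the rest in bits 8 and up, leaving
     * bits 0, 6 and 7 free for flags. */
    #define SHIFT1  1
    #define BITS1   5
    #define SHIFT2  8

    static unsigned int pack(unsigned int off)
    {
            return ((off & ((1u << BITS1) - 1)) << SHIFT1) |
                   ((off >> BITS1) << SHIFT2);
    }

    static unsigned int unpack(unsigned int pte)
    {
            return ((pte >> SHIFT1) & ((1u << BITS1) - 1)) |
                   ((pte >> SHIFT2) << BITS1);
    }

    int main(void)
    {
            unsigned int off;

            for (off = 0; off < (1u << 12); off++)
                    assert(unpack(pack(off)) == off);   /* lossless round-trip */
            return 0;
    }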
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 4cc9f2b7cdc3..81bb91b49a88 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) | |||
179 | /* | 179 | /* |
180 | * Bits 0, 6 and 7 are taken in the low part of the pte, | 180 | * Bits 0, 6 and 7 are taken in the low part of the pte, |
181 | * put the 32 bits of offset into the high part. | 181 | * put the 32 bits of offset into the high part. |
182 | * | ||
183 | * For soft-dirty tracking, bit 11 is taken from | ||
184 | * the low part of the pte as well. | ||
182 | */ | 185 | */ |
183 | #define pte_to_pgoff(pte) ((pte).pte_high) | 186 | #define pte_to_pgoff(pte) ((pte).pte_high) |
184 | #define pgoff_to_pte(off) \ | 187 | #define pgoff_to_pte(off) \ |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 7dc305a46058..1c00631164c2 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) | |||
314 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); | 314 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); |
315 | } | 315 | } |
316 | 316 | ||
317 | static inline pte_t pte_swp_mksoft_dirty(pte_t pte) | ||
318 | { | ||
319 | return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); | ||
320 | } | ||
321 | |||
322 | static inline int pte_swp_soft_dirty(pte_t pte) | ||
323 | { | ||
324 | return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; | ||
325 | } | ||
326 | |||
327 | static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) | ||
328 | { | ||
329 | return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); | ||
330 | } | ||
331 | |||
332 | static inline pte_t pte_file_clear_soft_dirty(pte_t pte) | ||
333 | { | ||
334 | return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); | ||
335 | } | ||
336 | |||
337 | static inline pte_t pte_file_mksoft_dirty(pte_t pte) | ||
338 | { | ||
339 | return pte_set_flags(pte, _PAGE_SOFT_DIRTY); | ||
340 | } | ||
341 | |||
342 | static inline int pte_file_soft_dirty(pte_t pte) | ||
343 | { | ||
344 | return pte_flags(pte) & _PAGE_SOFT_DIRTY; | ||
345 | } | ||
346 | |||
317 | /* | 347 | /* |
318 | * Mask out unsupported bits in a present pgprot. Non-present pgprots | 348 | * Mask out unsupported bits in a present pgprot. Non-present pgprots |
319 | * can use those bits for other purposes, so leave them be. | 349 | * can use those bits for other purposes, so leave them be. |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index c98ac63aae48..f4843e031131 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -61,12 +61,27 @@ | |||
61 | * they do not conflict with each other. | 61 | * they do not conflict with each other. |
62 | */ | 62 | */ |
63 | 63 | ||
64 | #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN | ||
65 | |||
64 | #ifdef CONFIG_MEM_SOFT_DIRTY | 66 | #ifdef CONFIG_MEM_SOFT_DIRTY |
65 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) | 67 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) |
66 | #else | 68 | #else |
67 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) | 69 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) |
68 | #endif | 70 | #endif |
69 | 71 | ||
72 | /* | ||
73 | * Tracking the soft dirty bit when a page is swapped out is tricky. | ||
74 | * We need a bit which can be stored in the pte _and_ does not | ||
75 | * conflict with the swap entry format. On x86 bits 6 and 7 are *not* | ||
76 | * involved in the swap entry computation, but bit 6 is used for | ||
77 | * nonlinear file mapping, so we borrow bit 7 for soft dirty tracking. | ||
78 | */ | ||
79 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
80 | #define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE | ||
81 | #else | ||
82 | #define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) | ||
83 | #endif | ||
84 | |||
70 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | 85 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
71 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) | 86 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) |
72 | #else | 87 | #else |
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 33692eaabab5..e3ddd7db723f 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
233 | #define arch_read_relax(lock) cpu_relax() | 233 | #define arch_read_relax(lock) cpu_relax() |
234 | #define arch_write_relax(lock) cpu_relax() | 234 | #define arch_write_relax(lock) cpu_relax() |
235 | 235 | ||
236 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ | ||
237 | static inline void smp_mb__after_lock(void) { } | ||
238 | #define ARCH_HAS_SMP_MB_AFTER_LOCK | ||
239 | |||
240 | #endif /* _ASM_X86_SPINLOCK_H */ | 236 | #endif /* _ASM_X86_SPINLOCK_H */ |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f654ecefea5b..08a089043ccf 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) | |||
512 | 512 | ||
513 | static const int amd_erratum_383[]; | 513 | static const int amd_erratum_383[]; |
514 | static const int amd_erratum_400[]; | 514 | static const int amd_erratum_400[]; |
515 | static bool cpu_has_amd_erratum(const int *erratum); | 515 | static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); |
516 | 516 | ||
517 | static void init_amd(struct cpuinfo_x86 *c) | 517 | static void init_amd(struct cpuinfo_x86 *c) |
518 | { | 518 | { |
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c) | |||
729 | value &= ~(1ULL << 24); | 729 | value &= ~(1ULL << 24); |
730 | wrmsrl_safe(MSR_AMD64_BU_CFG2, value); | 730 | wrmsrl_safe(MSR_AMD64_BU_CFG2, value); |
731 | 731 | ||
732 | if (cpu_has_amd_erratum(amd_erratum_383)) | 732 | if (cpu_has_amd_erratum(c, amd_erratum_383)) |
733 | set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); | 733 | set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); |
734 | } | 734 | } |
735 | 735 | ||
736 | if (cpu_has_amd_erratum(amd_erratum_400)) | 736 | if (cpu_has_amd_erratum(c, amd_erratum_400)) |
737 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); | 737 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); |
738 | 738 | ||
739 | rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); | 739 | rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); |
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] = | |||
878 | static const int amd_erratum_383[] = | 878 | static const int amd_erratum_383[] = |
879 | AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); | 879 | AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); |
880 | 880 | ||
881 | static bool cpu_has_amd_erratum(const int *erratum) | 881 | |
882 | static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) | ||
882 | { | 883 | { |
883 | struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); | ||
884 | int osvw_id = *erratum++; | 884 | int osvw_id = *erratum++; |
885 | u32 range; | 885 | u32 range; |
886 | u32 ms; | 886 | u32 ms; |
887 | 887 | ||
888 | /* | ||
889 | * If called early enough that current_cpu_data hasn't been initialized | ||
890 | * yet, fall back to boot_cpu_data. | ||
891 | */ | ||
892 | if (cpu->x86 == 0) | ||
893 | cpu = &boot_cpu_data; | ||
894 | |||
895 | if (cpu->x86_vendor != X86_VENDOR_AMD) | ||
896 | return false; | ||
897 | |||
898 | if (osvw_id >= 0 && osvw_id < 65536 && | 888 | if (osvw_id >= 0 && osvw_id < 65536 && |
899 | cpu_has(cpu, X86_FEATURE_OSVW)) { | 889 | cpu_has(cpu, X86_FEATURE_OSVW)) { |
900 | u64 osvw_len; | 890 | u64 osvw_len; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index fbc9210b45bc..a45d8d4ace10 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void) | |||
2270 | case 70: | 2270 | case 70: |
2271 | case 71: | 2271 | case 71: |
2272 | case 63: | 2272 | case 63: |
2273 | case 69: | ||
2273 | x86_pmu.late_ack = true; | 2274 | x86_pmu.late_ack = true; |
2274 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); | 2275 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
2275 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); | 2276 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index cad791dbde95..1fb6c72717bd 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = { | |||
314 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { | 314 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { |
315 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), | 315 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), |
316 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), | 316 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), |
317 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), | 317 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), |
318 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), | 318 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), |
319 | { /* end: all zeroes */ }, | 319 | { /* end: all zeroes */ }, |
320 | }; | 320 | }; |
321 | 321 | ||
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 7a0adb7ee433..7123b5df479d 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) | |||
145 | return 0; | 145 | return 0; |
146 | } | 146 | } |
147 | 147 | ||
148 | static unsigned int verify_patch_size(int cpu, u32 patch_size, | 148 | static unsigned int verify_patch_size(u8 family, u32 patch_size, |
149 | unsigned int size) | 149 | unsigned int size) |
150 | { | 150 | { |
151 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
152 | u32 max_size; | 151 | u32 max_size; |
153 | 152 | ||
154 | #define F1XH_MPB_MAX_SIZE 2048 | 153 | #define F1XH_MPB_MAX_SIZE 2048 |
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, | |||
156 | #define F15H_MPB_MAX_SIZE 4096 | 155 | #define F15H_MPB_MAX_SIZE 4096 |
157 | #define F16H_MPB_MAX_SIZE 3458 | 156 | #define F16H_MPB_MAX_SIZE 3458 |
158 | 157 | ||
159 | switch (c->x86) { | 158 | switch (family) { |
160 | case 0x14: | 159 | case 0x14: |
161 | max_size = F14H_MPB_MAX_SIZE; | 160 | max_size = F14H_MPB_MAX_SIZE; |
162 | break; | 161 | break; |
@@ -277,9 +276,8 @@ static void cleanup(void) | |||
277 | * driver cannot continue functioning normally. In such cases, we tear | 276 | * driver cannot continue functioning normally. In such cases, we tear |
278 | * down everything we've used up so far and exit. | 277 | * down everything we've used up so far and exit. |
279 | */ | 278 | */ |
280 | static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | 279 | static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) |
281 | { | 280 | { |
282 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
283 | struct microcode_header_amd *mc_hdr; | 281 | struct microcode_header_amd *mc_hdr; |
284 | struct ucode_patch *patch; | 282 | struct ucode_patch *patch; |
285 | unsigned int patch_size, crnt_size, ret; | 283 | unsigned int patch_size, crnt_size, ret; |
@@ -299,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | |||
299 | 297 | ||
300 | /* check if patch is for the current family */ | 298 | /* check if patch is for the current family */ |
301 | proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); | 299 | proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); |
302 | if (proc_fam != c->x86) | 300 | if (proc_fam != family) |
303 | return crnt_size; | 301 | return crnt_size; |
304 | 302 | ||
305 | if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { | 303 | if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { |
@@ -308,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | |||
308 | return crnt_size; | 306 | return crnt_size; |
309 | } | 307 | } |
310 | 308 | ||
311 | ret = verify_patch_size(cpu, patch_size, leftover); | 309 | ret = verify_patch_size(family, patch_size, leftover); |
312 | if (!ret) { | 310 | if (!ret) { |
313 | pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); | 311 | pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); |
314 | return crnt_size; | 312 | return crnt_size; |
@@ -339,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | |||
339 | return crnt_size; | 337 | return crnt_size; |
340 | } | 338 | } |
341 | 339 | ||
342 | static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) | 340 | static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, |
341 | size_t size) | ||
343 | { | 342 | { |
344 | enum ucode_state ret = UCODE_ERROR; | 343 | enum ucode_state ret = UCODE_ERROR; |
345 | unsigned int leftover; | 344 | unsigned int leftover; |
@@ -362,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz | |||
362 | } | 361 | } |
363 | 362 | ||
364 | while (leftover) { | 363 | while (leftover) { |
365 | crnt_size = verify_and_add_patch(cpu, fw, leftover); | 364 | crnt_size = verify_and_add_patch(family, fw, leftover); |
366 | if (crnt_size < 0) | 365 | if (crnt_size < 0) |
367 | return ret; | 366 | return ret; |
368 | 367 | ||
@@ -373,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz | |||
373 | return UCODE_OK; | 372 | return UCODE_OK; |
374 | } | 373 | } |
375 | 374 | ||
376 | enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) | 375 | enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) |
377 | { | 376 | { |
378 | enum ucode_state ret; | 377 | enum ucode_state ret; |
379 | 378 | ||
380 | /* free old equiv table */ | 379 | /* free old equiv table */ |
381 | free_equiv_cpu_table(); | 380 | free_equiv_cpu_table(); |
382 | 381 | ||
383 | ret = __load_microcode_amd(cpu, data, size); | 382 | ret = __load_microcode_amd(family, data, size); |
384 | 383 | ||
385 | if (ret != UCODE_OK) | 384 | if (ret != UCODE_OK) |
386 | cleanup(); | 385 | cleanup(); |
387 | 386 | ||
388 | #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) | 387 | #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) |
389 | /* save BSP's matching patch for early load */ | 388 | /* save BSP's matching patch for early load */ |
390 | if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { | 389 | if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { |
391 | struct ucode_patch *p = find_patch(cpu); | 390 | struct ucode_patch *p = find_patch(smp_processor_id()); |
392 | if (p) { | 391 | if (p) { |
393 | memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); | 392 | memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); |
394 | memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), | 393 | memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), |
@@ -441,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, | |||
441 | goto fw_release; | 440 | goto fw_release; |
442 | } | 441 | } |
443 | 442 | ||
444 | ret = load_microcode_amd(cpu, fw->data, fw->size); | 443 | ret = load_microcode_amd(c->x86, fw->data, fw->size); |
445 | 444 | ||
446 | fw_release: | 445 | fw_release: |
447 | release_firmware(fw); | 446 | release_firmware(fw); |
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c index 1d14ffee5749..6073104ccaa3 100644 --- a/arch/x86/kernel/microcode_amd_early.c +++ b/arch/x86/kernel/microcode_amd_early.c | |||
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg) | |||
238 | uci->cpu_sig.sig = cpuid_eax(0x00000001); | 238 | uci->cpu_sig.sig = cpuid_eax(0x00000001); |
239 | } | 239 | } |
240 | #else | 240 | #else |
241 | static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, | 241 | void load_ucode_amd_ap(void) |
242 | struct ucode_cpu_info *uci) | ||
243 | { | 242 | { |
243 | unsigned int cpu = smp_processor_id(); | ||
244 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | ||
244 | u32 rev, eax; | 245 | u32 rev, eax; |
245 | 246 | ||
246 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); | 247 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); |
247 | eax = cpuid_eax(0x00000001); | 248 | eax = cpuid_eax(0x00000001); |
248 | 249 | ||
249 | uci->cpu_sig.sig = eax; | ||
250 | uci->cpu_sig.rev = rev; | 250 | uci->cpu_sig.rev = rev; |
251 | c->microcode = rev; | 251 | uci->cpu_sig.sig = eax; |
252 | c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | ||
253 | } | ||
254 | |||
255 | void load_ucode_amd_ap(void) | ||
256 | { | ||
257 | unsigned int cpu = smp_processor_id(); | ||
258 | |||
259 | collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu); | ||
260 | 252 | ||
261 | if (cpu && !ucode_loaded) { | 253 | if (cpu && !ucode_loaded) { |
262 | void *ucode; | 254 | void *ucode; |
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void) | |||
265 | return; | 257 | return; |
266 | 258 | ||
267 | ucode = (void *)(initrd_start + ucode_offset); | 259 | ucode = (void *)(initrd_start + ucode_offset); |
268 | if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) | 260 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); |
261 | if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK) | ||
269 | return; | 262 | return; |
263 | |||
270 | ucode_loaded = true; | 264 | ucode_loaded = true; |
271 | } | 265 | } |
272 | 266 | ||
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void) | |||
278 | { | 272 | { |
279 | enum ucode_state ret; | 273 | enum ucode_state ret; |
280 | void *ucode; | 274 | void *ucode; |
275 | u32 eax; | ||
276 | |||
281 | #ifdef CONFIG_X86_32 | 277 | #ifdef CONFIG_X86_32 |
282 | unsigned int bsp = boot_cpu_data.cpu_index; | 278 | unsigned int bsp = boot_cpu_data.cpu_index; |
283 | struct ucode_cpu_info *uci = ucode_cpu_info + bsp; | 279 | struct ucode_cpu_info *uci = ucode_cpu_info + bsp; |
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void) | |||
293 | return 0; | 289 | return 0; |
294 | 290 | ||
295 | ucode = (void *)(initrd_start + ucode_offset); | 291 | ucode = (void *)(initrd_start + ucode_offset); |
296 | ret = load_microcode_amd(0, ucode, ucode_size); | 292 | eax = cpuid_eax(0x00000001); |
293 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | ||
294 | |||
295 | ret = load_microcode_amd(eax, ucode, ucode_size); | ||
297 | if (ret != UCODE_OK) | 296 | if (ret != UCODE_OK) |
298 | return -EINVAL; | 297 | return -EINVAL; |
299 | 298 | ||
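Both microcode files now derive the CPU family directly from CPUID leaf 1 EAX instead of per-cpu data: base family in bits 11:8 plus extended family in bits 27:20. A minimal sketch of that computation (the EAX value in the test is illustrative only):

    #include <assert.h>

    static unsigned int x86_family(unsigned int eax)
    {
            /* base family (bits 11:8) plus extended family (bits 27:20) */
            return ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
    }

    int main(void)
    {
            assert(x86_family(0x00600f20) == 0x15);     /* 0xf + 0x6 = family 0x15 */
            return 0;
    }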
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index dbded5aedb81..30277e27431a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
101 | *begin = new_begin; | 101 | *begin = new_begin; |
102 | } | 102 | } |
103 | } else { | 103 | } else { |
104 | *begin = TASK_UNMAPPED_BASE; | 104 | *begin = current->mm->mmap_legacy_base; |
105 | *end = TASK_SIZE; | 105 | *end = TASK_SIZE; |
106 | } | 106 | } |
107 | } | 107 | } |
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 62c29a5bfe26..25e7e1372bb2 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c | |||
@@ -112,11 +112,13 @@ static unsigned long mmap_legacy_base(void) | |||
112 | */ | 112 | */ |
113 | void arch_pick_mmap_layout(struct mm_struct *mm) | 113 | void arch_pick_mmap_layout(struct mm_struct *mm) |
114 | { | 114 | { |
115 | mm->mmap_legacy_base = mmap_legacy_base(); | ||
116 | mm->mmap_base = mmap_base(); | ||
117 | |||
115 | if (mmap_is_legacy()) { | 118 | if (mmap_is_legacy()) { |
116 | mm->mmap_base = mmap_legacy_base(); | 119 | mm->mmap_base = mm->mmap_legacy_base; |
117 | mm->get_unmapped_area = arch_get_unmapped_area; | 120 | mm->get_unmapped_area = arch_get_unmapped_area; |
118 | } else { | 121 | } else { |
119 | mm->mmap_base = mmap_base(); | ||
120 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | 122 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; |
121 | } | 123 | } |
122 | } | 124 | } |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 056d11faef21..8f3eea6b80c5 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type) | |||
313 | e820_add_region(start, end - start, type); | 313 | e820_add_region(start, end - start, type); |
314 | } | 314 | } |
315 | 315 | ||
316 | void xen_ignore_unusable(struct e820entry *list, size_t map_size) | ||
317 | { | ||
318 | struct e820entry *entry; | ||
319 | unsigned int i; | ||
320 | |||
321 | for (i = 0, entry = list; i < map_size; i++, entry++) { | ||
322 | if (entry->type == E820_UNUSABLE) | ||
323 | entry->type = E820_RAM; | ||
324 | } | ||
325 | } | ||
326 | |||
316 | /** | 327 | /** |
317 | * machine_specific_memory_setup - Hook for machine specific memory setup. | 328 | * machine_specific_memory_setup - Hook for machine specific memory setup. |
318 | **/ | 329 | **/ |
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void) | |||
353 | } | 364 | } |
354 | BUG_ON(rc); | 365 | BUG_ON(rc); |
355 | 366 | ||
367 | /* | ||
368 | * Xen won't allow a 1:1 mapping to be created to UNUSABLE | ||
369 | * regions, so if we're using the machine memory map, leave the | ||
370 | * region as RAM, as it is in the pseudo-physical map. | ||
371 | * | ||
372 | * UNUSABLE regions in domUs are not handled and will need | ||
373 | * a patch in the future. | ||
374 | */ | ||
375 | if (xen_initial_domain()) | ||
376 | xen_ignore_unusable(map, memmap.nr_entries); | ||
377 | |||
356 | /* Make sure the Xen-supplied memory map is well-ordered. */ | 378 | /* Make sure the Xen-supplied memory map is well-ordered. */ |
357 | sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); | 379 | sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); |
358 | 380 | ||
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index ca92754eb846..b81c88e51daa 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) | |||
694 | static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) | 694 | static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) |
695 | { | 695 | { |
696 | int rc; | 696 | int rc; |
697 | rc = native_cpu_up(cpu, tidle); | 697 | /* |
698 | WARN_ON (xen_smp_intr_init(cpu)); | 698 | * xen_smp_intr_init() needs to run before native_cpu_up() |
699 | * so that IPI vectors are set up on the booting CPU before | ||
700 | * it is marked online in native_cpu_up(). | ||
701 | */ | ||
702 | rc = xen_smp_intr_init(cpu); | ||
703 | WARN_ON(rc); | ||
704 | if (!rc) | ||
705 | rc = native_cpu_up(cpu, tidle); | ||
699 | return rc; | 706 | return rc; |
700 | } | 707 | } |
701 | 708 | ||
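The xen_hvm_cpu_up() change is purely an ordering fix: bring up the interrupt plumbing first, and only boot the CPU if that succeeded. A minimal sketch of the resulting control flow, with hypothetical helpers standing in for xen_smp_intr_init() and native_cpu_up():

    static int setup_cpu_interrupts(int cpu) { (void)cpu; return 0; }   /* stub */
    static int start_cpu(int cpu)            { (void)cpu; return 0; }   /* stub */

    int bring_up_cpu(int cpu)
    {
            int rc = setup_cpu_interrupts(cpu);

            if (rc)
                    return rc;      /* never online a CPU that cannot receive IPIs */
            return start_cpu(cpu);
    }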
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index e1284b8dc6ee..3270d3c8ba4e 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -908,9 +908,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
908 | device->cap._DDC = 1; | 908 | device->cap._DDC = 1; |
909 | } | 909 | } |
910 | 910 | ||
911 | if (acpi_video_init_brightness(device)) | ||
912 | return; | ||
913 | |||
914 | if (acpi_video_backlight_support()) { | 911 | if (acpi_video_backlight_support()) { |
915 | struct backlight_properties props; | 912 | struct backlight_properties props; |
916 | struct pci_dev *pdev; | 913 | struct pci_dev *pdev; |
@@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
920 | static int count = 0; | 917 | static int count = 0; |
921 | char *name; | 918 | char *name; |
922 | 919 | ||
920 | result = acpi_video_init_brightness(device); | ||
921 | if (result) | ||
922 | return; | ||
923 | name = kasprintf(GFP_KERNEL, "acpi_video%d", count); | 923 | name = kasprintf(GFP_KERNEL, "acpi_video%d", count); |
924 | if (!name) | 924 | if (!name) |
925 | return; | 925 | return; |
@@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
979 | if (result) | 979 | if (result) |
980 | printk(KERN_ERR PREFIX "Create sysfs link\n"); | 980 | printk(KERN_ERR PREFIX "Create sysfs link\n"); |
981 | 981 | ||
982 | } else { | ||
983 | /* Remove the brightness object. */ | ||
984 | kfree(device->brightness->levels); | ||
985 | kfree(device->brightness); | ||
986 | device->brightness = NULL; | ||
987 | } | 982 | } |
988 | } | 983 | } |
989 | 984 | ||
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 1c41722bb7e2..20fd337a5731 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c | |||
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info) | |||
289 | 289 | ||
290 | /* Disable sending Early R_OK. | 290 | /* Disable sending Early R_OK. |
291 | * With "cached read" HDD testing and multiple ports busy on a SATA | 291 | * With "cached read" HDD testing and multiple ports busy on a SATA |
292 | * host controller, 3726 PMP will very rarely drop a deferred | 292 | * host controller, 3x26 PMP will very rarely drop a deferred |
293 | * R_OK that was intended for the host. Symptom will be all | 293 | * R_OK that was intended for the host. Symptom will be all |
294 | * 5 drives under test will timeout, get reset, and recover. | 294 | * 5 drives under test will timeout, get reset, and recover. |
295 | */ | 295 | */ |
296 | if (vendor == 0x1095 && devid == 0x3726) { | 296 | if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) { |
297 | u32 reg; | 297 | u32 reg; |
298 | 298 | ||
299 | err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, ®); | 299 | err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, ®); |
300 | if (err_mask) { | 300 | if (err_mask) { |
301 | rc = -EIO; | 301 | rc = -EIO; |
302 | reason = "failed to read Sil3726 Private Register"; | 302 | reason = "failed to read Sil3x26 Private Register"; |
303 | goto fail; | 303 | goto fail; |
304 | } | 304 | } |
305 | reg &= ~0x1; | 305 | reg &= ~0x1; |
306 | err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg); | 306 | err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg); |
307 | if (err_mask) { | 307 | if (err_mask) { |
308 | rc = -EIO; | 308 | rc = -EIO; |
309 | reason = "failed to write Sil3726 Private Register"; | 309 | reason = "failed to write Sil3x26 Private Register"; |
310 | goto fail; | 310 | goto fail; |
311 | } | 311 | } |
312 | } | 312 | } |
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap) | |||
383 | u16 devid = sata_pmp_gscr_devid(gscr); | 383 | u16 devid = sata_pmp_gscr_devid(gscr); |
384 | struct ata_link *link; | 384 | struct ata_link *link; |
385 | 385 | ||
386 | if (vendor == 0x1095 && devid == 0x3726) { | 386 | if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) { |
387 | /* sil3726 quirks */ | 387 | /* sil3x26 quirks */ |
388 | ata_for_each_link(link, ap, EDGE) { | 388 | ata_for_each_link(link, ap, EDGE) { |
389 | /* link reports offline after LPM */ | 389 | /* link reports offline after LPM */ |
390 | link->flags |= ATA_LFLAG_NO_LPM; | 390 | link->flags |= ATA_LFLAG_NO_LPM; |
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 19720a0a4a65..851bd3f43ac6 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host, | |||
293 | { | 293 | { |
294 | struct sata_fsl_host_priv *host_priv = host->private_data; | 294 | struct sata_fsl_host_priv *host_priv = host->private_data; |
295 | void __iomem *hcr_base = host_priv->hcr_base; | 295 | void __iomem *hcr_base = host_priv->hcr_base; |
296 | unsigned long flags; | ||
296 | 297 | ||
297 | if (count > ICC_MAX_INT_COUNT_THRESHOLD) | 298 | if (count > ICC_MAX_INT_COUNT_THRESHOLD) |
298 | count = ICC_MAX_INT_COUNT_THRESHOLD; | 299 | count = ICC_MAX_INT_COUNT_THRESHOLD; |
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host, | |||
305 | (count > ICC_MIN_INT_COUNT_THRESHOLD)) | 306 | (count > ICC_MIN_INT_COUNT_THRESHOLD)) |
306 | ticks = ICC_SAFE_INT_TICKS; | 307 | ticks = ICC_SAFE_INT_TICKS; |
307 | 308 | ||
308 | spin_lock(&host->lock); | 309 | spin_lock_irqsave(&host->lock, flags); |
309 | iowrite32((count << 24 | ticks), hcr_base + ICC); | 310 | iowrite32((count << 24 | ticks), hcr_base + ICC); |
310 | 311 | ||
311 | intr_coalescing_count = count; | 312 | intr_coalescing_count = count; |
312 | intr_coalescing_ticks = ticks; | 313 | intr_coalescing_ticks = ticks; |
313 | spin_unlock(&host->lock); | 314 | spin_unlock_irqrestore(&host->lock, flags); |
314 | 315 | ||
315 | DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n", | 316 | DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n", |
316 | intr_coalescing_count, intr_coalescing_ticks); | 317 | intr_coalescing_count, intr_coalescing_ticks); |
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index d047d92a456f..e9a4f46d962e 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c | |||
@@ -86,11 +86,11 @@ struct ecx_plat_data { | |||
86 | 86 | ||
87 | #define SGPIO_SIGNALS 3 | 87 | #define SGPIO_SIGNALS 3 |
88 | #define ECX_ACTIVITY_BITS 0x300000 | 88 | #define ECX_ACTIVITY_BITS 0x300000 |
89 | #define ECX_ACTIVITY_SHIFT 2 | 89 | #define ECX_ACTIVITY_SHIFT 0 |
90 | #define ECX_LOCATE_BITS 0x80000 | 90 | #define ECX_LOCATE_BITS 0x80000 |
91 | #define ECX_LOCATE_SHIFT 1 | 91 | #define ECX_LOCATE_SHIFT 1 |
92 | #define ECX_FAULT_BITS 0x400000 | 92 | #define ECX_FAULT_BITS 0x400000 |
93 | #define ECX_FAULT_SHIFT 0 | 93 | #define ECX_FAULT_SHIFT 2 |
94 | static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port, | 94 | static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port, |
95 | u32 shift) | 95 | u32 shift) |
96 | { | 96 | { |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 99cb944a002d..4d45dba7fb8f 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio) | |||
906 | int i; | 906 | int i; |
907 | 907 | ||
908 | bio_for_each_segment(bv, bio, i) { | 908 | bio_for_each_segment(bv, bio, i) { |
909 | page = bv->bv_page; | ||
910 | /* Non-zero page count for non-head members of | 909 | /* Non-zero page count for non-head members of |
911 | * compound pages is no longer allowed by the kernel, | 910 | * compound pages is no longer allowed by the kernel. |
912 | * but this has never been seen here. | ||
913 | */ | 911 | */ |
914 | if (unlikely(PageCompound(page))) | 912 | page = compound_trans_head(bv->bv_page); |
915 | if (compound_trans_head(page) != page) { | ||
916 | pr_crit("page tail used for block I/O\n"); | ||
917 | BUG(); | ||
918 | } | ||
919 | atomic_inc(&page->_count); | 913 | atomic_inc(&page->_count); |
920 | } | 914 | } |
921 | } | 915 | } |
@@ -924,10 +918,13 @@ static void | |||
924 | bio_pagedec(struct bio *bio) | 918 | bio_pagedec(struct bio *bio) |
925 | { | 919 | { |
926 | struct bio_vec *bv; | 920 | struct bio_vec *bv; |
921 | struct page *page; | ||
927 | int i; | 922 | int i; |
928 | 923 | ||
929 | bio_for_each_segment(bv, bio, i) | 924 | bio_for_each_segment(bv, bio, i) { |
930 | atomic_dec(&bv->bv_page->_count); | 925 | page = compound_trans_head(bv->bv_page); |
926 | atomic_dec(&page->_count); | ||
927 | } | ||
931 | } | 928 | } |
932 | 929 | ||
933 | static void | 930 | static void |
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 1bdb882c845b..4e5739773c33 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c | |||
@@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = { | |||
581 | DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), | 581 | DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), |
582 | DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), | 582 | DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), |
583 | DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), | 583 | DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), |
584 | DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3), | 584 | DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3, |
585 | DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3), | 585 | CLK_GET_RATE_NOCACHE, 0), |
586 | DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3, | ||
587 | CLK_GET_RATE_NOCACHE, 0), | ||
586 | DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), | 588 | DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), |
587 | DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3), | 589 | DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, |
588 | DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3), | 590 | 4, 3, CLK_GET_RATE_NOCACHE, 0), |
591 | DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, | ||
592 | 8, 3, CLK_GET_RATE_NOCACHE, 0), | ||
589 | DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), | 593 | DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), |
590 | }; | 594 | }; |
591 | 595 | ||
@@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { | |||
863 | GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", | 867 | GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", |
864 | E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), | 868 | E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), |
865 | GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, | 869 | GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, |
866 | CLK_IGNORE_UNUSED, 0), | 870 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
867 | GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, | 871 | GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, |
868 | CLK_IGNORE_UNUSED, 0), | 872 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
869 | GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, | 873 | GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, |
870 | CLK_IGNORE_UNUSED, 0), | 874 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
871 | GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, | 875 | GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, |
872 | CLK_IGNORE_UNUSED, 0), | 876 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
873 | GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, | 877 | GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, |
874 | CLK_IGNORE_UNUSED, 0), | 878 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
875 | GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, | 879 | GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, |
876 | CLK_IGNORE_UNUSED, 0), | 880 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
877 | GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, | 881 | GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, |
878 | CLK_IGNORE_UNUSED, 0), | 882 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
879 | GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, | 883 | GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, |
880 | CLK_IGNORE_UNUSED, 0), | 884 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
881 | GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, | 885 | GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, |
882 | CLK_IGNORE_UNUSED, 0), | 886 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
883 | GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, | 887 | GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, |
884 | CLK_IGNORE_UNUSED, 0), | 888 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
885 | GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, | 889 | GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, |
886 | CLK_IGNORE_UNUSED, 0), | 890 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
887 | GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, | 891 | GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, |
888 | CLK_IGNORE_UNUSED, 0), | 892 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
889 | GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, | 893 | GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, |
890 | CLK_IGNORE_UNUSED, 0), | 894 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
891 | GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, | 895 | GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, |
892 | CLK_IGNORE_UNUSED, 0), | 896 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
893 | GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, | 897 | GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, |
894 | CLK_IGNORE_UNUSED, 0), | 898 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
895 | GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, | 899 | GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, |
896 | CLK_IGNORE_UNUSED, 0), | 900 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
897 | GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, | 901 | GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, |
898 | CLK_IGNORE_UNUSED, 0), | 902 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
899 | GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, | 903 | GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, |
900 | CLK_IGNORE_UNUSED, 0), | 904 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
901 | GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, | 905 | GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, |
902 | CLK_IGNORE_UNUSED, 0), | 906 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
903 | GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, | 907 | GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, |
904 | CLK_IGNORE_UNUSED, 0), | 908 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
905 | GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, | 909 | GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, |
906 | CLK_IGNORE_UNUSED, 0), | 910 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
907 | GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, | 911 | GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, |
908 | CLK_IGNORE_UNUSED, 0), | 912 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
909 | GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, | 913 | GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, |
910 | CLK_IGNORE_UNUSED, 0), | 914 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
911 | GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, | 915 | GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, |
912 | CLK_IGNORE_UNUSED, 0), | 916 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
913 | GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, | 917 | GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, |
914 | CLK_IGNORE_UNUSED, 0), | 918 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
915 | GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, | 919 | GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, |
916 | CLK_IGNORE_UNUSED, 0), | 920 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
917 | GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), | 921 | GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), |
918 | }; | 922 | }; |
919 | 923 | ||
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 5c205b60a82a..089d3e30e221 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c | |||
@@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock); | |||
71 | static DEFINE_SPINLOCK(ddrpll_lock); | 71 | static DEFINE_SPINLOCK(ddrpll_lock); |
72 | static DEFINE_SPINLOCK(iopll_lock); | 72 | static DEFINE_SPINLOCK(iopll_lock); |
73 | static DEFINE_SPINLOCK(armclk_lock); | 73 | static DEFINE_SPINLOCK(armclk_lock); |
74 | static DEFINE_SPINLOCK(swdtclk_lock); | ||
74 | static DEFINE_SPINLOCK(ddrclk_lock); | 75 | static DEFINE_SPINLOCK(ddrclk_lock); |
75 | static DEFINE_SPINLOCK(dciclk_lock); | 76 | static DEFINE_SPINLOCK(dciclk_lock); |
76 | static DEFINE_SPINLOCK(gem0clk_lock); | 77 | static DEFINE_SPINLOCK(gem0clk_lock); |
@@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np) | |||
293 | } | 294 | } |
294 | clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], | 295 | clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], |
295 | swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, | 296 | swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, |
296 | SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock); | 297 | SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock); |
297 | 298 | ||
298 | /* DDR clocks */ | 299 | /* DDR clocks */ |
299 | clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, | 300 | clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, |
@@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np) | |||
364 | CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, | 365 | CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, |
365 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, | 366 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, |
366 | &gem0clk_lock); | 367 | &gem0clk_lock); |
367 | clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0, | 368 | clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, |
368 | SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock); | 369 | CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0, |
370 | &gem0clk_lock); | ||
369 | clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], | 371 | clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], |
370 | "gem0_emio_mux", CLK_SET_RATE_PARENT, | 372 | "gem0_emio_mux", CLK_SET_RATE_PARENT, |
371 | SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); | 373 | SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); |
@@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np) | |||
386 | CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, | 388 | CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, |
387 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, | 389 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, |
388 | &gem1clk_lock); | 390 | &gem1clk_lock); |
389 | clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0, | 391 | clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, |
390 | SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock); | 392 | CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0, |
393 | &gem1clk_lock); | ||
391 | clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], | 394 | clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], |
392 | "gem1_emio_mux", CLK_SET_RATE_PARENT, | 395 | "gem1_emio_mux", CLK_SET_RATE_PARENT, |
393 | SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); | 396 | SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); |
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 19e36603b23b..3bc8414533c9 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c | |||
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo, | |||
500 | &status)) | 500 | &status)) |
501 | goto log_fail; | 501 | goto log_fail; |
502 | 502 | ||
503 | while (status == SDVO_CMD_STATUS_PENDING && retry--) { | 503 | while ((status == SDVO_CMD_STATUS_PENDING || |
504 | status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) { | ||
504 | udelay(15); | 505 | udelay(15); |
505 | if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, | 506 | if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, |
506 | SDVO_I2C_CMD_STATUS, | 507 | SDVO_I2C_CMD_STATUS, |
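The retry loop above now also treats SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED as a transient state worth polling again rather than an immediate failure. A small stand-alone sketch of the pattern, with made-up status codes and a fake read_status() in place of the real i2c read:

    #include <stdbool.h>
    #include <stdio.h>

    enum status { ST_PENDING, ST_NOT_SPECIFIED, ST_SUCCESS, ST_ERROR };

    static int attempts;
    static enum status read_status(void)
    {
        /* pretend the device needs a few polls before it answers */
        return ++attempts < 3 ? ST_PENDING : ST_SUCCESS;
    }

    static bool still_busy(enum status s)
    {
        /* both codes mean "not done yet", so both must keep polling */
        return s == ST_PENDING || s == ST_NOT_SPECIFIED;
    }

    int main(void)
    {
        int retry = 5;
        enum status s = read_status();

        while (still_busy(s) && retry--)
            s = read_status();

        printf("final status %d after %d polls\n", s, attempts);
        return s == ST_SUCCESS ? 0 : 1;
    }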
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index dc53a527126b..9e6578330801 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c | |||
@@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, | |||
85 | struct sg_table *sg, | 85 | struct sg_table *sg, |
86 | enum dma_data_direction dir) | 86 | enum dma_data_direction dir) |
87 | { | 87 | { |
88 | struct drm_i915_gem_object *obj = attachment->dmabuf->priv; | ||
89 | |||
90 | mutex_lock(&obj->base.dev->struct_mutex); | ||
91 | |||
88 | dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); | 92 | dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); |
89 | sg_free_table(sg); | 93 | sg_free_table(sg); |
90 | kfree(sg); | 94 | kfree(sg); |
95 | |||
96 | i915_gem_object_unpin_pages(obj); | ||
97 | |||
98 | mutex_unlock(&obj->base.dev->struct_mutex); | ||
91 | } | 99 | } |
92 | 100 | ||
93 | static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) | 101 | static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) |
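The unmap path above now takes the object lock and drops the page pin acquired when the dma-buf was mapped, keeping pin and unpin balanced. A rough userspace model of that symmetry; the object type, lock, and pin counter below are simplified stand-ins:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct object {
        pthread_mutex_t lock;
        int pin_count;
    };

    static void map_pages(struct object *obj)
    {
        pthread_mutex_lock(&obj->lock);
        obj->pin_count++;            /* taken when the buffer is mapped */
        pthread_mutex_unlock(&obj->lock);
    }

    static void unmap_pages(struct object *obj)
    {
        pthread_mutex_lock(&obj->lock);
        /* ... tear down the sg table here ... */
        obj->pin_count--;            /* undo the pin, under the same lock */
        pthread_mutex_unlock(&obj->lock);
    }

    int main(void)
    {
        struct object obj = { .pin_count = 0 };
        pthread_mutex_init(&obj.lock, NULL);

        map_pages(&obj);
        unmap_pages(&obj);
        assert(obj.pin_count == 0);  /* pins leak if unmap forgets this */
        printf("pin_count = %d\n", obj.pin_count);
        return 0;
    }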
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6f514297c483..53cddd985406 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -752,6 +752,8 @@ | |||
752 | will not assert AGPBUSY# and will only | 752 | will not assert AGPBUSY# and will only |
753 | be delivered when out of C3. */ | 753 | be delivered when out of C3. */ |
754 | #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ | 754 | #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ |
755 | #define INSTPM_TLB_INVALIDATE (1<<9) | ||
756 | #define INSTPM_SYNC_FLUSH (1<<5) | ||
755 | #define ACTHD 0x020c8 | 757 | #define ACTHD 0x020c8 |
756 | #define FW_BLC 0x020d8 | 758 | #define FW_BLC 0x020d8 |
757 | #define FW_BLC2 0x020dc | 759 | #define FW_BLC2 0x020dc |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e38b45786653..be79f477a38f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -10042,6 +10042,8 @@ struct intel_display_error_state { | |||
10042 | 10042 | ||
10043 | u32 power_well_driver; | 10043 | u32 power_well_driver; |
10044 | 10044 | ||
10045 | int num_transcoders; | ||
10046 | |||
10045 | struct intel_cursor_error_state { | 10047 | struct intel_cursor_error_state { |
10046 | u32 control; | 10048 | u32 control; |
10047 | u32 position; | 10049 | u32 position; |
@@ -10050,16 +10052,7 @@ struct intel_display_error_state { | |||
10050 | } cursor[I915_MAX_PIPES]; | 10052 | } cursor[I915_MAX_PIPES]; |
10051 | 10053 | ||
10052 | struct intel_pipe_error_state { | 10054 | struct intel_pipe_error_state { |
10053 | enum transcoder cpu_transcoder; | ||
10054 | u32 conf; | ||
10055 | u32 source; | 10055 | u32 source; |
10056 | |||
10057 | u32 htotal; | ||
10058 | u32 hblank; | ||
10059 | u32 hsync; | ||
10060 | u32 vtotal; | ||
10061 | u32 vblank; | ||
10062 | u32 vsync; | ||
10063 | } pipe[I915_MAX_PIPES]; | 10056 | } pipe[I915_MAX_PIPES]; |
10064 | 10057 | ||
10065 | struct intel_plane_error_state { | 10058 | struct intel_plane_error_state { |
@@ -10071,6 +10064,19 @@ struct intel_display_error_state { | |||
10071 | u32 surface; | 10064 | u32 surface; |
10072 | u32 tile_offset; | 10065 | u32 tile_offset; |
10073 | } plane[I915_MAX_PIPES]; | 10066 | } plane[I915_MAX_PIPES]; |
10067 | |||
10068 | struct intel_transcoder_error_state { | ||
10069 | enum transcoder cpu_transcoder; | ||
10070 | |||
10071 | u32 conf; | ||
10072 | |||
10073 | u32 htotal; | ||
10074 | u32 hblank; | ||
10075 | u32 hsync; | ||
10076 | u32 vtotal; | ||
10077 | u32 vblank; | ||
10078 | u32 vsync; | ||
10079 | } transcoder[4]; | ||
10074 | }; | 10080 | }; |
10075 | 10081 | ||
10076 | struct intel_display_error_state * | 10082 | struct intel_display_error_state * |
@@ -10078,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
10078 | { | 10084 | { |
10079 | drm_i915_private_t *dev_priv = dev->dev_private; | 10085 | drm_i915_private_t *dev_priv = dev->dev_private; |
10080 | struct intel_display_error_state *error; | 10086 | struct intel_display_error_state *error; |
10081 | enum transcoder cpu_transcoder; | 10087 | int transcoders[] = { |
10088 | TRANSCODER_A, | ||
10089 | TRANSCODER_B, | ||
10090 | TRANSCODER_C, | ||
10091 | TRANSCODER_EDP, | ||
10092 | }; | ||
10082 | int i; | 10093 | int i; |
10083 | 10094 | ||
10095 | if (INTEL_INFO(dev)->num_pipes == 0) | ||
10096 | return NULL; | ||
10097 | |||
10084 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | 10098 | error = kmalloc(sizeof(*error), GFP_ATOMIC); |
10085 | if (error == NULL) | 10099 | if (error == NULL) |
10086 | return NULL; | 10100 | return NULL; |
@@ -10089,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
10089 | error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); | 10103 | error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); |
10090 | 10104 | ||
10091 | for_each_pipe(i) { | 10105 | for_each_pipe(i) { |
10092 | cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); | ||
10093 | error->pipe[i].cpu_transcoder = cpu_transcoder; | ||
10094 | |||
10095 | if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { | 10106 | if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { |
10096 | error->cursor[i].control = I915_READ(CURCNTR(i)); | 10107 | error->cursor[i].control = I915_READ(CURCNTR(i)); |
10097 | error->cursor[i].position = I915_READ(CURPOS(i)); | 10108 | error->cursor[i].position = I915_READ(CURPOS(i)); |
@@ -10115,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
10115 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); | 10126 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); |
10116 | } | 10127 | } |
10117 | 10128 | ||
10118 | error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); | ||
10119 | error->pipe[i].source = I915_READ(PIPESRC(i)); | 10129 | error->pipe[i].source = I915_READ(PIPESRC(i)); |
10120 | error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); | 10130 | } |
10121 | error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); | 10131 | |
10122 | error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); | 10132 | error->num_transcoders = INTEL_INFO(dev)->num_pipes; |
10123 | error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); | 10133 | if (HAS_DDI(dev_priv->dev)) |
10124 | error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); | 10134 | error->num_transcoders++; /* Account for eDP. */ |
10125 | error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); | 10135 | |
10136 | for (i = 0; i < error->num_transcoders; i++) { | ||
10137 | enum transcoder cpu_transcoder = transcoders[i]; | ||
10138 | |||
10139 | error->transcoder[i].cpu_transcoder = cpu_transcoder; | ||
10140 | |||
10141 | error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); | ||
10142 | error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); | ||
10143 | error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); | ||
10144 | error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
10145 | error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); | ||
10146 | error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); | ||
10147 | error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
10126 | } | 10148 | } |
10127 | 10149 | ||
10128 | /* In the code above we read the registers without checking if the power | 10150 | /* In the code above we read the registers without checking if the power |
@@ -10144,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, | |||
10144 | { | 10166 | { |
10145 | int i; | 10167 | int i; |
10146 | 10168 | ||
10169 | if (!error) | ||
10170 | return; | ||
10171 | |||
10147 | err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); | 10172 | err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); |
10148 | if (HAS_POWER_WELL(dev)) | 10173 | if (HAS_POWER_WELL(dev)) |
10149 | err_printf(m, "PWR_WELL_CTL2: %08x\n", | 10174 | err_printf(m, "PWR_WELL_CTL2: %08x\n", |
10150 | error->power_well_driver); | 10175 | error->power_well_driver); |
10151 | for_each_pipe(i) { | 10176 | for_each_pipe(i) { |
10152 | err_printf(m, "Pipe [%d]:\n", i); | 10177 | err_printf(m, "Pipe [%d]:\n", i); |
10153 | err_printf(m, " CPU transcoder: %c\n", | ||
10154 | transcoder_name(error->pipe[i].cpu_transcoder)); | ||
10155 | err_printf(m, " CONF: %08x\n", error->pipe[i].conf); | ||
10156 | err_printf(m, " SRC: %08x\n", error->pipe[i].source); | 10178 | err_printf(m, " SRC: %08x\n", error->pipe[i].source); |
10157 | err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); | ||
10158 | err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); | ||
10159 | err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); | ||
10160 | err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); | ||
10161 | err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); | ||
10162 | err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); | ||
10163 | 10179 | ||
10164 | err_printf(m, "Plane [%d]:\n", i); | 10180 | err_printf(m, "Plane [%d]:\n", i); |
10165 | err_printf(m, " CNTR: %08x\n", error->plane[i].control); | 10181 | err_printf(m, " CNTR: %08x\n", error->plane[i].control); |
@@ -10180,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, | |||
10180 | err_printf(m, " POS: %08x\n", error->cursor[i].position); | 10196 | err_printf(m, " POS: %08x\n", error->cursor[i].position); |
10181 | err_printf(m, " BASE: %08x\n", error->cursor[i].base); | 10197 | err_printf(m, " BASE: %08x\n", error->cursor[i].base); |
10182 | } | 10198 | } |
10199 | |||
10200 | for (i = 0; i < error->num_transcoders; i++) { | ||
10201 | err_printf(m, " CPU transcoder: %c\n", | ||
10202 | transcoder_name(error->transcoder[i].cpu_transcoder)); | ||
10203 | err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); | ||
10204 | err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); | ||
10205 | err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); | ||
10206 | err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); | ||
10207 | err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); | ||
10208 | err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); | ||
10209 | err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); | ||
10210 | } | ||
10183 | } | 10211 | } |
10184 | #endif | 10212 | #endif |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 664118d8c1d6..079ef0129e74 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | |||
968 | 968 | ||
969 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); | 969 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); |
970 | POSTING_READ(mmio); | 970 | POSTING_READ(mmio); |
971 | |||
972 | /* Flush the TLB for this page */ | ||
973 | if (INTEL_INFO(dev)->gen >= 6) { | ||
974 | u32 reg = RING_INSTPM(ring->mmio_base); | ||
975 | I915_WRITE(reg, | ||
976 | _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | | ||
977 | INSTPM_SYNC_FLUSH)); | ||
978 | if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, | ||
979 | 1000)) | ||
980 | DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", | ||
981 | ring->name); | ||
982 | } | ||
971 | } | 983 | } |
972 | 984 | ||
973 | static int | 985 | static int |
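The status-page setup above asks the hardware for a TLB invalidation and then polls INSTPM until the sync-flush bit clears, bounded by a timeout. A self-contained sketch of that bounded-poll pattern, with a plain variable standing in for the MMIO register:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static unsigned int fake_register = 1u << 5;   /* "sync flush" bit set */

    static bool flush_done(void)
    {
        static int polls;
        if (++polls == 4)
            fake_register &= ~(1u << 5);           /* hardware finishes */
        return (fake_register & (1u << 5)) == 0;
    }

    /* Poll cond() until it is true or timeout_ms expires; nonzero on timeout. */
    static int wait_for(bool (*cond)(void), int timeout_ms)
    {
        struct timespec start, now;
        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (cond())
                return 0;
            clock_gettime(CLOCK_MONOTONIC, &now);
            long elapsed = (now.tv_sec - start.tv_sec) * 1000 +
                           (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed > timeout_ms)
                return -1;
        }
    }

    int main(void)
    {
        if (wait_for(flush_done, 1000))
            fprintf(stderr, "sync flush timed out\n");
        else
            printf("sync flush completed\n");
        return 0;
    }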
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c index d8291724dbd4..7a4e0891c5f8 100644 --- a/drivers/gpu/drm/nouveau/core/core/mm.c +++ b/drivers/gpu/drm/nouveau/core/core/mm.c | |||
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, | |||
98 | u32 splitoff; | 98 | u32 splitoff; |
99 | u32 s, e; | 99 | u32 s, e; |
100 | 100 | ||
101 | BUG_ON(!type); | ||
102 | |||
101 | list_for_each_entry(this, &mm->free, fl_entry) { | 103 | list_for_each_entry(this, &mm->free, fl_entry) { |
102 | e = this->offset + this->length; | 104 | e = this->offset + this->length; |
103 | s = this->offset; | 105 | s = this->offset; |
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min, | |||
162 | struct nouveau_mm_node *prev, *this, *next; | 164 | struct nouveau_mm_node *prev, *this, *next; |
163 | u32 mask = align - 1; | 165 | u32 mask = align - 1; |
164 | 166 | ||
167 | BUG_ON(!type); | ||
168 | |||
165 | list_for_each_entry_reverse(this, &mm->free, fl_entry) { | 169 | list_for_each_entry_reverse(this, &mm->free, fl_entry) { |
166 | u32 e = this->offset + this->length; | 170 | u32 e = this->offset + this->length; |
167 | u32 s = this->offset; | 171 | u32 s = this->offset; |
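The added BUG_ON(!type) documents that this allocator reserves type 0 for free nodes, so every allocation must carry a nonzero type; the ltcg hunk later in this diff switches its tag-RAM allocation from type 0 to type 1 for the same reason. A toy model of the convention, with made-up names:

    #include <assert.h>
    #include <stdio.h>

    struct node { int type; };          /* type == 0 means the node is free */

    static int alloc_node(struct node *n, int type)
    {
        assert(type != 0);              /* the equivalent of BUG_ON(!type) */
        if (n->type != 0)
            return -1;                  /* already in use */
        n->type = type;
        return 0;
    }

    static void free_node(struct node *n) { n->type = 0; }

    int main(void)
    {
        struct node n = { 0 };
        if (alloc_node(&n, 1) == 0)     /* nonzero type marks it allocated */
            printf("allocated node with type %d\n", n.type);
        free_node(&n);
        return 0;
    }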
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index d5502267c30f..9d2cd2006250 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h | |||
@@ -20,8 +20,8 @@ nouveau_mc(void *obj) | |||
20 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; | 20 | return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; |
21 | } | 21 | } |
22 | 22 | ||
23 | #define nouveau_mc_create(p,e,o,d) \ | 23 | #define nouveau_mc_create(p,e,o,m,d) \ |
24 | nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) | 24 | nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d) |
25 | #define nouveau_mc_destroy(p) ({ \ | 25 | #define nouveau_mc_destroy(p) ({ \ |
26 | struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ | 26 | struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ |
27 | }) | 27 | }) |
@@ -33,7 +33,8 @@ nouveau_mc(void *obj) | |||
33 | }) | 33 | }) |
34 | 34 | ||
35 | int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, | 35 | int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, |
36 | struct nouveau_oclass *, int, void **); | 36 | struct nouveau_oclass *, const struct nouveau_mc_intr *, |
37 | int, void **); | ||
37 | void _nouveau_mc_dtor(struct nouveau_object *); | 38 | void _nouveau_mc_dtor(struct nouveau_object *); |
38 | int _nouveau_mc_init(struct nouveau_object *); | 39 | int _nouveau_mc_init(struct nouveau_object *); |
39 | int _nouveau_mc_fini(struct nouveau_object *, bool); | 40 | int _nouveau_mc_fini(struct nouveau_object *, bool); |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c index 19e3a9a63a02..ab7ef0ac9e34 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c | |||
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, | |||
40 | return ret; | 40 | return ret; |
41 | 41 | ||
42 | switch (pfb914 & 0x00000003) { | 42 | switch (pfb914 & 0x00000003) { |
43 | case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break; | 43 | case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break; |
44 | case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break; | 44 | case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break; |
45 | case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break; | 45 | case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break; |
46 | case 0x00000003: break; | 46 | case 0x00000003: break; |
47 | } | 47 | } |
48 | 48 | ||
49 | pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; | 49 | ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; |
50 | pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; | 50 | ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; |
51 | pfb->ram->tags = nv_rd32(pfb, 0x100320); | 51 | ram->tags = nv_rd32(pfb, 0x100320); |
52 | return 0; | 52 | return 0; |
53 | } | 53 | } |
54 | 54 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c index 7192aa6e5577..63a6aab86028 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c | |||
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine, | |||
38 | if (ret) | 38 | if (ret) |
39 | return ret; | 39 | return ret; |
40 | 40 | ||
41 | pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; | 41 | ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; |
42 | pfb->ram->type = NV_MEM_TYPE_STOLEN; | 42 | ram->type = NV_MEM_TYPE_STOLEN; |
43 | return 0; | 43 | return 0; |
44 | } | 44 | } |
45 | 45 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c index bcca883018f4..cce65cc56514 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c | |||
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv { | |||
30 | struct nouveau_ltcg base; | 30 | struct nouveau_ltcg base; |
31 | u32 part_nr; | 31 | u32 part_nr; |
32 | u32 subp_nr; | 32 | u32 subp_nr; |
33 | struct nouveau_mm tags; | ||
34 | u32 num_tags; | 33 | u32 num_tags; |
34 | u32 tag_base; | ||
35 | struct nouveau_mm tags; | ||
35 | struct nouveau_mm_node *tag_ram; | 36 | struct nouveau_mm_node *tag_ram; |
36 | }; | 37 | }; |
37 | 38 | ||
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) | |||
117 | u32 tag_size, tag_margin, tag_align; | 118 | u32 tag_size, tag_margin, tag_align; |
118 | int ret; | 119 | int ret; |
119 | 120 | ||
120 | nv_wr32(priv, 0x17e8d8, priv->part_nr); | ||
121 | if (nv_device(pfb)->card_type >= NV_E0) | ||
122 | nv_wr32(priv, 0x17e000, priv->part_nr); | ||
123 | |||
124 | /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ | 121 | /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ |
125 | priv->num_tags = (pfb->ram->size >> 17) / 4; | 122 | priv->num_tags = (pfb->ram->size >> 17) / 4; |
126 | if (priv->num_tags > (1 << 17)) | 123 | if (priv->num_tags > (1 << 17)) |
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) | |||
142 | tag_size += tag_align; | 139 | tag_size += tag_align; |
143 | tag_size = (tag_size + 0xfff) >> 12; /* round up */ | 140 | tag_size = (tag_size + 0xfff) >> 12; /* round up */ |
144 | 141 | ||
145 | ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1, | 142 | ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1, |
146 | &priv->tag_ram); | 143 | &priv->tag_ram); |
147 | if (ret) { | 144 | if (ret) { |
148 | priv->num_tags = 0; | 145 | priv->num_tags = 0; |
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) | |||
152 | tag_base += tag_align - 1; | 149 | tag_base += tag_align - 1; |
153 | ret = do_div(tag_base, tag_align); | 150 | ret = do_div(tag_base, tag_align); |
154 | 151 | ||
155 | nv_wr32(priv, 0x17e8d4, tag_base); | 152 | priv->tag_base = tag_base; |
156 | } | 153 | } |
157 | ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); | 154 | ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); |
158 | 155 | ||
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
182 | } | 179 | } |
183 | priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; | 180 | priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; |
184 | 181 | ||
185 | nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ | ||
186 | |||
187 | ret = nvc0_ltcg_init_tag_ram(pfb, priv); | 182 | ret = nvc0_ltcg_init_tag_ram(pfb, priv); |
188 | if (ret) | 183 | if (ret) |
189 | return ret; | 184 | return ret; |
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object) | |||
209 | nouveau_ltcg_destroy(ltcg); | 204 | nouveau_ltcg_destroy(ltcg); |
210 | } | 205 | } |
211 | 206 | ||
207 | static int | ||
208 | nvc0_ltcg_init(struct nouveau_object *object) | ||
209 | { | ||
210 | struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; | ||
211 | struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; | ||
212 | int ret; | ||
213 | |||
214 | ret = nouveau_ltcg_init(ltcg); | ||
215 | if (ret) | ||
216 | return ret; | ||
217 | |||
218 | nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ | ||
219 | nv_wr32(priv, 0x17e8d8, priv->part_nr); | ||
220 | if (nv_device(ltcg)->card_type >= NV_E0) | ||
221 | nv_wr32(priv, 0x17e000, priv->part_nr); | ||
222 | nv_wr32(priv, 0x17e8d4, priv->tag_base); | ||
223 | return 0; | ||
224 | } | ||
225 | |||
212 | struct nouveau_oclass | 226 | struct nouveau_oclass |
213 | nvc0_ltcg_oclass = { | 227 | nvc0_ltcg_oclass = { |
214 | .handle = NV_SUBDEV(LTCG, 0xc0), | 228 | .handle = NV_SUBDEV(LTCG, 0xc0), |
215 | .ofuncs = &(struct nouveau_ofuncs) { | 229 | .ofuncs = &(struct nouveau_ofuncs) { |
216 | .ctor = nvc0_ltcg_ctor, | 230 | .ctor = nvc0_ltcg_ctor, |
217 | .dtor = nvc0_ltcg_dtor, | 231 | .dtor = nvc0_ltcg_dtor, |
218 | .init = _nouveau_ltcg_init, | 232 | .init = nvc0_ltcg_init, |
219 | .fini = _nouveau_ltcg_fini, | 233 | .fini = _nouveau_ltcg_fini, |
220 | }, | 234 | }, |
221 | }; | 235 | }; |
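The ltcg rework above leaves allocation and sizing in the constructor but moves the register programming into a new init hook, so the values are rewritten on every init (for example after resume) instead of only once at probe. A simplified sketch of that ctor/init split, with stand-in names and values:

    #include <stdio.h>

    struct ltcg_model {
        unsigned int part_nr;
        unsigned int tag_base;
    };

    static int ltcg_ctor(struct ltcg_model *priv)
    {
        priv->part_nr = 4;          /* probe topology, size tag RAM, ... */
        priv->tag_base = 0x1000;
        return 0;
    }

    static int ltcg_init(struct ltcg_model *priv)
    {
        /* register writes live here so every init, including the one after
         * a resume, reprograms the part count and tag base */
        printf("program part_nr=%u tag_base=0x%x\n",
               priv->part_nr, priv->tag_base);
        return 0;
    }

    int main(void)
    {
        struct ltcg_model priv;

        ltcg_ctor(&priv);
        ltcg_init(&priv);           /* initial bring-up */
        ltcg_init(&priv);           /* e.g. after suspend/resume */
        return 0;
    }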
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 1c0330b8c9a4..ec9cd6f10f91 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c | |||
@@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object) | |||
80 | 80 | ||
81 | int | 81 | int |
82 | nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, | 82 | nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, |
83 | struct nouveau_oclass *oclass, int length, void **pobject) | 83 | struct nouveau_oclass *oclass, |
84 | const struct nouveau_mc_intr *intr_map, | ||
85 | int length, void **pobject) | ||
84 | { | 86 | { |
85 | struct nouveau_device *device = nv_device(parent); | 87 | struct nouveau_device *device = nv_device(parent); |
86 | struct nouveau_mc *pmc; | 88 | struct nouveau_mc *pmc; |
@@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, | |||
92 | if (ret) | 94 | if (ret) |
93 | return ret; | 95 | return ret; |
94 | 96 | ||
97 | pmc->intr_map = intr_map; | ||
98 | |||
95 | ret = request_irq(device->pdev->irq, nouveau_mc_intr, | 99 | ret = request_irq(device->pdev->irq, nouveau_mc_intr, |
96 | IRQF_SHARED, "nouveau", pmc); | 100 | IRQF_SHARED, "nouveau", pmc); |
97 | if (ret < 0) | 101 | if (ret < 0) |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c index 8c769715227b..64aa4edb0d9d 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c | |||
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
50 | struct nv04_mc_priv *priv; | 50 | struct nv04_mc_priv *priv; |
51 | int ret; | 51 | int ret; |
52 | 52 | ||
53 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 53 | ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); |
54 | *pobject = nv_object(priv); | 54 | *pobject = nv_object(priv); |
55 | if (ret) | 55 | if (ret) |
56 | return ret; | 56 | return ret; |
57 | 57 | ||
58 | priv->base.intr_map = nv04_mc_intr; | ||
59 | return 0; | 58 | return 0; |
60 | } | 59 | } |
61 | 60 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c index 51919371810f..d9891782bf28 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | |||
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
36 | struct nv44_mc_priv *priv; | 36 | struct nv44_mc_priv *priv; |
37 | int ret; | 37 | int ret; |
38 | 38 | ||
39 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 39 | ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); |
40 | *pobject = nv_object(priv); | 40 | *pobject = nv_object(priv); |
41 | if (ret) | 41 | if (ret) |
42 | return ret; | 42 | return ret; |
43 | 43 | ||
44 | priv->base.intr_map = nv04_mc_intr; | ||
45 | return 0; | 44 | return 0; |
46 | } | 45 | } |
47 | 46 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c index f25fc5fc7dd1..2b1afe225db8 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | |||
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
53 | struct nv50_mc_priv *priv; | 53 | struct nv50_mc_priv *priv; |
54 | int ret; | 54 | int ret; |
55 | 55 | ||
56 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 56 | ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv); |
57 | *pobject = nv_object(priv); | 57 | *pobject = nv_object(priv); |
58 | if (ret) | 58 | if (ret) |
59 | return ret; | 59 | return ret; |
60 | 60 | ||
61 | priv->base.intr_map = nv50_mc_intr; | ||
62 | return 0; | 61 | return 0; |
63 | } | 62 | } |
64 | 63 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c index e82fd21b5041..0d57b4d3e001 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | |||
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
54 | struct nv98_mc_priv *priv; | 54 | struct nv98_mc_priv *priv; |
55 | int ret; | 55 | int ret; |
56 | 56 | ||
57 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 57 | ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv); |
58 | *pobject = nv_object(priv); | 58 | *pobject = nv_object(priv); |
59 | if (ret) | 59 | if (ret) |
60 | return ret; | 60 | return ret; |
61 | 61 | ||
62 | priv->base.intr_map = nv98_mc_intr; | ||
63 | return 0; | 62 | return 0; |
64 | } | 63 | } |
65 | 64 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c index c5da3babbc62..104175c5a2dd 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | |||
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
57 | struct nvc0_mc_priv *priv; | 57 | struct nvc0_mc_priv *priv; |
58 | int ret; | 58 | int ret; |
59 | 59 | ||
60 | ret = nouveau_mc_create(parent, engine, oclass, &priv); | 60 | ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv); |
61 | *pobject = nv_object(priv); | 61 | *pobject = nv_object(priv); |
62 | if (ret) | 62 | if (ret) |
63 | return ret; | 63 | return ret; |
64 | 64 | ||
65 | priv->base.intr_map = nvc0_mc_intr; | ||
66 | return 0; | 65 | return 0; |
67 | } | 66 | } |
68 | 67 | ||
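Across the mc hunks above, the interrupt map is now handed to nouveau_mc_create_() instead of being assigned by each chip constructor afterwards, so the map is already in place before the IRQ handler registered inside create can ever run. A compact model of that ordering, with simplified types:

    #include <stdio.h>

    struct intr_entry { unsigned int stat; const char *name; };

    struct mc_model {
        const struct intr_entry *intr_map;
    };

    static struct mc_model mc;

    static void irq_handler(unsigned int stat)
    {
        /* would dereference NULL if the map were assigned after the
         * handler had already been registered and fired */
        for (const struct intr_entry *e = mc.intr_map; e && e->stat; e++)
            if (e->stat & stat)
                printf("handled %s\n", e->name);
    }

    static void mc_create(const struct intr_entry *intr_map)
    {
        mc.intr_map = intr_map;     /* set before the handler can run */
        irq_handler(0x1);           /* stand-in for an early interrupt */
    }

    int main(void)
    {
        static const struct intr_entry map[] = {
            { 0x00000001, "PFIFO" },
            { 0 },
        };
        mc_create(map);
        return 0;
    }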
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 0782bd2f1e04..6a13ffb53bdb 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c | |||
@@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) | |||
606 | regp->ramdac_a34 = 0x1; | 606 | regp->ramdac_a34 = 0x1; |
607 | } | 607 | } |
608 | 608 | ||
609 | static int | ||
610 | nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) | ||
611 | { | ||
612 | struct nv04_display *disp = nv04_display(crtc->dev); | ||
613 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); | ||
614 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
615 | int ret; | ||
616 | |||
617 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); | ||
618 | if (ret == 0) { | ||
619 | if (disp->image[nv_crtc->index]) | ||
620 | nouveau_bo_unpin(disp->image[nv_crtc->index]); | ||
621 | nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]); | ||
622 | } | ||
623 | |||
624 | return ret; | ||
625 | } | ||
626 | |||
609 | /** | 627 | /** |
610 | * Sets up registers for the given mode/adjusted_mode pair. | 628 | * Sets up registers for the given mode/adjusted_mode pair. |
611 | * | 629 | * |
@@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
622 | struct drm_device *dev = crtc->dev; | 640 | struct drm_device *dev = crtc->dev; |
623 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 641 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
624 | struct nouveau_drm *drm = nouveau_drm(dev); | 642 | struct nouveau_drm *drm = nouveau_drm(dev); |
643 | int ret; | ||
625 | 644 | ||
626 | 	NV_DEBUG(drm, "CRTC mode on CRTC %d:\n", nv_crtc->index); | 645 | 	NV_DEBUG(drm, "CRTC mode on CRTC %d:\n", nv_crtc->index); |
627 | drm_mode_debug_printmodeline(adjusted_mode); | 646 | drm_mode_debug_printmodeline(adjusted_mode); |
628 | 647 | ||
648 | ret = nv_crtc_swap_fbs(crtc, old_fb); | ||
649 | if (ret) | ||
650 | return ret; | ||
651 | |||
629 | /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ | 652 | /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ |
630 | nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); | 653 | nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); |
631 | 654 | ||
@@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc) | |||
722 | 745 | ||
723 | static void nv_crtc_destroy(struct drm_crtc *crtc) | 746 | static void nv_crtc_destroy(struct drm_crtc *crtc) |
724 | { | 747 | { |
748 | struct nv04_display *disp = nv04_display(crtc->dev); | ||
725 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 749 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
726 | 750 | ||
727 | if (!nv_crtc) | 751 | if (!nv_crtc) |
@@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) | |||
729 | 753 | ||
730 | drm_crtc_cleanup(crtc); | 754 | drm_crtc_cleanup(crtc); |
731 | 755 | ||
756 | if (disp->image[nv_crtc->index]) | ||
757 | nouveau_bo_unpin(disp->image[nv_crtc->index]); | ||
758 | nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); | ||
759 | |||
732 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | 760 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); |
733 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | 761 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); |
734 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | 762 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); |
@@ -754,6 +782,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc) | |||
754 | } | 782 | } |
755 | 783 | ||
756 | static void | 784 | static void |
785 | nv_crtc_disable(struct drm_crtc *crtc) | ||
786 | { | ||
787 | struct nv04_display *disp = nv04_display(crtc->dev); | ||
788 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | ||
789 | if (disp->image[nv_crtc->index]) | ||
790 | nouveau_bo_unpin(disp->image[nv_crtc->index]); | ||
791 | nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); | ||
792 | } | ||
793 | |||
794 | static void | ||
757 | nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, | 795 | nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, |
758 | uint32_t size) | 796 | uint32_t size) |
759 | { | 797 | { |
@@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
791 | struct drm_framebuffer *drm_fb; | 829 | struct drm_framebuffer *drm_fb; |
792 | struct nouveau_framebuffer *fb; | 830 | struct nouveau_framebuffer *fb; |
793 | int arb_burst, arb_lwm; | 831 | int arb_burst, arb_lwm; |
794 | int ret; | ||
795 | 832 | ||
796 | NV_DEBUG(drm, "index %d\n", nv_crtc->index); | 833 | NV_DEBUG(drm, "index %d\n", nv_crtc->index); |
797 | 834 | ||
@@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
801 | return 0; | 838 | return 0; |
802 | } | 839 | } |
803 | 840 | ||
804 | |||
805 | /* If atomic, we want to switch to the fb we were passed, so | 841 | /* If atomic, we want to switch to the fb we were passed, so |
806 | * now we update pointers to do that. (We don't pin; just | 842 | * now we update pointers to do that. |
807 | * assume we're already pinned and update the base address.) | ||
808 | */ | 843 | */ |
809 | if (atomic) { | 844 | if (atomic) { |
810 | drm_fb = passed_fb; | 845 | drm_fb = passed_fb; |
@@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, | |||
812 | } else { | 847 | } else { |
813 | drm_fb = crtc->fb; | 848 | drm_fb = crtc->fb; |
814 | fb = nouveau_framebuffer(crtc->fb); | 849 | fb = nouveau_framebuffer(crtc->fb); |
815 | /* If not atomic, we can go ahead and pin, and unpin the | ||
816 | * old fb we were passed. | ||
817 | */ | ||
818 | ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); | ||
819 | if (ret) | ||
820 | return ret; | ||
821 | |||
822 | if (passed_fb) { | ||
823 | struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); | ||
824 | nouveau_bo_unpin(ofb->nvbo); | ||
825 | } | ||
826 | } | 850 | } |
827 | 851 | ||
828 | nv_crtc->fb.offset = fb->nvbo->bo.offset; | 852 | nv_crtc->fb.offset = fb->nvbo->bo.offset; |
@@ -877,6 +901,9 @@ static int | |||
877 | nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | 901 | nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, |
878 | struct drm_framebuffer *old_fb) | 902 | struct drm_framebuffer *old_fb) |
879 | { | 903 | { |
904 | int ret = nv_crtc_swap_fbs(crtc, old_fb); | ||
905 | if (ret) | ||
906 | return ret; | ||
880 | return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); | 907 | return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); |
881 | } | 908 | } |
882 | 909 | ||
@@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = { | |||
1027 | .mode_set_base = nv04_crtc_mode_set_base, | 1054 | .mode_set_base = nv04_crtc_mode_set_base, |
1028 | .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, | 1055 | .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, |
1029 | .load_lut = nv_crtc_gamma_load, | 1056 | .load_lut = nv_crtc_gamma_load, |
1057 | .disable = nv_crtc_disable, | ||
1030 | }; | 1058 | }; |
1031 | 1059 | ||
1032 | int | 1060 | int |
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index a0a031dad13f..9928187f0a7d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h | |||
@@ -81,6 +81,7 @@ struct nv04_display { | |||
81 | uint32_t saved_vga_font[4][16384]; | 81 | uint32_t saved_vga_font[4][16384]; |
82 | uint32_t dac_users[4]; | 82 | uint32_t dac_users[4]; |
83 | struct nouveau_object *core; | 83 | struct nouveau_object *core; |
84 | struct nouveau_bo *image[2]; | ||
84 | }; | 85 | }; |
85 | 86 | ||
86 | static inline struct nv04_display * | 87 | static inline struct nv04_display * |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 907d20ef6d4d..a03e75deacaf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -577,6 +577,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
577 | ret = nv50_display_flip_next(crtc, fb, chan, 0); | 577 | ret = nv50_display_flip_next(crtc, fb, chan, 0); |
578 | if (ret) | 578 | if (ret) |
579 | goto fail_unreserve; | 579 | goto fail_unreserve; |
580 | } else { | ||
581 | struct nv04_display *dispnv04 = nv04_display(dev); | ||
582 | nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]); | ||
580 | } | 583 | } |
581 | 584 | ||
582 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); | 585 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); |
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c index 3af5bcd0b203..625f80d53dc2 100644 --- a/drivers/gpu/drm/nouveau/nv40_pm.c +++ b/drivers/gpu/drm/nouveau/nv40_pm.c | |||
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll, | |||
131 | if (clk < pll->vco1.max_freq) | 131 | if (clk < pll->vco1.max_freq) |
132 | pll->vco2.max_freq = 0; | 132 | pll->vco2.max_freq = 0; |
133 | 133 | ||
134 | pclk->pll_calc(pclk, pll, clk, &coef); | 134 | ret = pclk->pll_calc(pclk, pll, clk, &coef); |
135 | if (ret == 0) | 135 | if (ret == 0) |
136 | return -ERANGE; | 136 | return -ERANGE; |
137 | 137 | ||
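The one-line fix above captures the return value of pll_calc() so the following ret == 0 check tests the actual result instead of a stale variable. A minimal illustration of that bug class; calc_coefficients() is a stand-in, not the real clock call:

    #include <stdio.h>

    static int calc_coefficients(int clk, int *coef)
    {
        if (clk <= 0)
            return 0;       /* 0 means "no usable coefficients" */
        *coef = clk / 2;
        return clk;
    }

    int main(void)
    {
        int coef = 0;
        int ret = calc_coefficients(100, &coef);  /* capture the result... */

        if (ret == 0)       /* ...so this check tests the real outcome */
            return 1;       /* corresponds to the driver's -ERANGE path */

        printf("coef = %d\n", coef);
        return 0;
    }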
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 274b8e1b889f..9f19259667df 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -2163,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); | |||
2163 | WREG32(reg, tmp_); \ | 2163 | WREG32(reg, tmp_); \ |
2164 | } while (0) | 2164 | } while (0) |
2165 | #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) | 2165 | #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) |
2166 | #define WREG32_OR(reg, or) WREG32_P(reg, or, ~or) | 2166 | #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) |
2167 | #define WREG32_PLL_P(reg, val, mask) \ | 2167 | #define WREG32_PLL_P(reg, val, mask) \ |
2168 | do { \ | 2168 | do { \ |
2169 | uint32_t tmp_ = RREG32_PLL(reg); \ | 2169 | uint32_t tmp_ = RREG32_PLL(reg); \ |
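The extra parentheses in WREG32_OR matter when the macro argument is a compound expression such as a shifted flag: "~or" expands with ~ applied only to the first operand, so the computed keep-mask is wrong and the read-modify-write can clear unrelated bits. A small compilable demonstration, modelling the register with a plain variable:

    #include <stdio.h>

    static unsigned int reg_value;

    #define RREG32()            (reg_value)
    #define WREG32(v)           (reg_value = (v))
    #define WREG32_P(val, mask) WREG32((RREG32() & (mask)) | (val))

    #define WREG32_OR_BUGGY(or) WREG32_P(or, ~or)    /* old macro */
    #define WREG32_OR_FIXED(or) WREG32_P(or, ~(or))  /* patched macro */

    int main(void)
    {
        reg_value = 0x000000f0;
        WREG32_OR_BUGGY(1u << 8);                /* mask becomes (~1u) << 8 */
        printf("buggy : 0x%08x\n", reg_value);   /* 0x00000100, low bits lost */

        reg_value = 0x000000f0;
        WREG32_OR_FIXED(1u << 8);                /* mask is ~(1u << 8) */
        printf("fixed : 0x%08x\n", reg_value);   /* 0x000001f0, bits preserved */

        return 0;
    }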
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index f1c15754e73c..b79f4f5cdd62 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -356,6 +356,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
356 | return -EINVAL; | 356 | return -EINVAL; |
357 | } | 357 | } |
358 | 358 | ||
359 | if (bo->tbo.sync_obj) { | ||
360 | r = radeon_fence_wait(bo->tbo.sync_obj, false); | ||
361 | if (r) { | ||
362 | DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); | ||
363 | return r; | ||
364 | } | ||
365 | } | ||
366 | |||
359 | r = radeon_bo_kmap(bo, &ptr); | 367 | r = radeon_bo_kmap(bo, &ptr); |
360 | if (r) { | 368 | if (r) { |
361 | DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); | 369 | DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); |
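The UVD hunk above waits on any pending fence attached to the buffer before mapping and parsing the message, so the CPU never reads data the GPU is still writing. The pattern is simply "synchronize, then access"; no separate sketch is needed beyond the hunk itself.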
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index bcc68ec204ad..f5e92cfcc140 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) | |||
744 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); | 744 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); |
745 | radeon_program_register_sequence(rdev, | 745 | radeon_program_register_sequence(rdev, |
746 | rv730_golden_registers, | 746 | rv730_golden_registers, |
747 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 747 | (const u32)ARRAY_SIZE(rv730_golden_registers)); |
748 | radeon_program_register_sequence(rdev, | 748 | radeon_program_register_sequence(rdev, |
749 | rv730_mgcg_init, | 749 | rv730_mgcg_init, |
750 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 750 | (const u32)ARRAY_SIZE(rv730_mgcg_init)); |
751 | break; | 751 | break; |
752 | case CHIP_RV710: | 752 | case CHIP_RV710: |
753 | radeon_program_register_sequence(rdev, | 753 | radeon_program_register_sequence(rdev, |
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev) | |||
758 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); | 758 | (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers)); |
759 | radeon_program_register_sequence(rdev, | 759 | radeon_program_register_sequence(rdev, |
760 | rv710_golden_registers, | 760 | rv710_golden_registers, |
761 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 761 | (const u32)ARRAY_SIZE(rv710_golden_registers)); |
762 | radeon_program_register_sequence(rdev, | 762 | radeon_program_register_sequence(rdev, |
763 | rv710_mgcg_init, | 763 | rv710_mgcg_init, |
764 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 764 | (const u32)ARRAY_SIZE(rv710_mgcg_init)); |
765 | break; | 765 | break; |
766 | case CHIP_RV740: | 766 | case CHIP_RV740: |
767 | radeon_program_register_sequence(rdev, | 767 | radeon_program_register_sequence(rdev, |
768 | rv740_golden_registers, | 768 | rv740_golden_registers, |
769 | (const u32)ARRAY_SIZE(rv770_golden_registers)); | 769 | (const u32)ARRAY_SIZE(rv740_golden_registers)); |
770 | radeon_program_register_sequence(rdev, | 770 | radeon_program_register_sequence(rdev, |
771 | rv740_mgcg_init, | 771 | rv740_mgcg_init, |
772 | (const u32)ARRAY_SIZE(rv770_mgcg_init)); | 772 | (const u32)ARRAY_SIZE(rv740_mgcg_init)); |
773 | break; | 773 | break; |
774 | default: | 774 | default: |
775 | break; | 775 | break; |
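The rv770 hunk above fixes copy-and-paste sizing: each register table is now programmed with ARRAY_SIZE() of that same table, where the old code measured a different, larger table and read past the end of the shorter one. A small demonstration with stand-in tables:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const unsigned int rv730_regs[] = { 0x1, 0x2, 0x3 };
    static const unsigned int rv770_regs[] = { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6 };

    static void program_sequence(const unsigned int *regs, unsigned int count)
    {
        for (unsigned int i = 0; i < count; i++)
            printf("  write 0x%x\n", regs[i]);
    }

    int main(void)
    {
        /* Wrong: 6 entries requested from a 3-entry table (out-of-bounds):
         *   program_sequence(rv730_regs, ARRAY_SIZE(rv770_regs));
         * Right: the count always comes from the table being passed. */
        program_sequence(rv730_regs, ARRAY_SIZE(rv730_regs));
        (void)rv770_regs;
        return 0;
    }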
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c index 5f4749e60b04..c1cd5698b8ae 100644 --- a/drivers/iio/light/adjd_s311.c +++ b/drivers/iio/light/adjd_s311.c | |||
@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev, | |||
232 | 232 | ||
233 | switch (mask) { | 233 | switch (mask) { |
234 | case IIO_CHAN_INFO_RAW: | 234 | case IIO_CHAN_INFO_RAW: |
235 | ret = adjd_s311_read_data(indio_dev, chan->address, val); | 235 | ret = adjd_s311_read_data(indio_dev, |
236 | ADJD_S311_DATA_REG(chan->address), val); | ||
236 | if (ret < 0) | 237 | if (ret < 0) |
237 | return ret; | 238 | return ret; |
238 | return IIO_VAL_INT; | 239 | return IIO_VAL_INT; |
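The adjd_s311 fix above translates the logical channel address into the device's data-register offset before reading, instead of passing the raw channel index as a register number. An illustration of that translation; the register base and stride below are hypothetical, not taken from the datasheet:

    #include <stdio.h>

    #define DATA_REG_BASE  0x40
    #define DATA_REG(chan) (DATA_REG_BASE + (chan) * 2)   /* 16 bits/channel */

    static int read_reg(unsigned int reg)
    {
        printf("reading register 0x%02x\n", reg);
        return 0x123;   /* pretend sensor value */
    }

    int main(void)
    {
        unsigned int channel = 2;

        /* Wrong: read_reg(channel) would hit register 0x02, not channel 2's
         * data register. Right: translate first. */
        int val = read_reg(DATA_REG(channel));
        printf("channel %u -> %d\n", channel, val);
        return 0;
    }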
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index dc112a7137fe..4296155090b2 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c | |||
@@ -959,23 +959,21 @@ out: | |||
959 | return r; | 959 | return r; |
960 | } | 960 | } |
961 | 961 | ||
962 | static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) | 962 | static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) |
963 | { | 963 | { |
964 | struct entry *e = hash_lookup(mq, oblock); | 964 | struct mq_policy *mq = to_mq_policy(p); |
965 | struct entry *e; | ||
966 | |||
967 | mutex_lock(&mq->lock); | ||
968 | |||
969 | e = hash_lookup(mq, oblock); | ||
965 | 970 | ||
966 | BUG_ON(!e || !e->in_cache); | 971 | BUG_ON(!e || !e->in_cache); |
967 | 972 | ||
968 | del(mq, e); | 973 | del(mq, e); |
969 | e->in_cache = false; | 974 | e->in_cache = false; |
970 | push(mq, e); | 975 | push(mq, e); |
971 | } | ||
972 | 976 | ||
973 | static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) | ||
974 | { | ||
975 | struct mq_policy *mq = to_mq_policy(p); | ||
976 | |||
977 | mutex_lock(&mq->lock); | ||
978 | remove_mapping(mq, oblock); | ||
979 | mutex_unlock(&mq->lock); | 977 | mutex_unlock(&mq->lock); |
980 | } | 978 | } |
981 | 979 | ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 07f257d44a1e..e48cb339c0c6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n) | |||
3714 | * The bonding ndo_neigh_setup is called at init time before any | 3714 | * The bonding ndo_neigh_setup is called at init time before any |
3715 | * slave exists. So we must declare proxy setup function which will | 3715 | * slave exists. So we must declare proxy setup function which will |
3716 | * be used at run time to resolve the actual slave neigh param setup. | 3716 | * be used at run time to resolve the actual slave neigh param setup. |
3717 | * | ||
3718 | * It's also called by master devices (such as vlans) to setup their | ||
3719 | * underlying devices. In that case - do nothing, we're already set up from | ||
3720 | * our init. | ||
3717 | */ | 3721 | */ |
3718 | static int bond_neigh_setup(struct net_device *dev, | 3722 | static int bond_neigh_setup(struct net_device *dev, |
3719 | struct neigh_parms *parms) | 3723 | struct neigh_parms *parms) |
3720 | { | 3724 | { |
3721 | parms->neigh_setup = bond_neigh_init; | 3725 | /* modify only our neigh_parms */ |
3726 | if (parms->dev == dev) | ||
3727 | parms->neigh_setup = bond_neigh_init; | ||
3722 | 3728 | ||
3723 | return 0; | 3729 | return 0; |
3724 | } | 3730 | } |
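The bonding hunk above restricts the setup hook to neigh parms that belong to the bond device itself, so stacked devices such as VLANs calling down into it are left untouched. A compact stand-alone model with simplified types:

    #include <stdio.h>

    struct net_device { char name[16]; };

    struct neigh_parms {
        struct net_device *dev;
        void (*neigh_setup)(void);
    };

    static void bond_neigh_init(void) { puts("bond-specific neigh init"); }

    static int bond_neigh_setup(struct net_device *dev,
                                struct neigh_parms *parms)
    {
        if (parms->dev == dev)              /* modify only our own parms */
            parms->neigh_setup = bond_neigh_init;
        return 0;
    }

    int main(void)
    {
        struct net_device bond = { "bond0" }, vlan = { "bond0.100" };
        struct neigh_parms own = { &bond, NULL }, stacked = { &vlan, NULL };

        bond_neigh_setup(&bond, &own);      /* hook installed */
        bond_neigh_setup(&bond, &stacked);  /* left untouched */

        printf("own: %s, stacked: %s\n",
               own.neigh_setup ? "set" : "unset",
               stacked.neigh_setup ? "set" : "unset");
        return 0;
    }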
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 25723d8ee201..925ab8ec9329 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c | |||
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) | |||
649 | if ((mc->ptr + rec_len) > mc->end) | 649 | if ((mc->ptr + rec_len) > mc->end) |
650 | goto decode_failed; | 650 | goto decode_failed; |
651 | 651 | ||
652 | memcpy(cf->data, mc->ptr, rec_len); | 652 | memcpy(cf->data, mc->ptr, cf->can_dlc); |
653 | mc->ptr += rec_len; | 653 | mc->ptr += rec_len; |
654 | } | 654 | } |
655 | 655 | ||
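The CAN fix above bounds the copy by the already-clamped DLC rather than the raw record length, because the destination is the fixed 8-byte frame data buffer. A small illustration; the struct name and record layout below are simplified:

    #include <stdio.h>
    #include <string.h>

    #define CAN_MAX_DLEN 8

    struct can_frame_model {
        unsigned char can_dlc;
        unsigned char data[CAN_MAX_DLEN];
    };

    int main(void)
    {
        unsigned char usb_record[12] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
                                         0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
        unsigned int rec_len = sizeof(usb_record);  /* may exceed 8 on the wire */
        struct can_frame_model cf = { 0 };

        cf.can_dlc = rec_len > CAN_MAX_DLEN ? CAN_MAX_DLEN : rec_len;

        /* memcpy(cf.data, usb_record, rec_len) would overflow cf.data;
         * copying can_dlc bytes stays within the frame. */
        memcpy(cf.data, usb_record, cf.can_dlc);

        printf("copied %u of %u record bytes\n",
               (unsigned int)cf.can_dlc, rec_len);
        return 0;
    }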
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index f1b121ee5525..55d79cb53a79 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) | |||
199 | struct arc_emac_priv *priv = netdev_priv(ndev); | 199 | struct arc_emac_priv *priv = netdev_priv(ndev); |
200 | unsigned int work_done; | 200 | unsigned int work_done; |
201 | 201 | ||
202 | for (work_done = 0; work_done <= budget; work_done++) { | 202 | for (work_done = 0; work_done < budget; work_done++) { |
203 | unsigned int *last_rx_bd = &priv->last_rx_bd; | 203 | unsigned int *last_rx_bd = &priv->last_rx_bd; |
204 | struct net_device_stats *stats = &priv->stats; | 204 | struct net_device_stats *stats = &priv->stats; |
205 | struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; | 205 | struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; |
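The emac fix above changes the receive loop to run at most budget iterations, matching the NAPI contract; the old <= comparison allowed one extra packet per poll. A counted-loop model:

    #include <assert.h>
    #include <stdio.h>

    static int process_packets(int budget)
    {
        int work_done;

        for (work_done = 0; work_done < budget; work_done++) {
            /* ... handle one received buffer per iteration ... */
        }
        return work_done;
    }

    int main(void)
    {
        int done = process_packets(64);
        assert(done <= 64);              /* "<= budget" would yield 65 */
        printf("processed %d packets\n", done);
        return 0;
    }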
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d80e34b8285f..00b88cbfde25 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -1333,6 +1333,8 @@ enum { | |||
1333 | BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, | 1333 | BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, |
1334 | BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, | 1334 | BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, |
1335 | BNX2X_SP_RTNL_HYPERVISOR_VLAN, | 1335 | BNX2X_SP_RTNL_HYPERVISOR_VLAN, |
1336 | BNX2X_SP_RTNL_TX_STOP, | ||
1337 | BNX2X_SP_RTNL_TX_RESUME, | ||
1336 | }; | 1338 | }; |
1337 | 1339 | ||
1338 | struct bnx2x_prev_path_list { | 1340 | struct bnx2x_prev_path_list { |
@@ -1502,6 +1504,7 @@ struct bnx2x { | |||
1502 | #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) | 1504 | #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) |
1503 | #define IS_VF_FLAG (1 << 22) | 1505 | #define IS_VF_FLAG (1 << 22) |
1504 | #define INTERRUPTS_ENABLED_FLAG (1 << 23) | 1506 | #define INTERRUPTS_ENABLED_FLAG (1 << 23) |
1507 | #define BC_SUPPORTS_RMMOD_CMD (1 << 24) | ||
1505 | 1508 | ||
1506 | #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) | 1509 | #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) |
1507 | 1510 | ||
@@ -1830,6 +1833,8 @@ struct bnx2x { | |||
1830 | 1833 | ||
1831 | int fp_array_size; | 1834 | int fp_array_size; |
1832 | u32 dump_preset_idx; | 1835 | u32 dump_preset_idx; |
1836 | bool stats_started; | ||
1837 | struct semaphore stats_sema; | ||
1833 | }; | 1838 | }; |
1834 | 1839 | ||
1835 | /* Tx queues may be less or equal to Rx queues */ | 1840 | /* Tx queues may be less or equal to Rx queues */ |
@@ -2451,4 +2456,6 @@ enum bnx2x_pci_bus_speed { | |||
2451 | BNX2X_PCI_LINK_SPEED_5000 = 5000, | 2456 | BNX2X_PCI_LINK_SPEED_5000 = 5000, |
2452 | BNX2X_PCI_LINK_SPEED_8000 = 8000 | 2457 | BNX2X_PCI_LINK_SPEED_8000 = 8000 |
2453 | }; | 2458 | }; |
2459 | |||
2460 | void bnx2x_set_local_cmng(struct bnx2x *bp); | ||
2454 | #endif /* bnx2x.h */ | 2461 | #endif /* bnx2x.h */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 0c94df47e0e8..fcf2761d8828 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | |||
@@ -30,10 +30,8 @@ | |||
30 | #include "bnx2x_dcb.h" | 30 | #include "bnx2x_dcb.h" |
31 | 31 | ||
32 | /* forward declarations of dcbx related functions */ | 32 | /* forward declarations of dcbx related functions */ |
33 | static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); | ||
34 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp); | 33 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp); |
35 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); | 34 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); |
36 | static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); | ||
37 | static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, | 35 | static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, |
38 | u32 *set_configuration_ets_pg, | 36 | u32 *set_configuration_ets_pg, |
39 | u32 *pri_pg_tbl); | 37 | u32 *pri_pg_tbl); |
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp) | |||
425 | bnx2x_pfc_clear(bp); | 423 | bnx2x_pfc_clear(bp); |
426 | } | 424 | } |
427 | 425 | ||
428 | static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) | 426 | int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) |
429 | { | 427 | { |
430 | struct bnx2x_func_state_params func_params = {NULL}; | 428 | struct bnx2x_func_state_params func_params = {NULL}; |
429 | int rc; | ||
431 | 430 | ||
432 | func_params.f_obj = &bp->func_obj; | 431 | func_params.f_obj = &bp->func_obj; |
433 | func_params.cmd = BNX2X_F_CMD_TX_STOP; | 432 | func_params.cmd = BNX2X_F_CMD_TX_STOP; |
434 | 433 | ||
434 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
435 | __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); | ||
436 | |||
435 | DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); | 437 | DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); |
436 | return bnx2x_func_state_change(bp, &func_params); | 438 | |
439 | rc = bnx2x_func_state_change(bp, &func_params); | ||
440 | if (rc) { | ||
441 | BNX2X_ERR("Unable to hold traffic for HW configuration\n"); | ||
442 | bnx2x_panic(); | ||
443 | } | ||
444 | |||
445 | return rc; | ||
437 | } | 446 | } |
438 | 447 | ||
439 | static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) | 448 | int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) |
440 | { | 449 | { |
441 | struct bnx2x_func_state_params func_params = {NULL}; | 450 | struct bnx2x_func_state_params func_params = {NULL}; |
442 | struct bnx2x_func_tx_start_params *tx_params = | 451 | struct bnx2x_func_tx_start_params *tx_params = |
443 | &func_params.params.tx_start; | 452 | &func_params.params.tx_start; |
453 | int rc; | ||
444 | 454 | ||
445 | func_params.f_obj = &bp->func_obj; | 455 | func_params.f_obj = &bp->func_obj; |
446 | func_params.cmd = BNX2X_F_CMD_TX_START; | 456 | func_params.cmd = BNX2X_F_CMD_TX_START; |
447 | 457 | ||
458 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
459 | __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); | ||
460 | |||
448 | bnx2x_dcbx_fw_struct(bp, tx_params); | 461 | bnx2x_dcbx_fw_struct(bp, tx_params); |
449 | 462 | ||
450 | DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); | 463 | DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); |
451 | return bnx2x_func_state_change(bp, &func_params); | 464 | |
465 | rc = bnx2x_func_state_change(bp, &func_params); | ||
466 | if (rc) { | ||
467 | BNX2X_ERR("Unable to resume traffic after HW configuration\n"); | ||
468 | bnx2x_panic(); | ||
469 | } | ||
470 | |||
471 | return rc; | ||
452 | } | 472 | } |
453 | 473 | ||
454 | static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) | 474 | static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) |
@@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
744 | if (IS_MF(bp)) | 764 | if (IS_MF(bp)) |
745 | bnx2x_link_sync_notify(bp); | 765 | bnx2x_link_sync_notify(bp); |
746 | 766 | ||
747 | bnx2x_dcbx_stop_hw_tx(bp); | 767 | set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state); |
768 | |||
769 | schedule_delayed_work(&bp->sp_rtnl_task, 0); | ||
748 | 770 | ||
749 | return; | 771 | return; |
750 | } | 772 | } |
@@ -753,7 +775,13 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
753 | bnx2x_pfc_set_pfc(bp); | 775 | bnx2x_pfc_set_pfc(bp); |
754 | 776 | ||
755 | bnx2x_dcbx_update_ets_params(bp); | 777 | bnx2x_dcbx_update_ets_params(bp); |
756 | bnx2x_dcbx_resume_hw_tx(bp); | 778 | |
779 | /* ets may affect cmng configuration: reinit it in hw */ | ||
780 | bnx2x_set_local_cmng(bp); | ||
781 | |||
782 | set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); | ||
783 | |||
784 | schedule_delayed_work(&bp->sp_rtnl_task, 0); | ||
757 | 785 | ||
758 | return; | 786 | return; |
759 | case BNX2X_DCBX_STATE_TX_RELEASED: | 787 | case BNX2X_DCBX_STATE_TX_RELEASED: |
@@ -2363,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, | |||
2363 | case DCB_FEATCFG_ATTR_PG: | 2391 | case DCB_FEATCFG_ATTR_PG: |
2364 | if (bp->dcbx_local_feat.ets.enabled) | 2392 | if (bp->dcbx_local_feat.ets.enabled) |
2365 | *flags |= DCB_FEATCFG_ENABLE; | 2393 | *flags |= DCB_FEATCFG_ENABLE; |
2366 | if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) | 2394 | if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR | |
2395 | DCBX_REMOTE_MIB_ERROR)) | ||
2367 | *flags |= DCB_FEATCFG_ERROR; | 2396 | *flags |= DCB_FEATCFG_ERROR; |
2368 | break; | 2397 | break; |
2369 | case DCB_FEATCFG_ATTR_PFC: | 2398 | case DCB_FEATCFG_ATTR_PFC: |
2370 | if (bp->dcbx_local_feat.pfc.enabled) | 2399 | if (bp->dcbx_local_feat.pfc.enabled) |
2371 | *flags |= DCB_FEATCFG_ENABLE; | 2400 | *flags |= DCB_FEATCFG_ENABLE; |
2372 | if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | | 2401 | if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | |
2373 | DCBX_LOCAL_PFC_MISMATCH)) | 2402 | DCBX_LOCAL_PFC_MISMATCH | |
2403 | DCBX_REMOTE_MIB_ERROR)) | ||
2374 | *flags |= DCB_FEATCFG_ERROR; | 2404 | *flags |= DCB_FEATCFG_ERROR; |
2375 | break; | 2405 | break; |
2376 | case DCB_FEATCFG_ATTR_APP: | 2406 | case DCB_FEATCFG_ATTR_APP: |
2377 | if (bp->dcbx_local_feat.app.enabled) | 2407 | if (bp->dcbx_local_feat.app.enabled) |
2378 | *flags |= DCB_FEATCFG_ENABLE; | 2408 | *flags |= DCB_FEATCFG_ENABLE; |
2379 | if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | | 2409 | if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | |
2380 | DCBX_LOCAL_APP_MISMATCH)) | 2410 | DCBX_LOCAL_APP_MISMATCH | |
2411 | DCBX_REMOTE_MIB_ERROR)) | ||
2381 | *flags |= DCB_FEATCFG_ERROR; | 2412 | *flags |= DCB_FEATCFG_ERROR; |
2382 | break; | 2413 | break; |
2383 | default: | 2414 | default: |
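The bnx2x_dcb.c hunks above stop issuing the TX stop/start ramrods directly from the DCBX attention path; instead they set a BNX2X_SP_RTNL_* bit and let the sp_rtnl delayed work perform the state change, which now waits for ramrod completion. A generic kernel-style sketch of that defer-to-worker pattern, using only stock workqueue/bitops APIs and illustrative names:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/bitops.h>

	enum { SKETCH_TX_STOP, SKETCH_TX_RESUME };

	struct sketch_priv {
		unsigned long pending;		/* deferred-action bits */
		struct delayed_work task;	/* worker that is allowed to sleep */
	};

	/* Called from a context that must not block: just mark and kick. */
	static void sketch_defer(struct sketch_priv *p, int bit)
	{
		set_bit(bit, &p->pending);
		schedule_delayed_work(&p->task, 0);
	}

	/* Worker body: safe to wait for ramrod completion here. */
	static void sketch_worker(struct work_struct *work)
	{
		struct sketch_priv *p =
			container_of(to_delayed_work(work), struct sketch_priv, task);

		if (test_and_clear_bit(SKETCH_TX_STOP, &p->pending))
			;	/* issue TX_STOP and wait for its completion */
		if (test_and_clear_bit(SKETCH_TX_RESUME, &p->pending))
			;	/* issue TX_START and wait for its completion */
	}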
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index 125bd1b6586f..804b8f64463e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | |||
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; | |||
199 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); | 199 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); |
200 | #endif /* BCM_DCBNL */ | 200 | #endif /* BCM_DCBNL */ |
201 | 201 | ||
202 | int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); | ||
203 | int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); | ||
204 | |||
202 | #endif /* BNX2X_DCB_H */ | 205 | #endif /* BNX2X_DCB_H */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5018e52ae2ad..32767f6aa33f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | |||
@@ -1300,6 +1300,9 @@ struct drv_func_mb { | |||
1300 | 1300 | ||
1301 | #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 | 1301 | #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 |
1302 | 1302 | ||
1303 | #define DRV_MSG_CODE_RMMOD 0xdb000000 | ||
1304 | #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f | ||
1305 | |||
1303 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 | 1306 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 |
1304 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 | 1307 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 |
1305 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 | 1308 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 |
@@ -1372,6 +1375,8 @@ struct drv_func_mb { | |||
1372 | 1375 | ||
1373 | #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 | 1376 | #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 |
1374 | 1377 | ||
1378 | #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 | ||
1379 | |||
1375 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 | 1380 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 |
1376 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 | 1381 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 |
1377 | 1382 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e06186c305d8..8bdc8b973007 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp) | |||
2261 | bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; | 2261 | bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; |
2262 | } | 2262 | } |
2263 | 2263 | ||
2264 | static void bnx2x_init_dropless_fc(struct bnx2x *bp) | ||
2265 | { | ||
2266 | u32 pause_enabled = 0; | ||
2267 | |||
2268 | if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { | ||
2269 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | ||
2270 | pause_enabled = 1; | ||
2271 | |||
2272 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
2273 | USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), | ||
2274 | pause_enabled); | ||
2275 | } | ||
2276 | |||
2277 | DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", | ||
2278 | pause_enabled ? "enabled" : "disabled"); | ||
2279 | } | ||
2280 | |||
2264 | int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | 2281 | int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) |
2265 | { | 2282 | { |
2266 | int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); | 2283 | int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); |
@@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | |||
2294 | 2311 | ||
2295 | bnx2x_release_phy_lock(bp); | 2312 | bnx2x_release_phy_lock(bp); |
2296 | 2313 | ||
2314 | bnx2x_init_dropless_fc(bp); | ||
2315 | |||
2297 | bnx2x_calc_fc_adv(bp); | 2316 | bnx2x_calc_fc_adv(bp); |
2298 | 2317 | ||
2299 | if (bp->link_vars.link_up) { | 2318 | if (bp->link_vars.link_up) { |
@@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp) | |||
2315 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 2334 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
2316 | bnx2x_release_phy_lock(bp); | 2335 | bnx2x_release_phy_lock(bp); |
2317 | 2336 | ||
2337 | bnx2x_init_dropless_fc(bp); | ||
2338 | |||
2318 | bnx2x_calc_fc_adv(bp); | 2339 | bnx2x_calc_fc_adv(bp); |
2319 | } else | 2340 | } else |
2320 | BNX2X_ERR("Bootcode is missing - can not set link\n"); | 2341 | BNX2X_ERR("Bootcode is missing - can not set link\n"); |
@@ -2476,7 +2497,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2476 | 2497 | ||
2477 | input.port_rate = bp->link_vars.line_speed; | 2498 | input.port_rate = bp->link_vars.line_speed; |
2478 | 2499 | ||
2479 | if (cmng_type == CMNG_FNS_MINMAX) { | 2500 | if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { |
2480 | int vn; | 2501 | int vn; |
2481 | 2502 | ||
2482 | /* read mf conf from shmem */ | 2503 | /* read mf conf from shmem */ |
@@ -2533,6 +2554,21 @@ static void storm_memset_cmng(struct bnx2x *bp, | |||
2533 | } | 2554 | } |
2534 | } | 2555 | } |
2535 | 2556 | ||
2557 | /* init cmng mode in HW according to local configuration */ | ||
2558 | void bnx2x_set_local_cmng(struct bnx2x *bp) | ||
2559 | { | ||
2560 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | ||
2561 | |||
2562 | if (cmng_fns != CMNG_FNS_NONE) { | ||
2563 | bnx2x_cmng_fns_init(bp, false, cmng_fns); | ||
2564 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | ||
2565 | } else { | ||
2566 | /* rate shaping and fairness are disabled */ | ||
2567 | DP(NETIF_MSG_IFUP, | ||
2568 | "single function mode without fairness\n"); | ||
2569 | } | ||
2570 | } | ||
2571 | |||
2536 | /* This function is called upon link interrupt */ | 2572 | /* This function is called upon link interrupt */ |
2537 | static void bnx2x_link_attn(struct bnx2x *bp) | 2573 | static void bnx2x_link_attn(struct bnx2x *bp) |
2538 | { | 2574 | { |
@@ -2541,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2541 | 2577 | ||
2542 | bnx2x_link_update(&bp->link_params, &bp->link_vars); | 2578 | bnx2x_link_update(&bp->link_params, &bp->link_vars); |
2543 | 2579 | ||
2544 | if (bp->link_vars.link_up) { | 2580 | bnx2x_init_dropless_fc(bp); |
2545 | 2581 | ||
2546 | /* dropless flow control */ | 2582 | if (bp->link_vars.link_up) { |
2547 | if (!CHIP_IS_E1(bp) && bp->dropless_fc) { | ||
2548 | int port = BP_PORT(bp); | ||
2549 | u32 pause_enabled = 0; | ||
2550 | |||
2551 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | ||
2552 | pause_enabled = 1; | ||
2553 | |||
2554 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
2555 | USTORM_ETH_PAUSE_ENABLED_OFFSET(port), | ||
2556 | pause_enabled); | ||
2557 | } | ||
2558 | 2583 | ||
2559 | if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { | 2584 | if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { |
2560 | struct host_port_stats *pstats; | 2585 | struct host_port_stats *pstats; |
@@ -2568,17 +2593,8 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2568 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); | 2593 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); |
2569 | } | 2594 | } |
2570 | 2595 | ||
2571 | if (bp->link_vars.link_up && bp->link_vars.line_speed) { | 2596 | if (bp->link_vars.link_up && bp->link_vars.line_speed) |
2572 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | 2597 | bnx2x_set_local_cmng(bp); |
2573 | |||
2574 | if (cmng_fns != CMNG_FNS_NONE) { | ||
2575 | bnx2x_cmng_fns_init(bp, false, cmng_fns); | ||
2576 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | ||
2577 | } else | ||
2578 | /* rate shaping and fairness are disabled */ | ||
2579 | DP(NETIF_MSG_IFUP, | ||
2580 | "single function mode without fairness\n"); | ||
2581 | } | ||
2582 | 2598 | ||
2583 | __bnx2x_link_report(bp); | 2599 | __bnx2x_link_report(bp); |
2584 | 2600 | ||
@@ -9639,6 +9655,12 @@ sp_rtnl_not_reset: | |||
9639 | &bp->sp_rtnl_state)) | 9655 | &bp->sp_rtnl_state)) |
9640 | bnx2x_pf_set_vfs_vlan(bp); | 9656 | bnx2x_pf_set_vfs_vlan(bp); |
9641 | 9657 | ||
9658 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) | ||
9659 | bnx2x_dcbx_stop_hw_tx(bp); | ||
9660 | |||
9661 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) | ||
9662 | bnx2x_dcbx_resume_hw_tx(bp); | ||
9663 | |||
9642 | /* work which needs rtnl lock not-taken (as it takes the lock itself and | 9664 | /* work which needs rtnl lock not-taken (as it takes the lock itself and |
9643 | * can be called from other contexts as well) | 9665 | * can be called from other contexts as well) |
9644 | */ | 9666 | */ |
@@ -10362,6 +10384,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
10362 | 10384 | ||
10363 | bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? | 10385 | bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? |
10364 | BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; | 10386 | BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; |
10387 | |||
10388 | bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? | ||
10389 | BC_SUPPORTS_RMMOD_CMD : 0; | ||
10390 | |||
10365 | boot_mode = SHMEM_RD(bp, | 10391 | boot_mode = SHMEM_RD(bp, |
10366 | dev_info.port_feature_config[BP_PORT(bp)].mba_config) & | 10392 | dev_info.port_feature_config[BP_PORT(bp)].mba_config) & |
10367 | PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; | 10393 | PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; |
@@ -11137,6 +11163,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp) | |||
11137 | int tmp; | 11163 | int tmp; |
11138 | u32 cfg; | 11164 | u32 cfg; |
11139 | 11165 | ||
11166 | if (IS_VF(bp)) | ||
11167 | return 0; | ||
11168 | |||
11140 | if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { | 11169 | if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { |
11141 | /* Take function: tmp = func */ | 11170 | /* Take function: tmp = func */ |
11142 | tmp = BP_ABS_FUNC(bp); | 11171 | tmp = BP_ABS_FUNC(bp); |
@@ -11524,6 +11553,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) | |||
11524 | mutex_init(&bp->port.phy_mutex); | 11553 | mutex_init(&bp->port.phy_mutex); |
11525 | mutex_init(&bp->fw_mb_mutex); | 11554 | mutex_init(&bp->fw_mb_mutex); |
11526 | spin_lock_init(&bp->stats_lock); | 11555 | spin_lock_init(&bp->stats_lock); |
11556 | sema_init(&bp->stats_sema, 1); | ||
11527 | 11557 | ||
11528 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 11558 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
11529 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); | 11559 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); |
@@ -12817,13 +12847,17 @@ static void __bnx2x_remove(struct pci_dev *pdev, | |||
12817 | bnx2x_dcbnl_update_applist(bp, true); | 12847 | bnx2x_dcbnl_update_applist(bp, true); |
12818 | #endif | 12848 | #endif |
12819 | 12849 | ||
12850 | if (IS_PF(bp) && | ||
12851 | !BP_NOMCP(bp) && | ||
12852 | (bp->flags & BC_SUPPORTS_RMMOD_CMD)) | ||
12853 | bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); | ||
12854 | |||
12820 | /* Close the interface - either directly or implicitly */ | 12855 | /* Close the interface - either directly or implicitly */ |
12821 | if (remove_netdev) { | 12856 | if (remove_netdev) { |
12822 | unregister_netdev(dev); | 12857 | unregister_netdev(dev); |
12823 | } else { | 12858 | } else { |
12824 | rtnl_lock(); | 12859 | rtnl_lock(); |
12825 | if (netif_running(dev)) | 12860 | dev_close(dev); |
12826 | bnx2x_close(dev); | ||
12827 | rtnl_unlock(); | 12861 | rtnl_unlock(); |
12828 | } | 12862 | } |
12829 | 12863 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 95861efb5051..ad83f4b48777 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -1747,11 +1747,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) | |||
1747 | 1747 | ||
1748 | void bnx2x_iov_init_dmae(struct bnx2x *bp) | 1748 | void bnx2x_iov_init_dmae(struct bnx2x *bp) |
1749 | { | 1749 | { |
1750 | DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF"); | 1750 | if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) |
1751 | if (!IS_SRIOV(bp)) | 1751 | REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); |
1752 | return; | ||
1753 | |||
1754 | REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); | ||
1755 | } | 1752 | } |
1756 | 1753 | ||
1757 | static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) | 1754 | static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) |
@@ -3084,8 +3081,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp) | |||
3084 | pci_disable_sriov(bp->pdev); | 3081 | pci_disable_sriov(bp->pdev); |
3085 | } | 3082 | } |
3086 | 3083 | ||
3087 | static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, | 3084 | static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, |
3088 | struct bnx2x_virtf *vf) | 3085 | struct bnx2x_virtf **vf, |
3086 | struct pf_vf_bulletin_content **bulletin) | ||
3089 | { | 3087 | { |
3090 | if (bp->state != BNX2X_STATE_OPEN) { | 3088 | if (bp->state != BNX2X_STATE_OPEN) { |
3091 | BNX2X_ERR("vf ndo called though PF is down\n"); | 3089 | BNX2X_ERR("vf ndo called though PF is down\n"); |
@@ -3103,12 +3101,22 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, | |||
3103 | return -EINVAL; | 3101 | return -EINVAL; |
3104 | } | 3102 | } |
3105 | 3103 | ||
3106 | if (!vf) { | 3104 | /* init members */ |
3105 | *vf = BP_VF(bp, vfidx); | ||
3106 | *bulletin = BP_VF_BULLETIN(bp, vfidx); | ||
3107 | |||
3108 | if (!*vf) { | ||
3107 | BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", | 3109 | BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", |
3108 | vfidx); | 3110 | vfidx); |
3109 | return -EINVAL; | 3111 | return -EINVAL; |
3110 | } | 3112 | } |
3111 | 3113 | ||
3114 | if (!*bulletin) { | ||
3115 | BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", | ||
3116 | vfidx); | ||
3117 | return -EINVAL; | ||
3118 | } | ||
3119 | |||
3112 | return 0; | 3120 | return 0; |
3113 | } | 3121 | } |
3114 | 3122 | ||
@@ -3116,17 +3124,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, | |||
3116 | struct ifla_vf_info *ivi) | 3124 | struct ifla_vf_info *ivi) |
3117 | { | 3125 | { |
3118 | struct bnx2x *bp = netdev_priv(dev); | 3126 | struct bnx2x *bp = netdev_priv(dev); |
3119 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3127 | struct bnx2x_virtf *vf = NULL; |
3120 | struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); | 3128 | struct pf_vf_bulletin_content *bulletin = NULL; |
3121 | struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); | 3129 | struct bnx2x_vlan_mac_obj *mac_obj; |
3122 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3130 | struct bnx2x_vlan_mac_obj *vlan_obj; |
3123 | int rc; | 3131 | int rc; |
3124 | 3132 | ||
3125 | /* sanity */ | 3133 | /* sanity and init */ |
3126 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3134 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
3127 | if (rc) | 3135 | if (rc) |
3128 | return rc; | 3136 | return rc; |
3129 | if (!mac_obj || !vlan_obj || !bulletin) { | 3137 | mac_obj = &bnx2x_vfq(vf, 0, mac_obj); |
3138 | vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); | ||
3139 | if (!mac_obj || !vlan_obj) { | ||
3130 | BNX2X_ERR("VF partially initialized\n"); | 3140 | BNX2X_ERR("VF partially initialized\n"); |
3131 | return -EINVAL; | 3141 | return -EINVAL; |
3132 | } | 3142 | } |
@@ -3183,11 +3193,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) | |||
3183 | { | 3193 | { |
3184 | struct bnx2x *bp = netdev_priv(dev); | 3194 | struct bnx2x *bp = netdev_priv(dev); |
3185 | int rc, q_logical_state; | 3195 | int rc, q_logical_state; |
3186 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3196 | struct bnx2x_virtf *vf = NULL; |
3187 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3197 | struct pf_vf_bulletin_content *bulletin = NULL; |
3188 | 3198 | ||
3189 | /* sanity */ | 3199 | /* sanity and init */ |
3190 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3200 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
3191 | if (rc) | 3201 | if (rc) |
3192 | return rc; | 3202 | return rc; |
3193 | if (!is_valid_ether_addr(mac)) { | 3203 | if (!is_valid_ether_addr(mac)) { |
@@ -3249,11 +3259,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | |||
3249 | { | 3259 | { |
3250 | struct bnx2x *bp = netdev_priv(dev); | 3260 | struct bnx2x *bp = netdev_priv(dev); |
3251 | int rc, q_logical_state; | 3261 | int rc, q_logical_state; |
3252 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3262 | struct bnx2x_virtf *vf = NULL; |
3253 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3263 | struct pf_vf_bulletin_content *bulletin = NULL; |
3254 | 3264 | ||
3255 | /* sanity */ | 3265 | /* sanity and init */ |
3256 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3266 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
3257 | if (rc) | 3267 | if (rc) |
3258 | return rc; | 3268 | return rc; |
3259 | 3269 | ||
@@ -3463,7 +3473,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) | |||
3463 | alloc_mem_err: | 3473 | alloc_mem_err: |
3464 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3474 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, |
3465 | sizeof(struct bnx2x_vf_mbx_msg)); | 3475 | sizeof(struct bnx2x_vf_mbx_msg)); |
3466 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3476 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, |
3467 | sizeof(union pf_vf_bulletin)); | 3477 | sizeof(union pf_vf_bulletin)); |
3468 | return -ENOMEM; | 3478 | return -ENOMEM; |
3469 | } | 3479 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 98366abd02bd..d63d1327b051 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | |||
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp) | |||
221 | * Statistics service functions | 221 | * Statistics service functions |
222 | */ | 222 | */ |
223 | 223 | ||
224 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | 224 | /* should be called under stats_sema */ |
225 | static void __bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
225 | { | 226 | { |
226 | struct dmae_command *dmae; | 227 | struct dmae_command *dmae; |
227 | u32 opcode; | 228 | u32 opcode; |
@@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) | |||
518 | *stats_comp = 0; | 519 | *stats_comp = 0; |
519 | } | 520 | } |
520 | 521 | ||
521 | static void bnx2x_stats_start(struct bnx2x *bp) | 522 | /* should be called under stats_sema */ |
523 | static void __bnx2x_stats_start(struct bnx2x *bp) | ||
522 | { | 524 | { |
523 | /* vfs travel through here as part of the statistics FSM, but no action | 525 | /* vfs travel through here as part of the statistics FSM, but no action |
524 | * is required | 526 | * is required |
@@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp) | |||
534 | 536 | ||
535 | bnx2x_hw_stats_post(bp); | 537 | bnx2x_hw_stats_post(bp); |
536 | bnx2x_storm_stats_post(bp); | 538 | bnx2x_storm_stats_post(bp); |
539 | |||
540 | bp->stats_started = true; | ||
541 | } | ||
542 | |||
543 | static void bnx2x_stats_start(struct bnx2x *bp) | ||
544 | { | ||
545 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
546 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
547 | __bnx2x_stats_start(bp); | ||
548 | up(&bp->stats_sema); | ||
537 | } | 549 | } |
538 | 550 | ||
539 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) | 551 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) |
540 | { | 552 | { |
553 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
554 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
541 | bnx2x_stats_comp(bp); | 555 | bnx2x_stats_comp(bp); |
542 | bnx2x_stats_pmf_update(bp); | 556 | __bnx2x_stats_pmf_update(bp); |
543 | bnx2x_stats_start(bp); | 557 | __bnx2x_stats_start(bp); |
558 | up(&bp->stats_sema); | ||
559 | } | ||
560 | |||
561 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
562 | { | ||
563 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
564 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
565 | __bnx2x_stats_pmf_update(bp); | ||
566 | up(&bp->stats_sema); | ||
544 | } | 567 | } |
545 | 568 | ||
546 | static void bnx2x_stats_restart(struct bnx2x *bp) | 569 | static void bnx2x_stats_restart(struct bnx2x *bp) |
@@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp) | |||
550 | */ | 573 | */ |
551 | if (IS_VF(bp)) | 574 | if (IS_VF(bp)) |
552 | return; | 575 | return; |
576 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
577 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
553 | bnx2x_stats_comp(bp); | 578 | bnx2x_stats_comp(bp); |
554 | bnx2x_stats_start(bp); | 579 | __bnx2x_stats_start(bp); |
580 | up(&bp->stats_sema); | ||
555 | } | 581 | } |
556 | 582 | ||
557 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) | 583 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) |
@@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) | |||
888 | /* Make sure we use the value of the counter | 914 | /* Make sure we use the value of the counter |
889 | * used for sending the last stats ramrod. | 915 | * used for sending the last stats ramrod. |
890 | */ | 916 | */ |
891 | spin_lock_bh(&bp->stats_lock); | ||
892 | cur_stats_counter = bp->stats_counter - 1; | 917 | cur_stats_counter = bp->stats_counter - 1; |
893 | spin_unlock_bh(&bp->stats_lock); | ||
894 | 918 | ||
895 | /* are storm stats valid? */ | 919 | /* are storm stats valid? */ |
896 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { | 920 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { |
@@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1227 | { | 1251 | { |
1228 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | 1252 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); |
1229 | 1253 | ||
1230 | if (bnx2x_edebug_stats_stopped(bp)) | 1254 | /* we run update from timer context, so give up |
1255 | * if somebody is in the middle of transition | ||
1256 | */ | ||
1257 | if (down_trylock(&bp->stats_sema)) | ||
1231 | return; | 1258 | return; |
1232 | 1259 | ||
1260 | if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) | ||
1261 | goto out; | ||
1262 | |||
1233 | if (IS_PF(bp)) { | 1263 | if (IS_PF(bp)) { |
1234 | if (*stats_comp != DMAE_COMP_VAL) | 1264 | if (*stats_comp != DMAE_COMP_VAL) |
1235 | return; | 1265 | goto out; |
1236 | 1266 | ||
1237 | if (bp->port.pmf) | 1267 | if (bp->port.pmf) |
1238 | bnx2x_hw_stats_update(bp); | 1268 | bnx2x_hw_stats_update(bp); |
@@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1242 | BNX2X_ERR("storm stats were not updated for 3 times\n"); | 1272 | BNX2X_ERR("storm stats were not updated for 3 times\n"); |
1243 | bnx2x_panic(); | 1273 | bnx2x_panic(); |
1244 | } | 1274 | } |
1245 | return; | 1275 | goto out; |
1246 | } | 1276 | } |
1247 | } else { | 1277 | } else { |
1248 | /* vf doesn't collect HW statistics, and doesn't get completions | 1278 | /* vf doesn't collect HW statistics, and doesn't get completions |
@@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1256 | 1286 | ||
1257 | /* vf is done */ | 1287 | /* vf is done */ |
1258 | if (IS_VF(bp)) | 1288 | if (IS_VF(bp)) |
1259 | return; | 1289 | goto out; |
1260 | 1290 | ||
1261 | if (netif_msg_timer(bp)) { | 1291 | if (netif_msg_timer(bp)) { |
1262 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 1292 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
@@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1267 | 1297 | ||
1268 | bnx2x_hw_stats_post(bp); | 1298 | bnx2x_hw_stats_post(bp); |
1269 | bnx2x_storm_stats_post(bp); | 1299 | bnx2x_storm_stats_post(bp); |
1300 | |||
1301 | out: | ||
1302 | up(&bp->stats_sema); | ||
1270 | } | 1303 | } |
1271 | 1304 | ||
1272 | static void bnx2x_port_stats_stop(struct bnx2x *bp) | 1305 | static void bnx2x_port_stats_stop(struct bnx2x *bp) |
@@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
1332 | { | 1365 | { |
1333 | int update = 0; | 1366 | int update = 0; |
1334 | 1367 | ||
1368 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
1369 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
1370 | |||
1371 | bp->stats_started = false; | ||
1372 | |||
1335 | bnx2x_stats_comp(bp); | 1373 | bnx2x_stats_comp(bp); |
1336 | 1374 | ||
1337 | if (bp->port.pmf) | 1375 | if (bp->port.pmf) |
@@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
1348 | bnx2x_hw_stats_post(bp); | 1386 | bnx2x_hw_stats_post(bp); |
1349 | bnx2x_stats_comp(bp); | 1387 | bnx2x_stats_comp(bp); |
1350 | } | 1388 | } |
1389 | |||
1390 | up(&bp->stats_sema); | ||
1351 | } | 1391 | } |
1352 | 1392 | ||
1353 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) | 1393 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) |
@@ -1376,15 +1416,17 @@ static const struct { | |||
1376 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | 1416 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) |
1377 | { | 1417 | { |
1378 | enum bnx2x_stats_state state; | 1418 | enum bnx2x_stats_state state; |
1419 | void (*action)(struct bnx2x *bp); | ||
1379 | if (unlikely(bp->panic)) | 1420 | if (unlikely(bp->panic)) |
1380 | return; | 1421 | return; |
1381 | 1422 | ||
1382 | spin_lock_bh(&bp->stats_lock); | 1423 | spin_lock_bh(&bp->stats_lock); |
1383 | state = bp->stats_state; | 1424 | state = bp->stats_state; |
1384 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1425 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
1426 | action = bnx2x_stats_stm[state][event].action; | ||
1385 | spin_unlock_bh(&bp->stats_lock); | 1427 | spin_unlock_bh(&bp->stats_lock); |
1386 | 1428 | ||
1387 | bnx2x_stats_stm[state][event].action(bp); | 1429 | action(bp); |
1388 | 1430 | ||
1389 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1431 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
1390 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1432 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
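The bnx2x_stats.c hunks above wrap every statistics state transition in the new stats_sema: sleeping paths take it with down_timeout(), while the timer-driven update only try-locks and quietly skips a cycle if a transition is in flight; the state-machine handler also captures the action pointer under the spinlock and calls it afterwards. A compact kernel-style sketch of the semaphore split, with illustrative names only:

	#include <linux/semaphore.h>
	#include <linux/jiffies.h>

	static struct semaphore stats_sema_sketch;
	static bool stats_started_sketch;

	static void stats_init_sketch(void)
	{
		sema_init(&stats_sema_sketch, 1);
	}

	static void stats_stop_sketch(void)	/* process context, may sleep */
	{
		if (down_timeout(&stats_sema_sketch, HZ / 10))
			return;			/* could not get the lock in time */
		stats_started_sketch = false;
		/* ... flush pending statistics DMAE here ... */
		up(&stats_sema_sketch);
	}

	static void stats_update_sketch(void)	/* timer context, must not sleep */
	{
		if (down_trylock(&stats_sema_sketch))
			return;			/* mid-transition: skip this cycle */
		if (stats_started_sketch) {
			/* ... post the periodic statistics request here ... */
		}
		up(&stats_sema_sketch);
	}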
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ddebc7a5dda0..0da2214ef1b9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -17796,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | |||
17796 | 17796 | ||
17797 | done: | 17797 | done: |
17798 | if (state == pci_channel_io_perm_failure) { | 17798 | if (state == pci_channel_io_perm_failure) { |
17799 | tg3_napi_enable(tp); | 17799 | if (netdev) { |
17800 | dev_close(netdev); | 17800 | tg3_napi_enable(tp); |
17801 | dev_close(netdev); | ||
17802 | } | ||
17801 | err = PCI_ERS_RESULT_DISCONNECT; | 17803 | err = PCI_ERS_RESULT_DISCONNECT; |
17802 | } else { | 17804 | } else { |
17803 | pci_disable_device(pdev); | 17805 | pci_disable_device(pdev); |
@@ -17827,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17827 | rtnl_lock(); | 17829 | rtnl_lock(); |
17828 | 17830 | ||
17829 | if (pci_enable_device(pdev)) { | 17831 | if (pci_enable_device(pdev)) { |
17830 | netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); | 17832 | dev_err(&pdev->dev, |
17833 | "Cannot re-enable PCI device after reset.\n"); | ||
17831 | goto done; | 17834 | goto done; |
17832 | } | 17835 | } |
17833 | 17836 | ||
@@ -17835,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17835 | pci_restore_state(pdev); | 17838 | pci_restore_state(pdev); |
17836 | pci_save_state(pdev); | 17839 | pci_save_state(pdev); |
17837 | 17840 | ||
17838 | if (!netif_running(netdev)) { | 17841 | if (!netdev || !netif_running(netdev)) { |
17839 | rc = PCI_ERS_RESULT_RECOVERED; | 17842 | rc = PCI_ERS_RESULT_RECOVERED; |
17840 | goto done; | 17843 | goto done; |
17841 | } | 17844 | } |
@@ -17847,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17847 | rc = PCI_ERS_RESULT_RECOVERED; | 17850 | rc = PCI_ERS_RESULT_RECOVERED; |
17848 | 17851 | ||
17849 | done: | 17852 | done: |
17850 | if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { | 17853 | if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { |
17851 | tg3_napi_enable(tp); | 17854 | tg3_napi_enable(tp); |
17852 | dev_close(netdev); | 17855 | dev_close(netdev); |
17853 | } | 17856 | } |
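The tg3 hunks above guard the AER callbacks against a netdev that has already been torn down, testing the pointer before tg3_napi_enable()/dev_close()/netif_running(). A short illustrative sketch of the error_detected side; the real handler runs with rtnl held around dev_close() and has additional paths:

	#include <linux/netdevice.h>
	#include <linux/pci.h>

	static pci_ers_result_t io_error_sketch(struct net_device *netdev,
						pci_channel_state_t state)
	{
		if (state == pci_channel_io_perm_failure) {
			if (netdev)
				dev_close(netdev);	/* caller holds rtnl_lock */
			return PCI_ERS_RESULT_DISCONNECT;
		}
		return PCI_ERS_RESULT_NEED_RESET;
	}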
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 687ec4a8bb48..9c89dc8fe105 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c | |||
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, | |||
455 | q->pg_chunk.offset = 0; | 455 | q->pg_chunk.offset = 0; |
456 | mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, | 456 | mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, |
457 | 0, q->alloc_size, PCI_DMA_FROMDEVICE); | 457 | 0, q->alloc_size, PCI_DMA_FROMDEVICE); |
458 | if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { | ||
459 | __free_pages(q->pg_chunk.page, order); | ||
460 | q->pg_chunk.page = NULL; | ||
461 | return -EIO; | ||
462 | } | ||
463 | q->pg_chunk.mapping = mapping; | 458 | q->pg_chunk.mapping = mapping; |
464 | } | 459 | } |
465 | sd->pg_chunk = q->pg_chunk; | 460 | sd->pg_chunk = q->pg_chunk; |
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) | |||
954 | return flits_to_desc(flits); | 949 | return flits_to_desc(flits); |
955 | } | 950 | } |
956 | 951 | ||
957 | |||
958 | /* map_skb - map a packet main body and its page fragments | ||
959 | * @pdev: the PCI device | ||
960 | * @skb: the packet | ||
961 | * @addr: placeholder to save the mapped addresses | ||
962 | * | ||
963 | * map the main body of an sk_buff and its page fragments, if any. | ||
964 | */ | ||
965 | static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, | ||
966 | dma_addr_t *addr) | ||
967 | { | ||
968 | const skb_frag_t *fp, *end; | ||
969 | const struct skb_shared_info *si; | ||
970 | |||
971 | *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), | ||
972 | PCI_DMA_TODEVICE); | ||
973 | if (pci_dma_mapping_error(pdev, *addr)) | ||
974 | goto out_err; | ||
975 | |||
976 | si = skb_shinfo(skb); | ||
977 | end = &si->frags[si->nr_frags]; | ||
978 | |||
979 | for (fp = si->frags; fp < end; fp++) { | ||
980 | *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), | ||
981 | DMA_TO_DEVICE); | ||
982 | if (pci_dma_mapping_error(pdev, *addr)) | ||
983 | goto unwind; | ||
984 | } | ||
985 | return 0; | ||
986 | |||
987 | unwind: | ||
988 | while (fp-- > si->frags) | ||
989 | dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), | ||
990 | DMA_TO_DEVICE); | ||
991 | |||
992 | pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); | ||
993 | out_err: | ||
994 | return -ENOMEM; | ||
995 | } | ||
996 | |||
997 | /** | 952 | /** |
998 | * write_sgl - populate a scatter/gather list for a packet | 953 | * make_sgl - populate a scatter/gather list for a packet |
999 | * @skb: the packet | 954 | * @skb: the packet |
1000 | * @sgp: the SGL to populate | 955 | * @sgp: the SGL to populate |
1001 | * @start: start address of skb main body data to include in the SGL | 956 | * @start: start address of skb main body data to include in the SGL |
1002 | * @len: length of skb main body data to include in the SGL | 957 | * @len: length of skb main body data to include in the SGL |
1003 | * @addr: the list of the mapped addresses | 958 | * @pdev: the PCI device |
1004 | * | 959 | * |
1005 | * Copies the scatter/gather list for the buffers that make up a packet | 960 | * Generates a scatter/gather list for the buffers that make up a packet |
1006 | * and returns the SGL size in 8-byte words. The caller must size the SGL | 961 | * and returns the SGL size in 8-byte words. The caller must size the SGL |
1007 | * appropriately. | 962 | * appropriately. |
1008 | */ | 963 | */ |
1009 | static inline unsigned int write_sgl(const struct sk_buff *skb, | 964 | static inline unsigned int make_sgl(const struct sk_buff *skb, |
1010 | struct sg_ent *sgp, unsigned char *start, | 965 | struct sg_ent *sgp, unsigned char *start, |
1011 | unsigned int len, const dma_addr_t *addr) | 966 | unsigned int len, struct pci_dev *pdev) |
1012 | { | 967 | { |
1013 | unsigned int i, j = 0, k = 0, nfrags; | 968 | dma_addr_t mapping; |
969 | unsigned int i, j = 0, nfrags; | ||
1014 | 970 | ||
1015 | if (len) { | 971 | if (len) { |
972 | mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); | ||
1016 | sgp->len[0] = cpu_to_be32(len); | 973 | sgp->len[0] = cpu_to_be32(len); |
1017 | sgp->addr[j++] = cpu_to_be64(addr[k++]); | 974 | sgp->addr[0] = cpu_to_be64(mapping); |
975 | j = 1; | ||
1018 | } | 976 | } |
1019 | 977 | ||
1020 | nfrags = skb_shinfo(skb)->nr_frags; | 978 | nfrags = skb_shinfo(skb)->nr_frags; |
1021 | for (i = 0; i < nfrags; i++) { | 979 | for (i = 0; i < nfrags; i++) { |
1022 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 980 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1023 | 981 | ||
982 | mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), | ||
983 | DMA_TO_DEVICE); | ||
1024 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); | 984 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); |
1025 | sgp->addr[j] = cpu_to_be64(addr[k++]); | 985 | sgp->addr[j] = cpu_to_be64(mapping); |
1026 | j ^= 1; | 986 | j ^= 1; |
1027 | if (j == 0) | 987 | if (j == 0) |
1028 | ++sgp; | 988 | ++sgp; |
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
1178 | const struct port_info *pi, | 1138 | const struct port_info *pi, |
1179 | unsigned int pidx, unsigned int gen, | 1139 | unsigned int pidx, unsigned int gen, |
1180 | struct sge_txq *q, unsigned int ndesc, | 1140 | struct sge_txq *q, unsigned int ndesc, |
1181 | unsigned int compl, const dma_addr_t *addr) | 1141 | unsigned int compl) |
1182 | { | 1142 | { |
1183 | unsigned int flits, sgl_flits, cntrl, tso_info; | 1143 | unsigned int flits, sgl_flits, cntrl, tso_info; |
1184 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; | 1144 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; |
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
1236 | } | 1196 | } |
1237 | 1197 | ||
1238 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1198 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
1239 | sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); | 1199 | sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); |
1240 | 1200 | ||
1241 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, | 1201 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, |
1242 | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), | 1202 | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), |
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1267 | struct netdev_queue *txq; | 1227 | struct netdev_queue *txq; |
1268 | struct sge_qset *qs; | 1228 | struct sge_qset *qs; |
1269 | struct sge_txq *q; | 1229 | struct sge_txq *q; |
1270 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
1271 | 1230 | ||
1272 | /* | 1231 | /* |
1273 | * The chip min packet length is 9 octets but play safe and reject | 1232 | * The chip min packet length is 9 octets but play safe and reject |
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1296 | return NETDEV_TX_BUSY; | 1255 | return NETDEV_TX_BUSY; |
1297 | } | 1256 | } |
1298 | 1257 | ||
1299 | if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { | ||
1300 | dev_kfree_skb(skb); | ||
1301 | return NETDEV_TX_OK; | ||
1302 | } | ||
1303 | |||
1304 | q->in_use += ndesc; | 1258 | q->in_use += ndesc; |
1305 | if (unlikely(credits - ndesc < q->stop_thres)) { | 1259 | if (unlikely(credits - ndesc < q->stop_thres)) { |
1306 | t3_stop_tx_queue(txq, qs, q); | 1260 | t3_stop_tx_queue(txq, qs, q); |
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1358 | if (likely(!skb_shared(skb))) | 1312 | if (likely(!skb_shared(skb))) |
1359 | skb_orphan(skb); | 1313 | skb_orphan(skb); |
1360 | 1314 | ||
1361 | write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); | 1315 | write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); |
1362 | check_ring_tx_db(adap, q); | 1316 | check_ring_tx_db(adap, q); |
1363 | return NETDEV_TX_OK; | 1317 | return NETDEV_TX_OK; |
1364 | } | 1318 | } |
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, | |||
1623 | */ | 1577 | */ |
1624 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | 1578 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, |
1625 | struct sge_txq *q, unsigned int pidx, | 1579 | struct sge_txq *q, unsigned int pidx, |
1626 | unsigned int gen, unsigned int ndesc, | 1580 | unsigned int gen, unsigned int ndesc) |
1627 | const dma_addr_t *addr) | ||
1628 | { | 1581 | { |
1629 | unsigned int sgl_flits, flits; | 1582 | unsigned int sgl_flits, flits; |
1630 | struct work_request_hdr *from; | 1583 | struct work_request_hdr *from; |
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | |||
1645 | 1598 | ||
1646 | flits = skb_transport_offset(skb) / 8; | 1599 | flits = skb_transport_offset(skb) / 8; |
1647 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1600 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
1648 | sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), | 1601 | sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), |
1649 | skb_tail_pointer(skb) - | 1602 | skb->tail - skb->transport_header, |
1650 | skb_transport_header(skb), addr); | 1603 | adap->pdev); |
1651 | if (need_skb_unmap()) { | 1604 | if (need_skb_unmap()) { |
1652 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); | 1605 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); |
1653 | skb->destructor = deferred_unmap_destructor; | 1606 | skb->destructor = deferred_unmap_destructor; |
@@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1705 | goto again; | 1658 | goto again; |
1706 | } | 1659 | } |
1707 | 1660 | ||
1708 | if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { | ||
1709 | spin_unlock(&q->lock); | ||
1710 | return NET_XMIT_SUCCESS; | ||
1711 | } | ||
1712 | |||
1713 | gen = q->gen; | 1661 | gen = q->gen; |
1714 | q->in_use += ndesc; | 1662 | q->in_use += ndesc; |
1715 | pidx = q->pidx; | 1663 | pidx = q->pidx; |
@@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1720 | } | 1668 | } |
1721 | spin_unlock(&q->lock); | 1669 | spin_unlock(&q->lock); |
1722 | 1670 | ||
1723 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); | 1671 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
1724 | check_ring_tx_db(adap, q); | 1672 | check_ring_tx_db(adap, q); |
1725 | return NET_XMIT_SUCCESS; | 1673 | return NET_XMIT_SUCCESS; |
1726 | } | 1674 | } |
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data) | |||
1738 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; | 1686 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; |
1739 | const struct port_info *pi = netdev_priv(qs->netdev); | 1687 | const struct port_info *pi = netdev_priv(qs->netdev); |
1740 | struct adapter *adap = pi->adapter; | 1688 | struct adapter *adap = pi->adapter; |
1741 | unsigned int written = 0; | ||
1742 | 1689 | ||
1743 | spin_lock(&q->lock); | 1690 | spin_lock(&q->lock); |
1744 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | 1691 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); |
@@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1758 | break; | 1705 | break; |
1759 | } | 1706 | } |
1760 | 1707 | ||
1761 | if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) | ||
1762 | break; | ||
1763 | |||
1764 | gen = q->gen; | 1708 | gen = q->gen; |
1765 | q->in_use += ndesc; | 1709 | q->in_use += ndesc; |
1766 | pidx = q->pidx; | 1710 | pidx = q->pidx; |
1767 | q->pidx += ndesc; | 1711 | q->pidx += ndesc; |
1768 | written += ndesc; | ||
1769 | if (q->pidx >= q->size) { | 1712 | if (q->pidx >= q->size) { |
1770 | q->pidx -= q->size; | 1713 | q->pidx -= q->size; |
1771 | q->gen ^= 1; | 1714 | q->gen ^= 1; |
@@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1773 | __skb_unlink(skb, &q->sendq); | 1716 | __skb_unlink(skb, &q->sendq); |
1774 | spin_unlock(&q->lock); | 1717 | spin_unlock(&q->lock); |
1775 | 1718 | ||
1776 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc, | 1719 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
1777 | (dma_addr_t *)skb->head); | ||
1778 | spin_lock(&q->lock); | 1720 | spin_lock(&q->lock); |
1779 | } | 1721 | } |
1780 | spin_unlock(&q->lock); | 1722 | spin_unlock(&q->lock); |
@@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1784 | set_bit(TXQ_LAST_PKT_DB, &q->flags); | 1726 | set_bit(TXQ_LAST_PKT_DB, &q->flags); |
1785 | #endif | 1727 | #endif |
1786 | wmb(); | 1728 | wmb(); |
1787 | if (likely(written)) | 1729 | t3_write_reg(adap, A_SG_KDOORBELL, |
1788 | t3_write_reg(adap, A_SG_KDOORBELL, | 1730 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); |
1789 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); | ||
1790 | } | 1731 | } |
1791 | 1732 | ||
1792 | /** | 1733 | /** |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 6e6e0a117ee2..8ec5d74ad44d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter) | |||
3048 | 3048 | ||
3049 | adapter->max_event_queues = le16_to_cpu(desc->eq_count); | 3049 | adapter->max_event_queues = le16_to_cpu(desc->eq_count); |
3050 | adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); | 3050 | adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); |
3051 | |||
3052 | /* Clear flags that driver is not interested in */ | ||
3053 | adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT; | ||
3051 | } | 3054 | } |
3052 | err: | 3055 | err: |
3053 | mutex_unlock(&adapter->mbox_lock); | 3056 | mutex_unlock(&adapter->mbox_lock); |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5228d88c5a02..1b3b9e886412 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
@@ -563,6 +563,12 @@ enum be_if_flags { | |||
563 | BE_IF_FLAGS_MULTICAST = 0x1000 | 563 | BE_IF_FLAGS_MULTICAST = 0x1000 |
564 | }; | 564 | }; |
565 | 565 | ||
566 | #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ | ||
567 | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ | ||
568 | BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ | ||
569 | BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ | ||
570 | BE_IF_FLAGS_UNTAGGED) | ||
571 | |||
566 | /* An RX interface is an object with one or more MAC addresses and | 572 | /* An RX interface is an object with one or more MAC addresses and |
567 | * filtering capabilities. */ | 573 | * filtering capabilities. */ |
568 | struct be_cmd_req_if_create { | 574 | struct be_cmd_req_if_create { |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 181edb522450..4559c35eea13 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -2563,8 +2563,8 @@ static int be_close(struct net_device *netdev) | |||
2563 | /* Wait for all pending tx completions to arrive so that | 2563 | /* Wait for all pending tx completions to arrive so that |
2564 | * all tx skbs are freed. | 2564 | * all tx skbs are freed. |
2565 | */ | 2565 | */ |
2566 | be_tx_compl_clean(adapter); | ||
2567 | netif_tx_disable(netdev); | 2566 | netif_tx_disable(netdev); |
2567 | be_tx_compl_clean(adapter); | ||
2568 | 2568 | ||
2569 | be_rx_qs_destroy(adapter); | 2569 | be_rx_qs_destroy(adapter); |
2570 | 2570 | ||
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index c896079728e1..ef94a591f9e5 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) | |||
931 | } | 931 | } |
932 | 932 | ||
933 | /* Allocate and setup a new buffer for receiving */ | 933 | /* Allocate and setup a new buffer for receiving */ |
934 | static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, | 934 | static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, |
935 | struct sk_buff *skb, unsigned int bufsize) | 935 | struct sk_buff *skb, unsigned int bufsize) |
936 | { | 936 | { |
937 | struct skge_rx_desc *rd = e->desc; | 937 | struct skge_rx_desc *rd = e->desc; |
938 | u64 map; | 938 | dma_addr_t map; |
939 | 939 | ||
940 | map = pci_map_single(skge->hw->pdev, skb->data, bufsize, | 940 | map = pci_map_single(skge->hw->pdev, skb->data, bufsize, |
941 | PCI_DMA_FROMDEVICE); | 941 | PCI_DMA_FROMDEVICE); |
942 | 942 | ||
943 | rd->dma_lo = map; | 943 | if (pci_dma_mapping_error(skge->hw->pdev, map)) |
944 | rd->dma_hi = map >> 32; | 944 | return -1; |
945 | |||
946 | rd->dma_lo = lower_32_bits(map); | ||
947 | rd->dma_hi = upper_32_bits(map); | ||
945 | e->skb = skb; | 948 | e->skb = skb; |
946 | rd->csum1_start = ETH_HLEN; | 949 | rd->csum1_start = ETH_HLEN; |
947 | rd->csum2_start = ETH_HLEN; | 950 | rd->csum2_start = ETH_HLEN; |
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, | |||
953 | rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; | 956 | rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; |
954 | dma_unmap_addr_set(e, mapaddr, map); | 957 | dma_unmap_addr_set(e, mapaddr, map); |
955 | dma_unmap_len_set(e, maplen, bufsize); | 958 | dma_unmap_len_set(e, maplen, bufsize); |
959 | return 0; | ||
956 | } | 960 | } |
957 | 961 | ||
958 | /* Resume receiving using existing skb, | 962 | /* Resume receiving using existing skb, |
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev) | |||
1014 | return -ENOMEM; | 1018 | return -ENOMEM; |
1015 | 1019 | ||
1016 | skb_reserve(skb, NET_IP_ALIGN); | 1020 | skb_reserve(skb, NET_IP_ALIGN); |
1017 | skge_rx_setup(skge, e, skb, skge->rx_buf_size); | 1021 | if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { |
1022 | dev_kfree_skb(skb); | ||
1023 | return -EIO; | ||
1024 | } | ||
1018 | } while ((e = e->next) != ring->start); | 1025 | } while ((e = e->next) != ring->start); |
1019 | 1026 | ||
1020 | ring->to_clean = ring->start; | 1027 | ring->to_clean = ring->start; |
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev) | |||
2544 | 2551 | ||
2545 | BUG_ON(skge->dma & 7); | 2552 | BUG_ON(skge->dma & 7); |
2546 | 2553 | ||
2547 | if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { | 2554 | if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { |
2548 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); | 2555 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); |
2549 | err = -EINVAL; | 2556 | err = -EINVAL; |
2550 | goto free_pci_mem; | 2557 | goto free_pci_mem; |
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2729 | struct skge_tx_desc *td; | 2736 | struct skge_tx_desc *td; |
2730 | int i; | 2737 | int i; |
2731 | u32 control, len; | 2738 | u32 control, len; |
2732 | u64 map; | 2739 | dma_addr_t map; |
2733 | 2740 | ||
2734 | if (skb_padto(skb, ETH_ZLEN)) | 2741 | if (skb_padto(skb, ETH_ZLEN)) |
2735 | return NETDEV_TX_OK; | 2742 | return NETDEV_TX_OK; |
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2743 | e->skb = skb; | 2750 | e->skb = skb; |
2744 | len = skb_headlen(skb); | 2751 | len = skb_headlen(skb); |
2745 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); | 2752 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); |
2753 | if (pci_dma_mapping_error(hw->pdev, map)) | ||
2754 | goto mapping_error; | ||
2755 | |||
2746 | dma_unmap_addr_set(e, mapaddr, map); | 2756 | dma_unmap_addr_set(e, mapaddr, map); |
2747 | dma_unmap_len_set(e, maplen, len); | 2757 | dma_unmap_len_set(e, maplen, len); |
2748 | 2758 | ||
2749 | td->dma_lo = map; | 2759 | td->dma_lo = lower_32_bits(map); |
2750 | td->dma_hi = map >> 32; | 2760 | td->dma_hi = upper_32_bits(map); |
2751 | 2761 | ||
2752 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2762 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2753 | const int offset = skb_checksum_start_offset(skb); | 2763 | const int offset = skb_checksum_start_offset(skb); |
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2778 | 2788 | ||
2779 | map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, | 2789 | map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, |
2780 | skb_frag_size(frag), DMA_TO_DEVICE); | 2790 | skb_frag_size(frag), DMA_TO_DEVICE); |
2791 | if (dma_mapping_error(&hw->pdev->dev, map)) | ||
2792 | goto mapping_unwind; | ||
2781 | 2793 | ||
2782 | e = e->next; | 2794 | e = e->next; |
2783 | e->skb = skb; | 2795 | e->skb = skb; |
2784 | tf = e->desc; | 2796 | tf = e->desc; |
2785 | BUG_ON(tf->control & BMU_OWN); | 2797 | BUG_ON(tf->control & BMU_OWN); |
2786 | 2798 | ||
2787 | tf->dma_lo = map; | 2799 | tf->dma_lo = lower_32_bits(map); |
2788 | tf->dma_hi = (u64) map >> 32; | 2800 | tf->dma_hi = upper_32_bits(map); |
2789 | dma_unmap_addr_set(e, mapaddr, map); | 2801 | dma_unmap_addr_set(e, mapaddr, map); |
2790 | dma_unmap_len_set(e, maplen, skb_frag_size(frag)); | 2802 | dma_unmap_len_set(e, maplen, skb_frag_size(frag)); |
2791 | 2803 | ||
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2815 | } | 2827 | } |
2816 | 2828 | ||
2817 | return NETDEV_TX_OK; | 2829 | return NETDEV_TX_OK; |
2830 | |||
2831 | mapping_unwind: | ||
2832 | e = skge->tx_ring.to_use; | ||
2833 | pci_unmap_single(hw->pdev, | ||
2834 | dma_unmap_addr(e, mapaddr), | ||
2835 | dma_unmap_len(e, maplen), | ||
2836 | PCI_DMA_TODEVICE); | ||
2837 | while (i-- > 0) { | ||
2838 | e = e->next; | ||
2839 | pci_unmap_page(hw->pdev, | ||
2840 | dma_unmap_addr(e, mapaddr), | ||
2841 | dma_unmap_len(e, maplen), | ||
2842 | PCI_DMA_TODEVICE); | ||
2843 | } | ||
2844 | |||
2845 | mapping_error: | ||
2846 | if (net_ratelimit()) | ||
2847 | dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); | ||
2848 | dev_kfree_skb(skb); | ||
2849 | return NETDEV_TX_OK; | ||
2818 | } | 2850 | } |
2819 | 2851 | ||
2820 | 2852 | ||
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3045 | 3077 | ||
3046 | pci_dma_sync_single_for_cpu(skge->hw->pdev, | 3078 | pci_dma_sync_single_for_cpu(skge->hw->pdev, |
3047 | dma_unmap_addr(e, mapaddr), | 3079 | dma_unmap_addr(e, mapaddr), |
3048 | len, PCI_DMA_FROMDEVICE); | 3080 | dma_unmap_len(e, maplen), |
3081 | PCI_DMA_FROMDEVICE); | ||
3049 | skb_copy_from_linear_data(e->skb, skb->data, len); | 3082 | skb_copy_from_linear_data(e->skb, skb->data, len); |
3050 | pci_dma_sync_single_for_device(skge->hw->pdev, | 3083 | pci_dma_sync_single_for_device(skge->hw->pdev, |
3051 | dma_unmap_addr(e, mapaddr), | 3084 | dma_unmap_addr(e, mapaddr), |
3052 | len, PCI_DMA_FROMDEVICE); | 3085 | dma_unmap_len(e, maplen), |
3086 | PCI_DMA_FROMDEVICE); | ||
3053 | skge_rx_reuse(e, skge->rx_buf_size); | 3087 | skge_rx_reuse(e, skge->rx_buf_size); |
3054 | } else { | 3088 | } else { |
3055 | struct sk_buff *nskb; | 3089 | struct sk_buff *nskb; |
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3058 | if (!nskb) | 3092 | if (!nskb) |
3059 | goto resubmit; | 3093 | goto resubmit; |
3060 | 3094 | ||
3095 | if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { | ||
3096 | dev_kfree_skb(nskb); | ||
3097 | goto resubmit; | ||
3098 | } | ||
3099 | |||
3061 | pci_unmap_single(skge->hw->pdev, | 3100 | pci_unmap_single(skge->hw->pdev, |
3062 | dma_unmap_addr(e, mapaddr), | 3101 | dma_unmap_addr(e, mapaddr), |
3063 | dma_unmap_len(e, maplen), | 3102 | dma_unmap_len(e, maplen), |
3064 | PCI_DMA_FROMDEVICE); | 3103 | PCI_DMA_FROMDEVICE); |
3065 | skb = e->skb; | 3104 | skb = e->skb; |
3066 | prefetch(skb->data); | 3105 | prefetch(skb->data); |
3067 | skge_rx_setup(skge, e, nskb, skge->rx_buf_size); | ||
3068 | } | 3106 | } |
3069 | 3107 | ||
3070 | skb_put(skb, len); | 3108 | skb_put(skb, len); |
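
Note: the skge changes above check every DMA mapping with pci_dma_mapping_error()/dma_mapping_error() and, when a fragment mapping fails, walk back over only the descriptors that were already mapped before dropping the packet. A minimal standalone sketch of that unwind idiom, with hypothetical acquire()/release() helpers standing in for the mapping and unmapping calls (the real patch additionally unmaps the head buffer with pci_unmap_single() before looping over the fragments):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the per-fragment map/unmap calls;
 * acquire() fails on the 4th "fragment" to exercise the unwind path. */
static int acquire(int i) { return (i == 3) ? -1 : 0; }
static void release(int i) { printf("released fragment %d\n", i); }

static int map_all_fragments(int nr_frags)
{
    int i;

    for (i = 0; i < nr_frags; i++) {
        if (acquire(i) < 0)
            goto mapping_unwind;    /* undo only what was actually mapped */
    }
    return 0;

mapping_unwind:
    while (i-- > 0)
        release(i);
    return -1;
}

int main(void)
{
    return map_all_fragments(6) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The key point is that i still holds the index of the mapping that failed, so the while loop releases exactly the mappings that succeeded and nothing else.
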
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c571de85d0f9..5472cbd34028 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -46,7 +46,7 @@ | |||
46 | #include "mlx5_core.h" | 46 | #include "mlx5_core.h" |
47 | 47 | ||
48 | enum { | 48 | enum { |
49 | CMD_IF_REV = 4, | 49 | CMD_IF_REV = 5, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | enum { | 52 | enum { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c02cbcfd0fb8..443cc4d7b024 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | |||
268 | case MLX5_EVENT_TYPE_PAGE_REQUEST: | 268 | case MLX5_EVENT_TYPE_PAGE_REQUEST: |
269 | { | 269 | { |
270 | u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); | 270 | u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); |
271 | s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); | 271 | s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); |
272 | 272 | ||
273 | mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); | 273 | mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); |
274 | mlx5_core_req_pages_handler(dev, func_id, npages); | 274 | mlx5_core_req_pages_handler(dev, func_id, npages); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 72a5222447f5..f012658b6a92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c | |||
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, | |||
113 | caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; | 113 | caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; |
114 | caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; | 114 | caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; |
115 | caps->log_max_mcg = out->hca_cap.log_max_mcg; | 115 | caps->log_max_mcg = out->hca_cap.log_max_mcg; |
116 | caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); | 116 | caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; |
117 | caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); | 117 | caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); |
118 | caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); | 118 | caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); |
119 | caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; | 119 | caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; |
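
Note: the mlx5 hunks above track the rev-5 command layout: the page-request count widens from a big-endian 16-bit field to a signed 32-bit one, and max_qp_mcg is now carried in the low 24 bits of a 32-bit big-endian word. A self-contained illustration of masking a 24-bit value out of a big-endian 32-bit field, using a local byte-assembly helper in place of the kernel's be32_to_cpu() (the byte values are made up):

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for be32_to_cpu(): build a host-order value from
 * four big-endian bytes. */
static uint32_t be32_load(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
    /* A 32-bit big-endian word as it might arrive from the device;
     * only the low 24 bits carry the value, the top byte is other data. */
    uint8_t raw[4] = { 0xAA, 0x01, 0x02, 0x03 };
    uint32_t max_qp_mcg = be32_load(raw) & 0xffffff;

    printf("max_qp_mcg = 0x%06x\n", (unsigned int)max_qp_mcg);  /* 0x010203 */
    return 0;
}
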
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 748f10a155c4..3e6670c4a7cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -55,33 +55,9 @@ enum { | |||
55 | }; | 55 | }; |
56 | 56 | ||
57 | static DEFINE_SPINLOCK(health_lock); | 57 | static DEFINE_SPINLOCK(health_lock); |
58 | |||
59 | static LIST_HEAD(health_list); | 58 | static LIST_HEAD(health_list); |
60 | static struct work_struct health_work; | 59 | static struct work_struct health_work; |
61 | 60 | ||
62 | static health_handler_t reg_handler; | ||
63 | int mlx5_register_health_report_handler(health_handler_t handler) | ||
64 | { | ||
65 | spin_lock_irq(&health_lock); | ||
66 | if (reg_handler) { | ||
67 | spin_unlock_irq(&health_lock); | ||
68 | return -EEXIST; | ||
69 | } | ||
70 | reg_handler = handler; | ||
71 | spin_unlock_irq(&health_lock); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | EXPORT_SYMBOL(mlx5_register_health_report_handler); | ||
76 | |||
77 | void mlx5_unregister_health_report_handler(void) | ||
78 | { | ||
79 | spin_lock_irq(&health_lock); | ||
80 | reg_handler = NULL; | ||
81 | spin_unlock_irq(&health_lock); | ||
82 | } | ||
83 | EXPORT_SYMBOL(mlx5_unregister_health_report_handler); | ||
84 | |||
85 | static void health_care(struct work_struct *work) | 61 | static void health_care(struct work_struct *work) |
86 | { | 62 | { |
87 | struct mlx5_core_health *health, *n; | 63 | struct mlx5_core_health *health, *n; |
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work) | |||
98 | priv = container_of(health, struct mlx5_priv, health); | 74 | priv = container_of(health, struct mlx5_priv, health); |
99 | dev = container_of(priv, struct mlx5_core_dev, priv); | 75 | dev = container_of(priv, struct mlx5_core_dev, priv); |
100 | mlx5_core_warn(dev, "handling bad device here\n"); | 76 | mlx5_core_warn(dev, "handling bad device here\n"); |
77 | /* nothing yet */ | ||
101 | spin_lock_irq(&health_lock); | 78 | spin_lock_irq(&health_lock); |
102 | if (reg_handler) | ||
103 | reg_handler(dev->pdev, health->health, | ||
104 | sizeof(health->health)); | ||
105 | |||
106 | list_del_init(&health->list); | 79 | list_del_init(&health->list); |
107 | spin_unlock_irq(&health_lock); | 80 | spin_unlock_irq(&health_lock); |
108 | } | 81 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4a3e137931a3..3a2408d44820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | |||
@@ -43,10 +43,16 @@ enum { | |||
43 | MLX5_PAGES_TAKE = 2 | 43 | MLX5_PAGES_TAKE = 2 |
44 | }; | 44 | }; |
45 | 45 | ||
46 | enum { | ||
47 | MLX5_BOOT_PAGES = 1, | ||
48 | MLX5_INIT_PAGES = 2, | ||
49 | MLX5_POST_INIT_PAGES = 3 | ||
50 | }; | ||
51 | |||
46 | struct mlx5_pages_req { | 52 | struct mlx5_pages_req { |
47 | struct mlx5_core_dev *dev; | 53 | struct mlx5_core_dev *dev; |
48 | u32 func_id; | 54 | u32 func_id; |
49 | s16 npages; | 55 | s32 npages; |
50 | struct work_struct work; | 56 | struct work_struct work; |
51 | }; | 57 | }; |
52 | 58 | ||
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox { | |||
64 | 70 | ||
65 | struct mlx5_query_pages_outbox { | 71 | struct mlx5_query_pages_outbox { |
66 | struct mlx5_outbox_hdr hdr; | 72 | struct mlx5_outbox_hdr hdr; |
67 | __be16 num_boot_pages; | 73 | __be16 rsvd; |
68 | __be16 func_id; | 74 | __be16 func_id; |
69 | __be16 init_pages; | 75 | __be32 num_pages; |
70 | __be16 num_pages; | ||
71 | }; | 76 | }; |
72 | 77 | ||
73 | struct mlx5_manage_pages_inbox { | 78 | struct mlx5_manage_pages_inbox { |
74 | struct mlx5_inbox_hdr hdr; | 79 | struct mlx5_inbox_hdr hdr; |
75 | __be16 rsvd0; | 80 | __be16 rsvd; |
76 | __be16 func_id; | 81 | __be16 func_id; |
77 | __be16 rsvd1; | 82 | __be32 num_entries; |
78 | __be16 num_entries; | ||
79 | u8 rsvd2[16]; | ||
80 | __be64 pas[0]; | 83 | __be64 pas[0]; |
81 | }; | 84 | }; |
82 | 85 | ||
83 | struct mlx5_manage_pages_outbox { | 86 | struct mlx5_manage_pages_outbox { |
84 | struct mlx5_outbox_hdr hdr; | 87 | struct mlx5_outbox_hdr hdr; |
85 | u8 rsvd0[2]; | 88 | __be32 num_entries; |
86 | __be16 num_entries; | 89 | u8 rsvd[4]; |
87 | u8 rsvd1[20]; | ||
88 | __be64 pas[0]; | 90 | __be64 pas[0]; |
89 | }; | 91 | }; |
90 | 92 | ||
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) | |||
146 | } | 148 | } |
147 | 149 | ||
148 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | 150 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, |
149 | s16 *pages, s16 *init_pages, u16 *boot_pages) | 151 | s32 *npages, int boot) |
150 | { | 152 | { |
151 | struct mlx5_query_pages_inbox in; | 153 | struct mlx5_query_pages_inbox in; |
152 | struct mlx5_query_pages_outbox out; | 154 | struct mlx5_query_pages_outbox out; |
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |||
155 | memset(&in, 0, sizeof(in)); | 157 | memset(&in, 0, sizeof(in)); |
156 | memset(&out, 0, sizeof(out)); | 158 | memset(&out, 0, sizeof(out)); |
157 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); | 159 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); |
160 | in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); | ||
161 | |||
158 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | 162 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); |
159 | if (err) | 163 | if (err) |
160 | return err; | 164 | return err; |
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |||
162 | if (out.hdr.status) | 166 | if (out.hdr.status) |
163 | return mlx5_cmd_status_to_err(&out.hdr); | 167 | return mlx5_cmd_status_to_err(&out.hdr); |
164 | 168 | ||
165 | if (pages) | 169 | *npages = be32_to_cpu(out.num_pages); |
166 | *pages = be16_to_cpu(out.num_pages); | ||
167 | |||
168 | if (init_pages) | ||
169 | *init_pages = be16_to_cpu(out.init_pages); | ||
170 | |||
171 | if (boot_pages) | ||
172 | *boot_pages = be16_to_cpu(out.num_boot_pages); | ||
173 | |||
174 | *func_id = be16_to_cpu(out.func_id); | 170 | *func_id = be16_to_cpu(out.func_id); |
175 | 171 | ||
176 | return err; | 172 | return err; |
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, | |||
224 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | 220 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
225 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); | 221 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); |
226 | in->func_id = cpu_to_be16(func_id); | 222 | in->func_id = cpu_to_be16(func_id); |
227 | in->num_entries = cpu_to_be16(npages); | 223 | in->num_entries = cpu_to_be32(npages); |
228 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | 224 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); |
229 | mlx5_core_dbg(dev, "err %d\n", err); | 225 | mlx5_core_dbg(dev, "err %d\n", err); |
230 | if (err) { | 226 | if (err) { |
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
292 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | 288 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
293 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); | 289 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); |
294 | in.func_id = cpu_to_be16(func_id); | 290 | in.func_id = cpu_to_be16(func_id); |
295 | in.num_entries = cpu_to_be16(npages); | 291 | in.num_entries = cpu_to_be32(npages); |
296 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); | 292 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); |
297 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | 293 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); |
298 | if (err) { | 294 | if (err) { |
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
306 | goto out_free; | 302 | goto out_free; |
307 | } | 303 | } |
308 | 304 | ||
309 | num_claimed = be16_to_cpu(out->num_entries); | 305 | num_claimed = be32_to_cpu(out->num_entries); |
310 | if (nclaimed) | 306 | if (nclaimed) |
311 | *nclaimed = num_claimed; | 307 | *nclaimed = num_claimed; |
312 | 308 | ||
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work) | |||
345 | } | 341 | } |
346 | 342 | ||
347 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | 343 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, |
348 | s16 npages) | 344 | s32 npages) |
349 | { | 345 | { |
350 | struct mlx5_pages_req *req; | 346 | struct mlx5_pages_req *req; |
351 | 347 | ||
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | |||
364 | 360 | ||
365 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) | 361 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) |
366 | { | 362 | { |
367 | u16 uninitialized_var(boot_pages); | ||
368 | s16 uninitialized_var(init_pages); | ||
369 | u16 uninitialized_var(func_id); | 363 | u16 uninitialized_var(func_id); |
364 | s32 uninitialized_var(npages); | ||
370 | int err; | 365 | int err; |
371 | 366 | ||
372 | err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, | 367 | err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); |
373 | &boot_pages); | ||
374 | if (err) | 368 | if (err) |
375 | return err; | 369 | return err; |
376 | 370 | ||
371 | mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", | ||
372 | npages, boot ? "boot" : "init", func_id); | ||
377 | 373 | ||
378 | mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", | 374 | return give_pages(dev, func_id, npages, 0); |
379 | init_pages, boot_pages, func_id); | ||
380 | return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0); | ||
381 | } | 375 | } |
382 | 376 | ||
383 | static int optimal_reclaimed_pages(void) | 377 | static int optimal_reclaimed_pages(void) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 92da9980a0a0..9d4bb7f83904 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -3266,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) | |||
3266 | u8 val; | 3266 | u8 val; |
3267 | int ret, max_sds_rings = adapter->max_sds_rings; | 3267 | int ret, max_sds_rings = adapter->max_sds_rings; |
3268 | 3268 | ||
3269 | if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { | ||
3270 | netdev_info(netdev, "Device is resetting\n"); | ||
3271 | return -EBUSY; | ||
3272 | } | ||
3273 | |||
3269 | if (qlcnic_get_diag_lock(adapter)) { | 3274 | if (qlcnic_get_diag_lock(adapter)) { |
3270 | netdev_info(netdev, "Device in diagnostics mode\n"); | 3275 | netdev_info(netdev, "Device in diagnostics mode\n"); |
3271 | return -EBUSY; | 3276 | return -EBUSY; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 9f4b8d5f0865..345d987aede4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) | |||
629 | return -EIO; | 629 | return -EIO; |
630 | } | 630 | } |
631 | 631 | ||
632 | qlcnic_set_drv_version(adapter); | 632 | if (adapter->portnum == 0) |
633 | qlcnic_set_drv_version(adapter); | ||
633 | qlcnic_83xx_idc_attach_driver(adapter); | 634 | qlcnic_83xx_idc_attach_driver(adapter); |
634 | 635 | ||
635 | return 0; | 636 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index ee013fcc3322..bc05d016c859 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -2165,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2165 | if (err) | 2165 | if (err) |
2166 | goto err_out_disable_mbx_intr; | 2166 | goto err_out_disable_mbx_intr; |
2167 | 2167 | ||
2168 | qlcnic_set_drv_version(adapter); | 2168 | if (adapter->portnum == 0) |
2169 | qlcnic_set_drv_version(adapter); | ||
2169 | 2170 | ||
2170 | pci_set_drvdata(pdev, adapter); | 2171 | pci_set_drvdata(pdev, adapter); |
2171 | 2172 | ||
@@ -3085,7 +3086,8 @@ done: | |||
3085 | adapter->fw_fail_cnt = 0; | 3086 | adapter->fw_fail_cnt = 0; |
3086 | adapter->flags &= ~QLCNIC_FW_HANG; | 3087 | adapter->flags &= ~QLCNIC_FW_HANG; |
3087 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 3088 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
3088 | qlcnic_set_drv_version(adapter); | 3089 | if (adapter->portnum == 0) |
3090 | qlcnic_set_drv_version(adapter); | ||
3089 | 3091 | ||
3090 | if (!qlcnic_clr_drv_state(adapter)) | 3092 | if (!qlcnic_clr_drv_state(adapter)) |
3091 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, | 3093 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 10ed82b3baca..660c3f5b2237 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, | |||
170 | 170 | ||
171 | if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { | 171 | if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { |
172 | err = qlcnic_get_beacon_state(adapter, &h_beacon_state); | 172 | err = qlcnic_get_beacon_state(adapter, &h_beacon_state); |
173 | if (!err) { | 173 | if (err) { |
174 | dev_info(&adapter->pdev->dev, | 174 | netdev_err(adapter->netdev, |
175 | "Failed to get current beacon state\n"); | 175 | "Failed to get current beacon state\n"); |
176 | } else { | 176 | } else { |
177 | if (h_beacon_state == QLCNIC_BEACON_DISABLE) | 177 | if (h_beacon_state == QLCNIC_BEACON_DISABLE) |
178 | ahw->beacon_state = 0; | 178 | ahw->beacon_state = 0; |
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 6f35f8404d68..d2e591955bdd 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
@@ -524,6 +524,7 @@ rx_status_loop: | |||
524 | PCI_DMA_FROMDEVICE); | 524 | PCI_DMA_FROMDEVICE); |
525 | if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { | 525 | if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { |
526 | dev->stats.rx_dropped++; | 526 | dev->stats.rx_dropped++; |
527 | kfree_skb(new_skb); | ||
527 | goto rx_next; | 528 | goto rx_next; |
528 | } | 529 | } |
529 | 530 | ||
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b5eb4195fc99..85e5c97191dd 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -7088,7 +7088,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7088 | 7088 | ||
7089 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 7089 | RTL_W8(Cfg9346, Cfg9346_Unlock); |
7090 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); | 7090 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); |
7091 | RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); | 7091 | RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); |
7092 | if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) | 7092 | if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) |
7093 | tp->features |= RTL_FEATURE_WOL; | 7093 | tp->features |= RTL_FEATURE_WOL; |
7094 | if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) | 7094 | if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) |
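
Note: the r8169 one-liner above turns the Config5 update into a proper read-modify-write: instead of masking with PMEStatus alone, which wiped the unicast/broadcast/multicast wake-frame and LanWake bits, it preserves the whole wake-related set. A generic sketch of keeping a chosen bit set intact across a register write; the bit positions here are invented for the example and are not the real RTL8169 Config5 layout:

#include <stdint.h>
#include <stdio.h>

/* Invented bit positions for illustration only. */
#define PME_STATUS  0x01u
#define LAN_WAKE    0x02u
#define WAKE_UCAST  0x10u
#define WAKE_MCAST  0x20u
#define WAKE_BCAST  0x40u

/* Everything we intend to survive the write. */
#define KEEP_MASK (PME_STATUS | LAN_WAKE | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)

static uint8_t config5 = 0x7f;                 /* pretend register contents */

static uint8_t reg_read(void)        { return config5; }
static void    reg_write(uint8_t v)  { config5 = v; }

int main(void)
{
    /* Read-modify-write: clear only the bits outside KEEP_MASK. */
    reg_write(reg_read() & KEEP_MASK);
    printf("config5 = 0x%02x\n", config5);     /* 0x73 with the values above */
    return 0;
}
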
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 2a469b27a506..30d744235d27 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c | |||
@@ -675,7 +675,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, | |||
675 | BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); | 675 | BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); |
676 | BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != | 676 | BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != |
677 | EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); | 677 | EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); |
678 | rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF; | 678 | rep_index = spec->type - EFX_FILTER_UC_DEF; |
679 | ins_index = rep_index; | 679 | ins_index = rep_index; |
680 | 680 | ||
681 | spin_lock_bh(&state->lock); | 681 | spin_lock_bh(&state->lock); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index c9d942a5c335..1ef9d8a555aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; | 33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; |
34 | unsigned int txsize = priv->dma_tx_size; | 34 | unsigned int txsize = priv->dma_tx_size; |
35 | unsigned int entry = priv->cur_tx % txsize; | 35 | unsigned int entry = priv->cur_tx % txsize; |
36 | struct dma_desc *desc = priv->dma_tx + entry; | 36 | struct dma_desc *desc; |
37 | unsigned int nopaged_len = skb_headlen(skb); | 37 | unsigned int nopaged_len = skb_headlen(skb); |
38 | unsigned int bmax, len; | 38 | unsigned int bmax, len; |
39 | 39 | ||
40 | if (priv->extend_desc) | ||
41 | desc = (struct dma_desc *)(priv->dma_etx + entry); | ||
42 | else | ||
43 | desc = priv->dma_tx + entry; | ||
44 | |||
40 | if (priv->plat->enh_desc) | 45 | if (priv->plat->enh_desc) |
41 | bmax = BUF_SIZE_8KiB; | 46 | bmax = BUF_SIZE_8KiB; |
42 | else | 47 | else |
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
54 | STMMAC_RING_MODE); | 59 | STMMAC_RING_MODE); |
55 | wmb(); | 60 | wmb(); |
56 | entry = (++priv->cur_tx) % txsize; | 61 | entry = (++priv->cur_tx) % txsize; |
57 | desc = priv->dma_tx + entry; | 62 | |
63 | if (priv->extend_desc) | ||
64 | desc = (struct dma_desc *)(priv->dma_etx + entry); | ||
65 | else | ||
66 | desc = priv->dma_tx + entry; | ||
58 | 67 | ||
59 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, | 68 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, |
60 | len, DMA_TO_DEVICE); | 69 | len, DMA_TO_DEVICE); |
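
Note: the ring_mode fix above picks the descriptor out of the extended ring (dma_etx) or the basic ring (dma_tx) depending on priv->extend_desc, both for the first segment and again after advancing cur_tx. The cast to struct dma_desc * only works because the basic descriptor sits at the start of the extended one; a small sketch of that first-member embedding, with simplified, assumed field layouts:

#include <stdio.h>

struct dma_desc { unsigned int des0, des1, des2, des3; };

/* Assumed layout: the basic descriptor is the first member of the
 * extended one, which is what makes the cast below (and in the hunk) valid. */
struct dma_extended_desc {
    struct dma_desc basic;
    unsigned int des4, des5, des6, des7;
};

int main(void)
{
    struct dma_desc ring[8];
    struct dma_extended_desc ering[8];
    int extend_desc = 1;
    unsigned int entry = 2;
    struct dma_desc *desc;

    if (extend_desc)
        desc = (struct dma_desc *)(ering + entry);
    else
        desc = ring + entry;

    desc->des2 = 0x1234;
    printf("des2 = 0x%x\n", ering[entry].basic.des2);   /* 0x1234 */
    return 0;
}
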
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f2ccb36e8685..0a9bb9d30c3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
939 | 939 | ||
940 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, | 940 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, |
941 | GFP_KERNEL); | 941 | GFP_KERNEL); |
942 | if (unlikely(skb == NULL)) { | 942 | if (!skb) { |
943 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); | 943 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); |
944 | return 1; | 944 | return -ENOMEM; |
945 | } | 945 | } |
946 | skb_reserve(skb, NET_IP_ALIGN); | 946 | skb_reserve(skb, NET_IP_ALIGN); |
947 | priv->rx_skbuff[i] = skb; | 947 | priv->rx_skbuff[i] = skb; |
948 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, | 948 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, |
949 | priv->dma_buf_sz, | 949 | priv->dma_buf_sz, |
950 | DMA_FROM_DEVICE); | 950 | DMA_FROM_DEVICE); |
951 | if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { | ||
952 | pr_err("%s: DMA mapping error\n", __func__); | ||
953 | dev_kfree_skb_any(skb); | ||
954 | return -EINVAL; | ||
955 | } | ||
951 | 956 | ||
952 | p->des2 = priv->rx_skbuff_dma[i]; | 957 | p->des2 = priv->rx_skbuff_dma[i]; |
953 | 958 | ||
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
958 | return 0; | 963 | return 0; |
959 | } | 964 | } |
960 | 965 | ||
966 | static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) | ||
967 | { | ||
968 | if (priv->rx_skbuff[i]) { | ||
969 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
970 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
971 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
972 | } | ||
973 | priv->rx_skbuff[i] = NULL; | ||
974 | } | ||
975 | |||
961 | /** | 976 | /** |
962 | * init_dma_desc_rings - init the RX/TX descriptor rings | 977 | * init_dma_desc_rings - init the RX/TX descriptor rings |
963 | * @dev: net device structure | 978 | * @dev: net device structure |
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
965 | * and allocates the socket buffers. It suppors the chained and ring | 980 | * and allocates the socket buffers. It suppors the chained and ring |
966 | * modes. | 981 | * modes. |
967 | */ | 982 | */ |
968 | static void init_dma_desc_rings(struct net_device *dev) | 983 | static int init_dma_desc_rings(struct net_device *dev) |
969 | { | 984 | { |
970 | int i; | 985 | int i; |
971 | struct stmmac_priv *priv = netdev_priv(dev); | 986 | struct stmmac_priv *priv = netdev_priv(dev); |
972 | unsigned int txsize = priv->dma_tx_size; | 987 | unsigned int txsize = priv->dma_tx_size; |
973 | unsigned int rxsize = priv->dma_rx_size; | 988 | unsigned int rxsize = priv->dma_rx_size; |
974 | unsigned int bfsize = 0; | 989 | unsigned int bfsize = 0; |
990 | int ret = -ENOMEM; | ||
975 | 991 | ||
976 | /* Set the max buffer size according to the DESC mode | 992 | /* Set the max buffer size according to the DESC mode |
977 | * and the MTU. Note that RING mode allows 16KiB bsize. | 993 | * and the MTU. Note that RING mode allows 16KiB bsize. |
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
992 | dma_extended_desc), | 1008 | dma_extended_desc), |
993 | &priv->dma_rx_phy, | 1009 | &priv->dma_rx_phy, |
994 | GFP_KERNEL); | 1010 | GFP_KERNEL); |
1011 | if (!priv->dma_erx) | ||
1012 | goto err_dma; | ||
1013 | |||
995 | priv->dma_etx = dma_alloc_coherent(priv->device, txsize * | 1014 | priv->dma_etx = dma_alloc_coherent(priv->device, txsize * |
996 | sizeof(struct | 1015 | sizeof(struct |
997 | dma_extended_desc), | 1016 | dma_extended_desc), |
998 | &priv->dma_tx_phy, | 1017 | &priv->dma_tx_phy, |
999 | GFP_KERNEL); | 1018 | GFP_KERNEL); |
1000 | if ((!priv->dma_erx) || (!priv->dma_etx)) | 1019 | if (!priv->dma_etx) { |
1001 | return; | 1020 | dma_free_coherent(priv->device, priv->dma_rx_size * |
1021 | sizeof(struct dma_extended_desc), | ||
1022 | priv->dma_erx, priv->dma_rx_phy); | ||
1023 | goto err_dma; | ||
1024 | } | ||
1002 | } else { | 1025 | } else { |
1003 | priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * | 1026 | priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * |
1004 | sizeof(struct dma_desc), | 1027 | sizeof(struct dma_desc), |
1005 | &priv->dma_rx_phy, | 1028 | &priv->dma_rx_phy, |
1006 | GFP_KERNEL); | 1029 | GFP_KERNEL); |
1030 | if (!priv->dma_rx) | ||
1031 | goto err_dma; | ||
1032 | |||
1007 | priv->dma_tx = dma_alloc_coherent(priv->device, txsize * | 1033 | priv->dma_tx = dma_alloc_coherent(priv->device, txsize * |
1008 | sizeof(struct dma_desc), | 1034 | sizeof(struct dma_desc), |
1009 | &priv->dma_tx_phy, | 1035 | &priv->dma_tx_phy, |
1010 | GFP_KERNEL); | 1036 | GFP_KERNEL); |
1011 | if ((!priv->dma_rx) || (!priv->dma_tx)) | 1037 | if (!priv->dma_tx) { |
1012 | return; | 1038 | dma_free_coherent(priv->device, priv->dma_rx_size * |
1039 | sizeof(struct dma_desc), | ||
1040 | priv->dma_rx, priv->dma_rx_phy); | ||
1041 | goto err_dma; | ||
1042 | } | ||
1013 | } | 1043 | } |
1014 | 1044 | ||
1015 | priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), | 1045 | priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), |
1016 | GFP_KERNEL); | 1046 | GFP_KERNEL); |
1047 | if (!priv->rx_skbuff_dma) | ||
1048 | goto err_rx_skbuff_dma; | ||
1049 | |||
1017 | priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), | 1050 | priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), |
1018 | GFP_KERNEL); | 1051 | GFP_KERNEL); |
1052 | if (!priv->rx_skbuff) | ||
1053 | goto err_rx_skbuff; | ||
1054 | |||
1019 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), | 1055 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), |
1020 | GFP_KERNEL); | 1056 | GFP_KERNEL); |
1057 | if (!priv->tx_skbuff_dma) | ||
1058 | goto err_tx_skbuff_dma; | ||
1059 | |||
1021 | priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), | 1060 | priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), |
1022 | GFP_KERNEL); | 1061 | GFP_KERNEL); |
1062 | if (!priv->tx_skbuff) | ||
1063 | goto err_tx_skbuff; | ||
1064 | |||
1023 | if (netif_msg_probe(priv)) { | 1065 | if (netif_msg_probe(priv)) { |
1024 | pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, | 1066 | pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, |
1025 | (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); | 1067 | (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); |
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
1034 | else | 1076 | else |
1035 | p = priv->dma_rx + i; | 1077 | p = priv->dma_rx + i; |
1036 | 1078 | ||
1037 | if (stmmac_init_rx_buffers(priv, p, i)) | 1079 | ret = stmmac_init_rx_buffers(priv, p, i); |
1038 | break; | 1080 | if (ret) |
1081 | goto err_init_rx_buffers; | ||
1039 | 1082 | ||
1040 | if (netif_msg_probe(priv)) | 1083 | if (netif_msg_probe(priv)) |
1041 | pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], | 1084 | pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], |
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
1081 | 1124 | ||
1082 | if (netif_msg_hw(priv)) | 1125 | if (netif_msg_hw(priv)) |
1083 | stmmac_display_rings(priv); | 1126 | stmmac_display_rings(priv); |
1127 | |||
1128 | return 0; | ||
1129 | err_init_rx_buffers: | ||
1130 | while (--i >= 0) | ||
1131 | stmmac_free_rx_buffers(priv, i); | ||
1132 | kfree(priv->tx_skbuff); | ||
1133 | err_tx_skbuff: | ||
1134 | kfree(priv->tx_skbuff_dma); | ||
1135 | err_tx_skbuff_dma: | ||
1136 | kfree(priv->rx_skbuff); | ||
1137 | err_rx_skbuff: | ||
1138 | kfree(priv->rx_skbuff_dma); | ||
1139 | err_rx_skbuff_dma: | ||
1140 | if (priv->extend_desc) { | ||
1141 | dma_free_coherent(priv->device, priv->dma_tx_size * | ||
1142 | sizeof(struct dma_extended_desc), | ||
1143 | priv->dma_etx, priv->dma_tx_phy); | ||
1144 | dma_free_coherent(priv->device, priv->dma_rx_size * | ||
1145 | sizeof(struct dma_extended_desc), | ||
1146 | priv->dma_erx, priv->dma_rx_phy); | ||
1147 | } else { | ||
1148 | dma_free_coherent(priv->device, | ||
1149 | priv->dma_tx_size * sizeof(struct dma_desc), | ||
1150 | priv->dma_tx, priv->dma_tx_phy); | ||
1151 | dma_free_coherent(priv->device, | ||
1152 | priv->dma_rx_size * sizeof(struct dma_desc), | ||
1153 | priv->dma_rx, priv->dma_rx_phy); | ||
1154 | } | ||
1155 | err_dma: | ||
1156 | return ret; | ||
1084 | } | 1157 | } |
1085 | 1158 | ||
1086 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) | 1159 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) |
1087 | { | 1160 | { |
1088 | int i; | 1161 | int i; |
1089 | 1162 | ||
1090 | for (i = 0; i < priv->dma_rx_size; i++) { | 1163 | for (i = 0; i < priv->dma_rx_size; i++) |
1091 | if (priv->rx_skbuff[i]) { | 1164 | stmmac_free_rx_buffers(priv, i); |
1092 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
1093 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
1094 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
1095 | } | ||
1096 | priv->rx_skbuff[i] = NULL; | ||
1097 | } | ||
1098 | } | 1165 | } |
1099 | 1166 | ||
1100 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) | 1167 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) |
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev) | |||
1560 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); | 1627 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); |
1561 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); | 1628 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); |
1562 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); | 1629 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
1563 | init_dma_desc_rings(dev); | 1630 | |
1631 | ret = init_dma_desc_rings(dev); | ||
1632 | if (ret < 0) { | ||
1633 | pr_err("%s: DMA descriptors initialization failed\n", __func__); | ||
1634 | goto dma_desc_error; | ||
1635 | } | ||
1564 | 1636 | ||
1565 | /* DMA initialization and SW reset */ | 1637 | /* DMA initialization and SW reset */ |
1566 | ret = stmmac_init_dma_engine(priv); | 1638 | ret = stmmac_init_dma_engine(priv); |
1567 | if (ret < 0) { | 1639 | if (ret < 0) { |
1568 | pr_err("%s: DMA initialization failed\n", __func__); | 1640 | pr_err("%s: DMA engine initialization failed\n", __func__); |
1569 | goto init_error; | 1641 | goto init_error; |
1570 | } | 1642 | } |
1571 | 1643 | ||
@@ -1672,6 +1744,7 @@ wolirq_error: | |||
1672 | 1744 | ||
1673 | init_error: | 1745 | init_error: |
1674 | free_dma_desc_resources(priv); | 1746 | free_dma_desc_resources(priv); |
1747 | dma_desc_error: | ||
1675 | if (priv->phydev) | 1748 | if (priv->phydev) |
1676 | phy_disconnect(priv->phydev); | 1749 | phy_disconnect(priv->phydev); |
1677 | phy_error: | 1750 | phy_error: |
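
Note: init_dma_desc_rings() above now returns an error code and unwinds its allocations through a ladder of goto labels, each label freeing whatever was acquired before the step that failed. A compact userspace sketch of the same idiom, using malloc()/free() in place of dma_alloc_coherent() and kmalloc_array():

#include <stdlib.h>

/* Each failure jumps to the label that releases everything allocated
 * before the step that failed; the labels then fall through in reverse
 * order of acquisition. */
static int init_rings(void **rx, void **tx, void **rx_meta, void **tx_meta)
{
    *rx = malloc(4096);
    if (!*rx)
        goto err_rx;
    *tx = malloc(4096);
    if (!*tx)
        goto err_tx;
    *rx_meta = malloc(512);
    if (!*rx_meta)
        goto err_rx_meta;
    *tx_meta = malloc(512);
    if (!*tx_meta)
        goto err_tx_meta;

    return 0;            /* all four live; released by the teardown path */

err_tx_meta:
    free(*rx_meta);
err_rx_meta:
    free(*tx);
err_tx:
    free(*rx);
err_rx:
    return -1;
}

int main(void)
{
    void *rx, *tx, *rx_meta, *tx_meta;

    if (init_rings(&rx, &tx, &rx_meta, &tx_meta))
        return EXIT_FAILURE;

    free(tx_meta);
    free(rx_meta);
    free(tx);
    free(rx);
    return EXIT_SUCCESS;
}

On success the buffers stay allocated for the matching teardown path; on any failure the fall-through labels release them in reverse order, so the caller never has to guess how far initialization got.
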
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 1d6dc41f755d..d01cacf8a7c2 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
2100 | 2100 | ||
2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); | 2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); |
2102 | } | 2102 | } |
2103 | netif_rx(skb); | 2103 | netif_receive_skb(skb); |
2104 | 2104 | ||
2105 | stats->rx_bytes += pkt_len; | 2105 | stats->rx_bytes += pkt_len; |
2106 | stats->rx_packets++; | 2106 | stats->rx_packets++; |
@@ -2884,6 +2884,7 @@ out: | |||
2884 | return ret; | 2884 | return ret; |
2885 | 2885 | ||
2886 | err_iounmap: | 2886 | err_iounmap: |
2887 | netif_napi_del(&vptr->napi); | ||
2887 | iounmap(regs); | 2888 | iounmap(regs); |
2888 | err_free_dev: | 2889 | err_free_dev: |
2889 | free_netdev(netdev); | 2890 | free_netdev(netdev); |
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev) | |||
2904 | struct velocity_info *vptr = netdev_priv(netdev); | 2905 | struct velocity_info *vptr = netdev_priv(netdev); |
2905 | 2906 | ||
2906 | unregister_netdev(netdev); | 2907 | unregister_netdev(netdev); |
2908 | netif_napi_del(&vptr->napi); | ||
2907 | iounmap(vptr->mac_regs); | 2909 | iounmap(vptr->mac_regs); |
2908 | free_netdev(netdev); | 2910 | free_netdev(netdev); |
2909 | velocity_nics--; | 2911 | velocity_nics--; |
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 51f2bc376101..2dcc60fb37f1 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c | |||
@@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) | |||
210 | pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); | 210 | pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); |
211 | pci_write_config_byte(pcidev,0x5a,0xc0); | 211 | pci_write_config_byte(pcidev,0x5a,0xc0); |
212 | WriteLPCReg(0x28, 0x70 ); | 212 | WriteLPCReg(0x28, 0x70 ); |
213 | if (via_ircc_open(pcidev, &info, 0x3076) == 0) | 213 | rc = via_ircc_open(pcidev, &info, 0x3076); |
214 | rc=0; | ||
215 | } else | 214 | } else |
216 | rc = -ENODEV; //IR not turn on | 215 | rc = -ENODEV; //IR not turn on |
217 | } else { //Not VT1211 | 216 | } else { //Not VT1211 |
@@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) | |||
249 | info.irq=FirIRQ; | 248 | info.irq=FirIRQ; |
250 | info.dma=FirDRQ1; | 249 | info.dma=FirDRQ1; |
251 | info.dma2=FirDRQ0; | 250 | info.dma2=FirDRQ0; |
252 | if (via_ircc_open(pcidev, &info, 0x3096) == 0) | 251 | rc = via_ircc_open(pcidev, &info, 0x3096); |
253 | rc=0; | ||
254 | } else | 252 | } else |
255 | rc = -ENODEV; //IR not turn on !!!!! | 253 | rc = -ENODEV; //IR not turn on !!!!! |
256 | }//Not VT1211 | 254 | }//Not VT1211 |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d0f9c2fd1d4f..16b43bf544b7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -739,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
739 | return -EADDRNOTAVAIL; | 739 | return -EADDRNOTAVAIL; |
740 | } | 740 | } |
741 | 741 | ||
742 | if (data && data[IFLA_MACVLAN_FLAGS] && | ||
743 | nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC) | ||
744 | return -EINVAL; | ||
745 | |||
742 | if (data && data[IFLA_MACVLAN_MODE]) { | 746 | if (data && data[IFLA_MACVLAN_MODE]) { |
743 | switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { | 747 | switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { |
744 | case MACVLAN_MODE_PRIVATE: | 748 | case MACVLAN_MODE_PRIVATE: |
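
Note: the macvlan_validate() hunk above rejects netlink requests that set any flag bit the driver does not understand, so unknown flags can no longer be silently accepted. The shape of the check is simply mask-with-the-complement; a tiny standalone version (the flag value matches the one in the hunk, the rest is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define FLAG_NOPROMISC 0x1u     /* the only flag the interface understands */

/* Reject any request with a bit outside the supported set, as the
 * hunk above does with ~MACVLAN_FLAG_NOPROMISC. */
static int validate_flags(uint16_t flags)
{
    if (flags & ~FLAG_NOPROMISC)
        return -1;
    return 0;
}

int main(void)
{
    printf("%d %d\n", validate_flags(0x1), validate_flags(0x3));  /* 0 -1 */
    return 0;
}
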
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a98fb0ed6aef..ea53abb20988 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops; | |||
68 | #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ | 68 | #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ |
69 | NETIF_F_TSO6 | NETIF_F_UFO) | 69 | NETIF_F_TSO6 | NETIF_F_UFO) |
70 | #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) | 70 | #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) |
71 | #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) | ||
72 | |||
71 | /* | 73 | /* |
72 | * RCU usage: | 74 | * RCU usage: |
73 | * The macvtap_queue and the macvlan_dev are loosely coupled, the | 75 | * The macvtap_queue and the macvlan_dev are loosely coupled, the |
@@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) | |||
278 | { | 280 | { |
279 | struct macvlan_dev *vlan = netdev_priv(dev); | 281 | struct macvlan_dev *vlan = netdev_priv(dev); |
280 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); | 282 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); |
281 | netdev_features_t features; | 283 | netdev_features_t features = TAP_FEATURES; |
284 | |||
282 | if (!q) | 285 | if (!q) |
283 | goto drop; | 286 | goto drop; |
284 | 287 | ||
@@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) | |||
287 | 290 | ||
288 | skb->dev = dev; | 291 | skb->dev = dev; |
289 | /* Apply the forward feature mask so that we perform segmentation | 292 | /* Apply the forward feature mask so that we perform segmentation |
290 | * according to users wishes. | 293 | * according to users wishes. This only works if VNET_HDR is |
294 | * enabled. | ||
291 | */ | 295 | */ |
292 | features = netif_skb_features(skb) & vlan->tap_features; | 296 | if (q->flags & IFF_VNET_HDR) |
297 | features |= vlan->tap_features; | ||
293 | if (netif_needs_gso(skb, features)) { | 298 | if (netif_needs_gso(skb, features)) { |
294 | struct sk_buff *segs = __skb_gso_segment(skb, features, false); | 299 | struct sk_buff *segs = __skb_gso_segment(skb, features, false); |
295 | 300 | ||
@@ -818,10 +823,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
818 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; | 823 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; |
819 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; | 824 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; |
820 | } | 825 | } |
821 | if (vlan) | 826 | if (vlan) { |
827 | local_bh_disable(); | ||
822 | macvlan_start_xmit(skb, vlan->dev); | 828 | macvlan_start_xmit(skb, vlan->dev); |
823 | else | 829 | local_bh_enable(); |
830 | } else { | ||
824 | kfree_skb(skb); | 831 | kfree_skb(skb); |
832 | } | ||
825 | rcu_read_unlock(); | 833 | rcu_read_unlock(); |
826 | 834 | ||
827 | return total_len; | 835 | return total_len; |
@@ -912,8 +920,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
912 | done: | 920 | done: |
913 | rcu_read_lock(); | 921 | rcu_read_lock(); |
914 | vlan = rcu_dereference(q->vlan); | 922 | vlan = rcu_dereference(q->vlan); |
915 | if (vlan) | 923 | if (vlan) { |
924 | preempt_disable(); | ||
916 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); | 925 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); |
926 | preempt_enable(); | ||
927 | } | ||
917 | rcu_read_unlock(); | 928 | rcu_read_unlock(); |
918 | 929 | ||
919 | return ret ? ret : copied; | 930 | return ret ? ret : copied; |
@@ -1058,8 +1069,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) | |||
1058 | /* tap_features are the same as features on tun/tap and | 1069 | /* tap_features are the same as features on tun/tap and |
1059 | * reflect user expectations. | 1070 | * reflect user expectations. |
1060 | */ | 1071 | */ |
1061 | vlan->tap_features = vlan->dev->features & | 1072 | vlan->tap_features = feature_mask; |
1062 | (feature_mask | ~TUN_OFFLOADS); | ||
1063 | vlan->set_features = features; | 1073 | vlan->set_features = features; |
1064 | netdev_update_features(vlan->dev); | 1074 | netdev_update_features(vlan->dev); |
1065 | 1075 | ||
@@ -1155,10 +1165,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, | |||
1155 | TUN_F_TSO_ECN | TUN_F_UFO)) | 1165 | TUN_F_TSO_ECN | TUN_F_UFO)) |
1156 | return -EINVAL; | 1166 | return -EINVAL; |
1157 | 1167 | ||
1158 | /* TODO: only accept frames with the features that | ||
1159 | got enabled for forwarded frames */ | ||
1160 | if (!(q->flags & IFF_VNET_HDR)) | ||
1161 | return -EINVAL; | ||
1162 | rtnl_lock(); | 1168 | rtnl_lock(); |
1163 | ret = set_offload(q, arg); | 1169 | ret = set_offload(q, arg); |
1164 | rtnl_unlock(); | 1170 | rtnl_unlock(); |
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 8e7af8354342..138de837977f 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #define RTL821x_INER_INIT 0x6400 | 23 | #define RTL821x_INER_INIT 0x6400 |
24 | #define RTL821x_INSR 0x13 | 24 | #define RTL821x_INSR 0x13 |
25 | 25 | ||
26 | #define RTL8211E_INER_LINK_STAT 0x10 | 26 | #define RTL8211E_INER_LINK_STATUS 0x400 |
27 | 27 | ||
28 | MODULE_DESCRIPTION("Realtek PHY driver"); | 28 | MODULE_DESCRIPTION("Realtek PHY driver"); |
29 | MODULE_AUTHOR("Johnson Leung"); | 29 | MODULE_AUTHOR("Johnson Leung"); |
@@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev) | |||
57 | 57 | ||
58 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) | 58 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) |
59 | err = phy_write(phydev, RTL821x_INER, | 59 | err = phy_write(phydev, RTL821x_INER, |
60 | RTL8211E_INER_LINK_STAT); | 60 | RTL8211E_INER_LINK_STATUS); |
61 | else | 61 | else |
62 | err = phy_write(phydev, RTL821x_INER, 0); | 62 | err = phy_write(phydev, RTL821x_INER, 0); |
63 | 63 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db690a372260..71af122edf2d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1074 | u32 rxhash; | 1074 | u32 rxhash; |
1075 | 1075 | ||
1076 | if (!(tun->flags & TUN_NO_PI)) { | 1076 | if (!(tun->flags & TUN_NO_PI)) { |
1077 | if ((len -= sizeof(pi)) > total_len) | 1077 | if (len < sizeof(pi)) |
1078 | return -EINVAL; | 1078 | return -EINVAL; |
1079 | len -= sizeof(pi); | ||
1079 | 1080 | ||
1080 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) | 1081 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) |
1081 | return -EFAULT; | 1082 | return -EFAULT; |
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1083 | } | 1084 | } |
1084 | 1085 | ||
1085 | if (tun->flags & TUN_VNET_HDR) { | 1086 | if (tun->flags & TUN_VNET_HDR) { |
1086 | if ((len -= tun->vnet_hdr_sz) > total_len) | 1087 | if (len < tun->vnet_hdr_sz) |
1087 | return -EINVAL; | 1088 | return -EINVAL; |
1089 | len -= tun->vnet_hdr_sz; | ||
1088 | 1090 | ||
1089 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) | 1091 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) |
1090 | return -EFAULT; | 1092 | return -EFAULT; |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index cba1d46e672e..86292e6aaf49 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -2816,13 +2816,16 @@ exit: | |||
2816 | static int hso_get_config_data(struct usb_interface *interface) | 2816 | static int hso_get_config_data(struct usb_interface *interface) |
2817 | { | 2817 | { |
2818 | struct usb_device *usbdev = interface_to_usbdev(interface); | 2818 | struct usb_device *usbdev = interface_to_usbdev(interface); |
2819 | u8 config_data[17]; | 2819 | u8 *config_data = kmalloc(17, GFP_KERNEL); |
2820 | u32 if_num = interface->altsetting->desc.bInterfaceNumber; | 2820 | u32 if_num = interface->altsetting->desc.bInterfaceNumber; |
2821 | s32 result; | 2821 | s32 result; |
2822 | 2822 | ||
2823 | if (!config_data) | ||
2824 | return -ENOMEM; | ||
2823 | if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), | 2825 | if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), |
2824 | 0x86, 0xC0, 0, 0, config_data, 17, | 2826 | 0x86, 0xC0, 0, 0, config_data, 17, |
2825 | USB_CTRL_SET_TIMEOUT) != 0x11) { | 2827 | USB_CTRL_SET_TIMEOUT) != 0x11) { |
2828 | kfree(config_data); | ||
2826 | return -EIO; | 2829 | return -EIO; |
2827 | } | 2830 | } |
2828 | 2831 | ||
@@ -2873,6 +2876,7 @@ static int hso_get_config_data(struct usb_interface *interface) | |||
2873 | if (config_data[16] & 0x1) | 2876 | if (config_data[16] & 0x1) |
2874 | result |= HSO_INFO_CRC_BUG; | 2877 | result |= HSO_INFO_CRC_BUG; |
2875 | 2878 | ||
2879 | kfree(config_data); | ||
2876 | return result; | 2880 | return result; |
2877 | } | 2881 | } |
2878 | 2882 | ||
@@ -2886,6 +2890,11 @@ static int hso_probe(struct usb_interface *interface, | |||
2886 | struct hso_shared_int *shared_int; | 2890 | struct hso_shared_int *shared_int; |
2887 | struct hso_device *tmp_dev = NULL; | 2891 | struct hso_device *tmp_dev = NULL; |
2888 | 2892 | ||
2893 | if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { | ||
2894 | dev_err(&interface->dev, "Not our interface\n"); | ||
2895 | return -ENODEV; | ||
2896 | } | ||
2897 | |||
2889 | if_num = interface->altsetting->desc.bInterfaceNumber; | 2898 | if_num = interface->altsetting->desc.bInterfaceNumber; |
2890 | 2899 | ||
2891 | /* Get the interface/port specification from either driver_info or from | 2900 | /* Get the interface/port specification from either driver_info or from |
@@ -2895,10 +2904,6 @@ static int hso_probe(struct usb_interface *interface, | |||
2895 | else | 2904 | else |
2896 | port_spec = hso_get_config_data(interface); | 2905 | port_spec = hso_get_config_data(interface); |
2897 | 2906 | ||
2898 | if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { | ||
2899 | dev_err(&interface->dev, "Not our interface\n"); | ||
2900 | return -ENODEV; | ||
2901 | } | ||
2902 | /* Check if we need to switch to alt interfaces prior to port | 2907 | /* Check if we need to switch to alt interfaces prior to port |
2903 | * configuration */ | 2908 | * configuration */ |
2904 | if (interface->num_altsetting > 1) | 2909 | if (interface->num_altsetting > 1) |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f4c6db419ddb..767f7af3bd40 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1386,7 +1386,7 @@ static int vxlan_open(struct net_device *dev) | |||
1386 | return -ENOTCONN; | 1386 | return -ENOTCONN; |
1387 | 1387 | ||
1388 | if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && | 1388 | if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && |
1389 | ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { | 1389 | vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { |
1390 | vxlan_sock_hold(vs); | 1390 | vxlan_sock_hold(vs); |
1391 | dev_hold(dev); | 1391 | dev_hold(dev); |
1392 | queue_work(vxlan_wq, &vxlan->igmp_join); | 1392 | queue_work(vxlan_wq, &vxlan->igmp_join); |
@@ -1793,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) | |||
1793 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); | 1793 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); |
1794 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1794 | struct vxlan_dev *vxlan = netdev_priv(dev); |
1795 | 1795 | ||
1796 | flush_workqueue(vxlan_wq); | ||
1797 | |||
1798 | spin_lock(&vn->sock_lock); | 1796 | spin_lock(&vn->sock_lock); |
1799 | hlist_del_rcu(&vxlan->hlist); | 1797 | hlist_del_rcu(&vxlan->hlist); |
1800 | spin_unlock(&vn->sock_lock); | 1798 | spin_unlock(&vn->sock_lock); |
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c index 7365674366f4..010b252be584 100644 --- a/drivers/net/wireless/cw1200/sta.c +++ b/drivers/net/wireless/cw1200/sta.c | |||
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv) | |||
1406 | if (!priv->join_status) | 1406 | if (!priv->join_status) |
1407 | goto done; | 1407 | goto done; |
1408 | 1408 | ||
1409 | if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { | 1409 | if (priv->join_status == CW1200_JOIN_STATUS_AP) |
1410 | wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", | 1410 | goto done; |
1411 | priv->join_status); | ||
1412 | BUG_ON(1); | ||
1413 | } | ||
1414 | 1411 | ||
1415 | cancel_work_sync(&priv->update_filtering_work); | 1412 | cancel_work_sync(&priv->update_filtering_work); |
1416 | cancel_work_sync(&priv->set_beacon_wakeup_period_work); | 1413 | cancel_work_sync(&priv->set_beacon_wakeup_period_work); |
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index ac074731335a..e5090309824e 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c | |||
@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev, | |||
523 | 523 | ||
524 | data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); | 524 | data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); |
525 | 525 | ||
526 | memcpy(extra, &addr, sizeof(struct sockaddr) * data->length); | 526 | memcpy(extra, addr, sizeof(struct sockaddr) * data->length); |
527 | data->flags = 1; /* has quality information */ | 527 | data->flags = 1; /* has quality information */ |
528 | memcpy(extra + sizeof(struct sockaddr) * data->length, &qual, | 528 | memcpy(extra + sizeof(struct sockaddr) * data->length, qual, |
529 | sizeof(struct iw_quality) * data->length); | 529 | sizeof(struct iw_quality) * data->length); |
530 | 530 | ||
531 | kfree(addr); | 531 | kfree(addr); |
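
Note: in the hostap hunk above, addr and qual are kmalloc'd arrays (note the kfree(addr) right after), so memcpy(extra, &addr, ...) copied the bytes of the pointer variable itself rather than the station list it points to; dropping the & copies the intended data. A minimal userspace reproduction of that pointer-versus-pointee mistake:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    int dst[4] = { 0 };
    int *src = malloc(4 * sizeof(*src));    /* heap array, like addr/qual above */
    int i;

    if (!src)
        return 1;
    for (i = 0; i < 4; i++)
        src[i] = i + 1;

    /* Buggy form: copies the bytes of the pointer variable itself. */
    memcpy(dst, &src, sizeof(src));
    printf("after &src copy: %d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);

    /* Fixed form, as in the hunk: copies the array src points to. */
    memcpy(dst, src, 4 * sizeof(*src));
    printf("after  src copy: %d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);

    free(src);
    return 0;
}
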
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index b9b2bb51e605..f2ed62e37340 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c | |||
@@ -4460,12 +4460,12 @@ il4965_irq_tasklet(struct il_priv *il) | |||
4460 | * is killed. Hence update the killswitch state here. The | 4460 | * is killed. Hence update the killswitch state here. The |
4461 | * rfkill handler will care about restarting if needed. | 4461 | * rfkill handler will care about restarting if needed. |
4462 | */ | 4462 | */ |
4463 | if (!test_bit(S_ALIVE, &il->status)) { | 4463 | if (hw_rf_kill) { |
4464 | if (hw_rf_kill) | 4464 | set_bit(S_RFKILL, &il->status); |
4465 | set_bit(S_RFKILL, &il->status); | 4465 | } else { |
4466 | else | 4466 | clear_bit(S_RFKILL, &il->status); |
4467 | clear_bit(S_RFKILL, &il->status); | ||
4468 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); | 4467 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); |
4468 | il_force_reset(il, true); | ||
4469 | } | 4469 | } |
4470 | 4470 | ||
4471 | handled |= CSR_INT_BIT_RF_KILL; | 4471 | handled |= CSR_INT_BIT_RF_KILL; |
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il) | |||
5334 | 5334 | ||
5335 | il->active_rate = RATES_MASK; | 5335 | il->active_rate = RATES_MASK; |
5336 | 5336 | ||
5337 | il_power_update_mode(il, true); | ||
5338 | D_INFO("Updated power mode\n"); | ||
5339 | |||
5337 | if (il_is_associated(il)) { | 5340 | if (il_is_associated(il)) { |
5338 | struct il_rxon_cmd *active_rxon = | 5341 | struct il_rxon_cmd *active_rxon = |
5339 | (struct il_rxon_cmd *)&il->active; | 5342 | (struct il_rxon_cmd *)&il->active; |
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il) | |||
5364 | D_INFO("ALIVE processing complete.\n"); | 5367 | D_INFO("ALIVE processing complete.\n"); |
5365 | wake_up(&il->wait_command_queue); | 5368 | wake_up(&il->wait_command_queue); |
5366 | 5369 | ||
5367 | il_power_update_mode(il, true); | ||
5368 | D_INFO("Updated power mode\n"); | ||
5369 | |||
5370 | return; | 5370 | return; |
5371 | 5371 | ||
5372 | restart: | 5372 | restart: |
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 3195aad440dd..b03e22ef5462 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c | |||
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external) | |||
4660 | 4660 | ||
4661 | return 0; | 4661 | return 0; |
4662 | } | 4662 | } |
4663 | EXPORT_SYMBOL(il_force_reset); | ||
4663 | 4664 | ||
4664 | int | 4665 | int |
4665 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 4666 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 822f1a00efbb..319387263e12 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c | |||
@@ -1068,7 +1068,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) | |||
1068 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | 1068 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
1069 | return; | 1069 | return; |
1070 | 1070 | ||
1071 | if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) | 1071 | if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) |
1072 | return; | ||
1073 | |||
1074 | if (ctx->vif) | ||
1072 | ieee80211_chswitch_done(ctx->vif, is_success); | 1075 | ieee80211_chswitch_done(ctx->vif, is_success); |
1073 | } | 1076 | } |
1074 | 1077 | ||
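Editor's note: in iwl_chswitch_done() the pending-flag test is inverted into an early return, and the ieee80211_chswitch_done() call gains a guard on the context still having a vif. A sketch of the resulting flow; 'ctx' is assumed to be the BSS rxon context looked up earlier in the function (that lookup is not part of the hunk):

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
                return;                 /* no channel switch was pending */

        if (ctx->vif)                   /* the interface may be gone by now */
                ieee80211_chswitch_done(ctx->vif, is_success);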
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index a70c7b9d9bad..ff8cc75c189d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h | |||
@@ -97,8 +97,6 @@ | |||
97 | 97 | ||
98 | #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) | 98 | #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) |
99 | 99 | ||
100 | #define APMG_RTC_INT_STT_RFKILL (0x10000000) | ||
101 | |||
102 | /* Device system time */ | 100 | /* Device system time */ |
103 | #define DEVICE_SYSTEM_TIME_REG 0xA0206C | 101 | #define DEVICE_SYSTEM_TIME_REG 0xA0206C |
104 | 102 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index ad9bbca99213..7fd6fbfbc1b3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c | |||
@@ -138,6 +138,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) | |||
138 | schedule_work(&mvm->roc_done_wk); | 138 | schedule_work(&mvm->roc_done_wk); |
139 | } | 139 | } |
140 | 140 | ||
141 | static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, | ||
142 | struct ieee80211_vif *vif, | ||
143 | const char *errmsg) | ||
144 | { | ||
145 | if (vif->type != NL80211_IFTYPE_STATION) | ||
146 | return false; | ||
147 | if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) | ||
148 | return false; | ||
149 | if (errmsg) | ||
150 | IWL_ERR(mvm, "%s\n", errmsg); | ||
151 | ieee80211_connection_loss(vif); | ||
152 | return true; | ||
153 | } | ||
154 | |||
141 | /* | 155 | /* |
142 | * Handles a FW notification for an event that is known to the driver. | 156 | * Handles a FW notification for an event that is known to the driver. |
143 | * | 157 | * |
@@ -163,8 +177,13 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, | |||
163 | * P2P Device discoveribility, while there are other higher priority | 177 | * P2P Device discoveribility, while there are other higher priority |
164 | * events in the system). | 178 | * events in the system). |
165 | */ | 179 | */ |
166 | WARN_ONCE(!le32_to_cpu(notif->status), | 180 | if (WARN_ONCE(!le32_to_cpu(notif->status), |
167 | "Failed to schedule time event\n"); | 181 | "Failed to schedule time event\n")) { |
182 | if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { | ||
183 | iwl_mvm_te_clear_data(mvm, te_data); | ||
184 | return; | ||
185 | } | ||
186 | } | ||
168 | 187 | ||
169 | if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { | 188 | if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { |
170 | IWL_DEBUG_TE(mvm, | 189 | IWL_DEBUG_TE(mvm, |
@@ -180,14 +199,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, | |||
180 | * By now, we should have finished association | 199 | * By now, we should have finished association |
181 | * and know the dtim period. | 200 | * and know the dtim period. |
182 | */ | 201 | */ |
183 | if (te_data->vif->type == NL80211_IFTYPE_STATION && | 202 | iwl_mvm_te_check_disconnect(mvm, te_data->vif, |
184 | (!te_data->vif->bss_conf.assoc || | 203 | "No assocation and the time event is over already..."); |
185 | !te_data->vif->bss_conf.dtim_period)) { | ||
186 | IWL_ERR(mvm, | ||
187 | "No assocation and the time event is over already...\n"); | ||
188 | ieee80211_connection_loss(te_data->vif); | ||
189 | } | ||
190 | |||
191 | iwl_mvm_te_clear_data(mvm, te_data); | 204 | iwl_mvm_te_clear_data(mvm, te_data); |
192 | } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { | 205 | } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { |
193 | te_data->running = true; | 206 | te_data->running = true; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index f600e68a410a..fd848cd1583e 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -888,14 +888,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) | |||
888 | 888 | ||
889 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 889 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
890 | if (hw_rfkill) { | 890 | if (hw_rfkill) { |
891 | /* | ||
892 | * Clear the interrupt in APMG if the NIC is going down. | ||
893 | * Note that when the NIC exits RFkill (else branch), we | ||
894 | * can't access prph and the NIC will be reset in | ||
895 | * start_hw anyway. | ||
896 | */ | ||
897 | iwl_write_prph(trans, APMG_RTC_INT_STT_REG, | ||
898 | APMG_RTC_INT_STT_RFKILL); | ||
899 | set_bit(STATUS_RFKILL, &trans_pcie->status); | 891 | set_bit(STATUS_RFKILL, &trans_pcie->status); |
900 | if (test_and_clear_bit(STATUS_HCMD_ACTIVE, | 892 | if (test_and_clear_bit(STATUS_HCMD_ACTIVE, |
901 | &trans_pcie->status)) | 893 | &trans_pcie->status)) |
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 96cfcdd39079..390e2f058aff 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -1502,16 +1502,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
1502 | spin_lock_init(&trans_pcie->reg_lock); | 1502 | spin_lock_init(&trans_pcie->reg_lock); |
1503 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); | 1503 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); |
1504 | 1504 | ||
1505 | /* W/A - seems to solve weird behavior. We need to remove this if we | ||
1506 | * don't want to stay in L1 all the time. This wastes a lot of power */ | ||
1507 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
1508 | PCIE_LINK_STATE_CLKPM); | ||
1509 | |||
1510 | if (pci_enable_device(pdev)) { | 1505 | if (pci_enable_device(pdev)) { |
1511 | err = -ENODEV; | 1506 | err = -ENODEV; |
1512 | goto out_no_pci; | 1507 | goto out_no_pci; |
1513 | } | 1508 | } |
1514 | 1509 | ||
1510 | /* W/A - seems to solve weird behavior. We need to remove this if we | ||
1511 | * don't want to stay in L1 all the time. This wastes a lot of power */ | ||
1512 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
1513 | PCIE_LINK_STATE_CLKPM); | ||
1514 | |||
1515 | pci_set_master(pdev); | 1515 | pci_set_master(pdev); |
1516 | 1516 | ||
1517 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); | 1517 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); |
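Editor's note: the iwl-prph.h/rx.c hunks drop the APMG_RTC_INT_STT_RFKILL write along with its now-unused define, while the trans.c hunk moves the ASPM workaround after pci_enable_device(), so the link-state configuration is only touched once the device is actually enabled. The ordering in sketch form (the probe name is made up, not iwl_trans_pcie_alloc()):

        #include <linux/pci.h>
        /* pci_disable_link_state() may also need <linux/pci-aspm.h> on trees
         * of this vintage. */

        static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        {
                int err;

                err = pci_enable_device(pdev); /* bring up config access first */
                if (err)
                        return err;

                /* W/A: keep the link out of L0s/L1/CLKPM, applied after enable */
                pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
                                       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

                pci_set_master(pdev);
                return 0;
        }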
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 4941f201d6c8..b8ba1f925e75 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c | |||
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw) | |||
98 | goto exit; | 98 | goto exit; |
99 | 99 | ||
100 | err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, | 100 | err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, |
101 | USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); | 101 | USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT); |
102 | if (err < 0) | 102 | if (err < 0) |
103 | goto exit; | 103 | goto exit; |
104 | 104 | ||
105 | memcpy(&ret, buf, sizeof(ret)); | ||
106 | |||
105 | if (ret & 0x80) { | 107 | if (ret & 0x80) { |
106 | err = -EIO; | 108 | err = -EIO; |
107 | goto exit; | 109 | goto exit; |
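Editor's note: the zd1201 fix stops handing the address of an on-stack variable to usb_control_msg(); USB transfer buffers must be DMA-capable, so the read goes into the kmalloc'd buf the function already has and the byte is copied out afterwards. The general bounce-buffer pattern, sketched with made-up request values:

        #include <linux/slab.h>
        #include <linux/usb.h>

        /* Read one status byte over a control transfer. Stack memory is not
         * DMA-safe, so bounce through a heap buffer (sketch only). */
        static int read_status_byte(struct usb_device *udev, u8 *status)
        {
                u8 *buf;
                int err;

                buf = kmalloc(1, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x4,
                                      USB_DIR_IN | 0x40, 0, 0, buf, 1, 1000);
                if (err >= 0)
                        *status = *buf;

                kfree(buf);
                return err < 0 ? err : 0;
        }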
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 6bb7cf2de556..b10ba00cc3e6 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob, | |||
392 | mem = (unsigned long) | 392 | mem = (unsigned long) |
393 | dt_alloc(size + 4, __alignof__(struct device_node)); | 393 | dt_alloc(size + 4, __alignof__(struct device_node)); |
394 | 394 | ||
395 | memset((void *)mem, 0, size); | ||
396 | |||
395 | ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); | 397 | ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); |
396 | 398 | ||
397 | pr_debug(" unflattening %lx...\n", mem); | 399 | pr_debug(" unflattening %lx...\n", mem); |
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c index c47fd1e5450b..94716c779800 100644 --- a/drivers/pinctrl/pinctrl-sunxi.c +++ b/drivers/pinctrl/pinctrl-sunxi.c | |||
@@ -278,6 +278,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, | |||
278 | { | 278 | { |
279 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); | 279 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); |
280 | struct sunxi_pinctrl_group *g = &pctl->groups[group]; | 280 | struct sunxi_pinctrl_group *g = &pctl->groups[group]; |
281 | unsigned long flags; | ||
281 | u32 val, mask; | 282 | u32 val, mask; |
282 | u16 strength; | 283 | u16 strength; |
283 | u8 dlevel; | 284 | u8 dlevel; |
@@ -295,22 +296,35 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev, | |||
295 | * 3: 40mA | 296 | * 3: 40mA |
296 | */ | 297 | */ |
297 | dlevel = strength / 10 - 1; | 298 | dlevel = strength / 10 - 1; |
299 | |||
300 | spin_lock_irqsave(&pctl->lock, flags); | ||
301 | |||
298 | val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); | 302 | val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); |
299 | mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); | 303 | mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); |
300 | writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), | 304 | writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), |
301 | pctl->membase + sunxi_dlevel_reg(g->pin)); | 305 | pctl->membase + sunxi_dlevel_reg(g->pin)); |
306 | |||
307 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
302 | break; | 308 | break; |
303 | case PIN_CONFIG_BIAS_PULL_UP: | 309 | case PIN_CONFIG_BIAS_PULL_UP: |
310 | spin_lock_irqsave(&pctl->lock, flags); | ||
311 | |||
304 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); | 312 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); |
305 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); | 313 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); |
306 | writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), | 314 | writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), |
307 | pctl->membase + sunxi_pull_reg(g->pin)); | 315 | pctl->membase + sunxi_pull_reg(g->pin)); |
316 | |||
317 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
308 | break; | 318 | break; |
309 | case PIN_CONFIG_BIAS_PULL_DOWN: | 319 | case PIN_CONFIG_BIAS_PULL_DOWN: |
320 | spin_lock_irqsave(&pctl->lock, flags); | ||
321 | |||
310 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); | 322 | val = readl(pctl->membase + sunxi_pull_reg(g->pin)); |
311 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); | 323 | mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); |
312 | writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), | 324 | writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), |
313 | pctl->membase + sunxi_pull_reg(g->pin)); | 325 | pctl->membase + sunxi_pull_reg(g->pin)); |
326 | |||
327 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
314 | break; | 328 | break; |
315 | default: | 329 | default: |
316 | break; | 330 | break; |
@@ -360,11 +374,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev, | |||
360 | u8 config) | 374 | u8 config) |
361 | { | 375 | { |
362 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); | 376 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); |
377 | unsigned long flags; | ||
378 | u32 val, mask; | ||
379 | |||
380 | spin_lock_irqsave(&pctl->lock, flags); | ||
363 | 381 | ||
364 | u32 val = readl(pctl->membase + sunxi_mux_reg(pin)); | 382 | val = readl(pctl->membase + sunxi_mux_reg(pin)); |
365 | u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin); | 383 | mask = MUX_PINS_MASK << sunxi_mux_offset(pin); |
366 | writel((val & ~mask) | config << sunxi_mux_offset(pin), | 384 | writel((val & ~mask) | config << sunxi_mux_offset(pin), |
367 | pctl->membase + sunxi_mux_reg(pin)); | 385 | pctl->membase + sunxi_mux_reg(pin)); |
386 | |||
387 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
368 | } | 388 | } |
369 | 389 | ||
370 | static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, | 390 | static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, |
@@ -464,8 +484,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip, | |||
464 | struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); | 484 | struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); |
465 | u32 reg = sunxi_data_reg(offset); | 485 | u32 reg = sunxi_data_reg(offset); |
466 | u8 index = sunxi_data_offset(offset); | 486 | u8 index = sunxi_data_offset(offset); |
487 | unsigned long flags; | ||
488 | u32 regval; | ||
489 | |||
490 | spin_lock_irqsave(&pctl->lock, flags); | ||
491 | |||
492 | regval = readl(pctl->membase + reg); | ||
467 | 493 | ||
468 | writel((value & DATA_PINS_MASK) << index, pctl->membase + reg); | 494 | if (value) |
495 | regval |= BIT(index); | ||
496 | else | ||
497 | regval &= ~(BIT(index)); | ||
498 | |||
499 | writel(regval, pctl->membase + reg); | ||
500 | |||
501 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
469 | } | 502 | } |
470 | 503 | ||
471 | static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, | 504 | static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, |
@@ -526,6 +559,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, | |||
526 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); | 559 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); |
527 | u32 reg = sunxi_irq_cfg_reg(d->hwirq); | 560 | u32 reg = sunxi_irq_cfg_reg(d->hwirq); |
528 | u8 index = sunxi_irq_cfg_offset(d->hwirq); | 561 | u8 index = sunxi_irq_cfg_offset(d->hwirq); |
562 | unsigned long flags; | ||
563 | u32 regval; | ||
529 | u8 mode; | 564 | u8 mode; |
530 | 565 | ||
531 | switch (type) { | 566 | switch (type) { |
@@ -548,7 +583,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, | |||
548 | return -EINVAL; | 583 | return -EINVAL; |
549 | } | 584 | } |
550 | 585 | ||
551 | writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg); | 586 | spin_lock_irqsave(&pctl->lock, flags); |
587 | |||
588 | regval = readl(pctl->membase + reg); | ||
589 | regval &= ~IRQ_CFG_IRQ_MASK; | ||
590 | writel(regval | (mode << index), pctl->membase + reg); | ||
591 | |||
592 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
552 | 593 | ||
553 | return 0; | 594 | return 0; |
554 | } | 595 | } |
@@ -560,14 +601,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d) | |||
560 | u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); | 601 | u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); |
561 | u32 status_reg = sunxi_irq_status_reg(d->hwirq); | 602 | u32 status_reg = sunxi_irq_status_reg(d->hwirq); |
562 | u8 status_idx = sunxi_irq_status_offset(d->hwirq); | 603 | u8 status_idx = sunxi_irq_status_offset(d->hwirq); |
604 | unsigned long flags; | ||
563 | u32 val; | 605 | u32 val; |
564 | 606 | ||
607 | spin_lock_irqsave(&pctl->lock, flags); | ||
608 | |||
565 | /* Mask the IRQ */ | 609 | /* Mask the IRQ */ |
566 | val = readl(pctl->membase + ctrl_reg); | 610 | val = readl(pctl->membase + ctrl_reg); |
567 | writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); | 611 | writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); |
568 | 612 | ||
569 | /* Clear the IRQ */ | 613 | /* Clear the IRQ */ |
570 | writel(1 << status_idx, pctl->membase + status_reg); | 614 | writel(1 << status_idx, pctl->membase + status_reg); |
615 | |||
616 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
571 | } | 617 | } |
572 | 618 | ||
573 | static void sunxi_pinctrl_irq_mask(struct irq_data *d) | 619 | static void sunxi_pinctrl_irq_mask(struct irq_data *d) |
@@ -575,11 +621,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d) | |||
575 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); | 621 | struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); |
576 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); | 622 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); |
577 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); | 623 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); |
624 | unsigned long flags; | ||
578 | u32 val; | 625 | u32 val; |
579 | 626 | ||
627 | spin_lock_irqsave(&pctl->lock, flags); | ||
628 | |||
580 | /* Mask the IRQ */ | 629 | /* Mask the IRQ */ |
581 | val = readl(pctl->membase + reg); | 630 | val = readl(pctl->membase + reg); |
582 | writel(val & ~(1 << idx), pctl->membase + reg); | 631 | writel(val & ~(1 << idx), pctl->membase + reg); |
632 | |||
633 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
583 | } | 634 | } |
584 | 635 | ||
585 | static void sunxi_pinctrl_irq_unmask(struct irq_data *d) | 636 | static void sunxi_pinctrl_irq_unmask(struct irq_data *d) |
@@ -588,6 +639,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) | |||
588 | struct sunxi_desc_function *func; | 639 | struct sunxi_desc_function *func; |
589 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); | 640 | u32 reg = sunxi_irq_ctrl_reg(d->hwirq); |
590 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); | 641 | u8 idx = sunxi_irq_ctrl_offset(d->hwirq); |
642 | unsigned long flags; | ||
591 | u32 val; | 643 | u32 val; |
592 | 644 | ||
593 | func = sunxi_pinctrl_desc_find_function_by_pin(pctl, | 645 | func = sunxi_pinctrl_desc_find_function_by_pin(pctl, |
@@ -597,9 +649,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) | |||
597 | /* Change muxing to INT mode */ | 649 | /* Change muxing to INT mode */ |
598 | sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); | 650 | sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); |
599 | 651 | ||
652 | spin_lock_irqsave(&pctl->lock, flags); | ||
653 | |||
600 | /* Unmask the IRQ */ | 654 | /* Unmask the IRQ */ |
601 | val = readl(pctl->membase + reg); | 655 | val = readl(pctl->membase + reg); |
602 | writel(val | (1 << idx), pctl->membase + reg); | 656 | writel(val | (1 << idx), pctl->membase + reg); |
657 | |||
658 | spin_unlock_irqrestore(&pctl->lock, flags); | ||
603 | } | 659 | } |
604 | 660 | ||
605 | static struct irq_chip sunxi_pinctrl_irq_chip = { | 661 | static struct irq_chip sunxi_pinctrl_irq_chip = { |
@@ -752,6 +808,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev) | |||
752 | return -ENOMEM; | 808 | return -ENOMEM; |
753 | platform_set_drvdata(pdev, pctl); | 809 | platform_set_drvdata(pdev, pctl); |
754 | 810 | ||
811 | spin_lock_init(&pctl->lock); | ||
812 | |||
755 | pctl->membase = of_iomap(node, 0); | 813 | pctl->membase = of_iomap(node, 0); |
756 | if (!pctl->membase) | 814 | if (!pctl->membase) |
757 | return -ENOMEM; | 815 | return -ENOMEM; |
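Editor's note: every register touched in pinctrl-sunxi.c packs several pins (or IRQ lines) into one 32-bit word, so the read-modify-write sequences need serialization; the patch threads a driver-wide spinlock (declared in pinctrl-sunxi.h below) through them and turns the GPIO set path into a proper RMW of a single bit instead of a blind write. The core idiom, reduced to a sketch with illustrative parameters:

        #include <linux/io.h>
        #include <linux/spinlock.h>

        /* Serialized read-modify-write of a shared MMIO register (sketch;
         * 'lock', 'membase' and the reg/shift parameters are illustrative). */
        static void rmw_field(void __iomem *membase, u32 reg, u32 mask,
                              u32 shift, u32 new_val, spinlock_t *lock)
        {
                unsigned long flags;
                u32 val;

                spin_lock_irqsave(lock, flags);

                val = readl(membase + reg);
                val &= ~(mask << shift);
                val |= new_val << shift;
                writel(val, membase + reg);

                spin_unlock_irqrestore(lock, flags);
        }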
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h index d68047d8f699..01c494f8a14f 100644 --- a/drivers/pinctrl/pinctrl-sunxi.h +++ b/drivers/pinctrl/pinctrl-sunxi.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #define __PINCTRL_SUNXI_H | 14 | #define __PINCTRL_SUNXI_H |
15 | 15 | ||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/spinlock.h> | ||
17 | 18 | ||
18 | #define PA_BASE 0 | 19 | #define PA_BASE 0 |
19 | #define PB_BASE 32 | 20 | #define PB_BASE 32 |
@@ -407,6 +408,7 @@ struct sunxi_pinctrl { | |||
407 | unsigned ngroups; | 408 | unsigned ngroups; |
408 | int irq; | 409 | int irq; |
409 | int irq_array[SUNXI_IRQ_NUMBER]; | 410 | int irq_array[SUNXI_IRQ_NUMBER]; |
411 | spinlock_t lock; | ||
410 | struct pinctrl_dev *pctl_dev; | 412 | struct pinctrl_dev *pctl_dev; |
411 | }; | 413 | }; |
412 | 414 | ||
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c index 0f9f8596b300..f9119525f557 100644 --- a/drivers/platform/olpc/olpc-ec.c +++ b/drivers/platform/olpc/olpc-ec.c | |||
@@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void) | |||
330 | return platform_driver_register(&olpc_ec_plat_driver); | 330 | return platform_driver_register(&olpc_ec_plat_driver); |
331 | } | 331 | } |
332 | 332 | ||
333 | module_init(olpc_ec_init_module); | 333 | arch_initcall(olpc_ec_init_module); |
334 | 334 | ||
335 | MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); | 335 | MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); |
336 | MODULE_LICENSE("GPL"); | 336 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 97bb05edcb5a..d6970f47ae72 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -53,7 +53,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); | |||
53 | #define HPWMI_ALS_QUERY 0x3 | 53 | #define HPWMI_ALS_QUERY 0x3 |
54 | #define HPWMI_HARDWARE_QUERY 0x4 | 54 | #define HPWMI_HARDWARE_QUERY 0x4 |
55 | #define HPWMI_WIRELESS_QUERY 0x5 | 55 | #define HPWMI_WIRELESS_QUERY 0x5 |
56 | #define HPWMI_BIOS_QUERY 0x9 | ||
57 | #define HPWMI_HOTKEY_QUERY 0xc | 56 | #define HPWMI_HOTKEY_QUERY 0xc |
58 | #define HPWMI_WIRELESS2_QUERY 0x1b | 57 | #define HPWMI_WIRELESS2_QUERY 0x1b |
59 | #define HPWMI_POSTCODEERROR_QUERY 0x2a | 58 | #define HPWMI_POSTCODEERROR_QUERY 0x2a |
@@ -293,19 +292,6 @@ static int hp_wmi_tablet_state(void) | |||
293 | return (state & 0x4) ? 1 : 0; | 292 | return (state & 0x4) ? 1 : 0; |
294 | } | 293 | } |
295 | 294 | ||
296 | static int hp_wmi_enable_hotkeys(void) | ||
297 | { | ||
298 | int ret; | ||
299 | int query = 0x6e; | ||
300 | |||
301 | ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query), | ||
302 | 0); | ||
303 | |||
304 | if (ret) | ||
305 | return -EINVAL; | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | static int hp_wmi_set_block(void *data, bool blocked) | 295 | static int hp_wmi_set_block(void *data, bool blocked) |
310 | { | 296 | { |
311 | enum hp_wmi_radio r = (enum hp_wmi_radio) data; | 297 | enum hp_wmi_radio r = (enum hp_wmi_radio) data; |
@@ -1009,8 +995,6 @@ static int __init hp_wmi_init(void) | |||
1009 | err = hp_wmi_input_setup(); | 995 | err = hp_wmi_input_setup(); |
1010 | if (err) | 996 | if (err) |
1011 | return err; | 997 | return err; |
1012 | |||
1013 | hp_wmi_enable_hotkeys(); | ||
1014 | } | 998 | } |
1015 | 999 | ||
1016 | if (bios_capable) { | 1000 | if (bios_capable) { |
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 2ac045f27f10..3a1b6bf326a8 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
@@ -2440,7 +2440,10 @@ static ssize_t sony_nc_gfx_switch_status_show(struct device *dev, | |||
2440 | if (pos < 0) | 2440 | if (pos < 0) |
2441 | return pos; | 2441 | return pos; |
2442 | 2442 | ||
2443 | return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina"); | 2443 | return snprintf(buffer, PAGE_SIZE, "%s\n", |
2444 | pos == SPEED ? "speed" : | ||
2445 | pos == STAMINA ? "stamina" : | ||
2446 | pos == AUTO ? "auto" : "unknown"); | ||
2444 | } | 2447 | } |
2445 | 2448 | ||
2446 | static int sony_nc_gfx_switch_setup(struct platform_device *pd, | 2449 | static int sony_nc_gfx_switch_setup(struct platform_device *pd, |
@@ -4320,7 +4323,8 @@ static int sony_pic_add(struct acpi_device *device) | |||
4320 | goto err_free_resources; | 4323 | goto err_free_resources; |
4321 | } | 4324 | } |
4322 | 4325 | ||
4323 | if (sonypi_compat_init()) | 4326 | result = sonypi_compat_init(); |
4327 | if (result) | ||
4324 | goto err_remove_input; | 4328 | goto err_remove_input; |
4325 | 4329 | ||
4326 | /* request io port */ | 4330 | /* request io port */ |
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index 767fee2ab340..26019531db15 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/delay.h> | ||
26 | #include <linux/rtc.h> | 27 | #include <linux/rtc.h> |
27 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
28 | #include <linux/of_device.h> | 29 | #include <linux/of_device.h> |
@@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev) | |||
119 | } | 120 | } |
120 | #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ | 121 | #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ |
121 | 122 | ||
122 | static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) | 123 | static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) |
123 | { | 124 | { |
125 | int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */ | ||
124 | /* | 126 | /* |
125 | * The datasheet doesn't say which way round the | 127 | * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010 |
126 | * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0, | 128 | * states: |
127 | * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS | 129 | * | The order in which registers are updated is |
130 | * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds. | ||
131 | * | (This list is in bitfield order, from LSB to MSB, as they would | ||
132 | * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT | ||
133 | * | register. For example, the Seconds register corresponds to | ||
134 | * | STALE_REGS or NEW_REGS containing 0x80.) | ||
128 | */ | 135 | */ |
129 | while (readl(rtc_data->io + STMP3XXX_RTC_STAT) & | 136 | do { |
130 | (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) | 137 | if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) & |
131 | cpu_relax(); | 138 | (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))) |
139 | return 0; | ||
140 | udelay(1); | ||
141 | } while (--timeout > 0); | ||
142 | return (readl(rtc_data->io + STMP3XXX_RTC_STAT) & | ||
143 | (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0; | ||
132 | } | 144 | } |
133 | 145 | ||
134 | /* Time read/write */ | 146 | /* Time read/write */ |
135 | static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) | 147 | static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) |
136 | { | 148 | { |
149 | int ret; | ||
137 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); | 150 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); |
138 | 151 | ||
139 | stmp3xxx_wait_time(rtc_data); | 152 | ret = stmp3xxx_wait_time(rtc_data); |
153 | if (ret) | ||
154 | return ret; | ||
155 | |||
140 | rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); | 156 | rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); |
141 | return 0; | 157 | return 0; |
142 | } | 158 | } |
@@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t) | |||
146 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); | 162 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); |
147 | 163 | ||
148 | writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); | 164 | writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); |
149 | stmp3xxx_wait_time(rtc_data); | 165 | return stmp3xxx_wait_time(rtc_data); |
150 | return 0; | ||
151 | } | 166 | } |
152 | 167 | ||
153 | /* interrupt(s) handler */ | 168 | /* interrupt(s) handler */ |
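Editor's note: stmp3xxx_wait_time() previously spun on the STALE_REGS bit with no way out; it now polls with a bounded budget (5000 x udelay(1), comfortably above the roughly 3 ms the i.MX28 manual allows) and returns -ETIME so gettime/set_mmss can report failure instead of hanging. A generic version of that bounded poll, with illustrative names:

        #include <linux/delay.h>
        #include <linux/errno.h>
        #include <linux/io.h>

        /* Poll a status register until a bit clears or a time budget runs out
         * (sketch; 'addr' and 'busy_mask' are illustrative). */
        static int wait_not_busy(void __iomem *addr, u32 busy_mask)
        {
                int timeout = 5000;     /* x 1us: well above the documented 3ms */

                do {
                        if (!(readl(addr) & busy_mask))
                                return 0;
                        udelay(1);
                } while (--timeout > 0);

                return (readl(addr) & busy_mask) ? -ETIME : 0;
        }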
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 1d4c8fe72752..c82fe65c4128 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) | |||
102 | 102 | ||
103 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) | 103 | if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) |
104 | zfcp_erp_action_dismiss(&port->erp_action); | 104 | zfcp_erp_action_dismiss(&port->erp_action); |
105 | else | 105 | else { |
106 | shost_for_each_device(sdev, port->adapter->scsi_host) | 106 | spin_lock(port->adapter->scsi_host->host_lock); |
107 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
107 | if (sdev_to_zfcp(sdev)->port == port) | 108 | if (sdev_to_zfcp(sdev)->port == port) |
108 | zfcp_erp_action_dismiss_lun(sdev); | 109 | zfcp_erp_action_dismiss_lun(sdev); |
110 | spin_unlock(port->adapter->scsi_host->host_lock); | ||
111 | } | ||
109 | } | 112 | } |
110 | 113 | ||
111 | static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) | 114 | static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) |
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, | |||
592 | { | 595 | { |
593 | struct scsi_device *sdev; | 596 | struct scsi_device *sdev; |
594 | 597 | ||
595 | shost_for_each_device(sdev, port->adapter->scsi_host) | 598 | spin_lock(port->adapter->scsi_host->host_lock); |
599 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
596 | if (sdev_to_zfcp(sdev)->port == port) | 600 | if (sdev_to_zfcp(sdev)->port == port) |
597 | _zfcp_erp_lun_reopen(sdev, clear, id, 0); | 601 | _zfcp_erp_lun_reopen(sdev, clear, id, 0); |
602 | spin_unlock(port->adapter->scsi_host->host_lock); | ||
598 | } | 603 | } |
599 | 604 | ||
600 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) | 605 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) |
@@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) | |||
1434 | atomic_set_mask(common_mask, &port->status); | 1439 | atomic_set_mask(common_mask, &port->status); |
1435 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 1440 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
1436 | 1441 | ||
1437 | shost_for_each_device(sdev, adapter->scsi_host) | 1442 | spin_lock_irqsave(adapter->scsi_host->host_lock, flags); |
1443 | __shost_for_each_device(sdev, adapter->scsi_host) | ||
1438 | atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); | 1444 | atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); |
1445 | spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); | ||
1439 | } | 1446 | } |
1440 | 1447 | ||
1441 | /** | 1448 | /** |
@@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) | |||
1469 | } | 1476 | } |
1470 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 1477 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
1471 | 1478 | ||
1472 | shost_for_each_device(sdev, adapter->scsi_host) { | 1479 | spin_lock_irqsave(adapter->scsi_host->host_lock, flags); |
1480 | __shost_for_each_device(sdev, adapter->scsi_host) { | ||
1473 | atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); | 1481 | atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); |
1474 | if (clear_counter) | 1482 | if (clear_counter) |
1475 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); | 1483 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); |
1476 | } | 1484 | } |
1485 | spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); | ||
1477 | } | 1486 | } |
1478 | 1487 | ||
1479 | /** | 1488 | /** |
@@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) | |||
1487 | { | 1496 | { |
1488 | struct scsi_device *sdev; | 1497 | struct scsi_device *sdev; |
1489 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; | 1498 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; |
1499 | unsigned long flags; | ||
1490 | 1500 | ||
1491 | atomic_set_mask(mask, &port->status); | 1501 | atomic_set_mask(mask, &port->status); |
1492 | 1502 | ||
1493 | if (!common_mask) | 1503 | if (!common_mask) |
1494 | return; | 1504 | return; |
1495 | 1505 | ||
1496 | shost_for_each_device(sdev, port->adapter->scsi_host) | 1506 | spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); |
1507 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
1497 | if (sdev_to_zfcp(sdev)->port == port) | 1508 | if (sdev_to_zfcp(sdev)->port == port) |
1498 | atomic_set_mask(common_mask, | 1509 | atomic_set_mask(common_mask, |
1499 | &sdev_to_zfcp(sdev)->status); | 1510 | &sdev_to_zfcp(sdev)->status); |
1511 | spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); | ||
1500 | } | 1512 | } |
1501 | 1513 | ||
1502 | /** | 1514 | /** |
@@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) | |||
1511 | struct scsi_device *sdev; | 1523 | struct scsi_device *sdev; |
1512 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; | 1524 | u32 common_mask = mask & ZFCP_COMMON_FLAGS; |
1513 | u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; | 1525 | u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; |
1526 | unsigned long flags; | ||
1514 | 1527 | ||
1515 | atomic_clear_mask(mask, &port->status); | 1528 | atomic_clear_mask(mask, &port->status); |
1516 | 1529 | ||
@@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) | |||
1520 | if (clear_counter) | 1533 | if (clear_counter) |
1521 | atomic_set(&port->erp_counter, 0); | 1534 | atomic_set(&port->erp_counter, 0); |
1522 | 1535 | ||
1523 | shost_for_each_device(sdev, port->adapter->scsi_host) | 1536 | spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); |
1537 | __shost_for_each_device(sdev, port->adapter->scsi_host) | ||
1524 | if (sdev_to_zfcp(sdev)->port == port) { | 1538 | if (sdev_to_zfcp(sdev)->port == port) { |
1525 | atomic_clear_mask(common_mask, | 1539 | atomic_clear_mask(common_mask, |
1526 | &sdev_to_zfcp(sdev)->status); | 1540 | &sdev_to_zfcp(sdev)->status); |
1527 | if (clear_counter) | 1541 | if (clear_counter) |
1528 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); | 1542 | atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); |
1529 | } | 1543 | } |
1544 | spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); | ||
1530 | } | 1545 | } |
1531 | 1546 | ||
1532 | /** | 1547 | /** |
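Editor's note: each converted loop in zfcp_erp.c swaps shost_for_each_device() for __shost_for_each_device() under the Scsi_Host lock; the usual motivation for this conversion is that the former pins every device via scsi_device_get()/scsi_device_put(), which may sleep, while these ERP paths run in atomic context. The lock-plus-reference-free-walk idiom in a self-contained sketch:

        #include <linux/spinlock.h>
        #include <scsi/scsi_device.h>
        #include <scsi/scsi_host.h>

        /* Walk a host's scsi_device list without sleeping: hold host_lock and
         * use the reference-free iterator (sketch). */
        static int count_devices(struct Scsi_Host *shost)
        {
                struct scsi_device *sdev;
                unsigned long flags;
                int count = 0;

                spin_lock_irqsave(shost->host_lock, flags);
                __shost_for_each_device(sdev, shost)
                        count++;        /* body must not sleep under host_lock */
                spin_unlock_irqrestore(shost->host_lock, flags);

                return count;
        }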
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 665e3cfaaf85..de0598eaacd2 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, | |||
224 | 224 | ||
225 | static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) | 225 | static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) |
226 | { | 226 | { |
227 | spin_lock_irq(&qdio->req_q_lock); | ||
228 | if (atomic_read(&qdio->req_q_free) || | 227 | if (atomic_read(&qdio->req_q_free) || |
229 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 228 | !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
230 | return 1; | 229 | return 1; |
231 | spin_unlock_irq(&qdio->req_q_lock); | ||
232 | return 0; | 230 | return 0; |
233 | } | 231 | } |
234 | 232 | ||
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
246 | { | 244 | { |
247 | long ret; | 245 | long ret; |
248 | 246 | ||
249 | spin_unlock_irq(&qdio->req_q_lock); | 247 | ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq, |
250 | ret = wait_event_interruptible_timeout(qdio->req_q_wq, | 248 | zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ); |
251 | zfcp_qdio_sbal_check(qdio), 5 * HZ); | ||
252 | 249 | ||
253 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) | 250 | if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) |
254 | return -EIO; | 251 | return -EIO; |
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
262 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); | 259 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); |
263 | } | 260 | } |
264 | 261 | ||
265 | spin_lock_irq(&qdio->req_q_lock); | ||
266 | return -EIO; | 262 | return -EIO; |
267 | } | 263 | } |
268 | 264 | ||
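Editor's note: the old zfcp_qdio_sbal_get() dropped req_q_lock around a plain wait_event_interruptible_timeout() and relied on zfcp_qdio_sbal_check() re-taking it, leaving an unlocked window. wait_event_interruptible_lock_irq_timeout() instead takes the lock as a parameter: the condition is evaluated and the macro returns with the lock held, and it is released only while actually sleeping. A minimal usage sketch around a made-up queue structure:

        #include <linux/errno.h>
        #include <linux/jiffies.h>
        #include <linux/spinlock.h>
        #include <linux/wait.h>

        struct example_queue {
                spinlock_t lock;
                wait_queue_head_t wq;
                int free;
        };

        /* Sketch: sleep for a free slot while keeping the check under 'lock'.
         * The lock must be held when calling; it is held again on return. */
        static int wait_for_slot(struct example_queue *q)
        {
                long ret;

                spin_lock_irq(&q->lock);
                ret = wait_event_interruptible_lock_irq_timeout(q->wq,
                                q->free > 0, q->lock, 5 * HZ);
                spin_unlock_irq(&q->lock);

                /* >0: condition met; 0: timed out; <0: interrupted by signal */
                return ret > 0 ? 0 : -EIO;
        }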
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 3f01bbf0609f..890639274bcf 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ | |||
27 | static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ | 27 | static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ |
28 | zfcp_sysfs_##_feat##_##_name##_show, NULL); | 28 | zfcp_sysfs_##_feat##_##_name##_show, NULL); |
29 | 29 | ||
30 | #define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \ | ||
31 | static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \ | ||
32 | struct device_attribute *at,\ | ||
33 | char *buf) \ | ||
34 | { \ | ||
35 | return sprintf(buf, _format, _value); \ | ||
36 | } \ | ||
37 | static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ | ||
38 | zfcp_sysfs_##_feat##_##_name##_show, NULL); | ||
39 | |||
30 | #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ | 40 | #define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ |
31 | static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ | 41 | static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ |
32 | struct device_attribute *at,\ | 42 | struct device_attribute *at,\ |
@@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", | |||
75 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", | 85 | ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", |
76 | (zfcp_unit_sdev_status(unit) & | 86 | (zfcp_unit_sdev_status(unit) & |
77 | ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); | 87 | ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); |
88 | ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0); | ||
89 | ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0); | ||
78 | 90 | ||
79 | static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, | 91 | static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, |
80 | struct device_attribute *attr, | 92 | struct device_attribute *attr, |
@@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = { | |||
347 | &dev_attr_unit_in_recovery.attr, | 359 | &dev_attr_unit_in_recovery.attr, |
348 | &dev_attr_unit_status.attr, | 360 | &dev_attr_unit_status.attr, |
349 | &dev_attr_unit_access_denied.attr, | 361 | &dev_attr_unit_access_denied.attr, |
362 | &dev_attr_unit_access_shared.attr, | ||
363 | &dev_attr_unit_access_readonly.attr, | ||
350 | NULL | 364 | NULL |
351 | }; | 365 | }; |
352 | static struct attribute_group zfcp_unit_attr_group = { | 366 | static struct attribute_group zfcp_unit_attr_group = { |
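Editor's note: ZFCP_DEFINE_ATTR_CONST builds a read-only sysfs attribute whose show routine prints a fixed value, which is how the new unit access_shared and access_readonly files report 0 without any per-unit state. Expanding it by hand for one of the two users gives approximately the following (ZFCP_DEV_ATTR, defined elsewhere in this file, supplies the struct device_attribute that the attribute table references as dev_attr_unit_access_shared):

        /* Rough expansion of ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0); */
        static ssize_t zfcp_sysfs_unit_access_shared_show(struct device *dev,
                                                          struct device_attribute *at,
                                                          char *buf)
        {
                return sprintf(buf, "%d\n", 0);
        }
        static ZFCP_DEV_ATTR(unit, access_shared, S_IRUGO,
                             zfcp_sysfs_unit_access_shared_show, NULL);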
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 48b2918e0d65..92ff027746f2 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -1353,7 +1353,6 @@ config SCSI_LPFC | |||
1353 | tristate "Emulex LightPulse Fibre Channel Support" | 1353 | tristate "Emulex LightPulse Fibre Channel Support" |
1354 | depends on PCI && SCSI | 1354 | depends on PCI && SCSI |
1355 | select SCSI_FC_ATTRS | 1355 | select SCSI_FC_ATTRS |
1356 | select GENERIC_CSUM | ||
1357 | select CRC_T10DIF | 1356 | select CRC_T10DIF |
1358 | help | 1357 | help |
1359 | This lpfc driver supports the Emulex LightPulse | 1358 | This lpfc driver supports the Emulex LightPulse |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 89cbbabaff44..0170d4c4a8a3 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -70,14 +70,14 @@ config SPI_ATH79 | |||
70 | 70 | ||
71 | config SPI_ATMEL | 71 | config SPI_ATMEL |
72 | tristate "Atmel SPI Controller" | 72 | tristate "Atmel SPI Controller" |
73 | depends on (ARCH_AT91 || AVR32) | 73 | depends on (ARCH_AT91 || AVR32 || COMPILE_TEST) |
74 | help | 74 | help |
75 | This selects a driver for the Atmel SPI Controller, present on | 75 | This selects a driver for the Atmel SPI Controller, present on |
76 | many AT32 (AVR32) and AT91 (ARM) chips. | 76 | many AT32 (AVR32) and AT91 (ARM) chips. |
77 | 77 | ||
78 | config SPI_BCM2835 | 78 | config SPI_BCM2835 |
79 | tristate "BCM2835 SPI controller" | 79 | tristate "BCM2835 SPI controller" |
80 | depends on ARCH_BCM2835 | 80 | depends on ARCH_BCM2835 || COMPILE_TEST |
81 | help | 81 | help |
82 | This selects a driver for the Broadcom BCM2835 SPI master. | 82 | This selects a driver for the Broadcom BCM2835 SPI master. |
83 | 83 | ||
@@ -88,10 +88,17 @@ config SPI_BCM2835 | |||
88 | 88 | ||
89 | config SPI_BFIN5XX | 89 | config SPI_BFIN5XX |
90 | tristate "SPI controller driver for ADI Blackfin5xx" | 90 | tristate "SPI controller driver for ADI Blackfin5xx" |
91 | depends on BLACKFIN | 91 | depends on BLACKFIN && !BF60x |
92 | help | 92 | help |
93 | This is the SPI controller master driver for Blackfin 5xx processor. | 93 | This is the SPI controller master driver for Blackfin 5xx processor. |
94 | 94 | ||
95 | config SPI_BFIN_V3 | ||
96 | tristate "SPI controller v3 for Blackfin" | ||
97 | depends on BF60x | ||
98 | help | ||
99 | This is the SPI controller v3 master driver | ||
100 | found on Blackfin 60x processor. | ||
101 | |||
95 | config SPI_BFIN_SPORT | 102 | config SPI_BFIN_SPORT |
96 | tristate "SPI bus via Blackfin SPORT" | 103 | tristate "SPI bus via Blackfin SPORT" |
97 | depends on BLACKFIN | 104 | depends on BLACKFIN |
@@ -151,15 +158,22 @@ config SPI_COLDFIRE_QSPI | |||
151 | 158 | ||
152 | config SPI_DAVINCI | 159 | config SPI_DAVINCI |
153 | tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" | 160 | tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" |
154 | depends on ARCH_DAVINCI | 161 | depends on ARCH_DAVINCI || ARCH_KEYSTONE |
155 | select SPI_BITBANG | 162 | select SPI_BITBANG |
156 | select TI_EDMA | 163 | select TI_EDMA |
157 | help | 164 | help |
158 | SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. | 165 | SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. |
159 | 166 | ||
167 | config SPI_EFM32 | ||
168 | tristate "EFM32 SPI controller" | ||
169 | depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) | ||
170 | select SPI_BITBANG | ||
171 | help | ||
172 | Driver for the spi controller found on Energy Micro's EFM32 SoCs. | ||
173 | |||
160 | config SPI_EP93XX | 174 | config SPI_EP93XX |
161 | tristate "Cirrus Logic EP93xx SPI controller" | 175 | tristate "Cirrus Logic EP93xx SPI controller" |
162 | depends on ARCH_EP93XX | 176 | depends on ARCH_EP93XX || COMPILE_TEST |
163 | help | 177 | help |
164 | This enables using the Cirrus EP93xx SPI controller in master | 178 | This enables using the Cirrus EP93xx SPI controller in master |
165 | mode. | 179 | mode. |
@@ -191,7 +205,7 @@ config SPI_GPIO | |||
191 | 205 | ||
192 | config SPI_IMX | 206 | config SPI_IMX |
193 | tristate "Freescale i.MX SPI controllers" | 207 | tristate "Freescale i.MX SPI controllers" |
194 | depends on ARCH_MXC | 208 | depends on ARCH_MXC || COMPILE_TEST |
195 | select SPI_BITBANG | 209 | select SPI_BITBANG |
196 | default m if IMX_HAVE_PLATFORM_SPI_IMX | 210 | default m if IMX_HAVE_PLATFORM_SPI_IMX |
197 | help | 211 | help |
@@ -248,6 +262,13 @@ config SPI_FSL_SPI | |||
248 | This also enables using the Aeroflex Gaisler GRLIB SPI controller in | 262 | This also enables using the Aeroflex Gaisler GRLIB SPI controller in |
249 | master mode. | 263 | master mode. |
250 | 264 | ||
265 | config SPI_FSL_DSPI | ||
266 | tristate "Freescale DSPI controller" | ||
267 | select SPI_BITBANG | ||
268 | help | ||
269 | This enables support for the Freescale DSPI controller in master | ||
270 | mode. VF610 platform uses the controller. | ||
271 | |||
251 | config SPI_FSL_ESPI | 272 | config SPI_FSL_ESPI |
252 | bool "Freescale eSPI controller" | 273 | bool "Freescale eSPI controller" |
253 | depends on FSL_SOC | 274 | depends on FSL_SOC |
@@ -280,20 +301,28 @@ config SPI_OMAP_UWIRE | |||
280 | 301 | ||
281 | config SPI_OMAP24XX | 302 | config SPI_OMAP24XX |
282 | tristate "McSPI driver for OMAP" | 303 | tristate "McSPI driver for OMAP" |
283 | depends on ARCH_OMAP2PLUS | 304 | depends on ARCH_OMAP2PLUS || COMPILE_TEST |
284 | help | 305 | help |
285 | SPI master controller for OMAP24XX and later Multichannel SPI | 306 | SPI master controller for OMAP24XX and later Multichannel SPI |
286 | (McSPI) modules. | 307 | (McSPI) modules. |
287 | 308 | ||
309 | config SPI_TI_QSPI | ||
310 | tristate "DRA7xxx QSPI controller support" | ||
311 | depends on ARCH_OMAP2PLUS || COMPILE_TEST | ||
312 | help | ||
313 | QSPI master controller for DRA7xxx used for flash devices. | ||
314 | This device supports single, dual and quad read support, while | ||
315 | it only supports single write mode. | ||
316 | |||
288 | config SPI_OMAP_100K | 317 | config SPI_OMAP_100K |
289 | tristate "OMAP SPI 100K" | 318 | tristate "OMAP SPI 100K" |
290 | depends on ARCH_OMAP850 || ARCH_OMAP730 | 319 | depends on ARCH_OMAP850 || ARCH_OMAP730 || COMPILE_TEST |
291 | help | 320 | help |
292 | OMAP SPI 100K master controller for omap7xx boards. | 321 | OMAP SPI 100K master controller for omap7xx boards. |
293 | 322 | ||
294 | config SPI_ORION | 323 | config SPI_ORION |
295 | tristate "Orion SPI master" | 324 | tristate "Orion SPI master" |
296 | depends on PLAT_ORION | 325 | depends on PLAT_ORION || COMPILE_TEST |
297 | help | 326 | help |
298 | This enables using the SPI master controller on the Orion chips. | 327 | This enables using the SPI master controller on the Orion chips. |
299 | 328 | ||
@@ -341,7 +370,7 @@ config SPI_PXA2XX_PCI | |||
341 | 370 | ||
342 | config SPI_RSPI | 371 | config SPI_RSPI |
343 | tristate "Renesas RSPI controller" | 372 | tristate "Renesas RSPI controller" |
344 | depends on SUPERH | 373 | depends on SUPERH && SH_DMAE_BASE |
345 | help | 374 | help |
346 | SPI driver for Renesas RSPI blocks. | 375 | SPI driver for Renesas RSPI blocks. |
347 | 376 | ||
@@ -385,7 +414,7 @@ config SPI_SH_MSIOF | |||
385 | 414 | ||
386 | config SPI_SH | 415 | config SPI_SH |
387 | tristate "SuperH SPI controller" | 416 | tristate "SuperH SPI controller" |
388 | depends on SUPERH | 417 | depends on SUPERH || COMPILE_TEST |
389 | help | 418 | help |
390 | SPI driver for SuperH SPI blocks. | 419 | SPI driver for SuperH SPI blocks. |
391 | 420 | ||
@@ -398,13 +427,13 @@ config SPI_SH_SCI | |||
398 | 427 | ||
399 | config SPI_SH_HSPI | 428 | config SPI_SH_HSPI |
400 | tristate "SuperH HSPI controller" | 429 | tristate "SuperH HSPI controller" |
401 | depends on ARCH_SHMOBILE | 430 | depends on ARCH_SHMOBILE || COMPILE_TEST |
402 | help | 431 | help |
403 | SPI driver for SuperH HSPI blocks. | 432 | SPI driver for SuperH HSPI blocks. |
404 | 433 | ||
405 | config SPI_SIRF | 434 | config SPI_SIRF |
406 | tristate "CSR SiRFprimaII SPI controller" | 435 | tristate "CSR SiRFprimaII SPI controller" |
407 | depends on ARCH_SIRF | 436 | depends on SIRF_DMA |
408 | select SPI_BITBANG | 437 | select SPI_BITBANG |
409 | help | 438 | help |
410 | SPI driver for CSR SiRFprimaII SoCs | 439 | SPI driver for CSR SiRFprimaII SoCs |
@@ -418,7 +447,7 @@ config SPI_MXS | |||
418 | 447 | ||
419 | config SPI_TEGRA114 | 448 | config SPI_TEGRA114 |
420 | tristate "NVIDIA Tegra114 SPI Controller" | 449 | tristate "NVIDIA Tegra114 SPI Controller" |
421 | depends on ARCH_TEGRA && TEGRA20_APB_DMA | 450 | depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST |
422 | help | 451 | help |
423 | SPI driver for NVIDIA Tegra114 SPI Controller interface. This controller | 452 | SPI driver for NVIDIA Tegra114 SPI Controller interface. This controller |
424 | is different than the older SoCs SPI controller and also register interface | 453 | is different than the older SoCs SPI controller and also register interface |
@@ -426,7 +455,7 @@ config SPI_TEGRA114 | |||
426 | 455 | ||
427 | config SPI_TEGRA20_SFLASH | 456 | config SPI_TEGRA20_SFLASH |
428 | tristate "Nvidia Tegra20 Serial flash Controller" | 457 | tristate "Nvidia Tegra20 Serial flash Controller" |
429 | depends on ARCH_TEGRA | 458 | depends on ARCH_TEGRA || COMPILE_TEST |
430 | help | 459 | help |
431 | SPI driver for Nvidia Tegra20 Serial flash Controller interface. | 460 | SPI driver for Nvidia Tegra20 Serial flash Controller interface. |
432 | The main usecase of this controller is to use spi flash as boot | 461 | The main usecase of this controller is to use spi flash as boot |
@@ -434,7 +463,7 @@ config SPI_TEGRA20_SFLASH | |||
434 | 463 | ||
435 | config SPI_TEGRA20_SLINK | 464 | config SPI_TEGRA20_SLINK |
436 | tristate "Nvidia Tegra20/Tegra30 SLINK Controller" | 465 | tristate "Nvidia Tegra20/Tegra30 SLINK Controller" |
437 | depends on ARCH_TEGRA && TEGRA20_APB_DMA | 466 | depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST |
438 | help | 467 | help |
439 | SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface. | 468 | SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface. |
440 | 469 | ||
@@ -457,7 +486,7 @@ config SPI_TOPCLIFF_PCH | |||
457 | 486 | ||
458 | config SPI_TXX9 | 487 | config SPI_TXX9 |
459 | tristate "Toshiba TXx9 SPI controller" | 488 | tristate "Toshiba TXx9 SPI controller" |
460 | depends on GPIOLIB && CPU_TX49XX | 489 | depends on GPIOLIB && (CPU_TX49XX || COMPILE_TEST) |
461 | help | 490 | help |
462 | SPI driver for Toshiba TXx9 MIPS SoCs | 491 | SPI driver for Toshiba TXx9 MIPS SoCs |
463 | 492 | ||
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 33f9c09561e7..ab8d8644af0e 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
@@ -17,6 +17,7 @@ obj-$(CONFIG_SPI_AU1550) += spi-au1550.o | |||
17 | obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o | 17 | obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o |
18 | obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o | 18 | obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o |
19 | obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o | 19 | obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o |
20 | obj-$(CONFIG_SPI_BFIN_V3) += spi-bfin-v3.o | ||
20 | obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o | 21 | obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o |
21 | obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o | 22 | obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o |
22 | obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o | 23 | obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o |
@@ -27,9 +28,11 @@ obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o | |||
27 | obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o | 28 | obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o |
28 | obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o | 29 | obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o |
29 | spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o | 30 | spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o |
31 | obj-$(CONFIG_SPI_EFM32) += spi-efm32.o | ||
30 | obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o | 32 | obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o |
31 | obj-$(CONFIG_SPI_FALCON) += spi-falcon.o | 33 | obj-$(CONFIG_SPI_FALCON) += spi-falcon.o |
32 | obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o | 34 | obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o |
35 | obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o | ||
33 | obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o | 36 | obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o |
34 | obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o | 37 | obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o |
35 | obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o | 38 | obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o |
@@ -46,6 +49,7 @@ obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o | |||
46 | obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o | 49 | obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o |
47 | obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o | 50 | obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o |
48 | obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o | 51 | obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o |
52 | obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o | ||
49 | obj-$(CONFIG_SPI_ORION) += spi-orion.o | 53 | obj-$(CONFIG_SPI_ORION) += spi-orion.o |
50 | obj-$(CONFIG_SPI_PL022) += spi-pl022.o | 54 | obj-$(CONFIG_SPI_PL022) += spi-pl022.o |
51 | obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o | 55 | obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o |
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c index 81b9adb6e766..f38855f76536 100644 --- a/drivers/spi/spi-altera.c +++ b/drivers/spi/spi-altera.c | |||
@@ -103,16 +103,6 @@ static void altera_spi_chipsel(struct spi_device *spi, int value) | |||
103 | } | 103 | } |
104 | } | 104 | } |
105 | 105 | ||
106 | static int altera_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) | ||
107 | { | ||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | static int altera_spi_setup(struct spi_device *spi) | ||
112 | { | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static inline unsigned int hw_txbyte(struct altera_spi *hw, int count) | 106 | static inline unsigned int hw_txbyte(struct altera_spi *hw, int count) |
117 | { | 107 | { |
118 | if (hw->tx) { | 108 | if (hw->tx) { |
@@ -134,7 +124,7 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t) | |||
134 | hw->tx = t->tx_buf; | 124 | hw->tx = t->tx_buf; |
135 | hw->rx = t->rx_buf; | 125 | hw->rx = t->rx_buf; |
136 | hw->count = 0; | 126 | hw->count = 0; |
137 | hw->bytes_per_word = t->bits_per_word / 8; | 127 | hw->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8); |
138 | hw->len = t->len / hw->bytes_per_word; | 128 | hw->len = t->len / hw->bytes_per_word; |
139 | 129 | ||
140 | if (hw->irq >= 0) { | 130 | if (hw->irq >= 0) { |
@@ -150,12 +140,12 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t) | |||
150 | hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK; | 140 | hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK; |
151 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); | 141 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); |
152 | } else { | 142 | } else { |
153 | /* send the first byte */ | 143 | while (hw->count < hw->len) { |
154 | writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA); | ||
155 | |||
156 | while (1) { | ||
157 | unsigned int rxd; | 144 | unsigned int rxd; |
158 | 145 | ||
146 | writel(hw_txbyte(hw, hw->count), | ||
147 | hw->base + ALTERA_SPI_TXDATA); | ||
148 | |||
159 | while (!(readl(hw->base + ALTERA_SPI_STATUS) & | 149 | while (!(readl(hw->base + ALTERA_SPI_STATUS) & |
160 | ALTERA_SPI_STATUS_RRDY_MSK)) | 150 | ALTERA_SPI_STATUS_RRDY_MSK)) |
161 | cpu_relax(); | 151 | cpu_relax(); |
@@ -174,14 +164,7 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t) | |||
174 | } | 164 | } |
175 | 165 | ||
176 | hw->count++; | 166 | hw->count++; |
177 | |||
178 | if (hw->count < hw->len) | ||
179 | writel(hw_txbyte(hw, hw->count), | ||
180 | hw->base + ALTERA_SPI_TXDATA); | ||
181 | else | ||
182 | break; | ||
183 | } | 167 | } |
184 | |||
185 | } | 168 | } |
186 | 169 | ||
187 | return hw->count * hw->bytes_per_word; | 170 | return hw->count * hw->bytes_per_word; |
@@ -217,7 +200,7 @@ static irqreturn_t altera_spi_irq(int irq, void *dev) | |||
217 | 200 | ||
218 | static int altera_spi_probe(struct platform_device *pdev) | 201 | static int altera_spi_probe(struct platform_device *pdev) |
219 | { | 202 | { |
220 | struct altera_spi_platform_data *platp = pdev->dev.platform_data; | 203 | struct altera_spi_platform_data *platp = dev_get_platdata(&pdev->dev); |
221 | struct altera_spi *hw; | 204 | struct altera_spi *hw; |
222 | struct spi_master *master; | 205 | struct spi_master *master; |
223 | struct resource *res; | 206 | struct resource *res; |
@@ -231,7 +214,6 @@ static int altera_spi_probe(struct platform_device *pdev) | |||
231 | master->bus_num = pdev->id; | 214 | master->bus_num = pdev->id; |
232 | master->num_chipselect = 16; | 215 | master->num_chipselect = 16; |
233 | master->mode_bits = SPI_CS_HIGH; | 216 | master->mode_bits = SPI_CS_HIGH; |
234 | master->setup = altera_spi_setup; | ||
235 | 217 | ||
236 | hw = spi_master_get_devdata(master); | 218 | hw = spi_master_get_devdata(master); |
237 | platform_set_drvdata(pdev, hw); | 219 | platform_set_drvdata(pdev, hw); |
@@ -240,21 +222,16 @@ static int altera_spi_probe(struct platform_device *pdev) | |||
240 | hw->bitbang.master = spi_master_get(master); | 222 | hw->bitbang.master = spi_master_get(master); |
241 | if (!hw->bitbang.master) | 223 | if (!hw->bitbang.master) |
242 | return err; | 224 | return err; |
243 | hw->bitbang.setup_transfer = altera_spi_setupxfer; | ||
244 | hw->bitbang.chipselect = altera_spi_chipsel; | 225 | hw->bitbang.chipselect = altera_spi_chipsel; |
245 | hw->bitbang.txrx_bufs = altera_spi_txrx; | 226 | hw->bitbang.txrx_bufs = altera_spi_txrx; |
246 | 227 | ||
247 | /* find and map our resources */ | 228 | /* find and map our resources */ |
248 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 229 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
249 | if (!res) | 230 | hw->base = devm_ioremap_resource(&pdev->dev, res); |
250 | goto exit_busy; | 231 | if (IS_ERR(hw->base)) { |
251 | if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), | 232 | err = PTR_ERR(hw->base); |
252 | pdev->name)) | 233 | goto exit; |
253 | goto exit_busy; | 234 | } |
254 | hw->base = devm_ioremap_nocache(&pdev->dev, res->start, | ||
255 | resource_size(res)); | ||
256 | if (!hw->base) | ||
257 | goto exit_busy; | ||
258 | /* program defaults into the registers */ | 235 | /* program defaults into the registers */ |
259 | hw->imr = 0; /* disable spi interrupts */ | 236 | hw->imr = 0; /* disable spi interrupts */ |
260 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); | 237 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); |
@@ -281,9 +258,6 @@ static int altera_spi_probe(struct platform_device *pdev) | |||
281 | dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); | 258 | dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); |
282 | 259 | ||
283 | return 0; | 260 | return 0; |
284 | |||
285 | exit_busy: | ||
286 | err = -EBUSY; | ||
287 | exit: | 261 | exit: |
288 | spi_master_put(master); | 262 | spi_master_put(master); |
289 | return err; | 263 | return err; |
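A minimal sketch of the devm_ioremap_resource() idiom that the spi-altera conversion above adopts; the probe function and names here are illustrative only, not part of the patch:

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int example_spi_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);   /* covers a missing resource, a busy region or a failed map */

            return 0;   /* the mapping is released automatically when the device is unbound */
    }

The single helper replaces the request_mem_region()/ioremap_nocache() pair and the hand-rolled exit_busy error path removed in the hunk above.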
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index 0e06407a4670..37bad952ab38 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c | |||
@@ -221,7 +221,7 @@ static int ath79_spi_probe(struct platform_device *pdev) | |||
221 | sp = spi_master_get_devdata(master); | 221 | sp = spi_master_get_devdata(master); |
222 | platform_set_drvdata(pdev, sp); | 222 | platform_set_drvdata(pdev, sp); |
223 | 223 | ||
224 | pdata = pdev->dev.platform_data; | 224 | pdata = dev_get_platdata(&pdev->dev); |
225 | 225 | ||
226 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); | 226 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); |
227 | master->setup = ath79_spi_setup; | 227 | master->setup = ath79_spi_setup; |
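The many pdev->dev.platform_data to dev_get_platdata(&pdev->dev) conversions in this series are mechanical; the accessor is essentially the following inline from <linux/device.h>:

    static inline void *dev_get_platdata(const struct device *dev)
    {
            return dev->platform_data;
    }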
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index ea1ec009f44d..fd7cc566095a 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -360,12 +360,12 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) | |||
360 | gpio_set_value(asd->npcs_pin, !active); | 360 | gpio_set_value(asd->npcs_pin, !active); |
361 | } | 361 | } |
362 | 362 | ||
363 | static void atmel_spi_lock(struct atmel_spi *as) | 363 | static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock) |
364 | { | 364 | { |
365 | spin_lock_irqsave(&as->lock, as->flags); | 365 | spin_lock_irqsave(&as->lock, as->flags); |
366 | } | 366 | } |
367 | 367 | ||
368 | static void atmel_spi_unlock(struct atmel_spi *as) | 368 | static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock) |
369 | { | 369 | { |
370 | spin_unlock_irqrestore(&as->lock, as->flags); | 370 | spin_unlock_irqrestore(&as->lock, as->flags); |
371 | } | 371 | } |
@@ -629,9 +629,9 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master, | |||
629 | goto err_dma; | 629 | goto err_dma; |
630 | 630 | ||
631 | dev_dbg(master->dev.parent, | 631 | dev_dbg(master->dev.parent, |
632 | " start dma xfer %p: len %u tx %p/%08x rx %p/%08x\n", | 632 | " start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n", |
633 | xfer, xfer->len, xfer->tx_buf, xfer->tx_dma, | 633 | xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma, |
634 | xfer->rx_buf, xfer->rx_dma); | 634 | xfer->rx_buf, (unsigned long long)xfer->rx_dma); |
635 | 635 | ||
636 | /* Enable relevant interrupts */ | 636 | /* Enable relevant interrupts */ |
637 | spi_writel(as, IER, SPI_BIT(OVRES)); | 637 | spi_writel(as, IER, SPI_BIT(OVRES)); |
@@ -732,9 +732,10 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master, | |||
732 | spi_writel(as, TCR, len); | 732 | spi_writel(as, TCR, len); |
733 | 733 | ||
734 | dev_dbg(&msg->spi->dev, | 734 | dev_dbg(&msg->spi->dev, |
735 | " start xfer %p: len %u tx %p/%08x rx %p/%08x\n", | 735 | " start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n", |
736 | xfer, xfer->len, xfer->tx_buf, xfer->tx_dma, | 736 | xfer, xfer->len, xfer->tx_buf, |
737 | xfer->rx_buf, xfer->rx_dma); | 737 | (unsigned long long)xfer->tx_dma, xfer->rx_buf, |
738 | (unsigned long long)xfer->rx_dma); | ||
738 | } else { | 739 | } else { |
739 | xfer = as->next_transfer; | 740 | xfer = as->next_transfer; |
740 | remaining = as->next_remaining_bytes; | 741 | remaining = as->next_remaining_bytes; |
@@ -771,9 +772,10 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master, | |||
771 | spi_writel(as, TNCR, len); | 772 | spi_writel(as, TNCR, len); |
772 | 773 | ||
773 | dev_dbg(&msg->spi->dev, | 774 | dev_dbg(&msg->spi->dev, |
774 | " next xfer %p: len %u tx %p/%08x rx %p/%08x\n", | 775 | " next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n", |
775 | xfer, xfer->len, xfer->tx_buf, xfer->tx_dma, | 776 | xfer, xfer->len, xfer->tx_buf, |
776 | xfer->rx_buf, xfer->rx_dma); | 777 | (unsigned long long)xfer->tx_dma, xfer->rx_buf, |
778 | (unsigned long long)xfer->rx_dma); | ||
777 | ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES); | 779 | ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES); |
778 | } else { | 780 | } else { |
779 | spi_writel(as, RNCR, 0); | 781 | spi_writel(as, RNCR, 0); |
@@ -1579,7 +1581,9 @@ static int atmel_spi_probe(struct platform_device *pdev) | |||
1579 | goto out_unmap_regs; | 1581 | goto out_unmap_regs; |
1580 | 1582 | ||
1581 | /* Initialize the hardware */ | 1583 | /* Initialize the hardware */ |
1582 | clk_enable(clk); | 1584 | ret = clk_prepare_enable(clk); |
1585 | if (ret) | ||
1586 | goto out_unmap_regs; | ||
1583 | spi_writel(as, CR, SPI_BIT(SWRST)); | 1587 | spi_writel(as, CR, SPI_BIT(SWRST)); |
1584 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | 1588 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ |
1585 | if (as->caps.has_wdrbt) { | 1589 | if (as->caps.has_wdrbt) { |
@@ -1609,7 +1613,7 @@ out_free_dma: | |||
1609 | 1613 | ||
1610 | spi_writel(as, CR, SPI_BIT(SWRST)); | 1614 | spi_writel(as, CR, SPI_BIT(SWRST)); |
1611 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | 1615 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ |
1612 | clk_disable(clk); | 1616 | clk_disable_unprepare(clk); |
1613 | free_irq(irq, master); | 1617 | free_irq(irq, master); |
1614 | out_unmap_regs: | 1618 | out_unmap_regs: |
1615 | iounmap(as->regs); | 1619 | iounmap(as->regs); |
@@ -1661,7 +1665,7 @@ static int atmel_spi_remove(struct platform_device *pdev) | |||
1661 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, | 1665 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, |
1662 | as->buffer_dma); | 1666 | as->buffer_dma); |
1663 | 1667 | ||
1664 | clk_disable(as->clk); | 1668 | clk_disable_unprepare(as->clk); |
1665 | clk_put(as->clk); | 1669 | clk_put(as->clk); |
1666 | free_irq(as->irq, master); | 1670 | free_irq(as->irq, master); |
1667 | iounmap(as->regs); | 1671 | iounmap(as->regs); |
@@ -1678,7 +1682,7 @@ static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg) | |||
1678 | struct spi_master *master = platform_get_drvdata(pdev); | 1682 | struct spi_master *master = platform_get_drvdata(pdev); |
1679 | struct atmel_spi *as = spi_master_get_devdata(master); | 1683 | struct atmel_spi *as = spi_master_get_devdata(master); |
1680 | 1684 | ||
1681 | clk_disable(as->clk); | 1685 | clk_disable_unprepare(as->clk); |
1682 | return 0; | 1686 | return 0; |
1683 | } | 1687 | } |
1684 | 1688 | ||
@@ -1687,7 +1691,7 @@ static int atmel_spi_resume(struct platform_device *pdev) | |||
1687 | struct spi_master *master = platform_get_drvdata(pdev); | 1691 | struct spi_master *master = platform_get_drvdata(pdev); |
1688 | struct atmel_spi *as = spi_master_get_devdata(master); | 1692 | struct atmel_spi *as = spi_master_get_devdata(master); |
1689 | 1693 | ||
1690 | clk_enable(as->clk); | 1694 | return clk_prepare_enable(as->clk); |
1691 | return 0; | 1695 | return 0; |
1692 | } | 1696 | } |
1693 | 1697 | ||
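A minimal sketch of the clk_prepare_enable()/clk_disable_unprepare() pairing that the atmel hunks above switch to; the driver struct and function names are hypothetical:

    #include <linux/clk.h>

    struct example_spi {
            struct clk *clk;
    };

    static int example_spi_resume(struct example_spi *hw)
    {
            /* clk_prepare() + clk_enable() in one call; returns 0 or a negative errno */
            return clk_prepare_enable(hw->clk);
    }

    static void example_spi_suspend(struct example_spi *hw)
    {
            /* must balance clk_prepare_enable() */
            clk_disable_unprepare(hw->clk);
    }

Unlike a bare clk_enable(), the prepare step may sleep, so the combined helper belongs in process context such as probe, suspend and resume paths.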
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c index e1965553ab79..1d00d9b397dd 100644 --- a/drivers/spi/spi-au1550.c +++ b/drivers/spi/spi-au1550.c | |||
@@ -776,7 +776,7 @@ static int au1550_spi_probe(struct platform_device *pdev) | |||
776 | hw = spi_master_get_devdata(master); | 776 | hw = spi_master_get_devdata(master); |
777 | 777 | ||
778 | hw->master = spi_master_get(master); | 778 | hw->master = spi_master_get(master); |
779 | hw->pdata = pdev->dev.platform_data; | 779 | hw->pdata = dev_get_platdata(&pdev->dev); |
780 | hw->dev = &pdev->dev; | 780 | hw->dev = &pdev->dev; |
781 | 781 | ||
782 | if (hw->pdata == NULL) { | 782 | if (hw->pdata == NULL) { |
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index a4185e492321..52c81481c5c7 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c | |||
@@ -314,7 +314,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) | |||
314 | platform_set_drvdata(pdev, master); | 314 | platform_set_drvdata(pdev, master); |
315 | 315 | ||
316 | master->mode_bits = BCM2835_SPI_MODE_BITS; | 316 | master->mode_bits = BCM2835_SPI_MODE_BITS; |
317 | master->bits_per_word_mask = BIT(8 - 1); | 317 | master->bits_per_word_mask = SPI_BPW_MASK(8); |
318 | master->bus_num = -1; | 318 | master->bus_num = -1; |
319 | master->num_chipselect = 3; | 319 | master->num_chipselect = 3; |
320 | master->transfer_one_message = bcm2835_spi_transfer_one; | 320 | master->transfer_one_message = bcm2835_spi_transfer_one; |
@@ -325,12 +325,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev) | |||
325 | init_completion(&bs->done); | 325 | init_completion(&bs->done); |
326 | 326 | ||
327 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 327 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
328 | if (!res) { | ||
329 | dev_err(&pdev->dev, "could not get memory resource\n"); | ||
330 | err = -ENODEV; | ||
331 | goto out_master_put; | ||
332 | } | ||
333 | |||
334 | bs->regs = devm_ioremap_resource(&pdev->dev, res); | 328 | bs->regs = devm_ioremap_resource(&pdev->dev, res); |
335 | if (IS_ERR(bs->regs)) { | 329 | if (IS_ERR(bs->regs)) { |
336 | err = PTR_ERR(bs->regs); | 330 | err = PTR_ERR(bs->regs); |
@@ -383,7 +377,7 @@ out_master_put: | |||
383 | 377 | ||
384 | static int bcm2835_spi_remove(struct platform_device *pdev) | 378 | static int bcm2835_spi_remove(struct platform_device *pdev) |
385 | { | 379 | { |
386 | struct spi_master *master = platform_get_drvdata(pdev); | 380 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
387 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | 381 | struct bcm2835_spi *bs = spi_master_get_devdata(master); |
388 | 382 | ||
389 | free_irq(bs->irq, master); | 383 | free_irq(bs->irq, master); |
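The bcm2835 hunk above also replaces the open-coded BIT(8 - 1) with SPI_BPW_MASK(8) from <linux/spi/spi.h>, which expands to the same value. A short illustration of how a master advertises its supported word sizes (the sizes chosen here are only examples):

    /* discrete word sizes */
    master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32);

    /* or a contiguous range, as the spi-ath79 hunk above uses */
    master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);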
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 9fd7a39b8029..536b0e363826 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c | |||
@@ -231,24 +231,6 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first, | |||
231 | return 0; | 231 | return 0; |
232 | } | 232 | } |
233 | 233 | ||
234 | static int bcm63xx_spi_prepare_transfer(struct spi_master *master) | ||
235 | { | ||
236 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); | ||
237 | |||
238 | pm_runtime_get_sync(&bs->pdev->dev); | ||
239 | |||
240 | return 0; | ||
241 | } | ||
242 | |||
243 | static int bcm63xx_spi_unprepare_transfer(struct spi_master *master) | ||
244 | { | ||
245 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); | ||
246 | |||
247 | pm_runtime_put(&bs->pdev->dev); | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static int bcm63xx_spi_transfer_one(struct spi_master *master, | 234 | static int bcm63xx_spi_transfer_one(struct spi_master *master, |
253 | struct spi_message *m) | 235 | struct spi_message *m) |
254 | { | 236 | { |
@@ -353,20 +335,13 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
353 | { | 335 | { |
354 | struct resource *r; | 336 | struct resource *r; |
355 | struct device *dev = &pdev->dev; | 337 | struct device *dev = &pdev->dev; |
356 | struct bcm63xx_spi_pdata *pdata = pdev->dev.platform_data; | 338 | struct bcm63xx_spi_pdata *pdata = dev_get_platdata(&pdev->dev); |
357 | int irq; | 339 | int irq; |
358 | struct spi_master *master; | 340 | struct spi_master *master; |
359 | struct clk *clk; | 341 | struct clk *clk; |
360 | struct bcm63xx_spi *bs; | 342 | struct bcm63xx_spi *bs; |
361 | int ret; | 343 | int ret; |
362 | 344 | ||
363 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
364 | if (!r) { | ||
365 | dev_err(dev, "no iomem\n"); | ||
366 | ret = -ENXIO; | ||
367 | goto out; | ||
368 | } | ||
369 | |||
370 | irq = platform_get_irq(pdev, 0); | 345 | irq = platform_get_irq(pdev, 0); |
371 | if (irq < 0) { | 346 | if (irq < 0) { |
372 | dev_err(dev, "no irq\n"); | 347 | dev_err(dev, "no irq\n"); |
@@ -393,6 +368,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
393 | platform_set_drvdata(pdev, master); | 368 | platform_set_drvdata(pdev, master); |
394 | bs->pdev = pdev; | 369 | bs->pdev = pdev; |
395 | 370 | ||
371 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
396 | bs->regs = devm_ioremap_resource(&pdev->dev, r); | 372 | bs->regs = devm_ioremap_resource(&pdev->dev, r); |
397 | if (IS_ERR(bs->regs)) { | 373 | if (IS_ERR(bs->regs)) { |
398 | ret = PTR_ERR(bs->regs); | 374 | ret = PTR_ERR(bs->regs); |
@@ -412,11 +388,10 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
412 | 388 | ||
413 | master->bus_num = pdata->bus_num; | 389 | master->bus_num = pdata->bus_num; |
414 | master->num_chipselect = pdata->num_chipselect; | 390 | master->num_chipselect = pdata->num_chipselect; |
415 | master->prepare_transfer_hardware = bcm63xx_spi_prepare_transfer; | ||
416 | master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer; | ||
417 | master->transfer_one_message = bcm63xx_spi_transfer_one; | 391 | master->transfer_one_message = bcm63xx_spi_transfer_one; |
418 | master->mode_bits = MODEBITS; | 392 | master->mode_bits = MODEBITS; |
419 | master->bits_per_word_mask = SPI_BPW_MASK(8); | 393 | master->bits_per_word_mask = SPI_BPW_MASK(8); |
394 | master->auto_runtime_pm = true; | ||
420 | bs->msg_type_shift = pdata->msg_type_shift; | 395 | bs->msg_type_shift = pdata->msg_type_shift; |
421 | bs->msg_ctl_width = pdata->msg_ctl_width; | 396 | bs->msg_ctl_width = pdata->msg_ctl_width; |
422 | bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); | 397 | bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); |
@@ -480,8 +455,7 @@ static int bcm63xx_spi_remove(struct platform_device *pdev) | |||
480 | #ifdef CONFIG_PM | 455 | #ifdef CONFIG_PM |
481 | static int bcm63xx_spi_suspend(struct device *dev) | 456 | static int bcm63xx_spi_suspend(struct device *dev) |
482 | { | 457 | { |
483 | struct spi_master *master = | 458 | struct spi_master *master = dev_get_drvdata(dev); |
484 | platform_get_drvdata(to_platform_device(dev)); | ||
485 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); | 459 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); |
486 | 460 | ||
487 | spi_master_suspend(master); | 461 | spi_master_suspend(master); |
@@ -493,8 +467,7 @@ static int bcm63xx_spi_suspend(struct device *dev) | |||
493 | 467 | ||
494 | static int bcm63xx_spi_resume(struct device *dev) | 468 | static int bcm63xx_spi_resume(struct device *dev) |
495 | { | 469 | { |
496 | struct spi_master *master = | 470 | struct spi_master *master = dev_get_drvdata(dev); |
497 | platform_get_drvdata(to_platform_device(dev)); | ||
498 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); | 471 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); |
499 | 472 | ||
500 | clk_prepare_enable(bs->clk); | 473 | clk_prepare_enable(bs->clk); |
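The prepare/unprepare_transfer_hardware callbacks removed from bcm63xx above are replaced by master->auto_runtime_pm: with that flag set, the SPI core takes the runtime-PM reference on the controller's parent device itself, roughly:

    /* sketch of what the core does on the driver's behalf */
    pm_runtime_get_sync(master->dev.parent);   /* before running queued messages */
    /* ... transfer_one_message() for each queued message ... */
    pm_runtime_put(master->dev.parent);        /* once the queue goes idle */

which matches the pm_runtime_get_sync()/pm_runtime_put() calls the removed callbacks made by hand, since master->dev.parent is the platform device here.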
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c index 07ec597f9732..91921b5f5817 100644 --- a/drivers/spi/spi-bfin-sport.c +++ b/drivers/spi/spi-bfin-sport.c | |||
@@ -756,7 +756,7 @@ static int bfin_sport_spi_probe(struct platform_device *pdev) | |||
756 | struct bfin_sport_spi_master_data *drv_data; | 756 | struct bfin_sport_spi_master_data *drv_data; |
757 | int status; | 757 | int status; |
758 | 758 | ||
759 | platform_info = dev->platform_data; | 759 | platform_info = dev_get_platdata(dev); |
760 | 760 | ||
761 | /* Allocate master with space for drv_data */ | 761 | /* Allocate master with space for drv_data */ |
762 | master = spi_alloc_master(dev, sizeof(*master) + 16); | 762 | master = spi_alloc_master(dev, sizeof(*master) + 16); |
diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-bfin-v3.c new file mode 100644 index 000000000000..f4bf81347d68 --- /dev/null +++ b/drivers/spi/spi-bfin-v3.c | |||
@@ -0,0 +1,965 @@ | |||
1 | /* | ||
2 | * Analog Devices SPI3 controller driver | ||
3 | * | ||
4 | * Copyright (c) 2013 Analog Devices Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/delay.h> | ||
17 | #include <linux/device.h> | ||
18 | #include <linux/dma-mapping.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/gpio.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/spi/spi.h> | ||
29 | #include <linux/types.h> | ||
30 | |||
31 | #include <asm/bfin_spi3.h> | ||
32 | #include <asm/cacheflush.h> | ||
33 | #include <asm/dma.h> | ||
34 | #include <asm/portmux.h> | ||
35 | |||
36 | enum bfin_spi_state { | ||
37 | START_STATE, | ||
38 | RUNNING_STATE, | ||
39 | DONE_STATE, | ||
40 | ERROR_STATE | ||
41 | }; | ||
42 | |||
43 | struct bfin_spi_master; | ||
44 | |||
45 | struct bfin_spi_transfer_ops { | ||
46 | void (*write) (struct bfin_spi_master *); | ||
47 | void (*read) (struct bfin_spi_master *); | ||
48 | void (*duplex) (struct bfin_spi_master *); | ||
49 | }; | ||
50 | |||
51 | /* runtime info for spi master */ | ||
52 | struct bfin_spi_master { | ||
53 | /* SPI framework hookup */ | ||
54 | struct spi_master *master; | ||
55 | |||
56 | /* Regs base of SPI controller */ | ||
57 | struct bfin_spi_regs __iomem *regs; | ||
58 | |||
59 | /* Pin request list */ | ||
60 | u16 *pin_req; | ||
61 | |||
62 | /* Message Transfer pump */ | ||
63 | struct tasklet_struct pump_transfers; | ||
64 | |||
65 | /* Current message transfer state info */ | ||
66 | struct spi_message *cur_msg; | ||
67 | struct spi_transfer *cur_transfer; | ||
68 | struct bfin_spi_device *cur_chip; | ||
69 | unsigned transfer_len; | ||
70 | |||
71 | /* transfer buffer */ | ||
72 | void *tx; | ||
73 | void *tx_end; | ||
74 | void *rx; | ||
75 | void *rx_end; | ||
76 | |||
77 | /* dma info */ | ||
78 | unsigned int tx_dma; | ||
79 | unsigned int rx_dma; | ||
80 | dma_addr_t tx_dma_addr; | ||
81 | dma_addr_t rx_dma_addr; | ||
82 | unsigned long dummy_buffer; /* used in unidirectional transfer */ | ||
83 | unsigned long tx_dma_size; | ||
84 | unsigned long rx_dma_size; | ||
85 | int tx_num; | ||
86 | int rx_num; | ||
87 | |||
88 | /* store register value for suspend/resume */ | ||
89 | u32 control; | ||
90 | u32 ssel; | ||
91 | |||
92 | unsigned long sclk; | ||
93 | enum bfin_spi_state state; | ||
94 | |||
95 | const struct bfin_spi_transfer_ops *ops; | ||
96 | }; | ||
97 | |||
98 | struct bfin_spi_device { | ||
99 | u32 control; | ||
100 | u32 clock; | ||
101 | u32 ssel; | ||
102 | |||
103 | u8 cs; | ||
104 | u16 cs_chg_udelay; /* Some devices require > 255usec delay */ | ||
105 | u32 cs_gpio; | ||
106 | u32 tx_dummy_val; /* tx value for rx only transfer */ | ||
107 | bool enable_dma; | ||
108 | const struct bfin_spi_transfer_ops *ops; | ||
109 | }; | ||
110 | |||
111 | static void bfin_spi_enable(struct bfin_spi_master *drv_data) | ||
112 | { | ||
113 | bfin_write_or(&drv_data->regs->control, SPI_CTL_EN); | ||
114 | } | ||
115 | |||
116 | static void bfin_spi_disable(struct bfin_spi_master *drv_data) | ||
117 | { | ||
118 | bfin_write_and(&drv_data->regs->control, ~SPI_CTL_EN); | ||
119 | } | ||
120 | |||
121 | /* Calculate the SPI_CLOCK register value based on input HZ */ | ||
122 | static u32 hz_to_spi_clock(u32 sclk, u32 speed_hz) | ||
123 | { | ||
124 | u32 spi_clock = sclk / speed_hz; | ||
125 | |||
126 | if (spi_clock) | ||
127 | spi_clock--; | ||
128 | return spi_clock; | ||
129 | } | ||
130 | |||
131 | static int bfin_spi_flush(struct bfin_spi_master *drv_data) | ||
132 | { | ||
133 | unsigned long limit = loops_per_jiffy << 1; | ||
134 | |||
135 | /* wait for stop and clear stat */ | ||
136 | while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit) | ||
137 | cpu_relax(); | ||
138 | |||
139 | bfin_write(&drv_data->regs->status, 0xFFFFFFFF); | ||
140 | |||
141 | return limit; | ||
142 | } | ||
143 | |||
144 | /* Chip select operation functions for cs_change flag */ | ||
145 | static void bfin_spi_cs_active(struct bfin_spi_master *drv_data, struct bfin_spi_device *chip) | ||
146 | { | ||
147 | if (likely(chip->cs < MAX_CTRL_CS)) | ||
148 | bfin_write_and(&drv_data->regs->ssel, ~chip->ssel); | ||
149 | else | ||
150 | gpio_set_value(chip->cs_gpio, 0); | ||
151 | } | ||
152 | |||
153 | static void bfin_spi_cs_deactive(struct bfin_spi_master *drv_data, | ||
154 | struct bfin_spi_device *chip) | ||
155 | { | ||
156 | if (likely(chip->cs < MAX_CTRL_CS)) | ||
157 | bfin_write_or(&drv_data->regs->ssel, chip->ssel); | ||
158 | else | ||
159 | gpio_set_value(chip->cs_gpio, 1); | ||
160 | |||
161 | /* Move delay here for consistency */ | ||
162 | if (chip->cs_chg_udelay) | ||
163 | udelay(chip->cs_chg_udelay); | ||
164 | } | ||
165 | |||
166 | /* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ | ||
167 | static inline void bfin_spi_cs_enable(struct bfin_spi_master *drv_data, | ||
168 | struct bfin_spi_device *chip) | ||
169 | { | ||
170 | if (chip->cs < MAX_CTRL_CS) | ||
171 | bfin_write_or(&drv_data->regs->ssel, chip->ssel >> 8); | ||
172 | } | ||
173 | |||
174 | static inline void bfin_spi_cs_disable(struct bfin_spi_master *drv_data, | ||
175 | struct bfin_spi_device *chip) | ||
176 | { | ||
177 | if (chip->cs < MAX_CTRL_CS) | ||
178 | bfin_write_and(&drv_data->regs->ssel, ~(chip->ssel >> 8)); | ||
179 | } | ||
180 | |||
181 | /* stop the controller and re-configure the current chip */ | ||
182 | static void bfin_spi_restore_state(struct bfin_spi_master *drv_data) | ||
183 | { | ||
184 | struct bfin_spi_device *chip = drv_data->cur_chip; | ||
185 | |||
186 | /* Clear status and disable clock */ | ||
187 | bfin_write(&drv_data->regs->status, 0xFFFFFFFF); | ||
188 | bfin_write(&drv_data->regs->rx_control, 0x0); | ||
189 | bfin_write(&drv_data->regs->tx_control, 0x0); | ||
190 | bfin_spi_disable(drv_data); | ||
191 | |||
192 | SSYNC(); | ||
193 | |||
194 | /* Load the registers */ | ||
195 | bfin_write(&drv_data->regs->control, chip->control); | ||
196 | bfin_write(&drv_data->regs->clock, chip->clock); | ||
197 | |||
198 | bfin_spi_enable(drv_data); | ||
199 | drv_data->tx_num = drv_data->rx_num = 0; | ||
200 | /* we always choose the tx transfer to initiate */ | ||
201 | bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN); | ||
202 | bfin_write(&drv_data->regs->tx_control, | ||
203 | SPI_TXCTL_TEN | SPI_TXCTL_TTI); | ||
204 | bfin_spi_cs_active(drv_data, chip); | ||
205 | } | ||
206 | |||
207 | /* discard invalid rx data and empty rfifo */ | ||
208 | static inline void dummy_read(struct bfin_spi_master *drv_data) | ||
209 | { | ||
210 | while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)) | ||
211 | bfin_read(&drv_data->regs->rfifo); | ||
212 | } | ||
213 | |||
214 | static void bfin_spi_u8_write(struct bfin_spi_master *drv_data) | ||
215 | { | ||
216 | dummy_read(drv_data); | ||
217 | while (drv_data->tx < drv_data->tx_end) { | ||
218 | bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++))); | ||
219 | while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) | ||
220 | cpu_relax(); | ||
221 | bfin_read(&drv_data->regs->rfifo); | ||
222 | } | ||
223 | } | ||
224 | |||
225 | static void bfin_spi_u8_read(struct bfin_spi_master *drv_data) | ||
226 | { | ||
227 | u32 tx_val = drv_data->cur_chip->tx_dummy_val; | ||
228 | |||
229 | dummy_read(drv_data); | ||
230 | while (drv_data->rx < drv_data->rx_end) { | ||
231 | bfin_write(&drv_data->regs->tfifo, tx_val); | ||
232 | while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) | ||
233 | cpu_relax(); | ||
234 | *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo); | ||
235 | } | ||
236 | } | ||
237 | |||
238 | static void bfin_spi_u8_duplex(struct bfin_spi_master *drv_data) | ||
239 | { | ||
240 | dummy_read(drv_data); | ||
241 | while (drv_data->rx < drv_data->rx_end) { | ||
242 | bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++))); | ||
243 | while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) | ||
244 | cpu_relax(); | ||
245 | *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo); | ||
246 | } | ||
247 | } | ||
248 | |||
249 | static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { | ||
250 | .write = bfin_spi_u8_write, | ||
251 | .read = bfin_spi_u8_read, | ||
252 | .duplex = bfin_spi_u8_duplex, | ||
253 | }; | ||
254 | |||
255 | static void bfin_spi_u16_write(struct bfin_spi_master *drv_data) | ||
256 | { | ||
257 | dummy_read(drv_data); | ||
258 | while (drv_data->tx < drv_data->tx_end) { | ||
259 | bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx)); | ||
260 | drv_data->tx += 2; | ||
261 | while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) | ||
262 | cpu_relax(); | ||
263 | bfin_read(&drv_data->regs->rfifo); | ||
264 | } | ||
265 | } | ||
266 | |||
267 | static void bfin_spi_u16_read(struct bfin_spi_master *drv_data) | ||
268 | { | ||
269 | u32 tx_val = drv_data->cur_chip->tx_dummy_val; | ||
270 | |||
271 | dummy_read(drv_data); | ||
272 | while (drv_data->rx < drv_data->rx_end) { | ||
273 | bfin_write(&drv_data->regs->tfifo, tx_val); | ||
274 | while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) | ||
275 | cpu_relax(); | ||
276 | *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); | ||
277 | drv_data->rx += 2; | ||
278 | } | ||
279 | } | ||
280 | |||
281 | static void bfin_spi_u16_duplex(struct bfin_spi_master *drv_data) | ||
282 | { | ||
283 | dummy_read(drv_data); | ||
284 | while (drv_data->rx < drv_data->rx_end) { | ||
285 | bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx)); | ||
286 | drv_data->tx += 2; | ||
287 | while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) | ||
288 | cpu_relax(); | ||
289 | *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); | ||
290 | drv_data->rx += 2; | ||
291 | } | ||
292 | } | ||
293 | |||
294 | static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { | ||
295 | .write = bfin_spi_u16_write, | ||
296 | .read = bfin_spi_u16_read, | ||
297 | .duplex = bfin_spi_u16_duplex, | ||
298 | }; | ||
299 | |||
300 | static void bfin_spi_u32_write(struct bfin_spi_master *drv_data) | ||
301 | { | ||
302 | dummy_read(drv_data); | ||
303 | while (drv_data->tx < drv_data->tx_end) { | ||
304 | bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx)); | ||
305 | drv_data->tx += 4; | ||
306 | while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) | ||
307 | cpu_relax(); | ||
308 | bfin_read(&drv_data->regs->rfifo); | ||
309 | } | ||
310 | } | ||
311 | |||
312 | static void bfin_spi_u32_read(struct bfin_spi_master *drv_data) | ||
313 | { | ||
314 | u32 tx_val = drv_data->cur_chip->tx_dummy_val; | ||
315 | |||
316 | dummy_read(drv_data); | ||
317 | while (drv_data->rx < drv_data->rx_end) { | ||
318 | bfin_write(&drv_data->regs->tfifo, tx_val); | ||
319 | while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) | ||
320 | cpu_relax(); | ||
321 | *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); | ||
322 | drv_data->rx += 4; | ||
323 | } | ||
324 | } | ||
325 | |||
326 | static void bfin_spi_u32_duplex(struct bfin_spi_master *drv_data) | ||
327 | { | ||
328 | dummy_read(drv_data); | ||
329 | while (drv_data->rx < drv_data->rx_end) { | ||
330 | bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx)); | ||
331 | drv_data->tx += 4; | ||
332 | while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) | ||
333 | cpu_relax(); | ||
334 | *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); | ||
335 | drv_data->rx += 4; | ||
336 | } | ||
337 | } | ||
338 | |||
339 | static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u32 = { | ||
340 | .write = bfin_spi_u32_write, | ||
341 | .read = bfin_spi_u32_read, | ||
342 | .duplex = bfin_spi_u32_duplex, | ||
343 | }; | ||
344 | |||
345 | |||
346 | /* test if there are more transfers to be done */ | ||
347 | static void bfin_spi_next_transfer(struct bfin_spi_master *drv) | ||
348 | { | ||
349 | struct spi_message *msg = drv->cur_msg; | ||
350 | struct spi_transfer *t = drv->cur_transfer; | ||
351 | |||
352 | /* Move to next transfer */ | ||
353 | if (t->transfer_list.next != &msg->transfers) { | ||
354 | drv->cur_transfer = list_entry(t->transfer_list.next, | ||
355 | struct spi_transfer, transfer_list); | ||
356 | drv->state = RUNNING_STATE; | ||
357 | } else { | ||
358 | drv->state = DONE_STATE; | ||
359 | drv->cur_transfer = NULL; | ||
360 | } | ||
361 | } | ||
362 | |||
363 | static void bfin_spi_giveback(struct bfin_spi_master *drv_data) | ||
364 | { | ||
365 | struct bfin_spi_device *chip = drv_data->cur_chip; | ||
366 | |||
367 | bfin_spi_cs_deactive(drv_data, chip); | ||
368 | spi_finalize_current_message(drv_data->master); | ||
369 | } | ||
370 | |||
371 | static int bfin_spi_setup_transfer(struct bfin_spi_master *drv) | ||
372 | { | ||
373 | struct spi_transfer *t = drv->cur_transfer; | ||
374 | u32 cr, cr_width; | ||
375 | |||
376 | if (t->tx_buf) { | ||
377 | drv->tx = (void *)t->tx_buf; | ||
378 | drv->tx_end = drv->tx + t->len; | ||
379 | } else { | ||
380 | drv->tx = NULL; | ||
381 | } | ||
382 | |||
383 | if (t->rx_buf) { | ||
384 | drv->rx = t->rx_buf; | ||
385 | drv->rx_end = drv->rx + t->len; | ||
386 | } else { | ||
387 | drv->rx = NULL; | ||
388 | } | ||
389 | |||
390 | drv->transfer_len = t->len; | ||
391 | |||
392 | /* bits per word setup */ | ||
393 | switch (t->bits_per_word) { | ||
394 | case 8: | ||
395 | cr_width = SPI_CTL_SIZE08; | ||
396 | drv->ops = &bfin_bfin_spi_transfer_ops_u8; | ||
397 | break; | ||
398 | case 16: | ||
399 | cr_width = SPI_CTL_SIZE16; | ||
400 | drv->ops = &bfin_bfin_spi_transfer_ops_u16; | ||
401 | break; | ||
402 | case 32: | ||
403 | cr_width = SPI_CTL_SIZE32; | ||
404 | drv->ops = &bfin_bfin_spi_transfer_ops_u32; | ||
405 | break; | ||
406 | default: | ||
407 | return -EINVAL; | ||
408 | } | ||
409 | cr = bfin_read(&drv->regs->control) & ~SPI_CTL_SIZE; | ||
410 | cr |= cr_width; | ||
411 | bfin_write(&drv->regs->control, cr); | ||
412 | |||
413 | /* speed setup */ | ||
414 | bfin_write(&drv->regs->clock, | ||
415 | hz_to_spi_clock(drv->sclk, t->speed_hz)); | ||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | static int bfin_spi_dma_xfer(struct bfin_spi_master *drv_data) | ||
420 | { | ||
421 | struct spi_transfer *t = drv_data->cur_transfer; | ||
422 | struct spi_message *msg = drv_data->cur_msg; | ||
423 | struct bfin_spi_device *chip = drv_data->cur_chip; | ||
424 | u32 dma_config; | ||
425 | unsigned long word_count, word_size; | ||
426 | void *tx_buf, *rx_buf; | ||
427 | |||
428 | switch (t->bits_per_word) { | ||
429 | case 8: | ||
430 | dma_config = WDSIZE_8 | PSIZE_8; | ||
431 | word_count = drv_data->transfer_len; | ||
432 | word_size = 1; | ||
433 | break; | ||
434 | case 16: | ||
435 | dma_config = WDSIZE_16 | PSIZE_16; | ||
436 | word_count = drv_data->transfer_len / 2; | ||
437 | word_size = 2; | ||
438 | break; | ||
439 | default: | ||
440 | dma_config = WDSIZE_32 | PSIZE_32; | ||
441 | word_count = drv_data->transfer_len / 4; | ||
442 | word_size = 4; | ||
443 | break; | ||
444 | } | ||
445 | |||
446 | if (!drv_data->rx) { | ||
447 | tx_buf = drv_data->tx; | ||
448 | rx_buf = &drv_data->dummy_buffer; | ||
449 | drv_data->tx_dma_size = drv_data->transfer_len; | ||
450 | drv_data->rx_dma_size = sizeof(drv_data->dummy_buffer); | ||
451 | set_dma_x_modify(drv_data->tx_dma, word_size); | ||
452 | set_dma_x_modify(drv_data->rx_dma, 0); | ||
453 | } else if (!drv_data->tx) { | ||
454 | drv_data->dummy_buffer = chip->tx_dummy_val; | ||
455 | tx_buf = &drv_data->dummy_buffer; | ||
456 | rx_buf = drv_data->rx; | ||
457 | drv_data->tx_dma_size = sizeof(drv_data->dummy_buffer); | ||
458 | drv_data->rx_dma_size = drv_data->transfer_len; | ||
459 | set_dma_x_modify(drv_data->tx_dma, 0); | ||
460 | set_dma_x_modify(drv_data->rx_dma, word_size); | ||
461 | } else { | ||
462 | tx_buf = drv_data->tx; | ||
463 | rx_buf = drv_data->rx; | ||
464 | drv_data->tx_dma_size = drv_data->rx_dma_size | ||
465 | = drv_data->transfer_len; | ||
466 | set_dma_x_modify(drv_data->tx_dma, word_size); | ||
467 | set_dma_x_modify(drv_data->rx_dma, word_size); | ||
468 | } | ||
469 | |||
470 | drv_data->tx_dma_addr = dma_map_single(&msg->spi->dev, | ||
471 | (void *)tx_buf, | ||
472 | drv_data->tx_dma_size, | ||
473 | DMA_TO_DEVICE); | ||
474 | if (dma_mapping_error(&msg->spi->dev, | ||
475 | drv_data->tx_dma_addr)) | ||
476 | return -ENOMEM; | ||
477 | |||
478 | drv_data->rx_dma_addr = dma_map_single(&msg->spi->dev, | ||
479 | (void *)rx_buf, | ||
480 | drv_data->rx_dma_size, | ||
481 | DMA_FROM_DEVICE); | ||
482 | if (dma_mapping_error(&msg->spi->dev, | ||
483 | drv_data->rx_dma_addr)) { | ||
484 | dma_unmap_single(&msg->spi->dev, | ||
485 | drv_data->tx_dma_addr, | ||
486 | drv_data->tx_dma_size, | ||
487 | DMA_TO_DEVICE); | ||
488 | return -ENOMEM; | ||
489 | } | ||
490 | |||
491 | dummy_read(drv_data); | ||
492 | set_dma_x_count(drv_data->tx_dma, word_count); | ||
493 | set_dma_x_count(drv_data->rx_dma, word_count); | ||
494 | set_dma_start_addr(drv_data->tx_dma, drv_data->tx_dma_addr); | ||
495 | set_dma_start_addr(drv_data->rx_dma, drv_data->rx_dma_addr); | ||
496 | dma_config |= DMAFLOW_STOP | RESTART | DI_EN; | ||
497 | set_dma_config(drv_data->tx_dma, dma_config); | ||
498 | set_dma_config(drv_data->rx_dma, dma_config | WNR); | ||
499 | enable_dma(drv_data->tx_dma); | ||
500 | enable_dma(drv_data->rx_dma); | ||
501 | SSYNC(); | ||
502 | |||
503 | bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN | SPI_RXCTL_RDR_NE); | ||
504 | SSYNC(); | ||
505 | bfin_write(&drv_data->regs->tx_control, | ||
506 | SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF); | ||
507 | |||
508 | return 0; | ||
509 | } | ||
510 | |||
511 | static int bfin_spi_pio_xfer(struct bfin_spi_master *drv_data) | ||
512 | { | ||
513 | struct spi_message *msg = drv_data->cur_msg; | ||
514 | |||
515 | if (!drv_data->rx) { | ||
516 | /* write only half duplex */ | ||
517 | drv_data->ops->write(drv_data); | ||
518 | if (drv_data->tx != drv_data->tx_end) | ||
519 | return -EIO; | ||
520 | } else if (!drv_data->tx) { | ||
521 | /* read only half duplex */ | ||
522 | drv_data->ops->read(drv_data); | ||
523 | if (drv_data->rx != drv_data->rx_end) | ||
524 | return -EIO; | ||
525 | } else { | ||
526 | /* full duplex mode */ | ||
527 | drv_data->ops->duplex(drv_data); | ||
528 | if (drv_data->tx != drv_data->tx_end) | ||
529 | return -EIO; | ||
530 | } | ||
531 | |||
532 | if (!bfin_spi_flush(drv_data)) | ||
533 | return -EIO; | ||
534 | msg->actual_length += drv_data->transfer_len; | ||
535 | tasklet_schedule(&drv_data->pump_transfers); | ||
536 | return 0; | ||
537 | } | ||
538 | |||
539 | static void bfin_spi_pump_transfers(unsigned long data) | ||
540 | { | ||
541 | struct bfin_spi_master *drv_data = (struct bfin_spi_master *)data; | ||
542 | struct spi_message *msg = NULL; | ||
543 | struct spi_transfer *t = NULL; | ||
544 | struct bfin_spi_device *chip = NULL; | ||
545 | int ret; | ||
546 | |||
547 | /* Get current state information */ | ||
548 | msg = drv_data->cur_msg; | ||
549 | t = drv_data->cur_transfer; | ||
550 | chip = drv_data->cur_chip; | ||
551 | |||
552 | /* Handle an aborted message */ | ||
553 | if (drv_data->state == ERROR_STATE) { | ||
554 | msg->status = -EIO; | ||
555 | bfin_spi_giveback(drv_data); | ||
556 | return; | ||
557 | } | ||
558 | |||
559 | if (drv_data->state == RUNNING_STATE) { | ||
560 | if (t->delay_usecs) | ||
561 | udelay(t->delay_usecs); | ||
562 | if (t->cs_change) | ||
563 | bfin_spi_cs_deactive(drv_data, chip); | ||
564 | bfin_spi_next_transfer(drv_data); | ||
565 | t = drv_data->cur_transfer; | ||
566 | } | ||
567 | /* Handle end of message */ | ||
568 | if (drv_data->state == DONE_STATE) { | ||
569 | msg->status = 0; | ||
570 | bfin_spi_giveback(drv_data); | ||
571 | return; | ||
572 | } | ||
573 | |||
574 | if ((t->len == 0) || (t->tx_buf == NULL && t->rx_buf == NULL)) { | ||
575 | /* Schedule next transfer tasklet */ | ||
576 | tasklet_schedule(&drv_data->pump_transfers); | ||
577 | return; | ||
578 | } | ||
579 | |||
580 | ret = bfin_spi_setup_transfer(drv_data); | ||
581 | if (ret) { | ||
582 | msg->status = ret; | ||
583 | bfin_spi_giveback(drv_data); | ||
584 | } | ||
585 | |||
586 | bfin_write(&drv_data->regs->status, 0xFFFFFFFF); | ||
587 | bfin_spi_cs_active(drv_data, chip); | ||
588 | drv_data->state = RUNNING_STATE; | ||
589 | |||
590 | if (chip->enable_dma) | ||
591 | ret = bfin_spi_dma_xfer(drv_data); | ||
592 | else | ||
593 | ret = bfin_spi_pio_xfer(drv_data); | ||
594 | if (ret) { | ||
595 | msg->status = ret; | ||
596 | bfin_spi_giveback(drv_data); | ||
597 | } | ||
598 | } | ||
599 | |||
600 | static int bfin_spi_transfer_one_message(struct spi_master *master, | ||
601 | struct spi_message *m) | ||
602 | { | ||
603 | struct bfin_spi_master *drv_data = spi_master_get_devdata(master); | ||
604 | |||
605 | drv_data->cur_msg = m; | ||
606 | drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); | ||
607 | bfin_spi_restore_state(drv_data); | ||
608 | |||
609 | drv_data->state = START_STATE; | ||
610 | drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, | ||
611 | struct spi_transfer, transfer_list); | ||
612 | |||
613 | tasklet_schedule(&drv_data->pump_transfers); | ||
614 | return 0; | ||
615 | } | ||
616 | |||
617 | #define MAX_SPI_SSEL 7 | ||
618 | |||
619 | static const u16 ssel[][MAX_SPI_SSEL] = { | ||
620 | {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, | ||
621 | P_SPI0_SSEL4, P_SPI0_SSEL5, | ||
622 | P_SPI0_SSEL6, P_SPI0_SSEL7}, | ||
623 | |||
624 | {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3, | ||
625 | P_SPI1_SSEL4, P_SPI1_SSEL5, | ||
626 | P_SPI1_SSEL6, P_SPI1_SSEL7}, | ||
627 | |||
628 | {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3, | ||
629 | P_SPI2_SSEL4, P_SPI2_SSEL5, | ||
630 | P_SPI2_SSEL6, P_SPI2_SSEL7}, | ||
631 | }; | ||
632 | |||
633 | static int bfin_spi_setup(struct spi_device *spi) | ||
634 | { | ||
635 | struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master); | ||
636 | struct bfin_spi_device *chip = spi_get_ctldata(spi); | ||
637 | u32 bfin_ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE; | ||
638 | int ret = -EINVAL; | ||
639 | |||
640 | if (!chip) { | ||
641 | struct bfin_spi3_chip *chip_info = spi->controller_data; | ||
642 | |||
643 | chip = kzalloc(sizeof(*chip), GFP_KERNEL); | ||
644 | if (!chip) { | ||
645 | dev_err(&spi->dev, "can not allocate chip data\n"); | ||
646 | return -ENOMEM; | ||
647 | } | ||
648 | if (chip_info) { | ||
649 | if (chip_info->control & ~bfin_ctl_reg) { | ||
650 | dev_err(&spi->dev, | ||
651 | "do not set bits that the SPI framework manages\n"); | ||
652 | goto error; | ||
653 | } | ||
654 | chip->control = chip_info->control; | ||
655 | chip->cs_chg_udelay = chip_info->cs_chg_udelay; | ||
656 | chip->tx_dummy_val = chip_info->tx_dummy_val; | ||
657 | chip->enable_dma = chip_info->enable_dma; | ||
658 | } | ||
659 | chip->cs = spi->chip_select; | ||
660 | if (chip->cs < MAX_CTRL_CS) { | ||
661 | chip->ssel = (1 << chip->cs) << 8; | ||
662 | ret = peripheral_request(ssel[spi->master->bus_num] | ||
663 | [chip->cs-1], dev_name(&spi->dev)); | ||
664 | if (ret) { | ||
665 | dev_err(&spi->dev, "peripheral_request() error\n"); | ||
666 | goto error; | ||
667 | } | ||
668 | } else { | ||
669 | chip->cs_gpio = chip->cs - MAX_CTRL_CS; | ||
670 | ret = gpio_request_one(chip->cs_gpio, GPIOF_OUT_INIT_HIGH, | ||
671 | dev_name(&spi->dev)); | ||
672 | if (ret) { | ||
673 | dev_err(&spi->dev, "gpio_request_one() error\n"); | ||
674 | goto error; | ||
675 | } | ||
676 | } | ||
677 | spi_set_ctldata(spi, chip); | ||
678 | } | ||
679 | |||
680 | /* force a default base state */ | ||
681 | chip->control &= bfin_ctl_reg; | ||
682 | |||
683 | if (spi->mode & SPI_CPOL) | ||
684 | chip->control |= SPI_CTL_CPOL; | ||
685 | if (spi->mode & SPI_CPHA) | ||
686 | chip->control |= SPI_CTL_CPHA; | ||
687 | if (spi->mode & SPI_LSB_FIRST) | ||
688 | chip->control |= SPI_CTL_LSBF; | ||
689 | chip->control |= SPI_CTL_MSTR; | ||
690 | /* we choose software to control cs */ | ||
691 | chip->control &= ~SPI_CTL_ASSEL; | ||
692 | |||
693 | chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz); | ||
694 | |||
695 | bfin_spi_cs_enable(drv_data, chip); | ||
696 | bfin_spi_cs_deactive(drv_data, chip); | ||
697 | |||
698 | return 0; | ||
699 | error: | ||
700 | if (chip) { | ||
701 | kfree(chip); | ||
702 | spi_set_ctldata(spi, NULL); | ||
703 | } | ||
704 | |||
705 | return ret; | ||
706 | } | ||
707 | |||
708 | static void bfin_spi_cleanup(struct spi_device *spi) | ||
709 | { | ||
710 | struct bfin_spi_device *chip = spi_get_ctldata(spi); | ||
711 | struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master); | ||
712 | |||
713 | if (!chip) | ||
714 | return; | ||
715 | |||
716 | if (chip->cs < MAX_CTRL_CS) { | ||
717 | peripheral_free(ssel[spi->master->bus_num] | ||
718 | [chip->cs-1]); | ||
719 | bfin_spi_cs_disable(drv_data, chip); | ||
720 | } else { | ||
721 | gpio_free(chip->cs_gpio); | ||
722 | } | ||
723 | |||
724 | kfree(chip); | ||
725 | spi_set_ctldata(spi, NULL); | ||
726 | } | ||
727 | |||
728 | static irqreturn_t bfin_spi_tx_dma_isr(int irq, void *dev_id) | ||
729 | { | ||
730 | struct bfin_spi_master *drv_data = dev_id; | ||
731 | u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma); | ||
732 | |||
733 | clear_dma_irqstat(drv_data->tx_dma); | ||
734 | if (dma_stat & DMA_DONE) { | ||
735 | drv_data->tx_num++; | ||
736 | } else { | ||
737 | dev_err(&drv_data->master->dev, | ||
738 | "spi tx dma error: %d\n", dma_stat); | ||
739 | if (drv_data->tx) | ||
740 | drv_data->state = ERROR_STATE; | ||
741 | } | ||
742 | bfin_write_and(&drv_data->regs->tx_control, ~SPI_TXCTL_TDR_NF); | ||
743 | return IRQ_HANDLED; | ||
744 | } | ||
745 | |||
746 | static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id) | ||
747 | { | ||
748 | struct bfin_spi_master *drv_data = dev_id; | ||
749 | struct spi_message *msg = drv_data->cur_msg; | ||
750 | u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma); | ||
751 | |||
752 | clear_dma_irqstat(drv_data->rx_dma); | ||
753 | if (dma_stat & DMA_DONE) { | ||
754 | drv_data->rx_num++; | ||
755 | /* we may fail on tx dma */ | ||
756 | if (drv_data->state != ERROR_STATE) | ||
757 | msg->actual_length += drv_data->transfer_len; | ||
758 | } else { | ||
759 | drv_data->state = ERROR_STATE; | ||
760 | dev_err(&drv_data->master->dev, | ||
761 | "spi rx dma error: %d\n", dma_stat); | ||
762 | } | ||
763 | bfin_write(&drv_data->regs->tx_control, 0); | ||
764 | bfin_write(&drv_data->regs->rx_control, 0); | ||
765 | if (drv_data->rx_num != drv_data->tx_num) | ||
766 | dev_dbg(&drv_data->master->dev, | ||
767 | "dma interrupt missing: tx=%d,rx=%d\n", | ||
768 | drv_data->tx_num, drv_data->rx_num); | ||
769 | tasklet_schedule(&drv_data->pump_transfers); | ||
770 | return IRQ_HANDLED; | ||
771 | } | ||
772 | |||
773 | static int bfin_spi_probe(struct platform_device *pdev) | ||
774 | { | ||
775 | struct device *dev = &pdev->dev; | ||
776 | struct bfin_spi3_master *info = dev_get_platdata(dev); | ||
777 | struct spi_master *master; | ||
778 | struct bfin_spi_master *drv_data; | ||
779 | struct resource *mem, *res; | ||
780 | unsigned int tx_dma, rx_dma; | ||
781 | unsigned long sclk; | ||
782 | int ret; | ||
783 | |||
784 | if (!info) { | ||
785 | dev_err(dev, "platform data missing!\n"); | ||
786 | return -ENODEV; | ||
787 | } | ||
788 | |||
789 | sclk = get_sclk1(); | ||
790 | if (!sclk) { | ||
791 | dev_err(dev, "can not get sclk1\n"); | ||
792 | return -ENXIO; | ||
793 | } | ||
794 | |||
795 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
796 | if (!res) { | ||
797 | dev_err(dev, "can not get tx dma resource\n"); | ||
798 | return -ENXIO; | ||
799 | } | ||
800 | tx_dma = res->start; | ||
801 | |||
802 | res = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
803 | if (!res) { | ||
804 | dev_err(dev, "can not get rx dma resource\n"); | ||
805 | return -ENXIO; | ||
806 | } | ||
807 | rx_dma = res->start; | ||
808 | |||
809 | /* allocate master with space for drv_data */ | ||
810 | master = spi_alloc_master(dev, sizeof(*drv_data)); | ||
811 | if (!master) { | ||
812 | dev_err(dev, "can not alloc spi_master\n"); | ||
813 | return -ENOMEM; | ||
814 | } | ||
815 | platform_set_drvdata(pdev, master); | ||
816 | |||
817 | /* the mode bits supported by this driver */ | ||
818 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; | ||
819 | |||
820 | master->bus_num = pdev->id; | ||
821 | master->num_chipselect = info->num_chipselect; | ||
822 | master->cleanup = bfin_spi_cleanup; | ||
823 | master->setup = bfin_spi_setup; | ||
824 | master->transfer_one_message = bfin_spi_transfer_one_message; | ||
825 | master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1); | ||
826 | |||
827 | drv_data = spi_master_get_devdata(master); | ||
828 | drv_data->master = master; | ||
829 | drv_data->tx_dma = tx_dma; | ||
830 | drv_data->rx_dma = rx_dma; | ||
831 | drv_data->pin_req = info->pin_req; | ||
832 | drv_data->sclk = sclk; | ||
833 | |||
834 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
835 | drv_data->regs = devm_ioremap_resource(dev, mem); | ||
836 | if (IS_ERR(drv_data->regs)) { | ||
837 | ret = PTR_ERR(drv_data->regs); | ||
838 | goto err_put_master; | ||
839 | } | ||
840 | |||
841 | /* request tx and rx dma */ | ||
842 | ret = request_dma(tx_dma, "SPI_TX_DMA"); | ||
843 | if (ret) { | ||
844 | dev_err(dev, "can not request SPI TX DMA channel\n"); | ||
845 | goto err_put_master; | ||
846 | } | ||
847 | set_dma_callback(tx_dma, bfin_spi_tx_dma_isr, drv_data); | ||
848 | |||
849 | ret = request_dma(rx_dma, "SPI_RX_DMA"); | ||
850 | if (ret) { | ||
851 | dev_err(dev, "can not request SPI RX DMA channel\n"); | ||
852 | goto err_free_tx_dma; | ||
853 | } | ||
854 | set_dma_callback(drv_data->rx_dma, bfin_spi_rx_dma_isr, drv_data); | ||
855 | |||
856 | /* request CLK, MOSI and MISO */ | ||
857 | ret = peripheral_request_list(drv_data->pin_req, "bfin-spi3"); | ||
858 | if (ret < 0) { | ||
859 | dev_err(dev, "can not request spi pins\n"); | ||
860 | goto err_free_rx_dma; | ||
861 | } | ||
862 | |||
863 | bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA); | ||
864 | bfin_write(&drv_data->regs->ssel, 0x0000FE00); | ||
865 | bfin_write(&drv_data->regs->delay, 0x0); | ||
866 | |||
867 | tasklet_init(&drv_data->pump_transfers, | ||
868 | bfin_spi_pump_transfers, (unsigned long)drv_data); | ||
869 | /* register with the SPI framework */ | ||
870 | ret = spi_register_master(master); | ||
871 | if (ret) { | ||
872 | dev_err(dev, "can not register spi master\n"); | ||
873 | goto err_free_peripheral; | ||
874 | } | ||
875 | |||
876 | return ret; | ||
877 | |||
878 | err_free_peripheral: | ||
879 | peripheral_free_list(drv_data->pin_req); | ||
880 | err_free_rx_dma: | ||
881 | free_dma(rx_dma); | ||
882 | err_free_tx_dma: | ||
883 | free_dma(tx_dma); | ||
884 | err_put_master: | ||
885 | spi_master_put(master); | ||
886 | |||
887 | return ret; | ||
888 | } | ||
889 | |||
890 | static int bfin_spi_remove(struct platform_device *pdev) | ||
891 | { | ||
892 | struct spi_master *master = platform_get_drvdata(pdev); | ||
893 | struct bfin_spi_master *drv_data = spi_master_get_devdata(master); | ||
894 | |||
895 | bfin_spi_disable(drv_data); | ||
896 | |||
897 | peripheral_free_list(drv_data->pin_req); | ||
898 | free_dma(drv_data->rx_dma); | ||
899 | free_dma(drv_data->tx_dma); | ||
900 | |||
901 | spi_unregister_master(drv_data->master); | ||
902 | return 0; | ||
903 | } | ||
904 | |||
905 | #ifdef CONFIG_PM | ||
906 | static int bfin_spi_suspend(struct device *dev) | ||
907 | { | ||
908 | struct spi_master *master = dev_get_drvdata(dev); | ||
909 | struct bfin_spi_master *drv_data = spi_master_get_devdata(master); | ||
910 | |||
911 | spi_master_suspend(master); | ||
912 | |||
913 | drv_data->control = bfin_read(&drv_data->regs->control); | ||
914 | drv_data->ssel = bfin_read(&drv_data->regs->ssel); | ||
915 | |||
916 | bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA); | ||
917 | bfin_write(&drv_data->regs->ssel, 0x0000FE00); | ||
918 | dma_disable_irq(drv_data->rx_dma); | ||
919 | dma_disable_irq(drv_data->tx_dma); | ||
920 | |||
921 | return 0; | ||
922 | } | ||
923 | |||
924 | static int bfin_spi_resume(struct device *dev) | ||
925 | { | ||
926 | struct spi_master *master = dev_get_drvdata(dev); | ||
927 | struct bfin_spi_master *drv_data = spi_master_get_devdata(master); | ||
928 | int ret = 0; | ||
929 | |||
930 | /* the bootrom may modify spi and dma status when resuming in spi boot mode */ | ||
931 | disable_dma(drv_data->rx_dma); | ||
932 | |||
933 | dma_enable_irq(drv_data->rx_dma); | ||
934 | dma_enable_irq(drv_data->tx_dma); | ||
935 | bfin_write(&drv_data->regs->control, drv_data->control); | ||
936 | bfin_write(&drv_data->regs->ssel, drv_data->ssel); | ||
937 | |||
938 | ret = spi_master_resume(master); | ||
939 | if (ret) { | ||
940 | free_dma(drv_data->rx_dma); | ||
941 | free_dma(drv_data->tx_dma); | ||
942 | } | ||
943 | |||
944 | return ret; | ||
945 | } | ||
946 | #endif | ||
947 | static const struct dev_pm_ops bfin_spi_pm_ops = { | ||
948 | SET_SYSTEM_SLEEP_PM_OPS(bfin_spi_suspend, bfin_spi_resume) | ||
949 | }; | ||
950 | |||
951 | MODULE_ALIAS("platform:bfin-spi3"); | ||
952 | static struct platform_driver bfin_spi_driver = { | ||
953 | .driver = { | ||
954 | .name = "bfin-spi3", | ||
955 | .owner = THIS_MODULE, | ||
956 | .pm = &bfin_spi_pm_ops, | ||
957 | }, | ||
958 | .remove = bfin_spi_remove, | ||
959 | }; | ||
960 | |||
961 | module_platform_driver_probe(bfin_spi_driver, bfin_spi_probe); | ||
962 | |||
963 | MODULE_DESCRIPTION("Analog Devices SPI3 controller driver"); | ||
964 | MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>"); | ||
965 | MODULE_LICENSE("GPL v2"); | ||
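A worked example of the hz_to_spi_clock() divider arithmetic in the new bfin-spi3 driver above (the clock figures are made up for illustration):

    /* sclk = 100 MHz, requested speed_hz = 25 MHz */
    u32 spi_clock = 100000000 / 25000000;   /* = 4 */
    spi_clock--;                            /* SPI_CLOCK register value = 3 */

    /* sclk = 100 MHz, requested speed_hz = 40 MHz */
    spi_clock = 100000000 / 40000000;       /* = 2 by integer division */
    spi_clock--;                            /* SPI_CLOCK register value = 1 */

The decrement only happens when the quotient is non-zero, so a request faster than sclk leaves the register at 0.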
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c index 59a73424419c..45bdf73d6868 100644 --- a/drivers/spi/spi-bfin5xx.c +++ b/drivers/spi/spi-bfin5xx.c | |||
@@ -1271,7 +1271,7 @@ static int bfin_spi_probe(struct platform_device *pdev) | |||
1271 | struct resource *res; | 1271 | struct resource *res; |
1272 | int status = 0; | 1272 | int status = 0; |
1273 | 1273 | ||
1274 | platform_info = dev->platform_data; | 1274 | platform_info = dev_get_platdata(dev); |
1275 | 1275 | ||
1276 | /* Allocate master with space for drv_data */ | 1276 | /* Allocate master with space for drv_data */ |
1277 | master = spi_alloc_master(dev, sizeof(*drv_data)); | 1277 | master = spi_alloc_master(dev, sizeof(*drv_data)); |
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index a63d7da3bfe2..e3946e44e076 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c | |||
@@ -255,150 +255,140 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
255 | * Drivers can provide word-at-a-time i/o primitives, or provide | 255 | * Drivers can provide word-at-a-time i/o primitives, or provide |
256 | * transfer-at-a-time ones to leverage dma or fifo hardware. | 256 | * transfer-at-a-time ones to leverage dma or fifo hardware. |
257 | */ | 257 | */ |
258 | static void bitbang_work(struct work_struct *work) | 258 | |
259 | static int spi_bitbang_prepare_hardware(struct spi_master *spi) | ||
259 | { | 260 | { |
260 | struct spi_bitbang *bitbang = | 261 | struct spi_bitbang *bitbang; |
261 | container_of(work, struct spi_bitbang, work); | ||
262 | unsigned long flags; | 262 | unsigned long flags; |
263 | struct spi_message *m, *_m; | 263 | |
264 | bitbang = spi_master_get_devdata(spi); | ||
264 | 265 | ||
265 | spin_lock_irqsave(&bitbang->lock, flags); | 266 | spin_lock_irqsave(&bitbang->lock, flags); |
266 | bitbang->busy = 1; | 267 | bitbang->busy = 1; |
267 | list_for_each_entry_safe(m, _m, &bitbang->queue, queue) { | 268 | spin_unlock_irqrestore(&bitbang->lock, flags); |
268 | struct spi_device *spi; | ||
269 | unsigned nsecs; | ||
270 | struct spi_transfer *t = NULL; | ||
271 | unsigned tmp; | ||
272 | unsigned cs_change; | ||
273 | int status; | ||
274 | int do_setup = -1; | ||
275 | |||
276 | list_del(&m->queue); | ||
277 | spin_unlock_irqrestore(&bitbang->lock, flags); | ||
278 | |||
279 | /* FIXME this is made-up ... the correct value is known to | ||
280 | * word-at-a-time bitbang code, and presumably chipselect() | ||
281 | * should enforce these requirements too? | ||
282 | */ | ||
283 | nsecs = 100; | ||
284 | 269 | ||
285 | spi = m->spi; | 270 | return 0; |
286 | tmp = 0; | 271 | } |
287 | cs_change = 1; | ||
288 | status = 0; | ||
289 | 272 | ||
290 | list_for_each_entry (t, &m->transfers, transfer_list) { | 273 | static int spi_bitbang_transfer_one(struct spi_master *master, |
291 | 274 | struct spi_message *m) | |
292 | /* override speed or wordsize? */ | 275 | { |
293 | if (t->speed_hz || t->bits_per_word) | 276 | struct spi_bitbang *bitbang; |
294 | do_setup = 1; | 277 | unsigned nsecs; |
295 | 278 | struct spi_transfer *t = NULL; | |
296 | /* init (-1) or override (1) transfer params */ | 279 | unsigned cs_change; |
297 | if (do_setup != 0) { | 280 | int status; |
298 | status = bitbang->setup_transfer(spi, t); | 281 | int do_setup = -1; |
299 | if (status < 0) | 282 | struct spi_device *spi = m->spi; |
300 | break; | 283 | |
301 | if (do_setup == -1) | 284 | bitbang = spi_master_get_devdata(master); |
302 | do_setup = 0; | 285 | |
303 | } | 286 | /* FIXME this is made-up ... the correct value is known to |
304 | 287 | * word-at-a-time bitbang code, and presumably chipselect() | |
305 | /* set up default clock polarity, and activate chip; | 288 | * should enforce these requirements too? |
306 | * this implicitly updates clock and spi modes as | 289 | */ |
307 | * previously recorded for this device via setup(). | 290 | nsecs = 100; |
308 | * (and also deselects any other chip that might be | ||
309 | * selected ...) | ||
310 | */ | ||
311 | if (cs_change) { | ||
312 | bitbang->chipselect(spi, BITBANG_CS_ACTIVE); | ||
313 | ndelay(nsecs); | ||
314 | } | ||
315 | cs_change = t->cs_change; | ||
316 | if (!t->tx_buf && !t->rx_buf && t->len) { | ||
317 | status = -EINVAL; | ||
318 | break; | ||
319 | } | ||
320 | 291 | ||
321 | /* transfer data. the lower level code handles any | 292 | cs_change = 1; |
322 | * new dma mappings it needs. our caller always gave | 293 | status = 0; |
323 | * us dma-safe buffers. | 294 | |
324 | */ | 295 | list_for_each_entry (t, &m->transfers, transfer_list) { |
325 | if (t->len) { | 296 | |
326 | /* REVISIT dma API still needs a designated | 297 | /* override speed or wordsize? */ |
327 | * DMA_ADDR_INVALID; ~0 might be better. | 298 | if (t->speed_hz || t->bits_per_word) |
328 | */ | 299 | do_setup = 1; |
329 | if (!m->is_dma_mapped) | 300 | |
330 | t->rx_dma = t->tx_dma = 0; | 301 | /* init (-1) or override (1) transfer params */ |
331 | status = bitbang->txrx_bufs(spi, t); | 302 | if (do_setup != 0) { |
332 | } | 303 | status = bitbang->setup_transfer(spi, t); |
333 | if (status > 0) | 304 | if (status < 0) |
334 | m->actual_length += status; | ||
335 | if (status != t->len) { | ||
336 | /* always report some kind of error */ | ||
337 | if (status >= 0) | ||
338 | status = -EREMOTEIO; | ||
339 | break; | 305 | break; |
340 | } | 306 | if (do_setup == -1) |
341 | status = 0; | 307 | do_setup = 0; |
342 | |||
343 | /* protocol tweaks before next transfer */ | ||
344 | if (t->delay_usecs) | ||
345 | udelay(t->delay_usecs); | ||
346 | |||
347 | if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) { | ||
348 | /* sometimes a short mid-message deselect of the chip | ||
349 | * may be needed to terminate a mode or command | ||
350 | */ | ||
351 | ndelay(nsecs); | ||
352 | bitbang->chipselect(spi, BITBANG_CS_INACTIVE); | ||
353 | ndelay(nsecs); | ||
354 | } | ||
355 | } | 308 | } |
356 | 309 | ||
357 | m->status = status; | 310 | /* set up default clock polarity, and activate chip; |
358 | m->complete(m->context); | 311 | * this implicitly updates clock and spi modes as |
312 | * previously recorded for this device via setup(). | ||
313 | * (and also deselects any other chip that might be | ||
314 | * selected ...) | ||
315 | */ | ||
316 | if (cs_change) { | ||
317 | bitbang->chipselect(spi, BITBANG_CS_ACTIVE); | ||
318 | ndelay(nsecs); | ||
319 | } | ||
320 | cs_change = t->cs_change; | ||
321 | if (!t->tx_buf && !t->rx_buf && t->len) { | ||
322 | status = -EINVAL; | ||
323 | break; | ||
324 | } | ||
359 | 325 | ||
360 | /* normally deactivate chipselect ... unless no error and | 326 | /* transfer data. the lower level code handles any |
361 | * cs_change has hinted that the next message will probably | 327 | * new dma mappings it needs. our caller always gave |
362 | * be for this chip too. | 328 | * us dma-safe buffers. |
363 | */ | 329 | */ |
364 | if (!(status == 0 && cs_change)) { | 330 | if (t->len) { |
331 | /* REVISIT dma API still needs a designated | ||
332 | * DMA_ADDR_INVALID; ~0 might be better. | ||
333 | */ | ||
334 | if (!m->is_dma_mapped) | ||
335 | t->rx_dma = t->tx_dma = 0; | ||
336 | status = bitbang->txrx_bufs(spi, t); | ||
337 | } | ||
338 | if (status > 0) | ||
339 | m->actual_length += status; | ||
340 | if (status != t->len) { | ||
341 | /* always report some kind of error */ | ||
342 | if (status >= 0) | ||
343 | status = -EREMOTEIO; | ||
344 | break; | ||
345 | } | ||
346 | status = 0; | ||
347 | |||
348 | /* protocol tweaks before next transfer */ | ||
349 | if (t->delay_usecs) | ||
350 | udelay(t->delay_usecs); | ||
351 | |||
352 | if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) { | ||
353 | /* sometimes a short mid-message deselect of the chip | ||
354 | * may be needed to terminate a mode or command | ||
355 | */ | ||
365 | ndelay(nsecs); | 356 | ndelay(nsecs); |
366 | bitbang->chipselect(spi, BITBANG_CS_INACTIVE); | 357 | bitbang->chipselect(spi, BITBANG_CS_INACTIVE); |
367 | ndelay(nsecs); | 358 | ndelay(nsecs); |
368 | } | 359 | } |
360 | } | ||
361 | |||
362 | m->status = status; | ||
369 | 363 | ||
370 | spin_lock_irqsave(&bitbang->lock, flags); | 364 | /* normally deactivate chipselect ... unless no error and |
365 | * cs_change has hinted that the next message will probably | ||
366 | * be for this chip too. | ||
367 | */ | ||
368 | if (!(status == 0 && cs_change)) { | ||
369 | ndelay(nsecs); | ||
370 | bitbang->chipselect(spi, BITBANG_CS_INACTIVE); | ||
371 | ndelay(nsecs); | ||
371 | } | 372 | } |
372 | bitbang->busy = 0; | 373 | |
373 | spin_unlock_irqrestore(&bitbang->lock, flags); | 374 | spi_finalize_current_message(master); |
375 | |||
376 | return status; | ||
374 | } | 377 | } |
375 | 378 | ||
376 | /** | 379 | static int spi_bitbang_unprepare_hardware(struct spi_master *spi) |
377 | * spi_bitbang_transfer - default submit to transfer queue | ||
378 | */ | ||
379 | int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m) | ||
380 | { | 380 | { |
381 | struct spi_bitbang *bitbang; | 381 | struct spi_bitbang *bitbang; |
382 | unsigned long flags; | 382 | unsigned long flags; |
383 | int status = 0; | ||
384 | 383 | ||
385 | m->actual_length = 0; | 384 | bitbang = spi_master_get_devdata(spi); |
386 | m->status = -EINPROGRESS; | ||
387 | |||
388 | bitbang = spi_master_get_devdata(spi->master); | ||
389 | 385 | ||
390 | spin_lock_irqsave(&bitbang->lock, flags); | 386 | spin_lock_irqsave(&bitbang->lock, flags); |
391 | if (!spi->max_speed_hz) | 387 | bitbang->busy = 0; |
392 | status = -ENETDOWN; | ||
393 | else { | ||
394 | list_add_tail(&m->queue, &bitbang->queue); | ||
395 | queue_work(bitbang->workqueue, &bitbang->work); | ||
396 | } | ||
397 | spin_unlock_irqrestore(&bitbang->lock, flags); | 388 | spin_unlock_irqrestore(&bitbang->lock, flags); |
398 | 389 | ||
399 | return status; | 390 | return 0; |
400 | } | 391 | } |
401 | EXPORT_SYMBOL_GPL(spi_bitbang_transfer); | ||
402 | 392 | ||
403 | /*----------------------------------------------------------------------*/ | 393 | /*----------------------------------------------------------------------*/ |
404 | 394 | ||
@@ -428,20 +418,22 @@ EXPORT_SYMBOL_GPL(spi_bitbang_transfer); | |||
428 | int spi_bitbang_start(struct spi_bitbang *bitbang) | 418 | int spi_bitbang_start(struct spi_bitbang *bitbang) |
429 | { | 419 | { |
430 | struct spi_master *master = bitbang->master; | 420 | struct spi_master *master = bitbang->master; |
431 | int status; | ||
432 | 421 | ||
433 | if (!master || !bitbang->chipselect) | 422 | if (!master || !bitbang->chipselect) |
434 | return -EINVAL; | 423 | return -EINVAL; |
435 | 424 | ||
436 | INIT_WORK(&bitbang->work, bitbang_work); | ||
437 | spin_lock_init(&bitbang->lock); | 425 | spin_lock_init(&bitbang->lock); |
438 | INIT_LIST_HEAD(&bitbang->queue); | ||
439 | 426 | ||
440 | if (!master->mode_bits) | 427 | if (!master->mode_bits) |
441 | master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags; | 428 | master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags; |
442 | 429 | ||
443 | if (!master->transfer) | 430 | if (master->transfer || master->transfer_one_message) |
444 | master->transfer = spi_bitbang_transfer; | 431 | return -EINVAL; |
432 | |||
433 | master->prepare_transfer_hardware = spi_bitbang_prepare_hardware; | ||
434 | master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware; | ||
435 | master->transfer_one_message = spi_bitbang_transfer_one; | ||
436 | |||
445 | if (!bitbang->txrx_bufs) { | 437 | if (!bitbang->txrx_bufs) { |
446 | bitbang->use_dma = 0; | 438 | bitbang->use_dma = 0; |
447 | bitbang->txrx_bufs = spi_bitbang_bufs; | 439 | bitbang->txrx_bufs = spi_bitbang_bufs; |
@@ -452,34 +444,12 @@ int spi_bitbang_start(struct spi_bitbang *bitbang) | |||
452 | master->setup = spi_bitbang_setup; | 444 | master->setup = spi_bitbang_setup; |
453 | master->cleanup = spi_bitbang_cleanup; | 445 | master->cleanup = spi_bitbang_cleanup; |
454 | } | 446 | } |
455 | } else if (!master->setup) | ||
456 | return -EINVAL; | ||
457 | if (master->transfer == spi_bitbang_transfer && | ||
458 | !bitbang->setup_transfer) | ||
459 | return -EINVAL; | ||
460 | |||
461 | /* this task is the only thing to touch the SPI bits */ | ||
462 | bitbang->busy = 0; | ||
463 | bitbang->workqueue = create_singlethread_workqueue( | ||
464 | dev_name(master->dev.parent)); | ||
465 | if (bitbang->workqueue == NULL) { | ||
466 | status = -EBUSY; | ||
467 | goto err1; | ||
468 | } | 447 | } |
469 | 448 | ||
470 | /* driver may get busy before register() returns, especially | 449 | /* driver may get busy before register() returns, especially |
471 | * if someone registered boardinfo for devices | 450 | * if someone registered boardinfo for devices |
472 | */ | 451 | */ |
473 | status = spi_register_master(master); | 452 | return spi_register_master(master); |
474 | if (status < 0) | ||
475 | goto err2; | ||
476 | |||
477 | return status; | ||
478 | |||
479 | err2: | ||
480 | destroy_workqueue(bitbang->workqueue); | ||
481 | err1: | ||
482 | return status; | ||
483 | } | 453 | } |
484 | EXPORT_SYMBOL_GPL(spi_bitbang_start); | 454 | EXPORT_SYMBOL_GPL(spi_bitbang_start); |
485 | 455 | ||
@@ -490,10 +460,6 @@ int spi_bitbang_stop(struct spi_bitbang *bitbang) | |||
490 | { | 460 | { |
491 | spi_unregister_master(bitbang->master); | 461 | spi_unregister_master(bitbang->master); |
492 | 462 | ||
493 | WARN_ON(!list_empty(&bitbang->queue)); | ||
494 | |||
495 | destroy_workqueue(bitbang->workqueue); | ||
496 | |||
497 | return 0; | 463 | return 0; |
498 | } | 464 | } |
499 | EXPORT_SYMBOL_GPL(spi_bitbang_stop); | 465 | EXPORT_SYMBOL_GPL(spi_bitbang_stop); |
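With this conversion the bitbang core no longer runs its own workqueue and message list: spi_bitbang_start() now installs transfer_one_message together with the prepare/unprepare hooks on the master, and rejects masters that already provide their own transfer callbacks. Below is a minimal sketch of how a client driver hooks in after the change; the foo_* names are hypothetical placeholders (not part of the patch) and error handling is trimmed.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>

/* placeholder callbacks a real driver would implement */
static void foo_chipselect(struct spi_device *spi, int is_on);
static u32 foo_txrx_word_mode0(struct spi_device *spi, unsigned nsecs,
			       u32 word, u8 bits);

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct spi_bitbang *bitbang;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*bitbang));
	if (!master)
		return -ENOMEM;

	bitbang = spi_master_get_devdata(master);
	bitbang->master = spi_master_get(master);
	bitbang->chipselect = foo_chipselect;
	bitbang->setup_transfer = spi_bitbang_setup_transfer;
	bitbang->txrx_word[SPI_MODE_0] = foo_txrx_word_mode0;

	/* registers the master and wires up the queued-message callbacks */
	ret = spi_bitbang_start(bitbang);
	if (ret)
		spi_master_put(master);
	return ret;
}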
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c index 17965fe225cc..5655acf55bfe 100644 --- a/drivers/spi/spi-clps711x.c +++ b/drivers/spi/spi-clps711x.c | |||
@@ -239,11 +239,8 @@ static int spi_clps711x_probe(struct platform_device *pdev) | |||
239 | } | 239 | } |
240 | 240 | ||
241 | dev_err(&pdev->dev, "Failed to register master\n"); | 241 | dev_err(&pdev->dev, "Failed to register master\n"); |
242 | devm_free_irq(&pdev->dev, IRQ_SSEOTI, hw); | ||
243 | 242 | ||
244 | clk_out: | 243 | clk_out: |
245 | devm_clk_put(&pdev->dev, hw->spi_clk); | ||
246 | |||
247 | err_out: | 244 | err_out: |
248 | while (--i >= 0) | 245 | while (--i >= 0) |
249 | if (gpio_is_valid(hw->chipselect[i])) | 246 | if (gpio_is_valid(hw->chipselect[i])) |
@@ -261,13 +258,10 @@ static int spi_clps711x_remove(struct platform_device *pdev) | |||
261 | struct spi_master *master = platform_get_drvdata(pdev); | 258 | struct spi_master *master = platform_get_drvdata(pdev); |
262 | struct spi_clps711x_data *hw = spi_master_get_devdata(master); | 259 | struct spi_clps711x_data *hw = spi_master_get_devdata(master); |
263 | 260 | ||
264 | devm_free_irq(&pdev->dev, IRQ_SSEOTI, hw); | ||
265 | |||
266 | for (i = 0; i < master->num_chipselect; i++) | 261 | for (i = 0; i < master->num_chipselect; i++) |
267 | if (gpio_is_valid(hw->chipselect[i])) | 262 | if (gpio_is_valid(hw->chipselect[i])) |
268 | gpio_free(hw->chipselect[i]); | 263 | gpio_free(hw->chipselect[i]); |
269 | 264 | ||
270 | devm_clk_put(&pdev->dev, hw->spi_clk); | ||
271 | spi_unregister_master(master); | 265 | spi_unregister_master(master); |
272 | kfree(master); | 266 | kfree(master); |
273 | 267 | ||
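The devm_free_irq() and devm_clk_put() calls dropped here were redundant: resources obtained through the devm_* managed interfaces are released by the driver core automatically when probe fails or the device is unbound, so the error and remove paths only have to undo non-managed work (the GPIOs in this driver). A small sketch of the pattern, with illustrative bar_* names that are not part of the patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t bar_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;			/* placeholder handler */
}

static int bar_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int irq;

	clk = devm_clk_get(&pdev->dev, NULL);	/* put automatically on detach */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* freed automatically on detach; no devm_free_irq() on error paths */
	return devm_request_irq(&pdev->dev, irq, bar_irq_handler, 0,
				dev_name(&pdev->dev), pdev);
}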
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c index 0631b9d4a5de..cc5b75d10c38 100644 --- a/drivers/spi/spi-coldfire-qspi.c +++ b/drivers/spi/spi-coldfire-qspi.c | |||
@@ -354,24 +354,6 @@ static int mcfqspi_transfer_one_message(struct spi_master *master, | |||
354 | 354 | ||
355 | } | 355 | } |
356 | 356 | ||
357 | static int mcfqspi_prepare_transfer_hw(struct spi_master *master) | ||
358 | { | ||
359 | struct mcfqspi *mcfqspi = spi_master_get_devdata(master); | ||
360 | |||
361 | pm_runtime_get_sync(mcfqspi->dev); | ||
362 | |||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static int mcfqspi_unprepare_transfer_hw(struct spi_master *master) | ||
367 | { | ||
368 | struct mcfqspi *mcfqspi = spi_master_get_devdata(master); | ||
369 | |||
370 | pm_runtime_put_sync(mcfqspi->dev); | ||
371 | |||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | static int mcfqspi_setup(struct spi_device *spi) | 357 | static int mcfqspi_setup(struct spi_device *spi) |
376 | { | 358 | { |
377 | if (spi->chip_select >= spi->master->num_chipselect) { | 359 | if (spi->chip_select >= spi->master->num_chipselect) { |
@@ -400,7 +382,7 @@ static int mcfqspi_probe(struct platform_device *pdev) | |||
400 | struct mcfqspi_platform_data *pdata; | 382 | struct mcfqspi_platform_data *pdata; |
401 | int status; | 383 | int status; |
402 | 384 | ||
403 | pdata = pdev->dev.platform_data; | 385 | pdata = dev_get_platdata(&pdev->dev); |
404 | if (!pdata) { | 386 | if (!pdata) { |
405 | dev_dbg(&pdev->dev, "platform data is missing\n"); | 387 | dev_dbg(&pdev->dev, "platform data is missing\n"); |
406 | return -ENOENT; | 388 | return -ENOENT; |
@@ -473,8 +455,7 @@ static int mcfqspi_probe(struct platform_device *pdev) | |||
473 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16); | 455 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16); |
474 | master->setup = mcfqspi_setup; | 456 | master->setup = mcfqspi_setup; |
475 | master->transfer_one_message = mcfqspi_transfer_one_message; | 457 | master->transfer_one_message = mcfqspi_transfer_one_message; |
476 | master->prepare_transfer_hardware = mcfqspi_prepare_transfer_hw; | 458 | master->auto_runtime_pm = true; |
477 | master->unprepare_transfer_hardware = mcfqspi_unprepare_transfer_hw; | ||
478 | 459 | ||
479 | platform_set_drvdata(pdev, master); | 460 | platform_set_drvdata(pdev, master); |
480 | 461 | ||
@@ -558,7 +539,7 @@ static int mcfqspi_resume(struct device *dev) | |||
558 | #ifdef CONFIG_PM_RUNTIME | 539 | #ifdef CONFIG_PM_RUNTIME |
559 | static int mcfqspi_runtime_suspend(struct device *dev) | 540 | static int mcfqspi_runtime_suspend(struct device *dev) |
560 | { | 541 | { |
561 | struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); | 542 | struct mcfqspi *mcfqspi = dev_get_drvdata(dev); |
562 | 543 | ||
563 | clk_disable(mcfqspi->clk); | 544 | clk_disable(mcfqspi->clk); |
564 | 545 | ||
@@ -567,7 +548,7 @@ static int mcfqspi_runtime_suspend(struct device *dev) | |||
567 | 548 | ||
568 | static int mcfqspi_runtime_resume(struct device *dev) | 549 | static int mcfqspi_runtime_resume(struct device *dev) |
569 | { | 550 | { |
570 | struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); | 551 | struct mcfqspi *mcfqspi = dev_get_drvdata(dev); |
571 | 552 | ||
572 | clk_enable(mcfqspi->clk); | 553 | clk_enable(mcfqspi->clk); |
573 | 554 | ||
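Setting master->auto_runtime_pm replaces the two callbacks removed above: the SPI core then takes the runtime-PM reference on the controller's parent device itself while the message queue is busy. A sketch of the idea (not the core's literal code) under that assumption:

#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

/* Sketch only: roughly what the SPI core's message pump does when
 * master->auto_runtime_pm is set, i.e. the work the removed
 * mcfqspi_prepare/unprepare_transfer_hw() callbacks did by hand. */
static void sketch_pump_messages(struct spi_master *master)
{
	if (master->auto_runtime_pm)
		pm_runtime_get_sync(master->dev.parent);

	/* ... dequeue messages and call master->transfer_one_message() ... */

	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);
}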
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 707966bd5610..8fbfe2483ffd 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c | |||
@@ -872,8 +872,8 @@ static int davinci_spi_probe(struct platform_device *pdev) | |||
872 | goto free_master; | 872 | goto free_master; |
873 | } | 873 | } |
874 | 874 | ||
875 | if (pdev->dev.platform_data) { | 875 | if (dev_get_platdata(&pdev->dev)) { |
876 | pdata = pdev->dev.platform_data; | 876 | pdata = dev_get_platdata(&pdev->dev); |
877 | dspi->pdata = *pdata; | 877 | dspi->pdata = *pdata; |
878 | } else { | 878 | } else { |
879 | /* update dspi pdata with that from the DT */ | 879 | /* update dspi pdata with that from the DT */ |
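The dev_get_platdata() conversion in this hunk is purely cosmetic: the helper is the canonical accessor for dev->platform_data and, as defined in <linux/device.h>, is essentially:

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}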
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c new file mode 100644 index 000000000000..7d84418a01d8 --- /dev/null +++ b/drivers/spi/spi-efm32.c | |||
@@ -0,0 +1,516 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012-2013 Uwe Kleine-Koenig for Pengutronix | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it under | ||
5 | * the terms of the GNU General Public License version 2 as published by the | ||
6 | * Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/io.h> | ||
10 | #include <linux/spi/spi.h> | ||
11 | #include <linux/spi/spi_bitbang.h> | ||
12 | #include <linux/gpio.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/clk.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/of_gpio.h> | ||
19 | #include <linux/platform_data/efm32-spi.h> | ||
20 | |||
21 | #define DRIVER_NAME "efm32-spi" | ||
22 | |||
23 | #define MASK_VAL(mask, val) ((val << __ffs(mask)) & mask) | ||
24 | |||
25 | #define REG_CTRL 0x00 | ||
26 | #define REG_CTRL_SYNC 0x0001 | ||
27 | #define REG_CTRL_CLKPOL 0x0100 | ||
28 | #define REG_CTRL_CLKPHA 0x0200 | ||
29 | #define REG_CTRL_MSBF 0x0400 | ||
30 | #define REG_CTRL_TXBIL 0x1000 | ||
31 | |||
32 | #define REG_FRAME 0x04 | ||
33 | #define REG_FRAME_DATABITS__MASK 0x000f | ||
34 | #define REG_FRAME_DATABITS(n) ((n) - 3) | ||
35 | |||
36 | #define REG_CMD 0x0c | ||
37 | #define REG_CMD_RXEN 0x0001 | ||
38 | #define REG_CMD_RXDIS 0x0002 | ||
39 | #define REG_CMD_TXEN 0x0004 | ||
40 | #define REG_CMD_TXDIS 0x0008 | ||
41 | #define REG_CMD_MASTEREN 0x0010 | ||
42 | |||
43 | #define REG_STATUS 0x10 | ||
44 | #define REG_STATUS_TXENS 0x0002 | ||
45 | #define REG_STATUS_TXC 0x0020 | ||
46 | #define REG_STATUS_TXBL 0x0040 | ||
47 | #define REG_STATUS_RXDATAV 0x0080 | ||
48 | |||
49 | #define REG_CLKDIV 0x14 | ||
50 | |||
51 | #define REG_RXDATAX 0x18 | ||
52 | #define REG_RXDATAX_RXDATA__MASK 0x01ff | ||
53 | #define REG_RXDATAX_PERR 0x4000 | ||
54 | #define REG_RXDATAX_FERR 0x8000 | ||
55 | |||
56 | #define REG_TXDATA 0x34 | ||
57 | |||
58 | #define REG_IF 0x40 | ||
59 | #define REG_IF_TXBL 0x0002 | ||
60 | #define REG_IF_RXDATAV 0x0004 | ||
61 | |||
62 | #define REG_IFS 0x44 | ||
63 | #define REG_IFC 0x48 | ||
64 | #define REG_IEN 0x4c | ||
65 | |||
66 | #define REG_ROUTE 0x54 | ||
67 | #define REG_ROUTE_RXPEN 0x0001 | ||
68 | #define REG_ROUTE_TXPEN 0x0002 | ||
69 | #define REG_ROUTE_CLKPEN 0x0008 | ||
70 | #define REG_ROUTE_LOCATION__MASK 0x0700 | ||
71 | #define REG_ROUTE_LOCATION(n) MASK_VAL(REG_ROUTE_LOCATION__MASK, (n)) | ||
72 | |||
73 | struct efm32_spi_ddata { | ||
74 | struct spi_bitbang bitbang; | ||
75 | |||
76 | spinlock_t lock; | ||
77 | |||
78 | struct clk *clk; | ||
79 | void __iomem *base; | ||
80 | unsigned int rxirq, txirq; | ||
81 | struct efm32_spi_pdata pdata; | ||
82 | |||
83 | /* irq data */ | ||
84 | struct completion done; | ||
85 | const u8 *tx_buf; | ||
86 | u8 *rx_buf; | ||
87 | unsigned tx_len, rx_len; | ||
88 | |||
89 | /* chip selects */ | ||
90 | unsigned csgpio[]; | ||
91 | }; | ||
92 | |||
93 | #define ddata_to_dev(ddata) (&(ddata->bitbang.master->dev)) | ||
94 | #define efm32_spi_vdbg(ddata, format, arg...) \ | ||
95 | dev_vdbg(ddata_to_dev(ddata), format, ##arg) | ||
96 | |||
97 | static void efm32_spi_write32(struct efm32_spi_ddata *ddata, | ||
98 | u32 value, unsigned offset) | ||
99 | { | ||
100 | writel_relaxed(value, ddata->base + offset); | ||
101 | } | ||
102 | |||
103 | static u32 efm32_spi_read32(struct efm32_spi_ddata *ddata, unsigned offset) | ||
104 | { | ||
105 | return readl_relaxed(ddata->base + offset); | ||
106 | } | ||
107 | |||
108 | static void efm32_spi_chipselect(struct spi_device *spi, int is_on) | ||
109 | { | ||
110 | struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master); | ||
111 | int value = !(spi->mode & SPI_CS_HIGH) == !(is_on == BITBANG_CS_ACTIVE); | ||
112 | |||
113 | gpio_set_value(ddata->csgpio[spi->chip_select], value); | ||
114 | } | ||
115 | |||
116 | static int efm32_spi_setup_transfer(struct spi_device *spi, | ||
117 | struct spi_transfer *t) | ||
118 | { | ||
119 | struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master); | ||
120 | |||
121 | unsigned bpw = t->bits_per_word ?: spi->bits_per_word; | ||
122 | unsigned speed = t->speed_hz ?: spi->max_speed_hz; | ||
123 | unsigned long clkfreq = clk_get_rate(ddata->clk); | ||
124 | u32 clkdiv; | ||
125 | |||
126 | efm32_spi_write32(ddata, REG_CTRL_SYNC | REG_CTRL_MSBF | | ||
127 | (spi->mode & SPI_CPHA ? REG_CTRL_CLKPHA : 0) | | ||
128 | (spi->mode & SPI_CPOL ? REG_CTRL_CLKPOL : 0), REG_CTRL); | ||
129 | |||
130 | efm32_spi_write32(ddata, | ||
131 | REG_FRAME_DATABITS(bpw), REG_FRAME); | ||
132 | |||
133 | if (2 * speed >= clkfreq) | ||
134 | clkdiv = 0; | ||
135 | else | ||
136 | clkdiv = 64 * (DIV_ROUND_UP(2 * clkfreq, speed) - 4); | ||
137 | |||
138 | if (clkdiv > (1U << 21)) | ||
139 | return -EINVAL; | ||
140 | |||
141 | efm32_spi_write32(ddata, clkdiv, REG_CLKDIV); | ||
142 | efm32_spi_write32(ddata, REG_CMD_MASTEREN, REG_CMD); | ||
143 | efm32_spi_write32(ddata, REG_CMD_RXEN | REG_CMD_TXEN, REG_CMD); | ||
144 | |||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | static void efm32_spi_tx_u8(struct efm32_spi_ddata *ddata) | ||
149 | { | ||
150 | u8 val = 0; | ||
151 | |||
152 | if (ddata->tx_buf) { | ||
153 | val = *ddata->tx_buf; | ||
154 | ddata->tx_buf++; | ||
155 | } | ||
156 | |||
157 | ddata->tx_len--; | ||
158 | efm32_spi_write32(ddata, val, REG_TXDATA); | ||
159 | efm32_spi_vdbg(ddata, "%s: tx 0x%x\n", __func__, val); | ||
160 | } | ||
161 | |||
162 | static void efm32_spi_rx_u8(struct efm32_spi_ddata *ddata) | ||
163 | { | ||
164 | u32 rxdata = efm32_spi_read32(ddata, REG_RXDATAX); | ||
165 | efm32_spi_vdbg(ddata, "%s: rx 0x%x\n", __func__, rxdata); | ||
166 | |||
167 | if (ddata->rx_buf) { | ||
168 | *ddata->rx_buf = rxdata; | ||
169 | ddata->rx_buf++; | ||
170 | } | ||
171 | |||
172 | ddata->rx_len--; | ||
173 | } | ||
174 | |||
175 | static void efm32_spi_filltx(struct efm32_spi_ddata *ddata) | ||
176 | { | ||
177 | while (ddata->tx_len && | ||
178 | ddata->tx_len + 2 > ddata->rx_len && | ||
179 | efm32_spi_read32(ddata, REG_STATUS) & REG_STATUS_TXBL) { | ||
180 | efm32_spi_tx_u8(ddata); | ||
181 | } | ||
182 | } | ||
183 | |||
184 | static int efm32_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) | ||
185 | { | ||
186 | struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master); | ||
187 | int ret = -EBUSY; | ||
188 | |||
189 | spin_lock_irq(&ddata->lock); | ||
190 | |||
191 | if (ddata->tx_buf || ddata->rx_buf) | ||
192 | goto out_unlock; | ||
193 | |||
194 | ddata->tx_buf = t->tx_buf; | ||
195 | ddata->rx_buf = t->rx_buf; | ||
196 | ddata->tx_len = ddata->rx_len = | ||
197 | t->len * DIV_ROUND_UP(t->bits_per_word, 8); | ||
198 | |||
199 | efm32_spi_filltx(ddata); | ||
200 | |||
201 | init_completion(&ddata->done); | ||
202 | |||
203 | efm32_spi_write32(ddata, REG_IF_TXBL | REG_IF_RXDATAV, REG_IEN); | ||
204 | |||
205 | spin_unlock_irq(&ddata->lock); | ||
206 | |||
207 | wait_for_completion(&ddata->done); | ||
208 | |||
209 | spin_lock_irq(&ddata->lock); | ||
210 | |||
211 | ret = t->len - max(ddata->tx_len, ddata->rx_len); | ||
212 | |||
213 | efm32_spi_write32(ddata, 0, REG_IEN); | ||
214 | ddata->tx_buf = ddata->rx_buf = NULL; | ||
215 | |||
216 | out_unlock: | ||
217 | spin_unlock_irq(&ddata->lock); | ||
218 | |||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | static irqreturn_t efm32_spi_rxirq(int irq, void *data) | ||
223 | { | ||
224 | struct efm32_spi_ddata *ddata = data; | ||
225 | irqreturn_t ret = IRQ_NONE; | ||
226 | |||
227 | spin_lock(&ddata->lock); | ||
228 | |||
229 | while (ddata->rx_len > 0 && | ||
230 | efm32_spi_read32(ddata, REG_STATUS) & | ||
231 | REG_STATUS_RXDATAV) { | ||
232 | efm32_spi_rx_u8(ddata); | ||
233 | |||
234 | ret = IRQ_HANDLED; | ||
235 | } | ||
236 | |||
237 | if (!ddata->rx_len) { | ||
238 | u32 ien = efm32_spi_read32(ddata, REG_IEN); | ||
239 | |||
240 | ien &= ~REG_IF_RXDATAV; | ||
241 | |||
242 | efm32_spi_write32(ddata, ien, REG_IEN); | ||
243 | |||
244 | complete(&ddata->done); | ||
245 | } | ||
246 | |||
247 | spin_unlock(&ddata->lock); | ||
248 | |||
249 | return ret; | ||
250 | } | ||
251 | |||
252 | static irqreturn_t efm32_spi_txirq(int irq, void *data) | ||
253 | { | ||
254 | struct efm32_spi_ddata *ddata = data; | ||
255 | |||
256 | efm32_spi_vdbg(ddata, | ||
257 | "%s: txlen = %u, rxlen = %u, if=0x%08x, stat=0x%08x\n", | ||
258 | __func__, ddata->tx_len, ddata->rx_len, | ||
259 | efm32_spi_read32(ddata, REG_IF), | ||
260 | efm32_spi_read32(ddata, REG_STATUS)); | ||
261 | |||
262 | spin_lock(&ddata->lock); | ||
263 | |||
264 | efm32_spi_filltx(ddata); | ||
265 | |||
266 | efm32_spi_vdbg(ddata, "%s: txlen = %u, rxlen = %u\n", | ||
267 | __func__, ddata->tx_len, ddata->rx_len); | ||
268 | |||
269 | if (!ddata->tx_len) { | ||
270 | u32 ien = efm32_spi_read32(ddata, REG_IEN); | ||
271 | |||
272 | ien &= ~REG_IF_TXBL; | ||
273 | |||
274 | efm32_spi_write32(ddata, ien, REG_IEN); | ||
275 | efm32_spi_vdbg(ddata, "disable TXBL\n"); | ||
276 | } | ||
277 | |||
278 | spin_unlock(&ddata->lock); | ||
279 | |||
280 | return IRQ_HANDLED; | ||
281 | } | ||
282 | |||
283 | static const struct efm32_spi_pdata efm32_spi_pdata_default = { | ||
284 | .location = 1, | ||
285 | }; | ||
286 | |||
287 | static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata) | ||
288 | { | ||
289 | u32 reg = efm32_spi_read32(ddata, REG_ROUTE); | ||
290 | |||
291 | return (reg & REG_ROUTE_LOCATION__MASK) >> __ffs(REG_ROUTE_LOCATION__MASK); | ||
292 | } | ||
293 | |||
294 | static int efm32_spi_probe_dt(struct platform_device *pdev, | ||
295 | struct spi_master *master, struct efm32_spi_ddata *ddata) | ||
296 | { | ||
297 | struct device_node *np = pdev->dev.of_node; | ||
298 | u32 location; | ||
299 | int ret; | ||
300 | |||
301 | if (!np) | ||
302 | return 1; | ||
303 | |||
304 | ret = of_property_read_u32(np, "location", &location); | ||
305 | if (!ret) { | ||
306 | dev_dbg(&pdev->dev, "using location %u\n", location); | ||
307 | } else { | ||
308 | /* default to location configured in hardware */ | ||
309 | location = efm32_spi_get_configured_location(ddata); | ||
310 | |||
311 | dev_info(&pdev->dev, "fall back to location %u\n", location); | ||
312 | } | ||
313 | |||
314 | ddata->pdata.location = location; | ||
315 | |||
316 | /* spi core takes care of the bus number using an alias */ | ||
317 | master->bus_num = -1; | ||
318 | |||
319 | return 0; | ||
320 | } | ||
321 | |||
322 | static int efm32_spi_probe(struct platform_device *pdev) | ||
323 | { | ||
324 | struct efm32_spi_ddata *ddata; | ||
325 | struct resource *res; | ||
326 | int ret; | ||
327 | struct spi_master *master; | ||
328 | struct device_node *np = pdev->dev.of_node; | ||
329 | unsigned int num_cs, i; | ||
330 | |||
331 | num_cs = of_gpio_named_count(np, "cs-gpios"); | ||
332 | |||
333 | master = spi_alloc_master(&pdev->dev, | ||
334 | sizeof(*ddata) + num_cs * sizeof(unsigned)); | ||
335 | if (!master) { | ||
336 | dev_dbg(&pdev->dev, | ||
337 | "failed to allocate spi master controller\n"); | ||
338 | return -ENOMEM; | ||
339 | } | ||
340 | platform_set_drvdata(pdev, master); | ||
341 | |||
342 | master->dev.of_node = pdev->dev.of_node; | ||
343 | |||
344 | master->num_chipselect = num_cs; | ||
345 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | ||
346 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); | ||
347 | |||
348 | ddata = spi_master_get_devdata(master); | ||
349 | |||
350 | ddata->bitbang.master = spi_master_get(master); | ||
351 | ddata->bitbang.chipselect = efm32_spi_chipselect; | ||
352 | ddata->bitbang.setup_transfer = efm32_spi_setup_transfer; | ||
353 | ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs; | ||
354 | |||
355 | spin_lock_init(&ddata->lock); | ||
356 | |||
357 | ddata->clk = devm_clk_get(&pdev->dev, NULL); | ||
358 | if (IS_ERR(ddata->clk)) { | ||
359 | ret = PTR_ERR(ddata->clk); | ||
360 | dev_err(&pdev->dev, "failed to get clock: %d\n", ret); | ||
361 | goto err; | ||
362 | } | ||
363 | |||
364 | for (i = 0; i < num_cs; ++i) { | ||
365 | ret = of_get_named_gpio(np, "cs-gpios", i); | ||
366 | if (ret < 0) { | ||
367 | dev_err(&pdev->dev, "failed to get csgpio#%u (%d)\n", | ||
368 | i, ret); | ||
369 | goto err; | ||
370 | } | ||
371 | ddata->csgpio[i] = ret; | ||
372 | dev_dbg(&pdev->dev, "csgpio#%u = %u\n", i, ddata->csgpio[i]); | ||
373 | ret = devm_gpio_request_one(&pdev->dev, ddata->csgpio[i], | ||
374 | GPIOF_OUT_INIT_LOW, DRIVER_NAME); | ||
375 | if (ret < 0) { | ||
376 | dev_err(&pdev->dev, | ||
377 | "failed to configure csgpio#%u (%d)\n", | ||
378 | i, ret); | ||
379 | goto err; | ||
380 | } | ||
381 | } | ||
382 | |||
383 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
384 | if (!res) { | ||
385 | ret = -ENODEV; | ||
386 | dev_err(&pdev->dev, "failed to determine base address\n"); | ||
387 | goto err; | ||
388 | } | ||
389 | |||
390 | if (resource_size(res) < 60) { | ||
391 | ret = -EINVAL; | ||
392 | dev_err(&pdev->dev, "memory resource too small\n"); | ||
393 | goto err; | ||
394 | } | ||
395 | |||
396 | ddata->base = devm_ioremap_resource(&pdev->dev, res); | ||
397 | if (IS_ERR(ddata->base)) { | ||
398 | ret = PTR_ERR(ddata->base); | ||
399 | goto err; | ||
400 | } | ||
401 | |||
402 | ret = platform_get_irq(pdev, 0); | ||
403 | if (ret <= 0) { | ||
404 | dev_err(&pdev->dev, "failed to get rx irq (%d)\n", ret); | ||
405 | goto err; | ||
406 | } | ||
407 | |||
408 | ddata->rxirq = ret; | ||
409 | |||
410 | ret = platform_get_irq(pdev, 1); | ||
411 | if (ret <= 0) | ||
412 | ret = ddata->rxirq + 1; | ||
413 | |||
414 | ddata->txirq = ret; | ||
415 | |||
416 | ret = clk_prepare_enable(ddata->clk); | ||
417 | if (ret < 0) { | ||
418 | dev_err(&pdev->dev, "failed to enable clock (%d)\n", ret); | ||
419 | goto err; | ||
420 | } | ||
421 | |||
422 | ret = efm32_spi_probe_dt(pdev, master, ddata); | ||
423 | if (ret > 0) { | ||
424 | /* not created by device tree */ | ||
425 | const struct efm32_spi_pdata *pdata = | ||
426 | dev_get_platdata(&pdev->dev); | ||
427 | |||
428 | if (pdata) | ||
429 | ddata->pdata = *pdata; | ||
430 | else | ||
431 | ddata->pdata.location = | ||
432 | efm32_spi_get_configured_location(ddata); | ||
433 | |||
434 | master->bus_num = pdev->id; | ||
435 | |||
436 | } else if (ret < 0) { | ||
437 | goto err_disable_clk; | ||
438 | } | ||
439 | |||
440 | efm32_spi_write32(ddata, 0, REG_IEN); | ||
441 | efm32_spi_write32(ddata, REG_ROUTE_TXPEN | REG_ROUTE_RXPEN | | ||
442 | REG_ROUTE_CLKPEN | | ||
443 | REG_ROUTE_LOCATION(ddata->pdata.location), REG_ROUTE); | ||
444 | |||
445 | ret = request_irq(ddata->rxirq, efm32_spi_rxirq, | ||
446 | 0, DRIVER_NAME " rx", ddata); | ||
447 | if (ret) { | ||
448 | dev_err(&pdev->dev, "failed to register rxirq (%d)\n", ret); | ||
449 | goto err_disable_clk; | ||
450 | } | ||
451 | |||
452 | ret = request_irq(ddata->txirq, efm32_spi_txirq, | ||
453 | 0, DRIVER_NAME " tx", ddata); | ||
454 | if (ret) { | ||
455 | dev_err(&pdev->dev, "failed to register txirq (%d)\n", ret); | ||
456 | goto err_free_rx_irq; | ||
457 | } | ||
458 | |||
459 | ret = spi_bitbang_start(&ddata->bitbang); | ||
460 | if (ret) { | ||
461 | dev_err(&pdev->dev, "spi_bitbang_start failed (%d)\n", ret); | ||
462 | |||
463 | free_irq(ddata->txirq, ddata); | ||
464 | err_free_rx_irq: | ||
465 | free_irq(ddata->rxirq, ddata); | ||
466 | err_disable_clk: | ||
467 | clk_disable_unprepare(ddata->clk); | ||
468 | err: | ||
469 | spi_master_put(master); | ||
470 | kfree(master); | ||
471 | } | ||
472 | |||
473 | return ret; | ||
474 | } | ||
475 | |||
476 | static int efm32_spi_remove(struct platform_device *pdev) | ||
477 | { | ||
478 | struct spi_master *master = platform_get_drvdata(pdev); | ||
479 | struct efm32_spi_ddata *ddata = spi_master_get_devdata(master); | ||
480 | |||
481 | efm32_spi_write32(ddata, 0, REG_IEN); | ||
482 | |||
483 | free_irq(ddata->txirq, ddata); | ||
484 | free_irq(ddata->rxirq, ddata); | ||
485 | clk_disable_unprepare(ddata->clk); | ||
486 | spi_master_put(master); | ||
487 | kfree(master); | ||
488 | |||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | static const struct of_device_id efm32_spi_dt_ids[] = { | ||
493 | { | ||
494 | .compatible = "efm32,spi", | ||
495 | }, { | ||
496 | /* sentinel */ | ||
497 | } | ||
498 | }; | ||
499 | MODULE_DEVICE_TABLE(of, efm32_spi_dt_ids); | ||
500 | |||
501 | static struct platform_driver efm32_spi_driver = { | ||
502 | .probe = efm32_spi_probe, | ||
503 | .remove = efm32_spi_remove, | ||
504 | |||
505 | .driver = { | ||
506 | .name = DRIVER_NAME, | ||
507 | .owner = THIS_MODULE, | ||
508 | .of_match_table = efm32_spi_dt_ids, | ||
509 | }, | ||
510 | }; | ||
511 | module_platform_driver(efm32_spi_driver); | ||
512 | |||
513 | MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>"); | ||
514 | MODULE_DESCRIPTION("EFM32 SPI driver"); | ||
515 | MODULE_LICENSE("GPL v2"); | ||
516 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
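For reference, the CLKDIV value programmed in efm32_spi_setup_transfer() stores the divider as a fixed-point number scaled by 64. A worked example with assumed figures (48 MHz peripheral clock, 1 MHz requested rate; neither number comes from the patch): DIV_ROUND_UP(2 * 48000000, 1000000) = 96, so clkdiv = 64 * (96 - 4) = 5888, well below the 1 << 21 limit, while any request at or above clkfreq / 2 simply programs clkdiv = 0. The same arithmetic as a standalone snippet:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Mirrors the divider computation in efm32_spi_setup_transfer(). */
static long efm32_clkdiv(unsigned long clkfreq, unsigned long speed)
{
	unsigned long clkdiv;

	if (2 * speed >= clkfreq)
		return 0;			/* run at clkfreq / 2 */

	clkdiv = 64 * (DIV_ROUND_UP(2 * clkfreq, speed) - 4);
	if (clkdiv > (1UL << 21))
		return -1;			/* the driver returns -EINVAL */

	return clkdiv;
}

int main(void)
{
	/* assumed example values */
	printf("clkdiv = %ld\n", efm32_clkdiv(48000000UL, 1000000UL)); /* 5888 */
	return 0;
}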
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index cad30b8a1d71..d22c00a227b6 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
29 | #include <linux/workqueue.h> | ||
30 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
31 | #include <linux/scatterlist.h> | 30 | #include <linux/scatterlist.h> |
32 | #include <linux/spi/spi.h> | 31 | #include <linux/spi/spi.h> |
@@ -70,19 +69,13 @@ | |||
70 | 69 | ||
71 | /** | 70 | /** |
72 | * struct ep93xx_spi - EP93xx SPI controller structure | 71 | * struct ep93xx_spi - EP93xx SPI controller structure |
73 | * @lock: spinlock that protects concurrent accesses to fields @running, | ||
74 | * @current_msg and @msg_queue | ||
75 | * @pdev: pointer to platform device | 72 | * @pdev: pointer to platform device |
76 | * @clk: clock for the controller | 73 | * @clk: clock for the controller |
77 | * @regs_base: pointer to ioremap()'d registers | 74 | * @regs_base: pointer to ioremap()'d registers |
78 | * @sspdr_phys: physical address of the SSPDR register | 75 | * @sspdr_phys: physical address of the SSPDR register |
79 | * @min_rate: minimum clock rate (in Hz) supported by the controller | 76 | * @min_rate: minimum clock rate (in Hz) supported by the controller |
80 | * @max_rate: maximum clock rate (in Hz) supported by the controller | 77 | * @max_rate: maximum clock rate (in Hz) supported by the controller |
81 | * @running: is the queue running | ||
82 | * @wq: workqueue used by the driver | ||
83 | * @msg_work: work that is queued for the driver | ||
84 | * @wait: wait here until given transfer is completed | 78 | * @wait: wait here until given transfer is completed |
85 | * @msg_queue: queue for the messages | ||
86 | * @current_msg: message that is currently processed (or %NULL if none) | 79 | * @current_msg: message that is currently processed (or %NULL if none) |
87 | * @tx: current byte in transfer to transmit | 80 | * @tx: current byte in transfer to transmit |
88 | * @rx: current byte in transfer to receive | 81 | * @rx: current byte in transfer to receive |
@@ -96,30 +89,15 @@ | |||
96 | * @tx_sgt: sg table for TX transfers | 89 | * @tx_sgt: sg table for TX transfers |
97 | * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by | 90 | * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by |
98 | * the client | 91 | * the client |
99 | * | ||
100 | * This structure holds EP93xx SPI controller specific information. When | ||
101 | * @running is %true, driver accepts transfer requests from protocol drivers. | ||
102 | * @current_msg is used to hold pointer to the message that is currently | ||
103 | * processed. If @current_msg is %NULL, it means that no processing is going | ||
104 | * on. | ||
105 | * | ||
106 | * Most of the fields are only written once and they can be accessed without | ||
107 | * taking the @lock. Fields that are accessed concurrently are: @current_msg, | ||
108 | * @running, and @msg_queue. | ||
109 | */ | 92 | */ |
110 | struct ep93xx_spi { | 93 | struct ep93xx_spi { |
111 | spinlock_t lock; | ||
112 | const struct platform_device *pdev; | 94 | const struct platform_device *pdev; |
113 | struct clk *clk; | 95 | struct clk *clk; |
114 | void __iomem *regs_base; | 96 | void __iomem *regs_base; |
115 | unsigned long sspdr_phys; | 97 | unsigned long sspdr_phys; |
116 | unsigned long min_rate; | 98 | unsigned long min_rate; |
117 | unsigned long max_rate; | 99 | unsigned long max_rate; |
118 | bool running; | ||
119 | struct workqueue_struct *wq; | ||
120 | struct work_struct msg_work; | ||
121 | struct completion wait; | 100 | struct completion wait; |
122 | struct list_head msg_queue; | ||
123 | struct spi_message *current_msg; | 101 | struct spi_message *current_msg; |
124 | size_t tx; | 102 | size_t tx; |
125 | size_t rx; | 103 | size_t rx; |
@@ -136,50 +114,36 @@ struct ep93xx_spi { | |||
136 | /** | 114 | /** |
137 | * struct ep93xx_spi_chip - SPI device hardware settings | 115 | * struct ep93xx_spi_chip - SPI device hardware settings |
138 | * @spi: back pointer to the SPI device | 116 | * @spi: back pointer to the SPI device |
139 | * @rate: max rate in hz this chip supports | ||
140 | * @div_cpsr: cpsr (pre-scaler) divider | ||
141 | * @div_scr: scr divider | ||
142 | * @dss: bits per word (4 - 16 bits) | ||
143 | * @ops: private chip operations | 117 | * @ops: private chip operations |
144 | * | ||
145 | * This structure is used to store hardware register specific settings for each | ||
146 | * SPI device. Settings are written to hardware by function | ||
147 | * ep93xx_spi_chip_setup(). | ||
148 | */ | 118 | */ |
149 | struct ep93xx_spi_chip { | 119 | struct ep93xx_spi_chip { |
150 | const struct spi_device *spi; | 120 | const struct spi_device *spi; |
151 | unsigned long rate; | ||
152 | u8 div_cpsr; | ||
153 | u8 div_scr; | ||
154 | u8 dss; | ||
155 | struct ep93xx_spi_chip_ops *ops; | 121 | struct ep93xx_spi_chip_ops *ops; |
156 | }; | 122 | }; |
157 | 123 | ||
158 | /* converts bits per word to CR0.DSS value */ | 124 | /* converts bits per word to CR0.DSS value */ |
159 | #define bits_per_word_to_dss(bpw) ((bpw) - 1) | 125 | #define bits_per_word_to_dss(bpw) ((bpw) - 1) |
160 | 126 | ||
161 | static inline void | 127 | static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi, |
162 | ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value) | 128 | u16 reg, u8 value) |
163 | { | 129 | { |
164 | __raw_writeb(value, espi->regs_base + reg); | 130 | writeb(value, espi->regs_base + reg); |
165 | } | 131 | } |
166 | 132 | ||
167 | static inline u8 | 133 | static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg) |
168 | ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg) | ||
169 | { | 134 | { |
170 | return __raw_readb(spi->regs_base + reg); | 135 | return readb(spi->regs_base + reg); |
171 | } | 136 | } |
172 | 137 | ||
173 | static inline void | 138 | static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi, |
174 | ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value) | 139 | u16 reg, u16 value) |
175 | { | 140 | { |
176 | __raw_writew(value, espi->regs_base + reg); | 141 | writew(value, espi->regs_base + reg); |
177 | } | 142 | } |
178 | 143 | ||
179 | static inline u16 | 144 | static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg) |
180 | ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg) | ||
181 | { | 145 | { |
182 | return __raw_readw(spi->regs_base + reg); | 146 | return readw(spi->regs_base + reg); |
183 | } | 147 | } |
184 | 148 | ||
185 | static int ep93xx_spi_enable(const struct ep93xx_spi *espi) | 149 | static int ep93xx_spi_enable(const struct ep93xx_spi *espi) |
@@ -230,17 +194,13 @@ static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi) | |||
230 | /** | 194 | /** |
231 | * ep93xx_spi_calc_divisors() - calculates SPI clock divisors | 195 | * ep93xx_spi_calc_divisors() - calculates SPI clock divisors |
232 | * @espi: ep93xx SPI controller struct | 196 | * @espi: ep93xx SPI controller struct |
233 | * @chip: divisors are calculated for this chip | ||
234 | * @rate: desired SPI output clock rate | 197 | * @rate: desired SPI output clock rate |
235 | * | 198 | * @div_cpsr: pointer to return the cpsr (pre-scaler) divider |
236 | * Function calculates cpsr (clock pre-scaler) and scr divisors based on | 199 | * @div_scr: pointer to return the scr divider |
237 | * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If, | ||
238 | * for some reason, divisors cannot be calculated nothing is stored and | ||
239 | * %-EINVAL is returned. | ||
240 | */ | 200 | */ |
241 | static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi, | 201 | static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi, |
242 | struct ep93xx_spi_chip *chip, | 202 | unsigned long rate, |
243 | unsigned long rate) | 203 | u8 *div_cpsr, u8 *div_scr) |
244 | { | 204 | { |
245 | unsigned long spi_clk_rate = clk_get_rate(espi->clk); | 205 | unsigned long spi_clk_rate = clk_get_rate(espi->clk); |
246 | int cpsr, scr; | 206 | int cpsr, scr; |
@@ -248,7 +208,7 @@ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi, | |||
248 | /* | 208 | /* |
249 | * Make sure that max value is between values supported by the | 209 | * Make sure that max value is between values supported by the |
250 | * controller. Note that minimum value is already checked in | 210 | * controller. Note that minimum value is already checked in |
251 | * ep93xx_spi_transfer(). | 211 | * ep93xx_spi_transfer_one_message(). |
252 | */ | 212 | */ |
253 | rate = clamp(rate, espi->min_rate, espi->max_rate); | 213 | rate = clamp(rate, espi->min_rate, espi->max_rate); |
254 | 214 | ||
@@ -263,8 +223,8 @@ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi, | |||
263 | for (cpsr = 2; cpsr <= 254; cpsr += 2) { | 223 | for (cpsr = 2; cpsr <= 254; cpsr += 2) { |
264 | for (scr = 0; scr <= 255; scr++) { | 224 | for (scr = 0; scr <= 255; scr++) { |
265 | if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) { | 225 | if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) { |
266 | chip->div_scr = (u8)scr; | 226 | *div_scr = (u8)scr; |
267 | chip->div_cpsr = (u8)cpsr; | 227 | *div_cpsr = (u8)cpsr; |
268 | return 0; | 228 | return 0; |
269 | } | 229 | } |
270 | } | 230 | } |
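The search above picks the smallest even pre-scaler (cpsr, 2..254) and serial clock divisor (scr, 0..255) for which spi_clk_rate / (cpsr * (scr + 1)) no longer exceeds the requested rate. As a worked example with assumed numbers (7.3728 MHz SSP clock, 1 MHz requested; neither taken from the patch): cpsr = 2 requires scr + 1 >= 3.6864, so the inner loop stops at scr = 3 and the bus ends up running at 7372800 / (2 * 4) = 921.6 kHz.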
@@ -319,73 +279,11 @@ static int ep93xx_spi_setup(struct spi_device *spi) | |||
319 | spi_set_ctldata(spi, chip); | 279 | spi_set_ctldata(spi, chip); |
320 | } | 280 | } |
321 | 281 | ||
322 | if (spi->max_speed_hz != chip->rate) { | ||
323 | int err; | ||
324 | |||
325 | err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz); | ||
326 | if (err != 0) { | ||
327 | spi_set_ctldata(spi, NULL); | ||
328 | kfree(chip); | ||
329 | return err; | ||
330 | } | ||
331 | chip->rate = spi->max_speed_hz; | ||
332 | } | ||
333 | |||
334 | chip->dss = bits_per_word_to_dss(spi->bits_per_word); | ||
335 | |||
336 | ep93xx_spi_cs_control(spi, false); | 282 | ep93xx_spi_cs_control(spi, false); |
337 | return 0; | 283 | return 0; |
338 | } | 284 | } |
339 | 285 | ||
340 | /** | 286 | /** |
341 | * ep93xx_spi_transfer() - queue message to be transferred | ||
342 | * @spi: target SPI device | ||
343 | * @msg: message to be transferred | ||
344 | * | ||
345 | * This function is called by SPI device drivers when they are going to transfer | ||
346 | * a new message. It simply puts the message in the queue and schedules | ||
347 | * workqueue to perform the actual transfer later on. | ||
348 | * | ||
349 | * Returns %0 on success and negative error in case of failure. | ||
350 | */ | ||
351 | static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg) | ||
352 | { | ||
353 | struct ep93xx_spi *espi = spi_master_get_devdata(spi->master); | ||
354 | struct spi_transfer *t; | ||
355 | unsigned long flags; | ||
356 | |||
357 | if (!msg || !msg->complete) | ||
358 | return -EINVAL; | ||
359 | |||
360 | /* first validate each transfer */ | ||
361 | list_for_each_entry(t, &msg->transfers, transfer_list) { | ||
362 | if (t->speed_hz && t->speed_hz < espi->min_rate) | ||
363 | return -EINVAL; | ||
364 | } | ||
365 | |||
366 | /* | ||
367 | * Now that we own the message, let's initialize it so that it is | ||
368 | * suitable for us. We use @msg->status to signal whether there was | ||
369 | * error in transfer and @msg->state is used to hold pointer to the | ||
370 | * current transfer (or %NULL if no active current transfer). | ||
371 | */ | ||
372 | msg->state = NULL; | ||
373 | msg->status = 0; | ||
374 | msg->actual_length = 0; | ||
375 | |||
376 | spin_lock_irqsave(&espi->lock, flags); | ||
377 | if (!espi->running) { | ||
378 | spin_unlock_irqrestore(&espi->lock, flags); | ||
379 | return -ESHUTDOWN; | ||
380 | } | ||
381 | list_add_tail(&msg->queue, &espi->msg_queue); | ||
382 | queue_work(espi->wq, &espi->msg_work); | ||
383 | spin_unlock_irqrestore(&espi->lock, flags); | ||
384 | |||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | /** | ||
389 | * ep93xx_spi_cleanup() - cleans up master controller specific state | 287 | * ep93xx_spi_cleanup() - cleans up master controller specific state |
390 | * @spi: SPI device to cleanup | 288 | * @spi: SPI device to cleanup |
391 | * | 289 | * |
@@ -409,39 +307,40 @@ static void ep93xx_spi_cleanup(struct spi_device *spi) | |||
409 | * ep93xx_spi_chip_setup() - configures hardware according to given @chip | 307 | * ep93xx_spi_chip_setup() - configures hardware according to given @chip |
410 | * @espi: ep93xx SPI controller struct | 308 | * @espi: ep93xx SPI controller struct |
411 | * @chip: chip specific settings | 309 | * @chip: chip specific settings |
412 | * | 310 | * @speed_hz: transfer speed |
413 | * This function sets up the actual hardware registers with settings given in | 311 | * @bits_per_word: transfer bits_per_word |
414 | * @chip. Note that no validation is done so make sure that callers validate | ||
415 | * settings before calling this. | ||
416 | */ | 312 | */ |
417 | static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi, | 313 | static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi, |
418 | const struct ep93xx_spi_chip *chip) | 314 | const struct ep93xx_spi_chip *chip, |
315 | u32 speed_hz, u8 bits_per_word) | ||
419 | { | 316 | { |
317 | u8 dss = bits_per_word_to_dss(bits_per_word); | ||
318 | u8 div_cpsr = 0; | ||
319 | u8 div_scr = 0; | ||
420 | u16 cr0; | 320 | u16 cr0; |
321 | int err; | ||
421 | 322 | ||
422 | cr0 = chip->div_scr << SSPCR0_SCR_SHIFT; | 323 | err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr); |
324 | if (err) | ||
325 | return err; | ||
326 | |||
327 | cr0 = div_scr << SSPCR0_SCR_SHIFT; | ||
423 | cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT; | 328 | cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT; |
424 | cr0 |= chip->dss; | 329 | cr0 |= dss; |
425 | 330 | ||
426 | dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", | 331 | dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n", |
427 | chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss); | 332 | chip->spi->mode, div_cpsr, div_scr, dss); |
428 | dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0); | 333 | dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0); |
429 | 334 | ||
430 | ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr); | 335 | ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr); |
431 | ep93xx_spi_write_u16(espi, SSPCR0, cr0); | 336 | ep93xx_spi_write_u16(espi, SSPCR0, cr0); |
432 | } | ||
433 | |||
434 | static inline int bits_per_word(const struct ep93xx_spi *espi) | ||
435 | { | ||
436 | struct spi_message *msg = espi->current_msg; | ||
437 | struct spi_transfer *t = msg->state; | ||
438 | 337 | ||
439 | return t->bits_per_word; | 338 | return 0; |
440 | } | 339 | } |
441 | 340 | ||
442 | static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) | 341 | static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) |
443 | { | 342 | { |
444 | if (bits_per_word(espi) > 8) { | 343 | if (t->bits_per_word > 8) { |
445 | u16 tx_val = 0; | 344 | u16 tx_val = 0; |
446 | 345 | ||
447 | if (t->tx_buf) | 346 | if (t->tx_buf) |
@@ -460,7 +359,7 @@ static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) | |||
460 | 359 | ||
461 | static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t) | 360 | static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t) |
462 | { | 361 | { |
463 | if (bits_per_word(espi) > 8) { | 362 | if (t->bits_per_word > 8) { |
464 | u16 rx_val; | 363 | u16 rx_val; |
465 | 364 | ||
466 | rx_val = ep93xx_spi_read_u16(espi, SSPDR); | 365 | rx_val = ep93xx_spi_read_u16(espi, SSPDR); |
@@ -546,7 +445,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir) | |||
546 | size_t len = t->len; | 445 | size_t len = t->len; |
547 | int i, ret, nents; | 446 | int i, ret, nents; |
548 | 447 | ||
549 | if (bits_per_word(espi) > 8) | 448 | if (t->bits_per_word > 8) |
550 | buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; | 449 | buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; |
551 | else | 450 | else |
552 | buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; | 451 | buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; |
@@ -610,7 +509,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir) | |||
610 | } | 509 | } |
611 | 510 | ||
612 | if (WARN_ON(len)) { | 511 | if (WARN_ON(len)) { |
613 | dev_warn(&espi->pdev->dev, "len = %d expected 0!", len); | 512 | dev_warn(&espi->pdev->dev, "len = %zu expected 0!", len); |
614 | return ERR_PTR(-EINVAL); | 513 | return ERR_PTR(-EINVAL); |
615 | } | 514 | } |
616 | 515 | ||
@@ -708,37 +607,16 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, | |||
708 | struct spi_transfer *t) | 607 | struct spi_transfer *t) |
709 | { | 608 | { |
710 | struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi); | 609 | struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi); |
610 | int err; | ||
711 | 611 | ||
712 | msg->state = t; | 612 | msg->state = t; |
713 | 613 | ||
714 | /* | 614 | err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word); |
715 | * Handle any transfer specific settings if needed. We use | 615 | if (err) { |
716 | * temporary chip settings here and restore original later when | 616 | dev_err(&espi->pdev->dev, |
717 | * the transfer is finished. | 617 | "failed to setup chip for transfer\n"); |
718 | */ | 618 | msg->status = err; |
719 | if (t->speed_hz || t->bits_per_word) { | 619 | return; |
720 | struct ep93xx_spi_chip tmp_chip = *chip; | ||
721 | |||
722 | if (t->speed_hz) { | ||
723 | int err; | ||
724 | |||
725 | err = ep93xx_spi_calc_divisors(espi, &tmp_chip, | ||
726 | t->speed_hz); | ||
727 | if (err) { | ||
728 | dev_err(&espi->pdev->dev, | ||
729 | "failed to adjust speed\n"); | ||
730 | msg->status = err; | ||
731 | return; | ||
732 | } | ||
733 | } | ||
734 | |||
735 | if (t->bits_per_word) | ||
736 | tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word); | ||
737 | |||
738 | /* | ||
739 | * Set up temporary new hw settings for this transfer. | ||
740 | */ | ||
741 | ep93xx_spi_chip_setup(espi, &tmp_chip); | ||
742 | } | 620 | } |
743 | 621 | ||
744 | espi->rx = 0; | 622 | espi->rx = 0; |
@@ -783,9 +661,6 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, | |||
783 | ep93xx_spi_cs_control(msg->spi, true); | 661 | ep93xx_spi_cs_control(msg->spi, true); |
784 | } | 662 | } |
785 | } | 663 | } |
786 | |||
787 | if (t->speed_hz || t->bits_per_word) | ||
788 | ep93xx_spi_chip_setup(espi, chip); | ||
789 | } | 664 | } |
790 | 665 | ||
791 | /* | 666 | /* |
@@ -838,10 +713,8 @@ static void ep93xx_spi_process_message(struct ep93xx_spi *espi, | |||
838 | espi->fifo_level = 0; | 713 | espi->fifo_level = 0; |
839 | 714 | ||
840 | /* | 715 | /* |
841 | * Update SPI controller registers according to spi device and assert | 716 | * Assert the chipselect. |
842 | * the chipselect. | ||
843 | */ | 717 | */ |
844 | ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi)); | ||
845 | ep93xx_spi_cs_control(msg->spi, true); | 718 | ep93xx_spi_cs_control(msg->spi, true); |
846 | 719 | ||
847 | list_for_each_entry(t, &msg->transfers, transfer_list) { | 720 | list_for_each_entry(t, &msg->transfers, transfer_list) { |
@@ -858,50 +731,29 @@ static void ep93xx_spi_process_message(struct ep93xx_spi *espi, | |||
858 | ep93xx_spi_disable(espi); | 731 | ep93xx_spi_disable(espi); |
859 | } | 732 | } |
860 | 733 | ||
861 | #define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work)) | 734 | static int ep93xx_spi_transfer_one_message(struct spi_master *master, |
862 | 735 | struct spi_message *msg) | |
863 | /** | ||
864 | * ep93xx_spi_work() - EP93xx SPI workqueue worker function | ||
865 | * @work: work struct | ||
866 | * | ||
867 | * Workqueue worker function. This function is called when there are new | ||
868 | * SPI messages to be processed. Message is taken out from the queue and then | ||
869 | * passed to ep93xx_spi_process_message(). | ||
870 | * | ||
871 | * After message is transferred, protocol driver is notified by calling | ||
872 | * @msg->complete(). In case of error, @msg->status is set to negative error | ||
873 | * number, otherwise it contains zero (and @msg->actual_length is updated). | ||
874 | */ | ||
875 | static void ep93xx_spi_work(struct work_struct *work) | ||
876 | { | 736 | { |
877 | struct ep93xx_spi *espi = work_to_espi(work); | 737 | struct ep93xx_spi *espi = spi_master_get_devdata(master); |
878 | struct spi_message *msg; | 738 | struct spi_transfer *t; |
879 | 739 | ||
880 | spin_lock_irq(&espi->lock); | 740 | /* first validate each transfer */ |
881 | if (!espi->running || espi->current_msg || | 741 | list_for_each_entry(t, &msg->transfers, transfer_list) { |
882 | list_empty(&espi->msg_queue)) { | 742 | if (t->speed_hz < espi->min_rate) |
883 | spin_unlock_irq(&espi->lock); | 743 | return -EINVAL; |
884 | return; | ||
885 | } | 744 | } |
886 | msg = list_first_entry(&espi->msg_queue, struct spi_message, queue); | ||
887 | list_del_init(&msg->queue); | ||
888 | espi->current_msg = msg; | ||
889 | spin_unlock_irq(&espi->lock); | ||
890 | 745 | ||
891 | ep93xx_spi_process_message(espi, msg); | 746 | msg->state = NULL; |
747 | msg->status = 0; | ||
748 | msg->actual_length = 0; | ||
892 | 749 | ||
893 | /* | 750 | espi->current_msg = msg; |
894 | * Update the current message and re-schedule ourselves if there are | 751 | ep93xx_spi_process_message(espi, msg); |
895 | * more messages in the queue. | ||
896 | */ | ||
897 | spin_lock_irq(&espi->lock); | ||
898 | espi->current_msg = NULL; | 752 | espi->current_msg = NULL; |
899 | if (espi->running && !list_empty(&espi->msg_queue)) | ||
900 | queue_work(espi->wq, &espi->msg_work); | ||
901 | spin_unlock_irq(&espi->lock); | ||
902 | 753 | ||
903 | /* notify the protocol driver that we are done with this message */ | 754 | spi_finalize_current_message(master); |
904 | msg->complete(msg->context); | 755 | |
756 | return 0; | ||
905 | } | 757 | } |
906 | 758 | ||
907 | static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) | 759 | static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) |
@@ -1022,16 +874,26 @@ static int ep93xx_spi_probe(struct platform_device *pdev) | |||
1022 | int irq; | 874 | int irq; |
1023 | int error; | 875 | int error; |
1024 | 876 | ||
1025 | info = pdev->dev.platform_data; | 877 | info = dev_get_platdata(&pdev->dev); |
878 | |||
879 | irq = platform_get_irq(pdev, 0); | ||
880 | if (irq < 0) { | ||
881 | dev_err(&pdev->dev, "failed to get irq resources\n"); | ||
882 | return -EBUSY; | ||
883 | } | ||
884 | |||
885 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
886 | if (!res) { | ||
887 | dev_err(&pdev->dev, "unable to get iomem resource\n"); | ||
888 | return -ENODEV; | ||
889 | } | ||
1026 | 890 | ||
1027 | master = spi_alloc_master(&pdev->dev, sizeof(*espi)); | 891 | master = spi_alloc_master(&pdev->dev, sizeof(*espi)); |
1028 | if (!master) { | 892 | if (!master) |
1029 | dev_err(&pdev->dev, "failed to allocate spi master\n"); | ||
1030 | return -ENOMEM; | 893 | return -ENOMEM; |
1031 | } | ||
1032 | 894 | ||
1033 | master->setup = ep93xx_spi_setup; | 895 | master->setup = ep93xx_spi_setup; |
1034 | master->transfer = ep93xx_spi_transfer; | 896 | master->transfer_one_message = ep93xx_spi_transfer_one_message; |
1035 | master->cleanup = ep93xx_spi_cleanup; | 897 | master->cleanup = ep93xx_spi_cleanup; |
1036 | master->bus_num = pdev->id; | 898 | master->bus_num = pdev->id; |
1037 | master->num_chipselect = info->num_chipselect; | 899 | master->num_chipselect = info->num_chipselect; |
@@ -1042,14 +904,13 @@ static int ep93xx_spi_probe(struct platform_device *pdev) | |||
1042 | 904 | ||
1043 | espi = spi_master_get_devdata(master); | 905 | espi = spi_master_get_devdata(master); |
1044 | 906 | ||
1045 | espi->clk = clk_get(&pdev->dev, NULL); | 907 | espi->clk = devm_clk_get(&pdev->dev, NULL); |
1046 | if (IS_ERR(espi->clk)) { | 908 | if (IS_ERR(espi->clk)) { |
1047 | dev_err(&pdev->dev, "unable to get spi clock\n"); | 909 | dev_err(&pdev->dev, "unable to get spi clock\n"); |
1048 | error = PTR_ERR(espi->clk); | 910 | error = PTR_ERR(espi->clk); |
1049 | goto fail_release_master; | 911 | goto fail_release_master; |
1050 | } | 912 | } |
1051 | 913 | ||
1052 | spin_lock_init(&espi->lock); | ||
1053 | init_completion(&espi->wait); | 914 | init_completion(&espi->wait); |
1054 | 915 | ||
1055 | /* | 916 | /* |
@@ -1060,55 +921,31 @@ static int ep93xx_spi_probe(struct platform_device *pdev) | |||
1060 | espi->min_rate = clk_get_rate(espi->clk) / (254 * 256); | 921 | espi->min_rate = clk_get_rate(espi->clk) / (254 * 256); |
1061 | espi->pdev = pdev; | 922 | espi->pdev = pdev; |
1062 | 923 | ||
1063 | irq = platform_get_irq(pdev, 0); | ||
1064 | if (irq < 0) { | ||
1065 | error = -EBUSY; | ||
1066 | dev_err(&pdev->dev, "failed to get irq resources\n"); | ||
1067 | goto fail_put_clock; | ||
1068 | } | ||
1069 | |||
1070 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1071 | if (!res) { | ||
1072 | dev_err(&pdev->dev, "unable to get iomem resource\n"); | ||
1073 | error = -ENODEV; | ||
1074 | goto fail_put_clock; | ||
1075 | } | ||
1076 | |||
1077 | espi->sspdr_phys = res->start + SSPDR; | 924 | espi->sspdr_phys = res->start + SSPDR; |
1078 | 925 | ||
1079 | espi->regs_base = devm_ioremap_resource(&pdev->dev, res); | 926 | espi->regs_base = devm_ioremap_resource(&pdev->dev, res); |
1080 | if (IS_ERR(espi->regs_base)) { | 927 | if (IS_ERR(espi->regs_base)) { |
1081 | error = PTR_ERR(espi->regs_base); | 928 | error = PTR_ERR(espi->regs_base); |
1082 | goto fail_put_clock; | 929 | goto fail_release_master; |
1083 | } | 930 | } |
1084 | 931 | ||
1085 | error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt, | 932 | error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt, |
1086 | 0, "ep93xx-spi", espi); | 933 | 0, "ep93xx-spi", espi); |
1087 | if (error) { | 934 | if (error) { |
1088 | dev_err(&pdev->dev, "failed to request irq\n"); | 935 | dev_err(&pdev->dev, "failed to request irq\n"); |
1089 | goto fail_put_clock; | 936 | goto fail_release_master; |
1090 | } | 937 | } |
1091 | 938 | ||
1092 | if (info->use_dma && ep93xx_spi_setup_dma(espi)) | 939 | if (info->use_dma && ep93xx_spi_setup_dma(espi)) |
1093 | dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n"); | 940 | dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n"); |
1094 | 941 | ||
1095 | espi->wq = create_singlethread_workqueue("ep93xx_spid"); | ||
1096 | if (!espi->wq) { | ||
1097 | dev_err(&pdev->dev, "unable to create workqueue\n"); | ||
1098 | error = -ENOMEM; | ||
1099 | goto fail_free_dma; | ||
1100 | } | ||
1101 | INIT_WORK(&espi->msg_work, ep93xx_spi_work); | ||
1102 | INIT_LIST_HEAD(&espi->msg_queue); | ||
1103 | espi->running = true; | ||
1104 | |||
1105 | /* make sure that the hardware is disabled */ | 942 | /* make sure that the hardware is disabled */ |
1106 | ep93xx_spi_write_u8(espi, SSPCR1, 0); | 943 | ep93xx_spi_write_u8(espi, SSPCR1, 0); |
1107 | 944 | ||
1108 | error = spi_register_master(master); | 945 | error = spi_register_master(master); |
1109 | if (error) { | 946 | if (error) { |
1110 | dev_err(&pdev->dev, "failed to register SPI master\n"); | 947 | dev_err(&pdev->dev, "failed to register SPI master\n"); |
1111 | goto fail_free_queue; | 948 | goto fail_free_dma; |
1112 | } | 949 | } |
1113 | 950 | ||
1114 | dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n", | 951 | dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n", |
@@ -1116,12 +953,8 @@ static int ep93xx_spi_probe(struct platform_device *pdev) | |||
1116 | 953 | ||
1117 | return 0; | 954 | return 0; |
1118 | 955 | ||
1119 | fail_free_queue: | ||
1120 | destroy_workqueue(espi->wq); | ||
1121 | fail_free_dma: | 956 | fail_free_dma: |
1122 | ep93xx_spi_release_dma(espi); | 957 | ep93xx_spi_release_dma(espi); |
1123 | fail_put_clock: | ||
1124 | clk_put(espi->clk); | ||
1125 | fail_release_master: | 958 | fail_release_master: |
1126 | spi_master_put(master); | 959 | spi_master_put(master); |
1127 | 960 | ||
@@ -1133,31 +966,7 @@ static int ep93xx_spi_remove(struct platform_device *pdev) | |||
1133 | struct spi_master *master = platform_get_drvdata(pdev); | 966 | struct spi_master *master = platform_get_drvdata(pdev); |
1134 | struct ep93xx_spi *espi = spi_master_get_devdata(master); | 967 | struct ep93xx_spi *espi = spi_master_get_devdata(master); |
1135 | 968 | ||
1136 | spin_lock_irq(&espi->lock); | ||
1137 | espi->running = false; | ||
1138 | spin_unlock_irq(&espi->lock); | ||
1139 | |||
1140 | destroy_workqueue(espi->wq); | ||
1141 | |||
1142 | /* | ||
1143 | * Complete remaining messages with %-ESHUTDOWN status. | ||
1144 | */ | ||
1145 | spin_lock_irq(&espi->lock); | ||
1146 | while (!list_empty(&espi->msg_queue)) { | ||
1147 | struct spi_message *msg; | ||
1148 | |||
1149 | msg = list_first_entry(&espi->msg_queue, | ||
1150 | struct spi_message, queue); | ||
1151 | list_del_init(&msg->queue); | ||
1152 | msg->status = -ESHUTDOWN; | ||
1153 | spin_unlock_irq(&espi->lock); | ||
1154 | msg->complete(msg->context); | ||
1155 | spin_lock_irq(&espi->lock); | ||
1156 | } | ||
1157 | spin_unlock_irq(&espi->lock); | ||
1158 | |||
1159 | ep93xx_spi_release_dma(espi); | 969 | ep93xx_spi_release_dma(espi); |
1160 | clk_put(espi->clk); | ||
1161 | 970 | ||
1162 | spi_unregister_master(master); | 971 | spi_unregister_master(master); |
1163 | return 0; | 972 | return 0; |
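Note: the ep93xx hunks above drop the driver's private workqueue and message list in favour of the SPI core's message queue, so the core now owns queueing and shutdown (which is why the -ESHUTDOWN flush disappears from remove()). A minimal sketch of the callback shape the core expects (body hypothetical, error handling omitted):

    static int ep93xx_spi_transfer_one_message(struct spi_master *master,
                                               struct spi_message *msg)
    {
            struct spi_transfer *t;

            list_for_each_entry(t, &msg->transfers, transfer_list) {
                    /* program the clock divider and chip select, then run the transfer */
            }

            msg->status = 0;
            spi_finalize_current_message(master);   /* hand the message back to the core */
            return 0;
    }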
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c new file mode 100644 index 000000000000..6cd07d13ecab --- /dev/null +++ b/drivers/spi/spi-fsl-dspi.c | |||
@@ -0,0 +1,557 @@ | |||
1 | /* | ||
2 | * drivers/spi/spi-fsl-dspi.c | ||
3 | * | ||
4 | * Copyright 2013 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | * Freescale DSPI driver | ||
7 | * This file contains a driver for the Freescale DSPI | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/clk.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/spi/spi.h> | ||
27 | #include <linux/spi/spi_bitbang.h> | ||
28 | #include <linux/pm_runtime.h> | ||
29 | #include <linux/of.h> | ||
30 | #include <linux/of_device.h> | ||
31 | |||
32 | #define DRIVER_NAME "fsl-dspi" | ||
33 | |||
34 | #define TRAN_STATE_RX_VOID 0x01 | ||
35 | #define TRAN_STATE_TX_VOID 0x02 | ||
36 | #define TRAN_STATE_WORD_ODD_NUM 0x04 | ||
37 | |||
38 | #define DSPI_FIFO_SIZE 4 | ||
39 | |||
40 | #define SPI_MCR 0x00 | ||
41 | #define SPI_MCR_MASTER (1 << 31) | ||
42 | #define SPI_MCR_PCSIS (0x3F << 16) | ||
43 | #define SPI_MCR_CLR_TXF (1 << 11) | ||
44 | #define SPI_MCR_CLR_RXF (1 << 10) | ||
45 | |||
46 | #define SPI_TCR 0x08 | ||
47 | |||
48 | #define SPI_CTAR(x) (0x0c + (x * 4)) | ||
49 | #define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27) | ||
50 | #define SPI_CTAR_CPOL(x) ((x) << 26) | ||
51 | #define SPI_CTAR_CPHA(x) ((x) << 25) | ||
52 | #define SPI_CTAR_LSBFE(x) ((x) << 24) | ||
53 | #define SPI_CTAR_PCSSCR(x) (((x) & 0x00000003) << 22) | ||
54 | #define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20) | ||
55 | #define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18) | ||
56 | #define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16) | ||
57 | #define SPI_CTAR_CSSCK(x) (((x) & 0x0000000f) << 12) | ||
58 | #define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8) | ||
59 | #define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4) | ||
60 | #define SPI_CTAR_BR(x) ((x) & 0x0000000f) | ||
61 | |||
62 | #define SPI_CTAR0_SLAVE 0x0c | ||
63 | |||
64 | #define SPI_SR 0x2c | ||
65 | #define SPI_SR_EOQF 0x10000000 | ||
66 | |||
67 | #define SPI_RSER 0x30 | ||
68 | #define SPI_RSER_EOQFE 0x10000000 | ||
69 | |||
70 | #define SPI_PUSHR 0x34 | ||
71 | #define SPI_PUSHR_CONT (1 << 31) | ||
72 | #define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28) | ||
73 | #define SPI_PUSHR_EOQ (1 << 27) | ||
74 | #define SPI_PUSHR_CTCNT (1 << 26) | ||
75 | #define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16) | ||
76 | #define SPI_PUSHR_TXDATA(x) ((x) & 0x0000ffff) | ||
77 | |||
78 | #define SPI_PUSHR_SLAVE 0x34 | ||
79 | |||
80 | #define SPI_POPR 0x38 | ||
81 | #define SPI_POPR_RXDATA(x) ((x) & 0x0000ffff) | ||
82 | |||
83 | #define SPI_TXFR0 0x3c | ||
84 | #define SPI_TXFR1 0x40 | ||
85 | #define SPI_TXFR2 0x44 | ||
86 | #define SPI_TXFR3 0x48 | ||
87 | #define SPI_RXFR0 0x7c | ||
88 | #define SPI_RXFR1 0x80 | ||
89 | #define SPI_RXFR2 0x84 | ||
90 | #define SPI_RXFR3 0x88 | ||
91 | |||
92 | #define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1) | ||
93 | #define SPI_FRAME_BITS_MASK SPI_CTAR_FMSZ(0xf) | ||
94 | #define SPI_FRAME_BITS_16 SPI_CTAR_FMSZ(0xf) | ||
95 | #define SPI_FRAME_BITS_8 SPI_CTAR_FMSZ(0x7) | ||
96 | |||
97 | #define SPI_CS_INIT 0x01 | ||
98 | #define SPI_CS_ASSERT 0x02 | ||
99 | #define SPI_CS_DROP 0x04 | ||
100 | |||
101 | struct chip_data { | ||
102 | u32 mcr_val; | ||
103 | u32 ctar_val; | ||
104 | u16 void_write_data; | ||
105 | }; | ||
106 | |||
107 | struct fsl_dspi { | ||
108 | struct spi_bitbang bitbang; | ||
109 | struct platform_device *pdev; | ||
110 | |||
111 | void *base; | ||
112 | int irq; | ||
113 | struct clk *clk; | ||
114 | |||
115 | struct spi_transfer *cur_transfer; | ||
116 | struct chip_data *cur_chip; | ||
117 | size_t len; | ||
118 | void *tx; | ||
119 | void *tx_end; | ||
120 | void *rx; | ||
121 | void *rx_end; | ||
122 | char dataflags; | ||
123 | u8 cs; | ||
124 | u16 void_write_data; | ||
125 | |||
126 | wait_queue_head_t waitq; | ||
127 | u32 waitflags; | ||
128 | }; | ||
129 | |||
130 | static inline int is_double_byte_mode(struct fsl_dspi *dspi) | ||
131 | { | ||
132 | return ((readl(dspi->base + SPI_CTAR(dspi->cs)) & SPI_FRAME_BITS_MASK) | ||
133 | == SPI_FRAME_BITS(8)) ? 0 : 1; | ||
134 | } | ||
135 | |||
136 | static void set_bit_mode(struct fsl_dspi *dspi, unsigned char bits) | ||
137 | { | ||
138 | u32 temp; | ||
139 | |||
140 | temp = readl(dspi->base + SPI_CTAR(dspi->cs)); | ||
141 | temp &= ~SPI_FRAME_BITS_MASK; | ||
142 | temp |= SPI_FRAME_BITS(bits); | ||
143 | writel(temp, dspi->base + SPI_CTAR(dspi->cs)); | ||
144 | } | ||
145 | |||
146 | static void hz_to_spi_baud(char *pbr, char *br, int speed_hz, | ||
147 | unsigned long clkrate) | ||
148 | { | ||
149 | /* Valid baud rate pre-scaler values */ | ||
150 | int pbr_tbl[4] = {2, 3, 5, 7}; | ||
151 | int brs[16] = { 2, 4, 6, 8, | ||
152 | 16, 32, 64, 128, | ||
153 | 256, 512, 1024, 2048, | ||
154 | 4096, 8192, 16384, 32768 }; | ||
155 | int temp, i = 0, j = 0; | ||
156 | |||
157 | temp = clkrate / 2 / speed_hz; | ||
158 | |||
159 | for (i = 0; i < ARRAY_SIZE(pbr_tbl); i++) | ||
160 | for (j = 0; j < ARRAY_SIZE(brs); j++) { | ||
161 | if (pbr_tbl[i] * brs[j] >= temp) { | ||
162 | *pbr = i; | ||
163 | *br = j; | ||
164 | return; | ||
165 | } | ||
166 | } | ||
167 | |||
168 | pr_warn("Can not find valid buad rate,speed_hz is %d,clkrate is %ld\ | ||
169 | ,we use the max prescaler value.\n", speed_hz, clkrate); | ||
170 | *pbr = ARRAY_SIZE(pbr_tbl) - 1; | ||
171 | *br = ARRAY_SIZE(brs) - 1; | ||
172 | } | ||
173 | |||
174 | static int dspi_transfer_write(struct fsl_dspi *dspi) | ||
175 | { | ||
176 | int tx_count = 0; | ||
177 | int tx_word; | ||
178 | u16 d16; | ||
179 | u8 d8; | ||
180 | u32 dspi_pushr = 0; | ||
181 | int first = 1; | ||
182 | |||
183 | tx_word = is_double_byte_mode(dspi); | ||
184 | |||
185 | /* If we are in word mode, but only have a single byte to transfer | ||
186 | * then switch to byte mode temporarily. Will switch back at the | ||
187 | * end of the transfer. | ||
188 | */ | ||
189 | if (tx_word && (dspi->len == 1)) { | ||
190 | dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; | ||
191 | set_bit_mode(dspi, 8); | ||
192 | tx_word = 0; | ||
193 | } | ||
194 | |||
195 | while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) { | ||
196 | if (tx_word) { | ||
197 | if (dspi->len == 1) | ||
198 | break; | ||
199 | |||
200 | if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) { | ||
201 | d16 = *(u16 *)dspi->tx; | ||
202 | dspi->tx += 2; | ||
203 | } else { | ||
204 | d16 = dspi->void_write_data; | ||
205 | } | ||
206 | |||
207 | dspi_pushr = SPI_PUSHR_TXDATA(d16) | | ||
208 | SPI_PUSHR_PCS(dspi->cs) | | ||
209 | SPI_PUSHR_CTAS(dspi->cs) | | ||
210 | SPI_PUSHR_CONT; | ||
211 | |||
212 | dspi->len -= 2; | ||
213 | } else { | ||
214 | if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) { | ||
215 | |||
216 | d8 = *(u8 *)dspi->tx; | ||
217 | dspi->tx++; | ||
218 | } else { | ||
219 | d8 = (u8)dspi->void_write_data; | ||
220 | } | ||
221 | |||
222 | dspi_pushr = SPI_PUSHR_TXDATA(d8) | | ||
223 | SPI_PUSHR_PCS(dspi->cs) | | ||
224 | SPI_PUSHR_CTAS(dspi->cs) | | ||
225 | SPI_PUSHR_CONT; | ||
226 | |||
227 | dspi->len--; | ||
228 | } | ||
229 | |||
230 | if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) { | ||
231 | /* last word of the transfer, or the FIFO is full */ | ||
232 | dspi_pushr |= SPI_PUSHR_EOQ; | ||
233 | } else if (tx_word && (dspi->len == 1)) | ||
234 | dspi_pushr |= SPI_PUSHR_EOQ; | ||
235 | |||
236 | if (first) { | ||
237 | first = 0; | ||
238 | dspi_pushr |= SPI_PUSHR_CTCNT; /* clear counter */ | ||
239 | } | ||
240 | |||
241 | writel(dspi_pushr, dspi->base + SPI_PUSHR); | ||
242 | tx_count++; | ||
243 | } | ||
244 | |||
245 | return tx_count * (tx_word + 1); | ||
246 | } | ||
247 | |||
248 | static int dspi_transfer_read(struct fsl_dspi *dspi) | ||
249 | { | ||
250 | int rx_count = 0; | ||
251 | int rx_word = is_double_byte_mode(dspi); | ||
252 | u16 d; | ||
253 | while ((dspi->rx < dspi->rx_end) | ||
254 | && (rx_count < DSPI_FIFO_SIZE)) { | ||
255 | if (rx_word) { | ||
256 | if ((dspi->rx_end - dspi->rx) == 1) | ||
257 | break; | ||
258 | |||
259 | d = SPI_POPR_RXDATA(readl(dspi->base + SPI_POPR)); | ||
260 | |||
261 | if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) | ||
262 | *(u16 *)dspi->rx = d; | ||
263 | dspi->rx += 2; | ||
264 | |||
265 | } else { | ||
266 | d = SPI_POPR_RXDATA(readl(dspi->base + SPI_POPR)); | ||
267 | if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) | ||
268 | *(u8 *)dspi->rx = d; | ||
269 | dspi->rx++; | ||
270 | } | ||
271 | rx_count++; | ||
272 | } | ||
273 | |||
274 | return rx_count; | ||
275 | } | ||
276 | |||
277 | static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t) | ||
278 | { | ||
279 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); | ||
280 | dspi->cur_transfer = t; | ||
281 | dspi->cur_chip = spi_get_ctldata(spi); | ||
282 | dspi->cs = spi->chip_select; | ||
283 | dspi->void_write_data = dspi->cur_chip->void_write_data; | ||
284 | |||
285 | dspi->dataflags = 0; | ||
286 | dspi->tx = (void *)t->tx_buf; | ||
287 | dspi->tx_end = dspi->tx + t->len; | ||
288 | dspi->rx = t->rx_buf; | ||
289 | dspi->rx_end = dspi->rx + t->len; | ||
290 | dspi->len = t->len; | ||
291 | |||
292 | if (!dspi->rx) | ||
293 | dspi->dataflags |= TRAN_STATE_RX_VOID; | ||
294 | |||
295 | if (!dspi->tx) | ||
296 | dspi->dataflags |= TRAN_STATE_TX_VOID; | ||
297 | |||
298 | writel(dspi->cur_chip->mcr_val, dspi->base + SPI_MCR); | ||
299 | writel(dspi->cur_chip->ctar_val, dspi->base + SPI_CTAR(dspi->cs)); | ||
300 | writel(SPI_RSER_EOQFE, dspi->base + SPI_RSER); | ||
301 | |||
302 | if (t->speed_hz) | ||
303 | writel(dspi->cur_chip->ctar_val, | ||
304 | dspi->base + SPI_CTAR(dspi->cs)); | ||
305 | |||
306 | dspi_transfer_write(dspi); | ||
307 | |||
308 | if (wait_event_interruptible(dspi->waitq, dspi->waitflags)) | ||
309 | dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n"); | ||
310 | dspi->waitflags = 0; | ||
311 | |||
312 | return t->len - dspi->len; | ||
313 | } | ||
314 | |||
315 | static void dspi_chipselect(struct spi_device *spi, int value) | ||
316 | { | ||
317 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); | ||
318 | u32 pushr = readl(dspi->base + SPI_PUSHR); | ||
319 | |||
320 | switch (value) { | ||
321 | case BITBANG_CS_ACTIVE: | ||
322 | pushr |= SPI_PUSHR_CONT; break; | ||
323 | case BITBANG_CS_INACTIVE: | ||
324 | pushr &= ~SPI_PUSHR_CONT; | ||
325 | } | ||
326 | |||
327 | writel(pushr, dspi->base + SPI_PUSHR); | ||
328 | } | ||
329 | |||
330 | static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | ||
331 | { | ||
332 | struct chip_data *chip; | ||
333 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); | ||
334 | unsigned char br = 0, pbr = 0, fmsz = 0; | ||
335 | |||
336 | /* Only alloc on first setup */ | ||
337 | chip = spi_get_ctldata(spi); | ||
338 | if (chip == NULL) { | ||
339 | chip = kcalloc(1, sizeof(struct chip_data), GFP_KERNEL); | ||
340 | if (!chip) | ||
341 | return -ENOMEM; | ||
342 | } | ||
343 | |||
344 | chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | | ||
345 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; | ||
346 | if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) { | ||
347 | fmsz = spi->bits_per_word - 1; | ||
348 | } else { | ||
349 | pr_err("Invalid wordsize\n"); | ||
350 | kfree(chip); | ||
351 | return -ENODEV; | ||
352 | } | ||
353 | |||
354 | chip->void_write_data = 0; | ||
355 | |||
356 | hz_to_spi_baud(&pbr, &br, | ||
357 | spi->max_speed_hz, clk_get_rate(dspi->clk)); | ||
358 | |||
359 | chip->ctar_val = SPI_CTAR_FMSZ(fmsz) | ||
360 | | SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0) | ||
361 | | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0) | ||
362 | | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0) | ||
363 | | SPI_CTAR_PBR(pbr) | ||
364 | | SPI_CTAR_BR(br); | ||
365 | |||
366 | spi_set_ctldata(spi, chip); | ||
367 | |||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static int dspi_setup(struct spi_device *spi) | ||
372 | { | ||
373 | if (!spi->max_speed_hz) | ||
374 | return -EINVAL; | ||
375 | |||
376 | if (!spi->bits_per_word) | ||
377 | spi->bits_per_word = 8; | ||
378 | |||
379 | return dspi_setup_transfer(spi, NULL); | ||
380 | } | ||
381 | |||
382 | static irqreturn_t dspi_interrupt(int irq, void *dev_id) | ||
383 | { | ||
384 | struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; | ||
385 | |||
386 | writel(SPI_SR_EOQF, dspi->base + SPI_SR); | ||
387 | |||
388 | dspi_transfer_read(dspi); | ||
389 | |||
390 | if (!dspi->len) { | ||
391 | if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) | ||
392 | set_bit_mode(dspi, 16); | ||
393 | dspi->waitflags = 1; | ||
394 | wake_up_interruptible(&dspi->waitq); | ||
395 | } else { | ||
396 | dspi_transfer_write(dspi); | ||
397 | |||
398 | return IRQ_HANDLED; | ||
399 | } | ||
400 | |||
401 | return IRQ_HANDLED; | ||
402 | } | ||
403 | |||
404 | static struct of_device_id fsl_dspi_dt_ids[] = { | ||
405 | { .compatible = "fsl,vf610-dspi", .data = NULL, }, | ||
406 | { /* sentinel */ } | ||
407 | }; | ||
408 | MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids); | ||
409 | |||
410 | #ifdef CONFIG_PM_SLEEP | ||
411 | static int dspi_suspend(struct device *dev) | ||
412 | { | ||
413 | struct spi_master *master = dev_get_drvdata(dev); | ||
414 | struct fsl_dspi *dspi = spi_master_get_devdata(master); | ||
415 | |||
416 | spi_master_suspend(master); | ||
417 | clk_disable_unprepare(dspi->clk); | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | static int dspi_resume(struct device *dev) | ||
423 | { | ||
424 | |||
425 | struct spi_master *master = dev_get_drvdata(dev); | ||
426 | struct fsl_dspi *dspi = spi_master_get_devdata(master); | ||
427 | |||
428 | clk_prepare_enable(dspi->clk); | ||
429 | spi_master_resume(master); | ||
430 | |||
431 | return 0; | ||
432 | } | ||
433 | #endif /* CONFIG_PM_SLEEP */ | ||
434 | |||
435 | static const struct dev_pm_ops dspi_pm = { | ||
436 | SET_SYSTEM_SLEEP_PM_OPS(dspi_suspend, dspi_resume) | ||
437 | }; | ||
438 | |||
439 | static int dspi_probe(struct platform_device *pdev) | ||
440 | { | ||
441 | struct device_node *np = pdev->dev.of_node; | ||
442 | struct spi_master *master; | ||
443 | struct fsl_dspi *dspi; | ||
444 | struct resource *res; | ||
445 | int ret = 0, cs_num, bus_num; | ||
446 | |||
447 | master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi)); | ||
448 | if (!master) | ||
449 | return -ENOMEM; | ||
450 | |||
451 | dspi = spi_master_get_devdata(master); | ||
452 | dspi->pdev = pdev; | ||
453 | dspi->bitbang.master = spi_master_get(master); | ||
454 | dspi->bitbang.chipselect = dspi_chipselect; | ||
455 | dspi->bitbang.setup_transfer = dspi_setup_transfer; | ||
456 | dspi->bitbang.txrx_bufs = dspi_txrx_transfer; | ||
457 | dspi->bitbang.master->setup = dspi_setup; | ||
458 | dspi->bitbang.master->dev.of_node = pdev->dev.of_node; | ||
459 | |||
460 | master->mode_bits = SPI_CPOL | SPI_CPHA; | ||
461 | master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | | ||
462 | SPI_BPW_MASK(16); | ||
463 | |||
464 | ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num); | ||
465 | if (ret < 0) { | ||
466 | dev_err(&pdev->dev, "can't get spi-num-chipselects\n"); | ||
467 | goto out_master_put; | ||
468 | } | ||
469 | master->num_chipselect = cs_num; | ||
470 | |||
471 | ret = of_property_read_u32(np, "bus-num", &bus_num); | ||
472 | if (ret < 0) { | ||
473 | dev_err(&pdev->dev, "can't get bus-num\n"); | ||
474 | goto out_master_put; | ||
475 | } | ||
476 | master->bus_num = bus_num; | ||
477 | |||
478 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
479 | if (!res) { | ||
480 | dev_err(&pdev->dev, "can't get platform resource\n"); | ||
481 | ret = -EINVAL; | ||
482 | goto out_master_put; | ||
483 | } | ||
484 | |||
485 | dspi->base = devm_ioremap_resource(&pdev->dev, res); | ||
486 | if (IS_ERR(dspi->base)) { | ||
487 | ret = PTR_ERR(dspi->base); | ||
488 | goto out_master_put; | ||
489 | } | ||
490 | |||
491 | dspi->irq = platform_get_irq(pdev, 0); | ||
492 | if (dspi->irq < 0) { | ||
493 | dev_err(&pdev->dev, "can't get platform irq\n"); | ||
494 | ret = dspi->irq; | ||
495 | goto out_master_put; | ||
496 | } | ||
497 | |||
498 | ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0, | ||
499 | pdev->name, dspi); | ||
500 | if (ret < 0) { | ||
501 | dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); | ||
502 | goto out_master_put; | ||
503 | } | ||
504 | |||
505 | dspi->clk = devm_clk_get(&pdev->dev, "dspi"); | ||
506 | if (IS_ERR(dspi->clk)) { | ||
507 | ret = PTR_ERR(dspi->clk); | ||
508 | dev_err(&pdev->dev, "unable to get clock\n"); | ||
509 | goto out_master_put; | ||
510 | } | ||
511 | clk_prepare_enable(dspi->clk); | ||
512 | |||
513 | init_waitqueue_head(&dspi->waitq); | ||
514 | platform_set_drvdata(pdev, dspi); | ||
515 | |||
516 | ret = spi_bitbang_start(&dspi->bitbang); | ||
517 | if (ret != 0) { | ||
518 | dev_err(&pdev->dev, "Problem registering DSPI master\n"); | ||
519 | goto out_clk_put; | ||
520 | } | ||
521 | |||
522 | pr_info("Freescale DSPI master initialized\n"); | ||
523 | return ret; | ||
524 | |||
525 | out_clk_put: | ||
526 | clk_disable_unprepare(dspi->clk); | ||
527 | out_master_put: | ||
528 | spi_master_put(master); | ||
529 | platform_set_drvdata(pdev, NULL); | ||
530 | |||
531 | return ret; | ||
532 | } | ||
533 | |||
534 | static int dspi_remove(struct platform_device *pdev) | ||
535 | { | ||
536 | struct fsl_dspi *dspi = platform_get_drvdata(pdev); | ||
537 | |||
538 | /* Disconnect from the SPI framework */ | ||
539 | spi_bitbang_stop(&dspi->bitbang); | ||
540 | spi_master_put(dspi->bitbang.master); | ||
541 | |||
542 | return 0; | ||
543 | } | ||
544 | |||
545 | static struct platform_driver fsl_dspi_driver = { | ||
546 | .driver.name = DRIVER_NAME, | ||
547 | .driver.of_match_table = fsl_dspi_dt_ids, | ||
548 | .driver.owner = THIS_MODULE, | ||
549 | .driver.pm = &dspi_pm, | ||
550 | .probe = dspi_probe, | ||
551 | .remove = dspi_remove, | ||
552 | }; | ||
553 | module_platform_driver(fsl_dspi_driver); | ||
554 | |||
555 | MODULE_DESCRIPTION("Freescale DSPI Controller Driver"); | ||
556 | MODULE_LICENSE("GPL v2"); | ||
557 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
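Note: in the new fsl-dspi driver above, dspi_txrx_transfer() fills the 4-entry TX FIFO, sleeps on dspi->waitq, and dspi_interrupt() drains the RX FIFO on the End-Of-Queue flag, either refilling the FIFO or setting waitflags and waking the caller. As an illustration of the CTAR macros (values chosen for the example, not taken from real board data), a mode-0, 8-bit configuration using prescaler index 1 and scaler index 4 would be built as:

    u32 ctar = SPI_FRAME_BITS(8)        /* FMSZ = 7, i.e. 8-bit frames           */
             | SPI_CTAR_CPOL(0)
             | SPI_CTAR_CPHA(0)
             | SPI_CTAR_PBR(1)          /* index 1 in pbr_tbl[] -> prescale by 3 */
             | SPI_CTAR_BR(4);          /* index 4 in brs[]     -> scale by 16   */

hz_to_spi_baud() picks such a (pbr, br) pair by scanning the two tables for the first product that covers clkrate / 2 / speed_hz.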
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index 6a74d7848d93..b8f1103fe28e 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c | |||
@@ -584,7 +584,7 @@ static void fsl_espi_remove(struct mpc8xxx_spi *mspi) | |||
584 | static struct spi_master * fsl_espi_probe(struct device *dev, | 584 | static struct spi_master * fsl_espi_probe(struct device *dev, |
585 | struct resource *mem, unsigned int irq) | 585 | struct resource *mem, unsigned int irq) |
586 | { | 586 | { |
587 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 587 | struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); |
588 | struct spi_master *master; | 588 | struct spi_master *master; |
589 | struct mpc8xxx_spi *mpc8xxx_spi; | 589 | struct mpc8xxx_spi *mpc8xxx_spi; |
590 | struct fsl_espi_reg *reg_base; | 590 | struct fsl_espi_reg *reg_base; |
@@ -665,7 +665,7 @@ err: | |||
665 | static int of_fsl_espi_get_chipselects(struct device *dev) | 665 | static int of_fsl_espi_get_chipselects(struct device *dev) |
666 | { | 666 | { |
667 | struct device_node *np = dev->of_node; | 667 | struct device_node *np = dev->of_node; |
668 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 668 | struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); |
669 | const u32 *prop; | 669 | const u32 *prop; |
670 | int len; | 670 | int len; |
671 | 671 | ||
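Note: this hunk is the first of many in the series that replace open-coded dev->platform_data reads with the dev_get_platdata() accessor; the same mechanical change repeats in spi-fsl-lib, spi-fsl-spi, spi-gpio, the PSC drivers, spi-nuc900 and spi-oc-tiny below. The accessor itself is a trivial inline from <linux/device.h>:

    static inline void *dev_get_platdata(const struct device *dev)
    {
            return dev->platform_data;
    }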
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c index e947f2d1b2f5..0b75f26158ab 100644 --- a/drivers/spi/spi-fsl-lib.c +++ b/drivers/spi/spi-fsl-lib.c | |||
@@ -122,7 +122,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags) | |||
122 | int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, | 122 | int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, |
123 | unsigned int irq) | 123 | unsigned int irq) |
124 | { | 124 | { |
125 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 125 | struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); |
126 | struct spi_master *master; | 126 | struct spi_master *master; |
127 | struct mpc8xxx_spi *mpc8xxx_spi; | 127 | struct mpc8xxx_spi *mpc8xxx_spi; |
128 | int ret = 0; | 128 | int ret = 0; |
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 41e89c3e3edc..bbc94294891c 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c | |||
@@ -574,7 +574,7 @@ static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on) | |||
574 | 574 | ||
575 | static void fsl_spi_grlib_probe(struct device *dev) | 575 | static void fsl_spi_grlib_probe(struct device *dev) |
576 | { | 576 | { |
577 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 577 | struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); |
578 | struct spi_master *master = dev_get_drvdata(dev); | 578 | struct spi_master *master = dev_get_drvdata(dev); |
579 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master); | 579 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master); |
580 | struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base; | 580 | struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base; |
@@ -600,7 +600,7 @@ static void fsl_spi_grlib_probe(struct device *dev) | |||
600 | static struct spi_master * fsl_spi_probe(struct device *dev, | 600 | static struct spi_master * fsl_spi_probe(struct device *dev, |
601 | struct resource *mem, unsigned int irq) | 601 | struct resource *mem, unsigned int irq) |
602 | { | 602 | { |
603 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 603 | struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); |
604 | struct spi_master *master; | 604 | struct spi_master *master; |
605 | struct mpc8xxx_spi *mpc8xxx_spi; | 605 | struct mpc8xxx_spi *mpc8xxx_spi; |
606 | struct fsl_spi_reg *reg_base; | 606 | struct fsl_spi_reg *reg_base; |
@@ -700,7 +700,8 @@ err: | |||
700 | static void fsl_spi_cs_control(struct spi_device *spi, bool on) | 700 | static void fsl_spi_cs_control(struct spi_device *spi, bool on) |
701 | { | 701 | { |
702 | struct device *dev = spi->dev.parent->parent; | 702 | struct device *dev = spi->dev.parent->parent; |
703 | struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); | 703 | struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); |
704 | struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); | ||
704 | u16 cs = spi->chip_select; | 705 | u16 cs = spi->chip_select; |
705 | int gpio = pinfo->gpios[cs]; | 706 | int gpio = pinfo->gpios[cs]; |
706 | bool alow = pinfo->alow_flags[cs]; | 707 | bool alow = pinfo->alow_flags[cs]; |
@@ -711,7 +712,7 @@ static void fsl_spi_cs_control(struct spi_device *spi, bool on) | |||
711 | static int of_fsl_spi_get_chipselects(struct device *dev) | 712 | static int of_fsl_spi_get_chipselects(struct device *dev) |
712 | { | 713 | { |
713 | struct device_node *np = dev->of_node; | 714 | struct device_node *np = dev->of_node; |
714 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 715 | struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); |
715 | struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); | 716 | struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); |
716 | int ngpios; | 717 | int ngpios; |
717 | int i = 0; | 718 | int i = 0; |
@@ -790,7 +791,7 @@ err_alloc_flags: | |||
790 | 791 | ||
791 | static int of_fsl_spi_free_chipselects(struct device *dev) | 792 | static int of_fsl_spi_free_chipselects(struct device *dev) |
792 | { | 793 | { |
793 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 794 | struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); |
794 | struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); | 795 | struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); |
795 | int i; | 796 | int i; |
796 | 797 | ||
@@ -889,7 +890,7 @@ static int plat_mpc8xxx_spi_probe(struct platform_device *pdev) | |||
889 | int irq; | 890 | int irq; |
890 | struct spi_master *master; | 891 | struct spi_master *master; |
891 | 892 | ||
892 | if (!pdev->dev.platform_data) | 893 | if (!dev_get_platdata(&pdev->dev)) |
893 | return -EINVAL; | 894 | return -EINVAL; |
894 | 895 | ||
895 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 896 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index a54524cf42cc..68b69fec13a9 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c | |||
@@ -420,7 +420,7 @@ static int spi_gpio_probe(struct platform_device *pdev) | |||
420 | if (status > 0) | 420 | if (status > 0) |
421 | use_of = 1; | 421 | use_of = 1; |
422 | 422 | ||
423 | pdata = pdev->dev.platform_data; | 423 | pdata = dev_get_platdata(&pdev->dev); |
424 | #ifdef GENERIC_BITBANG | 424 | #ifdef GENERIC_BITBANG |
425 | if (!pdata || !pdata->num_chipselect) | 425 | if (!pdata || !pdata->num_chipselect) |
426 | return -ENODEV; | 426 | return -ENODEV; |
@@ -506,7 +506,7 @@ static int spi_gpio_remove(struct platform_device *pdev) | |||
506 | int status; | 506 | int status; |
507 | 507 | ||
508 | spi_gpio = platform_get_drvdata(pdev); | 508 | spi_gpio = platform_get_drvdata(pdev); |
509 | pdata = pdev->dev.platform_data; | 509 | pdata = dev_get_platdata(&pdev->dev); |
510 | 510 | ||
511 | /* stop() unregisters child devices too */ | 511 | /* stop() unregisters child devices too */ |
512 | status = spi_bitbang_stop(&spi_gpio->bitbang); | 512 | status = spi_bitbang_stop(&spi_gpio->bitbang); |
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 7db4f43ee4d8..15323d8bd9cf 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
@@ -619,6 +619,7 @@ static const struct of_device_id spi_imx_dt_ids[] = { | |||
619 | { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, }, | 619 | { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, }, |
620 | { /* sentinel */ } | 620 | { /* sentinel */ } |
621 | }; | 621 | }; |
622 | MODULE_DEVICE_TABLE(of, spi_imx_dt_ids); | ||
622 | 623 | ||
623 | static void spi_imx_chipselect(struct spi_device *spi, int is_active) | 624 | static void spi_imx_chipselect(struct spi_device *spi, int is_active) |
624 | { | 625 | { |
@@ -796,10 +797,11 @@ static int spi_imx_probe(struct platform_device *pdev) | |||
796 | if (!gpio_is_valid(cs_gpio)) | 797 | if (!gpio_is_valid(cs_gpio)) |
797 | continue; | 798 | continue; |
798 | 799 | ||
799 | ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); | 800 | ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i], |
801 | DRIVER_NAME); | ||
800 | if (ret) { | 802 | if (ret) { |
801 | dev_err(&pdev->dev, "can't get cs gpios\n"); | 803 | dev_err(&pdev->dev, "can't get cs gpios\n"); |
802 | goto out_gpio_free; | 804 | goto out_master_put; |
803 | } | 805 | } |
804 | } | 806 | } |
805 | 807 | ||
@@ -816,50 +818,44 @@ static int spi_imx_probe(struct platform_device *pdev) | |||
816 | (struct spi_imx_devtype_data *) pdev->id_entry->driver_data; | 818 | (struct spi_imx_devtype_data *) pdev->id_entry->driver_data; |
817 | 819 | ||
818 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 820 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
819 | if (!res) { | 821 | spi_imx->base = devm_ioremap_resource(&pdev->dev, res); |
820 | dev_err(&pdev->dev, "can't get platform resource\n"); | 822 | if (IS_ERR(spi_imx->base)) { |
821 | ret = -ENOMEM; | 823 | ret = PTR_ERR(spi_imx->base); |
822 | goto out_gpio_free; | 824 | goto out_master_put; |
823 | } | ||
824 | |||
825 | if (!request_mem_region(res->start, resource_size(res), pdev->name)) { | ||
826 | dev_err(&pdev->dev, "request_mem_region failed\n"); | ||
827 | ret = -EBUSY; | ||
828 | goto out_gpio_free; | ||
829 | } | ||
830 | |||
831 | spi_imx->base = ioremap(res->start, resource_size(res)); | ||
832 | if (!spi_imx->base) { | ||
833 | ret = -EINVAL; | ||
834 | goto out_release_mem; | ||
835 | } | 825 | } |
836 | 826 | ||
837 | spi_imx->irq = platform_get_irq(pdev, 0); | 827 | spi_imx->irq = platform_get_irq(pdev, 0); |
838 | if (spi_imx->irq < 0) { | 828 | if (spi_imx->irq < 0) { |
839 | ret = -EINVAL; | 829 | ret = -EINVAL; |
840 | goto out_iounmap; | 830 | goto out_master_put; |
841 | } | 831 | } |
842 | 832 | ||
843 | ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx); | 833 | ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0, |
834 | DRIVER_NAME, spi_imx); | ||
844 | if (ret) { | 835 | if (ret) { |
845 | dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); | 836 | dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); |
846 | goto out_iounmap; | 837 | goto out_master_put; |
847 | } | 838 | } |
848 | 839 | ||
849 | spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); | 840 | spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); |
850 | if (IS_ERR(spi_imx->clk_ipg)) { | 841 | if (IS_ERR(spi_imx->clk_ipg)) { |
851 | ret = PTR_ERR(spi_imx->clk_ipg); | 842 | ret = PTR_ERR(spi_imx->clk_ipg); |
852 | goto out_free_irq; | 843 | goto out_master_put; |
853 | } | 844 | } |
854 | 845 | ||
855 | spi_imx->clk_per = devm_clk_get(&pdev->dev, "per"); | 846 | spi_imx->clk_per = devm_clk_get(&pdev->dev, "per"); |
856 | if (IS_ERR(spi_imx->clk_per)) { | 847 | if (IS_ERR(spi_imx->clk_per)) { |
857 | ret = PTR_ERR(spi_imx->clk_per); | 848 | ret = PTR_ERR(spi_imx->clk_per); |
858 | goto out_free_irq; | 849 | goto out_master_put; |
859 | } | 850 | } |
860 | 851 | ||
861 | clk_prepare_enable(spi_imx->clk_per); | 852 | ret = clk_prepare_enable(spi_imx->clk_per); |
862 | clk_prepare_enable(spi_imx->clk_ipg); | 853 | if (ret) |
854 | goto out_master_put; | ||
855 | |||
856 | ret = clk_prepare_enable(spi_imx->clk_ipg); | ||
857 | if (ret) | ||
858 | goto out_put_per; | ||
863 | 859 | ||
864 | spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); | 860 | spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); |
865 | 861 | ||
@@ -879,47 +875,27 @@ static int spi_imx_probe(struct platform_device *pdev) | |||
879 | return ret; | 875 | return ret; |
880 | 876 | ||
881 | out_clk_put: | 877 | out_clk_put: |
882 | clk_disable_unprepare(spi_imx->clk_per); | ||
883 | clk_disable_unprepare(spi_imx->clk_ipg); | 878 | clk_disable_unprepare(spi_imx->clk_ipg); |
884 | out_free_irq: | 879 | out_put_per: |
885 | free_irq(spi_imx->irq, spi_imx); | 880 | clk_disable_unprepare(spi_imx->clk_per); |
886 | out_iounmap: | 881 | out_master_put: |
887 | iounmap(spi_imx->base); | ||
888 | out_release_mem: | ||
889 | release_mem_region(res->start, resource_size(res)); | ||
890 | out_gpio_free: | ||
891 | while (--i >= 0) { | ||
892 | if (gpio_is_valid(spi_imx->chipselect[i])) | ||
893 | gpio_free(spi_imx->chipselect[i]); | ||
894 | } | ||
895 | spi_master_put(master); | 882 | spi_master_put(master); |
896 | kfree(master); | 883 | |
897 | return ret; | 884 | return ret; |
898 | } | 885 | } |
899 | 886 | ||
900 | static int spi_imx_remove(struct platform_device *pdev) | 887 | static int spi_imx_remove(struct platform_device *pdev) |
901 | { | 888 | { |
902 | struct spi_master *master = platform_get_drvdata(pdev); | 889 | struct spi_master *master = platform_get_drvdata(pdev); |
903 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
904 | struct spi_imx_data *spi_imx = spi_master_get_devdata(master); | 890 | struct spi_imx_data *spi_imx = spi_master_get_devdata(master); |
905 | int i; | ||
906 | 891 | ||
907 | spi_bitbang_stop(&spi_imx->bitbang); | 892 | spi_bitbang_stop(&spi_imx->bitbang); |
908 | 893 | ||
909 | writel(0, spi_imx->base + MXC_CSPICTRL); | 894 | writel(0, spi_imx->base + MXC_CSPICTRL); |
910 | clk_disable_unprepare(spi_imx->clk_per); | ||
911 | clk_disable_unprepare(spi_imx->clk_ipg); | 895 | clk_disable_unprepare(spi_imx->clk_ipg); |
912 | free_irq(spi_imx->irq, spi_imx); | 896 | clk_disable_unprepare(spi_imx->clk_per); |
913 | iounmap(spi_imx->base); | ||
914 | |||
915 | for (i = 0; i < master->num_chipselect; i++) | ||
916 | if (gpio_is_valid(spi_imx->chipselect[i])) | ||
917 | gpio_free(spi_imx->chipselect[i]); | ||
918 | |||
919 | spi_master_put(master); | 897 | spi_master_put(master); |
920 | 898 | ||
921 | release_mem_region(res->start, resource_size(res)); | ||
922 | |||
923 | return 0; | 899 | return 0; |
924 | } | 900 | } |
925 | 901 | ||
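Note: the spi-imx hunks switch the probe path to managed (devm_*) resource helpers, which is why the hand-rolled gpio_free()/iounmap()/release_mem_region() unwinding can be deleted from both the error path and remove(). The idiom, sketched with the driver's own names:

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    spi_imx->base = devm_ioremap_resource(&pdev->dev, res);  /* requests and maps the region */
    if (IS_ERR(spi_imx->base))
            return PTR_ERR(spi_imx->base);                   /* a NULL res is caught inside the helper */

Everything acquired this way is released automatically when the device is unbound.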
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 29fce6af5145..dbc5e999a1f5 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c | |||
@@ -38,7 +38,8 @@ struct mpc512x_psc_spi { | |||
38 | struct mpc512x_psc_fifo __iomem *fifo; | 38 | struct mpc512x_psc_fifo __iomem *fifo; |
39 | unsigned int irq; | 39 | unsigned int irq; |
40 | u8 bits_per_word; | 40 | u8 bits_per_word; |
41 | u32 mclk; | 41 | struct clk *clk_mclk; |
42 | u32 mclk_rate; | ||
42 | 43 | ||
43 | struct completion txisrdone; | 44 | struct completion txisrdone; |
44 | }; | 45 | }; |
@@ -72,6 +73,7 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi) | |||
72 | struct mpc52xx_psc __iomem *psc = mps->psc; | 73 | struct mpc52xx_psc __iomem *psc = mps->psc; |
73 | u32 sicr; | 74 | u32 sicr; |
74 | u32 ccr; | 75 | u32 ccr; |
76 | int speed; | ||
75 | u16 bclkdiv; | 77 | u16 bclkdiv; |
76 | 78 | ||
77 | sicr = in_be32(&psc->sicr); | 79 | sicr = in_be32(&psc->sicr); |
@@ -95,10 +97,10 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi) | |||
95 | 97 | ||
96 | ccr = in_be32(&psc->ccr); | 98 | ccr = in_be32(&psc->ccr); |
97 | ccr &= 0xFF000000; | 99 | ccr &= 0xFF000000; |
98 | if (cs->speed_hz) | 100 | speed = cs->speed_hz; |
99 | bclkdiv = (mps->mclk / cs->speed_hz) - 1; | 101 | if (!speed) |
100 | else | 102 | speed = 1000000; /* default 1MHz */ |
101 | bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ | 103 | bclkdiv = (mps->mclk_rate / speed) - 1; |
102 | 104 | ||
103 | ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); | 105 | ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); |
104 | out_be32(&psc->ccr, ccr); | 106 | out_be32(&psc->ccr, ccr); |
@@ -386,19 +388,11 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master, | |||
386 | { | 388 | { |
387 | struct mpc52xx_psc __iomem *psc = mps->psc; | 389 | struct mpc52xx_psc __iomem *psc = mps->psc; |
388 | struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; | 390 | struct mpc512x_psc_fifo __iomem *fifo = mps->fifo; |
389 | struct clk *spiclk; | ||
390 | int ret = 0; | ||
391 | char name[32]; | ||
392 | u32 sicr; | 391 | u32 sicr; |
393 | u32 ccr; | 392 | u32 ccr; |
393 | int speed; | ||
394 | u16 bclkdiv; | 394 | u16 bclkdiv; |
395 | 395 | ||
396 | sprintf(name, "psc%d_mclk", master->bus_num); | ||
397 | spiclk = clk_get(&master->dev, name); | ||
398 | clk_enable(spiclk); | ||
399 | mps->mclk = clk_get_rate(spiclk); | ||
400 | clk_put(spiclk); | ||
401 | |||
402 | /* Reset the PSC into a known state */ | 396 | /* Reset the PSC into a known state */ |
403 | out_8(&psc->command, MPC52xx_PSC_RST_RX); | 397 | out_8(&psc->command, MPC52xx_PSC_RST_RX); |
404 | out_8(&psc->command, MPC52xx_PSC_RST_TX); | 398 | out_8(&psc->command, MPC52xx_PSC_RST_TX); |
@@ -425,7 +419,8 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master, | |||
425 | 419 | ||
426 | ccr = in_be32(&psc->ccr); | 420 | ccr = in_be32(&psc->ccr); |
427 | ccr &= 0xFF000000; | 421 | ccr &= 0xFF000000; |
428 | bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */ | 422 | speed = 1000000; /* default 1MHz */ |
423 | bclkdiv = (mps->mclk_rate / speed) - 1; | ||
429 | ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); | 424 | ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8)); |
430 | out_be32(&psc->ccr, ccr); | 425 | out_be32(&psc->ccr, ccr); |
431 | 426 | ||
@@ -445,7 +440,7 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master, | |||
445 | 440 | ||
446 | mps->bits_per_word = 8; | 441 | mps->bits_per_word = 8; |
447 | 442 | ||
448 | return ret; | 443 | return 0; |
449 | } | 444 | } |
450 | 445 | ||
451 | static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id) | 446 | static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id) |
@@ -474,11 +469,14 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, | |||
474 | u32 size, unsigned int irq, | 469 | u32 size, unsigned int irq, |
475 | s16 bus_num) | 470 | s16 bus_num) |
476 | { | 471 | { |
477 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 472 | struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); |
478 | struct mpc512x_psc_spi *mps; | 473 | struct mpc512x_psc_spi *mps; |
479 | struct spi_master *master; | 474 | struct spi_master *master; |
480 | int ret; | 475 | int ret; |
481 | void *tempp; | 476 | void *tempp; |
477 | int psc_num; | ||
478 | char clk_name[16]; | ||
479 | struct clk *clk; | ||
482 | 480 | ||
483 | master = spi_alloc_master(dev, sizeof *mps); | 481 | master = spi_alloc_master(dev, sizeof *mps); |
484 | if (master == NULL) | 482 | if (master == NULL) |
@@ -521,16 +519,29 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, | |||
521 | goto free_master; | 519 | goto free_master; |
522 | init_completion(&mps->txisrdone); | 520 | init_completion(&mps->txisrdone); |
523 | 521 | ||
522 | psc_num = master->bus_num; | ||
523 | snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num); | ||
524 | clk = devm_clk_get(dev, clk_name); | ||
525 | if (IS_ERR(clk)) | ||
526 | goto free_irq; | ||
527 | ret = clk_prepare_enable(clk); | ||
528 | if (ret) | ||
529 | goto free_irq; | ||
530 | mps->clk_mclk = clk; | ||
531 | mps->mclk_rate = clk_get_rate(clk); | ||
532 | |||
524 | ret = mpc512x_psc_spi_port_config(master, mps); | 533 | ret = mpc512x_psc_spi_port_config(master, mps); |
525 | if (ret < 0) | 534 | if (ret < 0) |
526 | goto free_irq; | 535 | goto free_clock; |
527 | 536 | ||
528 | ret = spi_register_master(master); | 537 | ret = spi_register_master(master); |
529 | if (ret < 0) | 538 | if (ret < 0) |
530 | goto free_irq; | 539 | goto free_clock; |
531 | 540 | ||
532 | return ret; | 541 | return ret; |
533 | 542 | ||
543 | free_clock: | ||
544 | clk_disable_unprepare(mps->clk_mclk); | ||
534 | free_irq: | 545 | free_irq: |
535 | free_irq(mps->irq, mps); | 546 | free_irq(mps->irq, mps); |
536 | free_master: | 547 | free_master: |
@@ -547,6 +558,7 @@ static int mpc512x_psc_spi_do_remove(struct device *dev) | |||
547 | struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); | 558 | struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); |
548 | 559 | ||
549 | spi_unregister_master(master); | 560 | spi_unregister_master(master); |
561 | clk_disable_unprepare(mps->clk_mclk); | ||
550 | free_irq(mps->irq, mps); | 562 | free_irq(mps->irq, mps); |
551 | if (mps->psc) | 563 | if (mps->psc) |
552 | iounmap(mps->psc); | 564 | iounmap(mps->psc); |
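Note: the mpc512x-psc change acquires the psc%d_mclk clock once in probe (devm_clk_get() plus clk_prepare_enable()) and caches its rate in mclk_rate, so activate_cs() and port_config() only do the divider math. With assumed numbers (mclk_rate = 64 MHz, 1 MHz target speed) the CCR programming works out to:

    u16 bclkdiv = (64000000 / 1000000) - 1;         /* 63 = 0x003f       */
    u32 ccr = (in_be32(&psc->ccr) & 0xFF000000)
            | ((bclkdiv & 0xff) << 16)              /* low byte  -> 0x3f */
            | (((bclkdiv >> 8) & 0xff) << 8);       /* high byte -> 0x00 */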
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c index fed0571d4dec..6e925dc34396 100644 --- a/drivers/spi/spi-mpc52xx-psc.c +++ b/drivers/spi/spi-mpc52xx-psc.c | |||
@@ -366,7 +366,7 @@ static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) | |||
366 | static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, | 366 | static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, |
367 | u32 size, unsigned int irq, s16 bus_num) | 367 | u32 size, unsigned int irq, s16 bus_num) |
368 | { | 368 | { |
369 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 369 | struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); |
370 | struct mpc52xx_psc_spi *mps; | 370 | struct mpc52xx_psc_spi *mps; |
371 | struct spi_master *master; | 371 | struct spi_master *master; |
372 | int ret; | 372 | int ret; |
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 424d38e59421..de7b1141b90f 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c | |||
@@ -67,13 +67,8 @@ static int mxs_spi_setup_transfer(struct spi_device *dev, | |||
67 | { | 67 | { |
68 | struct mxs_spi *spi = spi_master_get_devdata(dev->master); | 68 | struct mxs_spi *spi = spi_master_get_devdata(dev->master); |
69 | struct mxs_ssp *ssp = &spi->ssp; | 69 | struct mxs_ssp *ssp = &spi->ssp; |
70 | uint8_t bits_per_word; | ||
71 | uint32_t hz = 0; | 70 | uint32_t hz = 0; |
72 | 71 | ||
73 | bits_per_word = dev->bits_per_word; | ||
74 | if (t && t->bits_per_word) | ||
75 | bits_per_word = t->bits_per_word; | ||
76 | |||
77 | hz = dev->max_speed_hz; | 72 | hz = dev->max_speed_hz; |
78 | if (t && t->speed_hz) | 73 | if (t && t->speed_hz) |
79 | hz = min(hz, t->speed_hz); | 74 | hz = min(hz, t->speed_hz); |
@@ -513,7 +508,7 @@ static int mxs_spi_probe(struct platform_device *pdev) | |||
513 | 508 | ||
514 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 509 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
515 | irq_err = platform_get_irq(pdev, 0); | 510 | irq_err = platform_get_irq(pdev, 0); |
516 | if (!iores || irq_err < 0) | 511 | if (irq_err < 0) |
517 | return -EINVAL; | 512 | return -EINVAL; |
518 | 513 | ||
519 | base = devm_ioremap_resource(&pdev->dev, iores); | 514 | base = devm_ioremap_resource(&pdev->dev, iores); |
@@ -563,25 +558,31 @@ static int mxs_spi_probe(struct platform_device *pdev) | |||
563 | goto out_master_free; | 558 | goto out_master_free; |
564 | } | 559 | } |
565 | 560 | ||
566 | clk_prepare_enable(ssp->clk); | 561 | ret = clk_prepare_enable(ssp->clk); |
562 | if (ret) | ||
563 | goto out_dma_release; | ||
564 | |||
567 | clk_set_rate(ssp->clk, clk_freq); | 565 | clk_set_rate(ssp->clk, clk_freq); |
568 | ssp->clk_rate = clk_get_rate(ssp->clk) / 1000; | 566 | ssp->clk_rate = clk_get_rate(ssp->clk) / 1000; |
569 | 567 | ||
570 | stmp_reset_block(ssp->base); | 568 | ret = stmp_reset_block(ssp->base); |
569 | if (ret) | ||
570 | goto out_disable_clk; | ||
571 | 571 | ||
572 | platform_set_drvdata(pdev, master); | 572 | platform_set_drvdata(pdev, master); |
573 | 573 | ||
574 | ret = spi_register_master(master); | 574 | ret = spi_register_master(master); |
575 | if (ret) { | 575 | if (ret) { |
576 | dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret); | 576 | dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret); |
577 | goto out_free_dma; | 577 | goto out_disable_clk; |
578 | } | 578 | } |
579 | 579 | ||
580 | return 0; | 580 | return 0; |
581 | 581 | ||
582 | out_free_dma: | 582 | out_disable_clk: |
583 | dma_release_channel(ssp->dmach); | ||
584 | clk_disable_unprepare(ssp->clk); | 583 | clk_disable_unprepare(ssp->clk); |
584 | out_dma_release: | ||
585 | dma_release_channel(ssp->dmach); | ||
585 | out_master_free: | 586 | out_master_free: |
586 | spi_master_put(master); | 587 | spi_master_put(master); |
587 | return ret; | 588 | return ret; |
@@ -598,11 +599,8 @@ static int mxs_spi_remove(struct platform_device *pdev) | |||
598 | ssp = &spi->ssp; | 599 | ssp = &spi->ssp; |
599 | 600 | ||
600 | spi_unregister_master(master); | 601 | spi_unregister_master(master); |
601 | |||
602 | dma_release_channel(ssp->dmach); | ||
603 | |||
604 | clk_disable_unprepare(ssp->clk); | 602 | clk_disable_unprepare(ssp->clk); |
605 | 603 | dma_release_channel(ssp->dmach); | |
606 | spi_master_put(master); | 604 | spi_master_put(master); |
607 | 605 | ||
608 | return 0; | 606 | return 0; |
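Note: the spi-mxs probe now checks the return codes of clk_prepare_enable() and stmp_reset_block(), and the error labels are reordered so the unwind mirrors the setup order: the clock is disabled before the DMA channel is released, matching the new teardown order in remove(). Sketch of the resulting goto chain:

    ret = clk_prepare_enable(ssp->clk);
    if (ret)
            goto out_dma_release;           /* only the DMA channel exists so far */
    ...
    out_disable_clk:
            clk_disable_unprepare(ssp->clk);
    out_dma_release:
            dma_release_channel(ssp->dmach);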
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c index 150d85453c27..47a68b43bcd5 100644 --- a/drivers/spi/spi-nuc900.c +++ b/drivers/spi/spi-nuc900.c | |||
@@ -174,17 +174,6 @@ static void nuc900_spi_gobusy(struct nuc900_spi *hw) | |||
174 | spin_unlock_irqrestore(&hw->lock, flags); | 174 | spin_unlock_irqrestore(&hw->lock, flags); |
175 | } | 175 | } |
176 | 176 | ||
177 | static int nuc900_spi_setupxfer(struct spi_device *spi, | ||
178 | struct spi_transfer *t) | ||
179 | { | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | static int nuc900_spi_setup(struct spi_device *spi) | ||
184 | { | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count) | 177 | static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count) |
189 | { | 178 | { |
190 | return hw->tx ? hw->tx[count] : 0; | 179 | return hw->tx ? hw->tx[count] : 0; |
@@ -361,7 +350,7 @@ static int nuc900_spi_probe(struct platform_device *pdev) | |||
361 | 350 | ||
362 | hw = spi_master_get_devdata(master); | 351 | hw = spi_master_get_devdata(master); |
363 | hw->master = spi_master_get(master); | 352 | hw->master = spi_master_get(master); |
364 | hw->pdata = pdev->dev.platform_data; | 353 | hw->pdata = dev_get_platdata(&pdev->dev); |
365 | hw->dev = &pdev->dev; | 354 | hw->dev = &pdev->dev; |
366 | 355 | ||
367 | if (hw->pdata == NULL) { | 356 | if (hw->pdata == NULL) { |
@@ -373,14 +362,12 @@ static int nuc900_spi_probe(struct platform_device *pdev) | |||
373 | platform_set_drvdata(pdev, hw); | 362 | platform_set_drvdata(pdev, hw); |
374 | init_completion(&hw->done); | 363 | init_completion(&hw->done); |
375 | 364 | ||
376 | master->mode_bits = SPI_MODE_0; | 365 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
377 | master->num_chipselect = hw->pdata->num_cs; | 366 | master->num_chipselect = hw->pdata->num_cs; |
378 | master->bus_num = hw->pdata->bus_num; | 367 | master->bus_num = hw->pdata->bus_num; |
379 | hw->bitbang.master = hw->master; | 368 | hw->bitbang.master = hw->master; |
380 | hw->bitbang.setup_transfer = nuc900_spi_setupxfer; | ||
381 | hw->bitbang.chipselect = nuc900_spi_chipsel; | 369 | hw->bitbang.chipselect = nuc900_spi_chipsel; |
382 | hw->bitbang.txrx_bufs = nuc900_spi_txrx; | 370 | hw->bitbang.txrx_bufs = nuc900_spi_txrx; |
383 | hw->bitbang.master->setup = nuc900_spi_setup; | ||
384 | 371 | ||
385 | hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 372 | hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
386 | if (hw->res == NULL) { | 373 | if (hw->res == NULL) { |
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c index 58deb79d046b..333cb1badcd7 100644 --- a/drivers/spi/spi-oc-tiny.c +++ b/drivers/spi/spi-oc-tiny.c | |||
@@ -285,7 +285,7 @@ static int tiny_spi_of_probe(struct platform_device *pdev) | |||
285 | 285 | ||
286 | static int tiny_spi_probe(struct platform_device *pdev) | 286 | static int tiny_spi_probe(struct platform_device *pdev) |
287 | { | 287 | { |
288 | struct tiny_spi_platform_data *platp = pdev->dev.platform_data; | 288 | struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev); |
289 | struct tiny_spi *hw; | 289 | struct tiny_spi *hw; |
290 | struct spi_master *master; | 290 | struct spi_master *master; |
291 | struct resource *res; | 291 | struct resource *res; |
@@ -315,15 +315,11 @@ static int tiny_spi_probe(struct platform_device *pdev) | |||
315 | 315 | ||
316 | /* find and map our resources */ | 316 | /* find and map our resources */ |
317 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 317 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
318 | if (!res) | 318 | hw->base = devm_ioremap_resource(&pdev->dev, res); |
319 | goto exit_busy; | 319 | if (IS_ERR(hw->base)) { |
320 | if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), | 320 | err = PTR_ERR(hw->base); |
321 | pdev->name)) | 321 | goto exit; |
322 | goto exit_busy; | 322 | } |
323 | hw->base = devm_ioremap_nocache(&pdev->dev, res->start, | ||
324 | resource_size(res)); | ||
325 | if (!hw->base) | ||
326 | goto exit_busy; | ||
327 | /* irq is optional */ | 323 | /* irq is optional */ |
328 | hw->irq = platform_get_irq(pdev, 0); | 324 | hw->irq = platform_get_irq(pdev, 0); |
329 | if (hw->irq >= 0) { | 325 | if (hw->irq >= 0) { |
@@ -337,8 +333,10 @@ static int tiny_spi_probe(struct platform_device *pdev) | |||
337 | if (platp) { | 333 | if (platp) { |
338 | hw->gpio_cs_count = platp->gpio_cs_count; | 334 | hw->gpio_cs_count = platp->gpio_cs_count; |
339 | hw->gpio_cs = platp->gpio_cs; | 335 | hw->gpio_cs = platp->gpio_cs; |
340 | if (platp->gpio_cs_count && !platp->gpio_cs) | 336 | if (platp->gpio_cs_count && !platp->gpio_cs) { |
341 | goto exit_busy; | 337 | err = -EBUSY; |
338 | goto exit; | ||
339 | } | ||
342 | hw->freq = platp->freq; | 340 | hw->freq = platp->freq; |
343 | hw->baudwidth = platp->baudwidth; | 341 | hw->baudwidth = platp->baudwidth; |
344 | } else { | 342 | } else { |
@@ -365,8 +363,6 @@ static int tiny_spi_probe(struct platform_device *pdev) | |||
365 | exit_gpio: | 363 | exit_gpio: |
366 | while (i-- > 0) | 364 | while (i-- > 0) |
367 | gpio_free(hw->gpio_cs[i]); | 365 | gpio_free(hw->gpio_cs[i]); |
368 | exit_busy: | ||
369 | err = -EBUSY; | ||
370 | exit: | 366 | exit: |
371 | spi_master_put(master); | 367 | spi_master_put(master); |
372 | return err; | 368 | return err; |
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c index 24daf964a409..5f28ddbe4f7e 100644 --- a/drivers/spi/spi-octeon.c +++ b/drivers/spi/spi-octeon.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #define OCTEON_SPI_MAX_CLOCK_HZ 16000000 | 28 | #define OCTEON_SPI_MAX_CLOCK_HZ 16000000 |
29 | 29 | ||
30 | struct octeon_spi { | 30 | struct octeon_spi { |
31 | struct spi_master *my_master; | ||
32 | u64 register_base; | 31 | u64 register_base; |
33 | u64 last_cfg; | 32 | u64 last_cfg; |
34 | u64 cs_enax; | 33 | u64 cs_enax; |
@@ -64,7 +63,6 @@ static int octeon_spi_do_transfer(struct octeon_spi *p, | |||
64 | unsigned int speed_hz; | 63 | unsigned int speed_hz; |
65 | int mode; | 64 | int mode; |
66 | bool cpha, cpol; | 65 | bool cpha, cpol; |
67 | int bits_per_word; | ||
68 | const u8 *tx_buf; | 66 | const u8 *tx_buf; |
69 | u8 *rx_buf; | 67 | u8 *rx_buf; |
70 | int len; | 68 | int len; |
@@ -76,12 +74,9 @@ static int octeon_spi_do_transfer(struct octeon_spi *p, | |||
76 | mode = msg_setup->mode; | 74 | mode = msg_setup->mode; |
77 | cpha = mode & SPI_CPHA; | 75 | cpha = mode & SPI_CPHA; |
78 | cpol = mode & SPI_CPOL; | 76 | cpol = mode & SPI_CPOL; |
79 | bits_per_word = msg_setup->bits_per_word; | ||
80 | 77 | ||
81 | if (xfer->speed_hz) | 78 | if (xfer->speed_hz) |
82 | speed_hz = xfer->speed_hz; | 79 | speed_hz = xfer->speed_hz; |
83 | if (xfer->bits_per_word) | ||
84 | bits_per_word = xfer->bits_per_word; | ||
85 | 80 | ||
86 | if (speed_hz > OCTEON_SPI_MAX_CLOCK_HZ) | 81 | if (speed_hz > OCTEON_SPI_MAX_CLOCK_HZ) |
87 | speed_hz = OCTEON_SPI_MAX_CLOCK_HZ; | 82 | speed_hz = OCTEON_SPI_MAX_CLOCK_HZ; |
@@ -166,19 +161,6 @@ static int octeon_spi_do_transfer(struct octeon_spi *p, | |||
166 | return xfer->len; | 161 | return xfer->len; |
167 | } | 162 | } |
168 | 163 | ||
169 | static int octeon_spi_validate_bpw(struct spi_device *spi, u32 speed) | ||
170 | { | ||
171 | switch (speed) { | ||
172 | case 8: | ||
173 | break; | ||
174 | default: | ||
175 | dev_err(&spi->dev, "Error: %d bits per word not supported\n", | ||
176 | speed); | ||
177 | return -EINVAL; | ||
178 | } | ||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | static int octeon_spi_transfer_one_message(struct spi_master *master, | 164 | static int octeon_spi_transfer_one_message(struct spi_master *master, |
183 | struct spi_message *msg) | 165 | struct spi_message *msg) |
184 | { | 166 | { |
@@ -197,15 +179,6 @@ static int octeon_spi_transfer_one_message(struct spi_master *master, | |||
197 | } | 179 | } |
198 | 180 | ||
199 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 181 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
200 | if (xfer->bits_per_word) { | ||
201 | status = octeon_spi_validate_bpw(msg->spi, | ||
202 | xfer->bits_per_word); | ||
203 | if (status) | ||
204 | goto err; | ||
205 | } | ||
206 | } | ||
207 | |||
208 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
209 | bool last_xfer = &xfer->transfer_list == msg->transfers.prev; | 182 | bool last_xfer = &xfer->transfer_list == msg->transfers.prev; |
210 | int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer); | 183 | int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer); |
211 | if (r < 0) { | 184 | if (r < 0) { |
@@ -236,14 +209,9 @@ static struct octeon_spi_setup *octeon_spi_new_setup(struct spi_device *spi) | |||
236 | 209 | ||
237 | static int octeon_spi_setup(struct spi_device *spi) | 210 | static int octeon_spi_setup(struct spi_device *spi) |
238 | { | 211 | { |
239 | int r; | ||
240 | struct octeon_spi_setup *new_setup; | 212 | struct octeon_spi_setup *new_setup; |
241 | struct octeon_spi_setup *old_setup = spi_get_ctldata(spi); | 213 | struct octeon_spi_setup *old_setup = spi_get_ctldata(spi); |
242 | 214 | ||
243 | r = octeon_spi_validate_bpw(spi, spi->bits_per_word); | ||
244 | if (r) | ||
245 | return r; | ||
246 | |||
247 | new_setup = octeon_spi_new_setup(spi); | 215 | new_setup = octeon_spi_new_setup(spi); |
248 | if (!new_setup) | 216 | if (!new_setup) |
249 | return -ENOMEM; | 217 | return -ENOMEM; |
@@ -261,14 +229,8 @@ static void octeon_spi_cleanup(struct spi_device *spi) | |||
261 | kfree(old_setup); | 229 | kfree(old_setup); |
262 | } | 230 | } |
263 | 231 | ||
264 | static int octeon_spi_nop_transfer_hardware(struct spi_master *master) | ||
265 | { | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static int octeon_spi_probe(struct platform_device *pdev) | 232 | static int octeon_spi_probe(struct platform_device *pdev) |
270 | { | 233 | { |
271 | |||
272 | struct resource *res_mem; | 234 | struct resource *res_mem; |
273 | struct spi_master *master; | 235 | struct spi_master *master; |
274 | struct octeon_spi *p; | 236 | struct octeon_spi *p; |
@@ -278,8 +240,7 @@ static int octeon_spi_probe(struct platform_device *pdev) | |||
278 | if (!master) | 240 | if (!master) |
279 | return -ENOMEM; | 241 | return -ENOMEM; |
280 | p = spi_master_get_devdata(master); | 242 | p = spi_master_get_devdata(master); |
281 | platform_set_drvdata(pdev, p); | 243 | platform_set_drvdata(pdev, master); |
282 | p->my_master = master; | ||
283 | 244 | ||
284 | res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 245 | res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
285 | 246 | ||
@@ -307,9 +268,8 @@ static int octeon_spi_probe(struct platform_device *pdev) | |||
307 | 268 | ||
308 | master->setup = octeon_spi_setup; | 269 | master->setup = octeon_spi_setup; |
309 | master->cleanup = octeon_spi_cleanup; | 270 | master->cleanup = octeon_spi_cleanup; |
310 | master->prepare_transfer_hardware = octeon_spi_nop_transfer_hardware; | ||
311 | master->transfer_one_message = octeon_spi_transfer_one_message; | 271 | master->transfer_one_message = octeon_spi_transfer_one_message; |
312 | master->unprepare_transfer_hardware = octeon_spi_nop_transfer_hardware; | 272 | master->bits_per_word_mask = SPI_BPW_MASK(8); |
313 | 273 | ||
314 | master->dev.of_node = pdev->dev.of_node; | 274 | master->dev.of_node = pdev->dev.of_node; |
315 | err = spi_register_master(master); | 275 | err = spi_register_master(master); |
@@ -328,10 +288,11 @@ fail: | |||
328 | 288 | ||
329 | static int octeon_spi_remove(struct platform_device *pdev) | 289 | static int octeon_spi_remove(struct platform_device *pdev) |
330 | { | 290 | { |
331 | struct octeon_spi *p = platform_get_drvdata(pdev); | 291 | struct spi_master *master = platform_get_drvdata(pdev); |
292 | struct octeon_spi *p = spi_master_get_devdata(master); | ||
332 | u64 register_base = p->register_base; | 293 | u64 register_base = p->register_base; |
333 | 294 | ||
334 | spi_unregister_master(p->my_master); | 295 | spi_unregister_master(master); |
335 | 296 | ||
336 | /* Clear the CSENA* and put everything in a known state. */ | 297 | /* Clear the CSENA* and put everything in a known state. */ |
337 | cvmx_write_csr(register_base + OCTEON_SPI_CFG, 0); | 298 | cvmx_write_csr(register_base + OCTEON_SPI_CFG, 0); |
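The octeon hunks above drop the driver's open-coded 8-bit check and its private my_master back-pointer: the core validates word sizes from bits_per_word_mask, and drvdata now holds the spi_master itself. A minimal sketch of that pattern (driver names are placeholders, not from this patch):

        #include <linux/module.h>
        #include <linux/platform_device.h>
        #include <linux/spi/spi.h>

        static int example_spi_probe(struct platform_device *pdev)
        {
                struct spi_master *master;
                int err;

                master = spi_alloc_master(&pdev->dev, 0);
                if (!master)
                        return -ENOMEM;

                /* Let the SPI core reject unsupported word sizes up front. */
                master->bits_per_word_mask = SPI_BPW_MASK(8);

                /* Store the master itself; remove() needs no private back-pointer. */
                platform_set_drvdata(pdev, master);

                err = spi_register_master(master);
                if (err)
                        spi_master_put(master);
                return err;
        }

        static int example_spi_remove(struct platform_device *pdev)
        {
                struct spi_master *master = platform_get_drvdata(pdev);

                spi_unregister_master(master);
                return 0;
        }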
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c index ee25670f8cfd..69ecf05757dd 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c | |||
@@ -83,11 +83,6 @@ | |||
83 | #define SPI_SHUTDOWN 1 | 83 | #define SPI_SHUTDOWN 1 |
84 | 84 | ||
85 | struct omap1_spi100k { | 85 | struct omap1_spi100k { |
86 | struct work_struct work; | ||
87 | |||
88 | /* lock protects queue and registers */ | ||
89 | spinlock_t lock; | ||
90 | struct list_head msg_queue; | ||
91 | struct spi_master *master; | 86 | struct spi_master *master; |
92 | struct clk *ick; | 87 | struct clk *ick; |
93 | struct clk *fck; | 88 | struct clk *fck; |
@@ -104,8 +99,6 @@ struct omap1_spi100k_cs { | |||
104 | int word_len; | 99 | int word_len; |
105 | }; | 100 | }; |
106 | 101 | ||
107 | static struct workqueue_struct *omap1_spi100k_wq; | ||
108 | |||
109 | #define MOD_REG_BIT(val, mask, set) do { \ | 102 | #define MOD_REG_BIT(val, mask, set) do { \ |
110 | if (set) \ | 103 | if (set) \ |
111 | val |= mask; \ | 104 | val |= mask; \ |
@@ -310,170 +303,102 @@ static int omap1_spi100k_setup(struct spi_device *spi) | |||
310 | 303 | ||
311 | spi100k_open(spi->master); | 304 | spi100k_open(spi->master); |
312 | 305 | ||
313 | clk_enable(spi100k->ick); | 306 | clk_prepare_enable(spi100k->ick); |
314 | clk_enable(spi100k->fck); | 307 | clk_prepare_enable(spi100k->fck); |
315 | 308 | ||
316 | ret = omap1_spi100k_setup_transfer(spi, NULL); | 309 | ret = omap1_spi100k_setup_transfer(spi, NULL); |
317 | 310 | ||
318 | clk_disable(spi100k->ick); | 311 | clk_disable_unprepare(spi100k->ick); |
319 | clk_disable(spi100k->fck); | 312 | clk_disable_unprepare(spi100k->fck); |
320 | 313 | ||
321 | return ret; | 314 | return ret; |
322 | } | 315 | } |
323 | 316 | ||
324 | static void omap1_spi100k_work(struct work_struct *work) | 317 | static int omap1_spi100k_prepare_hardware(struct spi_master *master) |
325 | { | 318 | { |
326 | struct omap1_spi100k *spi100k; | 319 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); |
327 | int status = 0; | ||
328 | 320 | ||
329 | spi100k = container_of(work, struct omap1_spi100k, work); | 321 | clk_prepare_enable(spi100k->ick); |
330 | spin_lock_irq(&spi100k->lock); | 322 | clk_prepare_enable(spi100k->fck); |
331 | 323 | ||
332 | clk_enable(spi100k->ick); | 324 | return 0; |
333 | clk_enable(spi100k->fck); | 325 | } |
334 | 326 | ||
335 | /* We only enable one channel at a time -- the one whose message is | 327 | static int omap1_spi100k_transfer_one_message(struct spi_master *master, |
336 | * at the head of the queue -- although this controller would gladly | 328 | struct spi_message *m) |
337 | * arbitrate among multiple channels. This corresponds to "single | 329 | { |
338 | * channel" master mode. As a side effect, we need to manage the | 330 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); |
339 | * chipselect with the FORCE bit ... CS != channel enable. | 331 | struct spi_device *spi = m->spi; |
340 | */ | 332 | struct spi_transfer *t = NULL; |
341 | while (!list_empty(&spi100k->msg_queue)) { | 333 | int cs_active = 0; |
342 | struct spi_message *m; | 334 | int par_override = 0; |
343 | struct spi_device *spi; | 335 | int status = 0; |
344 | struct spi_transfer *t = NULL; | 336 | |
345 | int cs_active = 0; | 337 | list_for_each_entry(t, &m->transfers, transfer_list) { |
346 | struct omap1_spi100k_cs *cs; | 338 | if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { |
347 | int par_override = 0; | 339 | status = -EINVAL; |
348 | 340 | break; | |
349 | m = container_of(spi100k->msg_queue.next, struct spi_message, | 341 | } |
350 | queue); | 342 | if (par_override || t->speed_hz || t->bits_per_word) { |
351 | 343 | par_override = 1; | |
352 | list_del_init(&m->queue); | 344 | status = omap1_spi100k_setup_transfer(spi, t); |
353 | spin_unlock_irq(&spi100k->lock); | 345 | if (status < 0) |
354 | |||
355 | spi = m->spi; | ||
356 | cs = spi->controller_state; | ||
357 | |||
358 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
359 | if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) { | ||
360 | status = -EINVAL; | ||
361 | break; | 346 | break; |
362 | } | 347 | if (!t->speed_hz && !t->bits_per_word) |
363 | if (par_override || t->speed_hz || t->bits_per_word) { | 348 | par_override = 0; |
364 | par_override = 1; | 349 | } |
365 | status = omap1_spi100k_setup_transfer(spi, t); | ||
366 | if (status < 0) | ||
367 | break; | ||
368 | if (!t->speed_hz && !t->bits_per_word) | ||
369 | par_override = 0; | ||
370 | } | ||
371 | 350 | ||
372 | if (!cs_active) { | 351 | if (!cs_active) { |
373 | omap1_spi100k_force_cs(spi100k, 1); | 352 | omap1_spi100k_force_cs(spi100k, 1); |
374 | cs_active = 1; | 353 | cs_active = 1; |
375 | } | 354 | } |
376 | 355 | ||
377 | if (t->len) { | 356 | if (t->len) { |
378 | unsigned count; | 357 | unsigned count; |
379 | 358 | ||
380 | count = omap1_spi100k_txrx_pio(spi, t); | 359 | count = omap1_spi100k_txrx_pio(spi, t); |
381 | m->actual_length += count; | 360 | m->actual_length += count; |
382 | 361 | ||
383 | if (count != t->len) { | 362 | if (count != t->len) { |
384 | status = -EIO; | 363 | status = -EIO; |
385 | break; | 364 | break; |
386 | } | ||
387 | } | 365 | } |
366 | } | ||
388 | 367 | ||
389 | if (t->delay_usecs) | 368 | if (t->delay_usecs) |
390 | udelay(t->delay_usecs); | 369 | udelay(t->delay_usecs); |
391 | 370 | ||
392 | /* ignore the "leave it on after last xfer" hint */ | 371 | /* ignore the "leave it on after last xfer" hint */ |
393 | 372 | ||
394 | if (t->cs_change) { | 373 | if (t->cs_change) { |
395 | omap1_spi100k_force_cs(spi100k, 0); | 374 | omap1_spi100k_force_cs(spi100k, 0); |
396 | cs_active = 0; | 375 | cs_active = 0; |
397 | } | ||
398 | } | ||
399 | |||
400 | /* Restore defaults if they were overriden */ | ||
401 | if (par_override) { | ||
402 | par_override = 0; | ||
403 | status = omap1_spi100k_setup_transfer(spi, NULL); | ||
404 | } | 376 | } |
377 | } | ||
405 | 378 | ||
406 | if (cs_active) | 379 | /* Restore defaults if they were overriden */ |
407 | omap1_spi100k_force_cs(spi100k, 0); | 380 | if (par_override) { |
381 | par_override = 0; | ||
382 | status = omap1_spi100k_setup_transfer(spi, NULL); | ||
383 | } | ||
408 | 384 | ||
409 | m->status = status; | 385 | if (cs_active) |
410 | m->complete(m->context); | 386 | omap1_spi100k_force_cs(spi100k, 0); |
411 | 387 | ||
412 | spin_lock_irq(&spi100k->lock); | 388 | m->status = status; |
413 | } | ||
414 | 389 | ||
415 | clk_disable(spi100k->ick); | 390 | spi_finalize_current_message(master); |
416 | clk_disable(spi100k->fck); | ||
417 | spin_unlock_irq(&spi100k->lock); | ||
418 | 391 | ||
419 | if (status < 0) | 392 | return status; |
420 | printk(KERN_WARNING "spi transfer failed with %d\n", status); | ||
421 | } | 393 | } |
422 | 394 | ||
423 | static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m) | 395 | static int omap1_spi100k_unprepare_hardware(struct spi_master *master) |
424 | { | 396 | { |
425 | struct omap1_spi100k *spi100k; | 397 | struct omap1_spi100k *spi100k = spi_master_get_devdata(master); |
426 | unsigned long flags; | ||
427 | struct spi_transfer *t; | ||
428 | |||
429 | m->actual_length = 0; | ||
430 | m->status = -EINPROGRESS; | ||
431 | |||
432 | spi100k = spi_master_get_devdata(spi->master); | ||
433 | |||
434 | /* Don't accept new work if we're shutting down */ | ||
435 | if (spi100k->state == SPI_SHUTDOWN) | ||
436 | return -ESHUTDOWN; | ||
437 | |||
438 | /* reject invalid messages and transfers */ | ||
439 | if (list_empty(&m->transfers) || !m->complete) | ||
440 | return -EINVAL; | ||
441 | |||
442 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
443 | const void *tx_buf = t->tx_buf; | ||
444 | void *rx_buf = t->rx_buf; | ||
445 | unsigned len = t->len; | ||
446 | |||
447 | if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ | ||
448 | || (len && !(rx_buf || tx_buf))) { | ||
449 | dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n", | ||
450 | t->speed_hz, | ||
451 | len, | ||
452 | tx_buf ? "tx" : "", | ||
453 | rx_buf ? "rx" : "", | ||
454 | t->bits_per_word); | ||
455 | return -EINVAL; | ||
456 | } | ||
457 | |||
458 | if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) { | ||
459 | dev_dbg(&spi->dev, "%d Hz max exceeds %d\n", | ||
460 | t->speed_hz, | ||
461 | OMAP1_SPI100K_MAX_FREQ/(1<<16)); | ||
462 | return -EINVAL; | ||
463 | } | ||
464 | |||
465 | } | ||
466 | |||
467 | spin_lock_irqsave(&spi100k->lock, flags); | ||
468 | list_add_tail(&m->queue, &spi100k->msg_queue); | ||
469 | queue_work(omap1_spi100k_wq, &spi100k->work); | ||
470 | spin_unlock_irqrestore(&spi100k->lock, flags); | ||
471 | 398 | ||
472 | return 0; | 399 | clk_disable_unprepare(spi100k->ick); |
473 | } | 400 | clk_disable_unprepare(spi100k->fck); |
474 | 401 | ||
475 | static int omap1_spi100k_reset(struct omap1_spi100k *spi100k) | ||
476 | { | ||
477 | return 0; | 402 | return 0; |
478 | } | 403 | } |
479 | 404 | ||
@@ -496,11 +421,15 @@ static int omap1_spi100k_probe(struct platform_device *pdev) | |||
496 | master->bus_num = pdev->id; | 421 | master->bus_num = pdev->id; |
497 | 422 | ||
498 | master->setup = omap1_spi100k_setup; | 423 | master->setup = omap1_spi100k_setup; |
499 | master->transfer = omap1_spi100k_transfer; | 424 | master->transfer_one_message = omap1_spi100k_transfer_one_message; |
425 | master->prepare_transfer_hardware = omap1_spi100k_prepare_hardware; | ||
426 | master->unprepare_transfer_hardware = omap1_spi100k_unprepare_hardware; | ||
500 | master->cleanup = NULL; | 427 | master->cleanup = NULL; |
501 | master->num_chipselect = 2; | 428 | master->num_chipselect = 2; |
502 | master->mode_bits = MODEBITS; | 429 | master->mode_bits = MODEBITS; |
503 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); | 430 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); |
431 | master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16); | ||
432 | master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ; | ||
504 | 433 | ||
505 | platform_set_drvdata(pdev, master); | 434 | platform_set_drvdata(pdev, master); |
506 | 435 | ||
@@ -512,42 +441,31 @@ static int omap1_spi100k_probe(struct platform_device *pdev) | |||
512 | * You should allocate this with ioremap() before initializing | 441 | * You should allocate this with ioremap() before initializing |
513 | * the SPI. | 442 | * the SPI. |
514 | */ | 443 | */ |
515 | spi100k->base = (void __iomem *) pdev->dev.platform_data; | 444 | spi100k->base = (void __iomem *)dev_get_platdata(&pdev->dev); |
516 | |||
517 | INIT_WORK(&spi100k->work, omap1_spi100k_work); | ||
518 | 445 | ||
519 | spin_lock_init(&spi100k->lock); | 446 | spi100k->ick = devm_clk_get(&pdev->dev, "ick"); |
520 | INIT_LIST_HEAD(&spi100k->msg_queue); | ||
521 | spi100k->ick = clk_get(&pdev->dev, "ick"); | ||
522 | if (IS_ERR(spi100k->ick)) { | 447 | if (IS_ERR(spi100k->ick)) { |
523 | dev_dbg(&pdev->dev, "can't get spi100k_ick\n"); | 448 | dev_dbg(&pdev->dev, "can't get spi100k_ick\n"); |
524 | status = PTR_ERR(spi100k->ick); | 449 | status = PTR_ERR(spi100k->ick); |
525 | goto err1; | 450 | goto err; |
526 | } | 451 | } |
527 | 452 | ||
528 | spi100k->fck = clk_get(&pdev->dev, "fck"); | 453 | spi100k->fck = devm_clk_get(&pdev->dev, "fck"); |
529 | if (IS_ERR(spi100k->fck)) { | 454 | if (IS_ERR(spi100k->fck)) { |
530 | dev_dbg(&pdev->dev, "can't get spi100k_fck\n"); | 455 | dev_dbg(&pdev->dev, "can't get spi100k_fck\n"); |
531 | status = PTR_ERR(spi100k->fck); | 456 | status = PTR_ERR(spi100k->fck); |
532 | goto err2; | 457 | goto err; |
533 | } | 458 | } |
534 | 459 | ||
535 | if (omap1_spi100k_reset(spi100k) < 0) | ||
536 | goto err3; | ||
537 | |||
538 | status = spi_register_master(master); | 460 | status = spi_register_master(master); |
539 | if (status < 0) | 461 | if (status < 0) |
540 | goto err3; | 462 | goto err; |
541 | 463 | ||
542 | spi100k->state = SPI_RUNNING; | 464 | spi100k->state = SPI_RUNNING; |
543 | 465 | ||
544 | return status; | 466 | return status; |
545 | 467 | ||
546 | err3: | 468 | err: |
547 | clk_put(spi100k->fck); | ||
548 | err2: | ||
549 | clk_put(spi100k->ick); | ||
550 | err1: | ||
551 | spi_master_put(master); | 469 | spi_master_put(master); |
552 | return status; | 470 | return status; |
553 | } | 471 | } |
@@ -557,33 +475,14 @@ static int omap1_spi100k_remove(struct platform_device *pdev) | |||
557 | struct spi_master *master; | 475 | struct spi_master *master; |
558 | struct omap1_spi100k *spi100k; | 476 | struct omap1_spi100k *spi100k; |
559 | struct resource *r; | 477 | struct resource *r; |
560 | unsigned limit = 500; | ||
561 | unsigned long flags; | ||
562 | int status = 0; | 478 | int status = 0; |
563 | 479 | ||
564 | master = platform_get_drvdata(pdev); | 480 | master = platform_get_drvdata(pdev); |
565 | spi100k = spi_master_get_devdata(master); | 481 | spi100k = spi_master_get_devdata(master); |
566 | 482 | ||
567 | spin_lock_irqsave(&spi100k->lock, flags); | ||
568 | |||
569 | spi100k->state = SPI_SHUTDOWN; | ||
570 | while (!list_empty(&spi100k->msg_queue) && limit--) { | ||
571 | spin_unlock_irqrestore(&spi100k->lock, flags); | ||
572 | msleep(10); | ||
573 | spin_lock_irqsave(&spi100k->lock, flags); | ||
574 | } | ||
575 | |||
576 | if (!list_empty(&spi100k->msg_queue)) | ||
577 | status = -EBUSY; | ||
578 | |||
579 | spin_unlock_irqrestore(&spi100k->lock, flags); | ||
580 | |||
581 | if (status != 0) | 483 | if (status != 0) |
582 | return status; | 484 | return status; |
583 | 485 | ||
584 | clk_put(spi100k->fck); | ||
585 | clk_put(spi100k->ick); | ||
586 | |||
587 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 486 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
588 | 487 | ||
589 | spi_unregister_master(master); | 488 | spi_unregister_master(master); |
@@ -596,30 +495,11 @@ static struct platform_driver omap1_spi100k_driver = { | |||
596 | .name = "omap1_spi100k", | 495 | .name = "omap1_spi100k", |
597 | .owner = THIS_MODULE, | 496 | .owner = THIS_MODULE, |
598 | }, | 497 | }, |
498 | .probe = omap1_spi100k_probe, | ||
599 | .remove = omap1_spi100k_remove, | 499 | .remove = omap1_spi100k_remove, |
600 | }; | 500 | }; |
601 | 501 | ||
602 | 502 | module_platform_driver(omap1_spi100k_driver); | |
603 | static int __init omap1_spi100k_init(void) | ||
604 | { | ||
605 | omap1_spi100k_wq = create_singlethread_workqueue( | ||
606 | omap1_spi100k_driver.driver.name); | ||
607 | |||
608 | if (omap1_spi100k_wq == NULL) | ||
609 | return -1; | ||
610 | |||
611 | return platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe); | ||
612 | } | ||
613 | |||
614 | static void __exit omap1_spi100k_exit(void) | ||
615 | { | ||
616 | platform_driver_unregister(&omap1_spi100k_driver); | ||
617 | |||
618 | destroy_workqueue(omap1_spi100k_wq); | ||
619 | } | ||
620 | |||
621 | module_init(omap1_spi100k_init); | ||
622 | module_exit(omap1_spi100k_exit); | ||
623 | 503 | ||
624 | MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver"); | 504 | MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver"); |
625 | MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>"); | 505 | MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>"); |
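The omap-100k conversion above retires the driver's private workqueue and message list and hands queueing to the SPI core: the driver supplies transfer_one_message() plus prepare/unprepare_transfer_hardware() for clock handling, and completes each message with spi_finalize_current_message(). A reduced sketch of that shape (names and the trivial transfer body are illustrative only):

        #include <linux/clk.h>
        #include <linux/spi/spi.h>

        struct example_spi {
                struct clk *ick;
                struct clk *fck;
        };

        static int example_prepare_hardware(struct spi_master *master)
        {
                struct example_spi *p = spi_master_get_devdata(master);

                /* Called once before the core starts pumping queued messages. */
                clk_prepare_enable(p->ick);
                clk_prepare_enable(p->fck);
                return 0;
        }

        static int example_transfer_one_message(struct spi_master *master,
                                                struct spi_message *m)
        {
                struct spi_transfer *t;
                int status = 0;

                list_for_each_entry(t, &m->transfers, transfer_list) {
                        /* ... push t->tx_buf / pull t->rx_buf here ... */
                        m->actual_length += t->len;
                }

                m->status = status;
                spi_finalize_current_message(master);   /* completes m for the caller */
                return status;
        }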
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 5994039758de..ed4af4708d9a 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c | |||
@@ -335,23 +335,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) | |||
335 | __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); | 335 | __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); |
336 | } | 336 | } |
337 | 337 | ||
338 | static int omap2_prepare_transfer(struct spi_master *master) | ||
339 | { | ||
340 | struct omap2_mcspi *mcspi = spi_master_get_devdata(master); | ||
341 | |||
342 | pm_runtime_get_sync(mcspi->dev); | ||
343 | return 0; | ||
344 | } | ||
345 | |||
346 | static int omap2_unprepare_transfer(struct spi_master *master) | ||
347 | { | ||
348 | struct omap2_mcspi *mcspi = spi_master_get_devdata(master); | ||
349 | |||
350 | pm_runtime_mark_last_busy(mcspi->dev); | ||
351 | pm_runtime_put_autosuspend(mcspi->dev); | ||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) | 338 | static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) |
356 | { | 339 | { |
357 | unsigned long timeout; | 340 | unsigned long timeout; |
@@ -1318,8 +1301,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev) | |||
1318 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 1301 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
1319 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); | 1302 | master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); |
1320 | master->setup = omap2_mcspi_setup; | 1303 | master->setup = omap2_mcspi_setup; |
1321 | master->prepare_transfer_hardware = omap2_prepare_transfer; | 1304 | master->auto_runtime_pm = true; |
1322 | master->unprepare_transfer_hardware = omap2_unprepare_transfer; | ||
1323 | master->transfer_one_message = omap2_mcspi_transfer_one_message; | 1305 | master->transfer_one_message = omap2_mcspi_transfer_one_message; |
1324 | master->cleanup = omap2_mcspi_cleanup; | 1306 | master->cleanup = omap2_mcspi_cleanup; |
1325 | master->dev.of_node = node; | 1307 | master->dev.of_node = node; |
@@ -1340,7 +1322,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev) | |||
1340 | if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL)) | 1322 | if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL)) |
1341 | mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN; | 1323 | mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN; |
1342 | } else { | 1324 | } else { |
1343 | pdata = pdev->dev.platform_data; | 1325 | pdata = dev_get_platdata(&pdev->dev); |
1344 | master->num_chipselect = pdata->num_cs; | 1326 | master->num_chipselect = pdata->num_cs; |
1345 | if (pdev->id != -1) | 1327 | if (pdev->id != -1) |
1346 | master->bus_num = pdev->id; | 1328 | master->bus_num = pdev->id; |
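Several drivers in this series (omap2-mcspi here, and pl022, pxa2xx and sh-hspi below) delete prepare/unprepare hooks whose only job was a runtime-PM get/put; setting master->auto_runtime_pm lets the SPI core bracket queue runs itself. Sketch, with placeholder names:

        #include <linux/pm_runtime.h>
        #include <linux/spi/spi.h>

        static void example_configure_master(struct spi_master *master,
                                             struct device *parent)
        {
                /*
                 * With auto_runtime_pm the core runtime-resumes
                 * master->dev.parent before running queued messages and drops
                 * the reference afterwards, so no prepare/unprepare hooks that
                 * only did pm_runtime_get_sync()/put() are needed.
                 */
                master->auto_runtime_pm = true;

                pm_runtime_use_autosuspend(parent);
                pm_runtime_enable(parent);
        }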
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index 5d90bebaa0fa..1d1d321d90c4 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
21 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
22 | #include <linux/sizes.h> | ||
22 | #include <asm/unaligned.h> | 23 | #include <asm/unaligned.h> |
23 | 24 | ||
24 | #define DRIVER_NAME "orion_spi" | 25 | #define DRIVER_NAME "orion_spi" |
@@ -446,30 +447,22 @@ static int orion_spi_probe(struct platform_device *pdev) | |||
446 | spi->min_speed = DIV_ROUND_UP(tclk_hz, 30); | 447 | spi->min_speed = DIV_ROUND_UP(tclk_hz, 30); |
447 | 448 | ||
448 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 449 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
449 | if (r == NULL) { | 450 | spi->base = devm_ioremap_resource(&pdev->dev, r); |
450 | status = -ENODEV; | 451 | if (IS_ERR(spi->base)) { |
452 | status = PTR_ERR(spi->base); | ||
451 | goto out_rel_clk; | 453 | goto out_rel_clk; |
452 | } | 454 | } |
453 | 455 | ||
454 | if (!request_mem_region(r->start, resource_size(r), | ||
455 | dev_name(&pdev->dev))) { | ||
456 | status = -EBUSY; | ||
457 | goto out_rel_clk; | ||
458 | } | ||
459 | spi->base = ioremap(r->start, SZ_1K); | ||
460 | |||
461 | if (orion_spi_reset(spi) < 0) | 456 | if (orion_spi_reset(spi) < 0) |
462 | goto out_rel_mem; | 457 | goto out_rel_clk; |
463 | 458 | ||
464 | master->dev.of_node = pdev->dev.of_node; | 459 | master->dev.of_node = pdev->dev.of_node; |
465 | status = spi_register_master(master); | 460 | status = spi_register_master(master); |
466 | if (status < 0) | 461 | if (status < 0) |
467 | goto out_rel_mem; | 462 | goto out_rel_clk; |
468 | 463 | ||
469 | return status; | 464 | return status; |
470 | 465 | ||
471 | out_rel_mem: | ||
472 | release_mem_region(r->start, resource_size(r)); | ||
473 | out_rel_clk: | 466 | out_rel_clk: |
474 | clk_disable_unprepare(spi->clk); | 467 | clk_disable_unprepare(spi->clk); |
475 | clk_put(spi->clk); | 468 | clk_put(spi->clk); |
@@ -482,7 +475,6 @@ out: | |||
482 | static int orion_spi_remove(struct platform_device *pdev) | 475 | static int orion_spi_remove(struct platform_device *pdev) |
483 | { | 476 | { |
484 | struct spi_master *master; | 477 | struct spi_master *master; |
485 | struct resource *r; | ||
486 | struct orion_spi *spi; | 478 | struct orion_spi *spi; |
487 | 479 | ||
488 | master = platform_get_drvdata(pdev); | 480 | master = platform_get_drvdata(pdev); |
@@ -491,9 +483,6 @@ static int orion_spi_remove(struct platform_device *pdev) | |||
491 | clk_disable_unprepare(spi->clk); | 483 | clk_disable_unprepare(spi->clk); |
492 | clk_put(spi->clk); | 484 | clk_put(spi->clk); |
493 | 485 | ||
494 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
495 | release_mem_region(r->start, resource_size(r)); | ||
496 | |||
497 | spi_unregister_master(master); | 486 | spi_unregister_master(master); |
498 | 487 | ||
499 | return 0; | 488 | return 0; |
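The orion change collapses request_mem_region() plus ioremap() into devm_ioremap_resource(), which also removes the matching releases in the error and remove paths. The usual pattern looks roughly like this (function name is hypothetical):

        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/io.h>
        #include <linux/platform_device.h>

        static void __iomem *example_map_regs(struct platform_device *pdev)
        {
                struct resource *r;

                r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

                /*
                 * Claims the region and maps it; both are released
                 * automatically when the device is unbound, so remove() and
                 * the error path need no release_mem_region()/iounmap().
                 * Returns an ERR_PTR on failure, including r == NULL.
                 */
                return devm_ioremap_resource(&pdev->dev, r);
        }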
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index abef061fb84a..9c511a954d21 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
@@ -1555,18 +1555,6 @@ static int pl022_transfer_one_message(struct spi_master *master, | |||
1555 | return 0; | 1555 | return 0; |
1556 | } | 1556 | } |
1557 | 1557 | ||
1558 | static int pl022_prepare_transfer_hardware(struct spi_master *master) | ||
1559 | { | ||
1560 | struct pl022 *pl022 = spi_master_get_devdata(master); | ||
1561 | |||
1562 | /* | ||
1563 | * Just make sure we have all we need to run the transfer by syncing | ||
1564 | * with the runtime PM framework. | ||
1565 | */ | ||
1566 | pm_runtime_get_sync(&pl022->adev->dev); | ||
1567 | return 0; | ||
1568 | } | ||
1569 | |||
1570 | static int pl022_unprepare_transfer_hardware(struct spi_master *master) | 1558 | static int pl022_unprepare_transfer_hardware(struct spi_master *master) |
1571 | { | 1559 | { |
1572 | struct pl022 *pl022 = spi_master_get_devdata(master); | 1560 | struct pl022 *pl022 = spi_master_get_devdata(master); |
@@ -1575,13 +1563,6 @@ static int pl022_unprepare_transfer_hardware(struct spi_master *master) | |||
1575 | writew((readw(SSP_CR1(pl022->virtbase)) & | 1563 | writew((readw(SSP_CR1(pl022->virtbase)) & |
1576 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); | 1564 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); |
1577 | 1565 | ||
1578 | if (pl022->master_info->autosuspend_delay > 0) { | ||
1579 | pm_runtime_mark_last_busy(&pl022->adev->dev); | ||
1580 | pm_runtime_put_autosuspend(&pl022->adev->dev); | ||
1581 | } else { | ||
1582 | pm_runtime_put(&pl022->adev->dev); | ||
1583 | } | ||
1584 | |||
1585 | return 0; | 1566 | return 0; |
1586 | } | 1567 | } |
1587 | 1568 | ||
@@ -2091,7 +2072,8 @@ pl022_platform_data_dt_get(struct device *dev) | |||
2091 | static int pl022_probe(struct amba_device *adev, const struct amba_id *id) | 2072 | static int pl022_probe(struct amba_device *adev, const struct amba_id *id) |
2092 | { | 2073 | { |
2093 | struct device *dev = &adev->dev; | 2074 | struct device *dev = &adev->dev; |
2094 | struct pl022_ssp_controller *platform_info = adev->dev.platform_data; | 2075 | struct pl022_ssp_controller *platform_info = |
2076 | dev_get_platdata(&adev->dev); | ||
2095 | struct spi_master *master; | 2077 | struct spi_master *master; |
2096 | struct pl022 *pl022 = NULL; /*Data for this driver */ | 2078 | struct pl022 *pl022 = NULL; /*Data for this driver */ |
2097 | struct device_node *np = adev->dev.of_node; | 2079 | struct device_node *np = adev->dev.of_node; |
@@ -2139,7 +2121,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) | |||
2139 | master->num_chipselect = num_cs; | 2121 | master->num_chipselect = num_cs; |
2140 | master->cleanup = pl022_cleanup; | 2122 | master->cleanup = pl022_cleanup; |
2141 | master->setup = pl022_setup; | 2123 | master->setup = pl022_setup; |
2142 | master->prepare_transfer_hardware = pl022_prepare_transfer_hardware; | 2124 | master->auto_runtime_pm = true; |
2143 | master->transfer_one_message = pl022_transfer_one_message; | 2125 | master->transfer_one_message = pl022_transfer_one_message; |
2144 | master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware; | 2126 | master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware; |
2145 | master->rt = platform_info->rt; | 2127 | master->rt = platform_info->rt; |
@@ -2193,8 +2175,8 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) | |||
2193 | status = -ENOMEM; | 2175 | status = -ENOMEM; |
2194 | goto err_no_ioremap; | 2176 | goto err_no_ioremap; |
2195 | } | 2177 | } |
2196 | printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", | 2178 | printk(KERN_INFO "pl022: mapped registers from %pa to %p\n", |
2197 | adev->res.start, pl022->virtbase); | 2179 | &adev->res.start, pl022->virtbase); |
2198 | 2180 | ||
2199 | pl022->clk = devm_clk_get(&adev->dev, NULL); | 2181 | pl022->clk = devm_clk_get(&adev->dev, NULL); |
2200 | if (IS_ERR(pl022->clk)) { | 2182 | if (IS_ERR(pl022->clk)) { |
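Besides the same auto_runtime_pm conversion, the pl022 hunk swaps a raw 0x%08x for the %pa format specifier, which prints a resource_size_t/phys_addr_t at the correct width on both 32-bit and 64-bit kernels; note it takes a pointer to the value. A small illustrative helper:

        #include <linux/device.h>
        #include <linux/ioport.h>

        static void example_report_mapping(struct device *dev,
                                           struct resource *res,
                                           void __iomem *virt)
        {
                /* %pa dereferences its argument: pass &res->start, not res->start. */
                dev_info(dev, "mapped registers from %pa to %p\n",
                         &res->start, virt);
        }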
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index f440dcee852b..2eb06ee0b326 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -69,6 +69,8 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | |||
69 | #define LPSS_TX_HITHRESH_DFLT 224 | 69 | #define LPSS_TX_HITHRESH_DFLT 224 |
70 | 70 | ||
71 | /* Offset from drv_data->lpss_base */ | 71 | /* Offset from drv_data->lpss_base */ |
72 | #define GENERAL_REG 0x08 | ||
73 | #define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24) | ||
72 | #define SSP_REG 0x0c | 74 | #define SSP_REG 0x0c |
73 | #define SPI_CS_CONTROL 0x18 | 75 | #define SPI_CS_CONTROL 0x18 |
74 | #define SPI_CS_CONTROL_SW_MODE BIT(0) | 76 | #define SPI_CS_CONTROL_SW_MODE BIT(0) |
@@ -142,8 +144,13 @@ detection_done: | |||
142 | __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value); | 144 | __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value); |
143 | 145 | ||
144 | /* Enable multiblock DMA transfers */ | 146 | /* Enable multiblock DMA transfers */ |
145 | if (drv_data->master_info->enable_dma) | 147 | if (drv_data->master_info->enable_dma) { |
146 | __lpss_ssp_write_priv(drv_data, SSP_REG, 1); | 148 | __lpss_ssp_write_priv(drv_data, SSP_REG, 1); |
149 | |||
150 | value = __lpss_ssp_read_priv(drv_data, GENERAL_REG); | ||
151 | value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE; | ||
152 | __lpss_ssp_write_priv(drv_data, GENERAL_REG, value); | ||
153 | } | ||
147 | } | 154 | } |
148 | 155 | ||
149 | static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable) | 156 | static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable) |
@@ -804,14 +811,6 @@ static int pxa2xx_spi_transfer_one_message(struct spi_master *master, | |||
804 | return 0; | 811 | return 0; |
805 | } | 812 | } |
806 | 813 | ||
807 | static int pxa2xx_spi_prepare_transfer(struct spi_master *master) | ||
808 | { | ||
809 | struct driver_data *drv_data = spi_master_get_devdata(master); | ||
810 | |||
811 | pm_runtime_get_sync(&drv_data->pdev->dev); | ||
812 | return 0; | ||
813 | } | ||
814 | |||
815 | static int pxa2xx_spi_unprepare_transfer(struct spi_master *master) | 814 | static int pxa2xx_spi_unprepare_transfer(struct spi_master *master) |
816 | { | 815 | { |
817 | struct driver_data *drv_data = spi_master_get_devdata(master); | 816 | struct driver_data *drv_data = spi_master_get_devdata(master); |
@@ -820,8 +819,6 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master) | |||
820 | write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE, | 819 | write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE, |
821 | drv_data->ioaddr); | 820 | drv_data->ioaddr); |
822 | 821 | ||
823 | pm_runtime_mark_last_busy(&drv_data->pdev->dev); | ||
824 | pm_runtime_put_autosuspend(&drv_data->pdev->dev); | ||
825 | return 0; | 822 | return 0; |
826 | } | 823 | } |
827 | 824 | ||
@@ -1134,8 +1131,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1134 | master->cleanup = cleanup; | 1131 | master->cleanup = cleanup; |
1135 | master->setup = setup; | 1132 | master->setup = setup; |
1136 | master->transfer_one_message = pxa2xx_spi_transfer_one_message; | 1133 | master->transfer_one_message = pxa2xx_spi_transfer_one_message; |
1137 | master->prepare_transfer_hardware = pxa2xx_spi_prepare_transfer; | ||
1138 | master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; | 1134 | master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; |
1135 | master->auto_runtime_pm = true; | ||
1139 | 1136 | ||
1140 | drv_data->ssp_type = ssp->type; | 1137 | drv_data->ssp_type = ssp->type; |
1141 | drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT); | 1138 | drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT); |
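The pxa2xx LPSS hunk sets one extra bit (RXTO hold-off disable) in a private register whenever DMA is enabled, via a read-modify-write through the driver's private accessors. A generic sketch of that update, with readl()/writel() standing in for __lpss_ssp_read_priv()/__lpss_ssp_write_priv() and the offset/bit taken from the patch:

        #include <linux/bitops.h>
        #include <linux/io.h>

        #define EXAMPLE_GENERAL_REG                     0x08    /* offset from the private block */
        #define EXAMPLE_GENERAL_RXTO_HOLDOFF_DISABLE    BIT(24)

        static void example_enable_dma_quirk(void __iomem *priv_base)
        {
                u32 value;

                /* Read-modify-write so the other GENERAL_REG bits are preserved. */
                value = readl(priv_base + EXAMPLE_GENERAL_REG);
                value |= EXAMPLE_GENERAL_RXTO_HOLDOFF_DISABLE;
                writel(value, priv_base + EXAMPLE_GENERAL_REG);
        }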
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index b44a6ac3cec9..8719206a03a0 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c | |||
@@ -564,8 +564,12 @@ static void rspi_work(struct work_struct *work) | |||
564 | unsigned long flags; | 564 | unsigned long flags; |
565 | int ret; | 565 | int ret; |
566 | 566 | ||
567 | spin_lock_irqsave(&rspi->lock, flags); | 567 | while (1) { |
568 | while (!list_empty(&rspi->queue)) { | 568 | spin_lock_irqsave(&rspi->lock, flags); |
569 | if (list_empty(&rspi->queue)) { | ||
570 | spin_unlock_irqrestore(&rspi->lock, flags); | ||
571 | break; | ||
572 | } | ||
569 | mesg = list_entry(rspi->queue.next, struct spi_message, queue); | 573 | mesg = list_entry(rspi->queue.next, struct spi_message, queue); |
570 | list_del_init(&mesg->queue); | 574 | list_del_init(&mesg->queue); |
571 | spin_unlock_irqrestore(&rspi->lock, flags); | 575 | spin_unlock_irqrestore(&rspi->lock, flags); |
@@ -595,8 +599,6 @@ static void rspi_work(struct work_struct *work) | |||
595 | 599 | ||
596 | mesg->status = 0; | 600 | mesg->status = 0; |
597 | mesg->complete(mesg->context); | 601 | mesg->complete(mesg->context); |
598 | |||
599 | spin_lock_irqsave(&rspi->lock, flags); | ||
600 | } | 602 | } |
601 | 603 | ||
602 | return; | 604 | return; |
@@ -664,12 +666,13 @@ static irqreturn_t rspi_irq(int irq, void *_sr) | |||
664 | static int rspi_request_dma(struct rspi_data *rspi, | 666 | static int rspi_request_dma(struct rspi_data *rspi, |
665 | struct platform_device *pdev) | 667 | struct platform_device *pdev) |
666 | { | 668 | { |
667 | struct rspi_plat_data *rspi_pd = pdev->dev.platform_data; | 669 | struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev); |
670 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
668 | dma_cap_mask_t mask; | 671 | dma_cap_mask_t mask; |
669 | struct dma_slave_config cfg; | 672 | struct dma_slave_config cfg; |
670 | int ret; | 673 | int ret; |
671 | 674 | ||
672 | if (!rspi_pd) | 675 | if (!res || !rspi_pd) |
673 | return 0; /* The driver assumes no error. */ | 676 | return 0; /* The driver assumes no error. */ |
674 | 677 | ||
675 | rspi->dma_width_16bit = rspi_pd->dma_width_16bit; | 678 | rspi->dma_width_16bit = rspi_pd->dma_width_16bit; |
@@ -683,6 +686,8 @@ static int rspi_request_dma(struct rspi_data *rspi, | |||
683 | if (rspi->chan_rx) { | 686 | if (rspi->chan_rx) { |
684 | cfg.slave_id = rspi_pd->dma_rx_id; | 687 | cfg.slave_id = rspi_pd->dma_rx_id; |
685 | cfg.direction = DMA_DEV_TO_MEM; | 688 | cfg.direction = DMA_DEV_TO_MEM; |
689 | cfg.dst_addr = 0; | ||
690 | cfg.src_addr = res->start + RSPI_SPDR; | ||
686 | ret = dmaengine_slave_config(rspi->chan_rx, &cfg); | 691 | ret = dmaengine_slave_config(rspi->chan_rx, &cfg); |
687 | if (!ret) | 692 | if (!ret) |
688 | dev_info(&pdev->dev, "Use DMA when rx.\n"); | 693 | dev_info(&pdev->dev, "Use DMA when rx.\n"); |
@@ -698,6 +703,8 @@ static int rspi_request_dma(struct rspi_data *rspi, | |||
698 | if (rspi->chan_tx) { | 703 | if (rspi->chan_tx) { |
699 | cfg.slave_id = rspi_pd->dma_tx_id; | 704 | cfg.slave_id = rspi_pd->dma_tx_id; |
700 | cfg.direction = DMA_MEM_TO_DEV; | 705 | cfg.direction = DMA_MEM_TO_DEV; |
706 | cfg.dst_addr = res->start + RSPI_SPDR; | ||
707 | cfg.src_addr = 0; | ||
701 | ret = dmaengine_slave_config(rspi->chan_tx, &cfg); | 708 | ret = dmaengine_slave_config(rspi->chan_tx, &cfg); |
702 | if (!ret) | 709 | if (!ret) |
703 | dev_info(&pdev->dev, "Use DMA when tx\n"); | 710 | dev_info(&pdev->dev, "Use DMA when tx\n"); |
@@ -719,7 +726,7 @@ static void rspi_release_dma(struct rspi_data *rspi) | |||
719 | 726 | ||
720 | static int rspi_remove(struct platform_device *pdev) | 727 | static int rspi_remove(struct platform_device *pdev) |
721 | { | 728 | { |
722 | struct rspi_data *rspi = platform_get_drvdata(pdev); | 729 | struct rspi_data *rspi = spi_master_get(platform_get_drvdata(pdev)); |
723 | 730 | ||
724 | spi_unregister_master(rspi->master); | 731 | spi_unregister_master(rspi->master); |
725 | rspi_release_dma(rspi); | 732 | rspi_release_dma(rspi); |
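The rspi hunks fill in the DMA slave configuration's src_addr/dst_addr with the physical address of the SPI data register, taken from the platform MEM resource, instead of leaving them unset. A sketch of configuring an RX slave channel that way (the data-register offset below is a placeholder, not the real RSPI_SPDR value):

        #include <linux/dmaengine.h>
        #include <linux/platform_device.h>

        #define EXAMPLE_SPDR    0x20    /* data register offset; placeholder */

        static int example_config_rx(struct dma_chan *chan,
                                     struct platform_device *pdev)
        {
                struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                struct dma_slave_config cfg = { 0 };

                if (!res)
                        return -ENODEV;

                cfg.direction = DMA_DEV_TO_MEM;
                /* The device side of a DEV_TO_MEM transfer reads from the data reg. */
                cfg.src_addr = res->start + EXAMPLE_SPDR;

                return dmaengine_slave_config(chan, &cfg);
        }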
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c index 68910b310152..ce318d95a6ee 100644 --- a/drivers/spi/spi-s3c24xx.c +++ b/drivers/spi/spi-s3c24xx.c | |||
@@ -525,7 +525,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev) | |||
525 | memset(hw, 0, sizeof(struct s3c24xx_spi)); | 525 | memset(hw, 0, sizeof(struct s3c24xx_spi)); |
526 | 526 | ||
527 | hw->master = spi_master_get(master); | 527 | hw->master = spi_master_get(master); |
528 | hw->pdata = pdata = pdev->dev.platform_data; | 528 | hw->pdata = pdata = dev_get_platdata(&pdev->dev); |
529 | hw->dev = &pdev->dev; | 529 | hw->dev = &pdev->dev; |
530 | 530 | ||
531 | if (pdata == NULL) { | 531 | if (pdata == NULL) { |
@@ -690,7 +690,7 @@ static int s3c24xx_spi_remove(struct platform_device *dev) | |||
690 | 690 | ||
691 | static int s3c24xx_spi_suspend(struct device *dev) | 691 | static int s3c24xx_spi_suspend(struct device *dev) |
692 | { | 692 | { |
693 | struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev)); | 693 | struct s3c24xx_spi *hw = dev_get_drvdata(dev); |
694 | 694 | ||
695 | if (hw->pdata && hw->pdata->gpio_setup) | 695 | if (hw->pdata && hw->pdata->gpio_setup) |
696 | hw->pdata->gpio_setup(hw->pdata, 0); | 696 | hw->pdata->gpio_setup(hw->pdata, 0); |
@@ -701,7 +701,7 @@ static int s3c24xx_spi_suspend(struct device *dev) | |||
701 | 701 | ||
702 | static int s3c24xx_spi_resume(struct device *dev) | 702 | static int s3c24xx_spi_resume(struct device *dev) |
703 | { | 703 | { |
704 | struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev)); | 704 | struct s3c24xx_spi *hw = dev_get_drvdata(dev); |
705 | 705 | ||
706 | s3c24xx_spi_initialsetup(hw); | 706 | s3c24xx_spi_initialsetup(hw); |
707 | return 0; | 707 | return 0; |
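The s3c24xx suspend/resume hooks now fetch their driver data straight from the struct device instead of bouncing through to_platform_device(); for dev_pm_ops callbacks the two are equivalent, since platform_get_drvdata() is dev_get_drvdata() on &pdev->dev. Minimal illustration with hypothetical names:

        #include <linux/device.h>
        #include <linux/pm.h>

        struct example_spi {
                void __iomem *regs;
        };

        static int example_suspend(struct device *dev)
        {
                /* Same pointer that probe stored with platform_set_drvdata(). */
                struct example_spi *hw = dev_get_drvdata(dev);

                if (!hw)
                        return -EINVAL;
                /* ... quiesce the controller via hw->regs ... */
                return 0;
        }

        static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, NULL);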
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 63e2070c6c14..512b8893893b 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c | |||
@@ -172,7 +172,6 @@ struct s3c64xx_spi_port_config { | |||
172 | * @master: Pointer to the SPI Protocol master. | 172 | * @master: Pointer to the SPI Protocol master. |
173 | * @cntrlr_info: Platform specific data for the controller this driver manages. | 173 | * @cntrlr_info: Platform specific data for the controller this driver manages. |
174 | * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint. | 174 | * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint. |
175 | * @queue: To log SPI xfer requests. | ||
176 | * @lock: Controller specific lock. | 175 | * @lock: Controller specific lock. |
177 | * @state: Set of FLAGS to indicate status. | 176 | * @state: Set of FLAGS to indicate status. |
178 | * @rx_dmach: Controller's DMA channel for Rx. | 177 | * @rx_dmach: Controller's DMA channel for Rx. |
@@ -193,7 +192,6 @@ struct s3c64xx_spi_driver_data { | |||
193 | struct spi_master *master; | 192 | struct spi_master *master; |
194 | struct s3c64xx_spi_info *cntrlr_info; | 193 | struct s3c64xx_spi_info *cntrlr_info; |
195 | struct spi_device *tgl_spi; | 194 | struct spi_device *tgl_spi; |
196 | struct list_head queue; | ||
197 | spinlock_t lock; | 195 | spinlock_t lock; |
198 | unsigned long sfr_start; | 196 | unsigned long sfr_start; |
199 | struct completion xfer_completion; | 197 | struct completion xfer_completion; |
@@ -338,8 +336,10 @@ static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) | |||
338 | req.cap = DMA_SLAVE; | 336 | req.cap = DMA_SLAVE; |
339 | req.client = &s3c64xx_spi_dma_client; | 337 | req.client = &s3c64xx_spi_dma_client; |
340 | 338 | ||
341 | sdd->rx_dma.ch = (void *)sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx"); | 339 | sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request( |
342 | sdd->tx_dma.ch = (void *)sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx"); | 340 | sdd->rx_dma.dmach, &req, dev, "rx"); |
341 | sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request( | ||
342 | sdd->tx_dma.dmach, &req, dev, "tx"); | ||
343 | 343 | ||
344 | return 1; | 344 | return 1; |
345 | } | 345 | } |
@@ -356,8 +356,6 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) | |||
356 | while (!is_polling(sdd) && !acquire_dma(sdd)) | 356 | while (!is_polling(sdd) && !acquire_dma(sdd)) |
357 | usleep_range(10000, 11000); | 357 | usleep_range(10000, 11000); |
358 | 358 | ||
359 | pm_runtime_get_sync(&sdd->pdev->dev); | ||
360 | |||
361 | return 0; | 359 | return 0; |
362 | } | 360 | } |
363 | 361 | ||
@@ -372,7 +370,6 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi) | |||
372 | sdd->ops->release((enum dma_ch)sdd->tx_dma.ch, | 370 | sdd->ops->release((enum dma_ch)sdd->tx_dma.ch, |
373 | &s3c64xx_spi_dma_client); | 371 | &s3c64xx_spi_dma_client); |
374 | } | 372 | } |
375 | pm_runtime_put(&sdd->pdev->dev); | ||
376 | 373 | ||
377 | return 0; | 374 | return 0; |
378 | } | 375 | } |
@@ -389,9 +386,10 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma, | |||
389 | { | 386 | { |
390 | struct s3c64xx_spi_driver_data *sdd; | 387 | struct s3c64xx_spi_driver_data *sdd; |
391 | struct dma_slave_config config; | 388 | struct dma_slave_config config; |
392 | struct scatterlist sg; | ||
393 | struct dma_async_tx_descriptor *desc; | 389 | struct dma_async_tx_descriptor *desc; |
394 | 390 | ||
391 | memset(&config, 0, sizeof(config)); | ||
392 | |||
395 | if (dma->direction == DMA_DEV_TO_MEM) { | 393 | if (dma->direction == DMA_DEV_TO_MEM) { |
396 | sdd = container_of((void *)dma, | 394 | sdd = container_of((void *)dma, |
397 | struct s3c64xx_spi_driver_data, rx_dma); | 395 | struct s3c64xx_spi_driver_data, rx_dma); |
@@ -410,14 +408,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma, | |||
410 | dmaengine_slave_config(dma->ch, &config); | 408 | dmaengine_slave_config(dma->ch, &config); |
411 | } | 409 | } |
412 | 410 | ||
413 | sg_init_table(&sg, 1); | 411 | desc = dmaengine_prep_slave_single(dma->ch, buf, len, |
414 | sg_dma_len(&sg) = len; | 412 | dma->direction, DMA_PREP_INTERRUPT); |
415 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)), | ||
416 | len, offset_in_page(buf)); | ||
417 | sg_dma_address(&sg) = buf; | ||
418 | |||
419 | desc = dmaengine_prep_slave_sg(dma->ch, | ||
420 | &sg, 1, dma->direction, DMA_PREP_INTERRUPT); | ||
421 | 413 | ||
422 | desc->callback = s3c64xx_spi_dmacb; | 414 | desc->callback = s3c64xx_spi_dmacb; |
423 | desc->callback_param = dma; | 415 | desc->callback_param = dma; |
@@ -434,27 +426,26 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) | |||
434 | dma_cap_mask_t mask; | 426 | dma_cap_mask_t mask; |
435 | int ret; | 427 | int ret; |
436 | 428 | ||
437 | if (is_polling(sdd)) | 429 | if (!is_polling(sdd)) { |
438 | return 0; | 430 | dma_cap_zero(mask); |
439 | 431 | dma_cap_set(DMA_SLAVE, mask); | |
440 | dma_cap_zero(mask); | 432 | |
441 | dma_cap_set(DMA_SLAVE, mask); | 433 | /* Acquire DMA channels */ |
442 | 434 | sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter, | |
443 | /* Acquire DMA channels */ | 435 | (void *)sdd->rx_dma.dmach, dev, "rx"); |
444 | sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter, | 436 | if (!sdd->rx_dma.ch) { |
445 | (void*)sdd->rx_dma.dmach, dev, "rx"); | 437 | dev_err(dev, "Failed to get RX DMA channel\n"); |
446 | if (!sdd->rx_dma.ch) { | 438 | ret = -EBUSY; |
447 | dev_err(dev, "Failed to get RX DMA channel\n"); | 439 | goto out; |
448 | ret = -EBUSY; | 440 | } |
449 | goto out; | ||
450 | } | ||
451 | 441 | ||
452 | sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter, | 442 | sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter, |
453 | (void*)sdd->tx_dma.dmach, dev, "tx"); | 443 | (void *)sdd->tx_dma.dmach, dev, "tx"); |
454 | if (!sdd->tx_dma.ch) { | 444 | if (!sdd->tx_dma.ch) { |
455 | dev_err(dev, "Failed to get TX DMA channel\n"); | 445 | dev_err(dev, "Failed to get TX DMA channel\n"); |
456 | ret = -EBUSY; | 446 | ret = -EBUSY; |
457 | goto out_rx; | 447 | goto out_rx; |
448 | } | ||
458 | } | 449 | } |
459 | 450 | ||
460 | ret = pm_runtime_get_sync(&sdd->pdev->dev); | 451 | ret = pm_runtime_get_sync(&sdd->pdev->dev); |
@@ -1056,8 +1047,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
1056 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; | 1047 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; |
1057 | struct s3c64xx_spi_driver_data *sdd; | 1048 | struct s3c64xx_spi_driver_data *sdd; |
1058 | struct s3c64xx_spi_info *sci; | 1049 | struct s3c64xx_spi_info *sci; |
1059 | struct spi_message *msg; | ||
1060 | unsigned long flags; | ||
1061 | int err; | 1050 | int err; |
1062 | 1051 | ||
1063 | sdd = spi_master_get_devdata(spi->master); | 1052 | sdd = spi_master_get_devdata(spi->master); |
@@ -1071,37 +1060,23 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
1071 | return -ENODEV; | 1060 | return -ENODEV; |
1072 | } | 1061 | } |
1073 | 1062 | ||
1074 | /* Request gpio only if cs line is asserted by gpio pins */ | 1063 | if (!spi_get_ctldata(spi)) { |
1075 | if (sdd->cs_gpio) { | 1064 | /* Request gpio only if cs line is asserted by gpio pins */ |
1076 | err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH, | 1065 | if (sdd->cs_gpio) { |
1077 | dev_name(&spi->dev)); | 1066 | err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH, |
1078 | if (err) { | 1067 | dev_name(&spi->dev)); |
1079 | dev_err(&spi->dev, | 1068 | if (err) { |
1080 | "Failed to get /CS gpio [%d]: %d\n", | 1069 | dev_err(&spi->dev, |
1081 | cs->line, err); | 1070 | "Failed to get /CS gpio [%d]: %d\n", |
1082 | goto err_gpio_req; | 1071 | cs->line, err); |
1072 | goto err_gpio_req; | ||
1073 | } | ||
1083 | } | 1074 | } |
1084 | } | ||
1085 | 1075 | ||
1086 | if (!spi_get_ctldata(spi)) | ||
1087 | spi_set_ctldata(spi, cs); | 1076 | spi_set_ctldata(spi, cs); |
1088 | |||
1089 | sci = sdd->cntrlr_info; | ||
1090 | |||
1091 | spin_lock_irqsave(&sdd->lock, flags); | ||
1092 | |||
1093 | list_for_each_entry(msg, &sdd->queue, queue) { | ||
1094 | /* Is some mssg is already queued for this device */ | ||
1095 | if (msg->spi == spi) { | ||
1096 | dev_err(&spi->dev, | ||
1097 | "setup: attempt while mssg in queue!\n"); | ||
1098 | spin_unlock_irqrestore(&sdd->lock, flags); | ||
1099 | err = -EBUSY; | ||
1100 | goto err_msgq; | ||
1101 | } | ||
1102 | } | 1077 | } |
1103 | 1078 | ||
1104 | spin_unlock_irqrestore(&sdd->lock, flags); | 1079 | sci = sdd->cntrlr_info; |
1105 | 1080 | ||
1106 | pm_runtime_get_sync(&sdd->pdev->dev); | 1081 | pm_runtime_get_sync(&sdd->pdev->dev); |
1107 | 1082 | ||
@@ -1149,7 +1124,6 @@ setup_exit: | |||
1149 | /* setup() returns with device de-selected */ | 1124 | /* setup() returns with device de-selected */ |
1150 | disable_cs(sdd, spi); | 1125 | disable_cs(sdd, spi); |
1151 | 1126 | ||
1152 | err_msgq: | ||
1153 | gpio_free(cs->line); | 1127 | gpio_free(cs->line); |
1154 | spi_set_ctldata(spi, NULL); | 1128 | spi_set_ctldata(spi, NULL); |
1155 | 1129 | ||
@@ -1275,7 +1249,7 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev) | |||
1275 | #else | 1249 | #else |
1276 | static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev) | 1250 | static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev) |
1277 | { | 1251 | { |
1278 | return dev->platform_data; | 1252 | return dev_get_platdata(dev); |
1279 | } | 1253 | } |
1280 | #endif | 1254 | #endif |
1281 | 1255 | ||
@@ -1300,7 +1274,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1300 | struct resource *mem_res; | 1274 | struct resource *mem_res; |
1301 | struct resource *res; | 1275 | struct resource *res; |
1302 | struct s3c64xx_spi_driver_data *sdd; | 1276 | struct s3c64xx_spi_driver_data *sdd; |
1303 | struct s3c64xx_spi_info *sci = pdev->dev.platform_data; | 1277 | struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev); |
1304 | struct spi_master *master; | 1278 | struct spi_master *master; |
1305 | int ret, irq; | 1279 | int ret, irq; |
1306 | char clk_name[16]; | 1280 | char clk_name[16]; |
@@ -1364,16 +1338,14 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1364 | if (!sdd->pdev->dev.of_node) { | 1338 | if (!sdd->pdev->dev.of_node) { |
1365 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); | 1339 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); |
1366 | if (!res) { | 1340 | if (!res) { |
1367 | dev_warn(&pdev->dev, "Unable to get SPI tx dma " | 1341 | dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n"); |
1368 | "resource. Switching to poll mode\n"); | ||
1369 | sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL; | 1342 | sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL; |
1370 | } else | 1343 | } else |
1371 | sdd->tx_dma.dmach = res->start; | 1344 | sdd->tx_dma.dmach = res->start; |
1372 | 1345 | ||
1373 | res = platform_get_resource(pdev, IORESOURCE_DMA, 1); | 1346 | res = platform_get_resource(pdev, IORESOURCE_DMA, 1); |
1374 | if (!res) { | 1347 | if (!res) { |
1375 | dev_warn(&pdev->dev, "Unable to get SPI rx dma " | 1348 | dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n"); |
1376 | "resource. Switching to poll mode\n"); | ||
1377 | sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL; | 1349 | sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL; |
1378 | } else | 1350 | } else |
1379 | sdd->rx_dma.dmach = res->start; | 1351 | sdd->rx_dma.dmach = res->start; |
@@ -1395,6 +1367,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1395 | SPI_BPW_MASK(8); | 1367 | SPI_BPW_MASK(8); |
1396 | /* the spi->mode bits understood by this driver: */ | 1368 | /* the spi->mode bits understood by this driver: */ |
1397 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 1369 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
1370 | master->auto_runtime_pm = true; | ||
1398 | 1371 | ||
1399 | sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res); | 1372 | sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res); |
1400 | if (IS_ERR(sdd->regs)) { | 1373 | if (IS_ERR(sdd->regs)) { |
@@ -1442,7 +1415,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1442 | 1415 | ||
1443 | spin_lock_init(&sdd->lock); | 1416 | spin_lock_init(&sdd->lock); |
1444 | init_completion(&sdd->xfer_completion); | 1417 | init_completion(&sdd->xfer_completion); |
1445 | INIT_LIST_HEAD(&sdd->queue); | ||
1446 | 1418 | ||
1447 | ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0, | 1419 | ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0, |
1448 | "spi-s3c64xx", sdd); | 1420 | "spi-s3c64xx", sdd); |
@@ -1464,8 +1436,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1464 | 1436 | ||
1465 | dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n", | 1437 | dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n", |
1466 | sdd->port_id, master->num_chipselect); | 1438 | sdd->port_id, master->num_chipselect); |
1467 | dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", | 1439 | dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n", |
1468 | mem_res->end, mem_res->start, | 1440 | mem_res, |
1469 | sdd->rx_dma.dmach, sdd->tx_dma.dmach); | 1441 | sdd->rx_dma.dmach, sdd->tx_dma.dmach); |
1470 | 1442 | ||
1471 | pm_runtime_enable(&pdev->dev); | 1443 | pm_runtime_enable(&pdev->dev); |
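The s3c64xx prepare_dma() rework drops the hand-built single-entry scatterlist in favour of dmaengine_prep_slave_single(), which takes the DMA address and length directly, and zeroes the dma_slave_config before use. A sketch of issuing such a transfer (function and callback names are illustrative):

        #include <linux/dmaengine.h>

        static int example_issue(struct dma_chan *chan, dma_addr_t buf,
                                 size_t len, enum dma_transfer_direction dir,
                                 dma_async_tx_callback done, void *param)
        {
                struct dma_async_tx_descriptor *desc;

                /* One contiguous buffer: no scatterlist needed. */
                desc = dmaengine_prep_slave_single(chan, buf, len, dir,
                                                   DMA_PREP_INTERRUPT);
                if (!desc)
                        return -ENOMEM;

                desc->callback = done;
                desc->callback_param = param;

                dmaengine_submit(desc);
                dma_async_issue_pending(chan);
                return 0;
        }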
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c index 716edf999538..0b68cb592fa4 100644 --- a/drivers/spi/spi-sh-hspi.c +++ b/drivers/spi/spi-sh-hspi.c | |||
@@ -99,21 +99,6 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val) | |||
99 | /* | 99 | /* |
100 | * spi master function | 100 | * spi master function |
101 | */ | 101 | */ |
102 | static int hspi_prepare_transfer(struct spi_master *master) | ||
103 | { | ||
104 | struct hspi_priv *hspi = spi_master_get_devdata(master); | ||
105 | |||
106 | pm_runtime_get_sync(hspi->dev); | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int hspi_unprepare_transfer(struct spi_master *master) | ||
111 | { | ||
112 | struct hspi_priv *hspi = spi_master_get_devdata(master); | ||
113 | |||
114 | pm_runtime_put_sync(hspi->dev); | ||
115 | return 0; | ||
116 | } | ||
117 | 102 | ||
118 | #define hspi_hw_cs_enable(hspi) hspi_hw_cs_ctrl(hspi, 0) | 103 | #define hspi_hw_cs_enable(hspi) hspi_hw_cs_ctrl(hspi, 0) |
119 | #define hspi_hw_cs_disable(hspi) hspi_hw_cs_ctrl(hspi, 1) | 104 | #define hspi_hw_cs_disable(hspi) hspi_hw_cs_ctrl(hspi, 1) |
@@ -316,9 +301,8 @@ static int hspi_probe(struct platform_device *pdev) | |||
316 | master->setup = hspi_setup; | 301 | master->setup = hspi_setup; |
317 | master->cleanup = hspi_cleanup; | 302 | master->cleanup = hspi_cleanup; |
318 | master->mode_bits = SPI_CPOL | SPI_CPHA; | 303 | master->mode_bits = SPI_CPOL | SPI_CPHA; |
319 | master->prepare_transfer_hardware = hspi_prepare_transfer; | 304 | master->auto_runtime_pm = true; |
320 | master->transfer_one_message = hspi_transfer_one_message; | 305 | master->transfer_one_message = hspi_transfer_one_message; |
321 | master->unprepare_transfer_hardware = hspi_unprepare_transfer; | ||
322 | ret = spi_register_master(master); | 306 | ret = spi_register_master(master); |
323 | if (ret < 0) { | 307 | if (ret < 0) { |
324 | dev_err(&pdev->dev, "spi_register_master error.\n"); | 308 | dev_err(&pdev->dev, "spi_register_master error.\n"); |
@@ -327,8 +311,6 @@ static int hspi_probe(struct platform_device *pdev) | |||
327 | 311 | ||
328 | pm_runtime_enable(&pdev->dev); | 312 | pm_runtime_enable(&pdev->dev); |
329 | 313 | ||
330 | dev_info(&pdev->dev, "probed\n"); | ||
331 | |||
332 | return 0; | 314 | return 0; |
333 | 315 | ||
334 | error1: | 316 | error1: |
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 2bc5a6b86300..2a95435a6a11 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
@@ -645,7 +645,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) | |||
645 | if (pdev->dev.of_node) | 645 | if (pdev->dev.of_node) |
646 | p->info = sh_msiof_spi_parse_dt(&pdev->dev); | 646 | p->info = sh_msiof_spi_parse_dt(&pdev->dev); |
647 | else | 647 | else |
648 | p->info = pdev->dev.platform_data; | 648 | p->info = dev_get_platdata(&pdev->dev); |
649 | 649 | ||
650 | if (!p->info) { | 650 | if (!p->info) { |
651 | dev_err(&pdev->dev, "failed to obtain device info\n"); | 651 | dev_err(&pdev->dev, "failed to obtain device info\n"); |
@@ -745,18 +745,6 @@ static int sh_msiof_spi_remove(struct platform_device *pdev) | |||
745 | return ret; | 745 | return ret; |
746 | } | 746 | } |
747 | 747 | ||
748 | static int sh_msiof_spi_runtime_nop(struct device *dev) | ||
749 | { | ||
750 | /* Runtime PM callback shared between ->runtime_suspend() | ||
751 | * and ->runtime_resume(). Simply returns success. | ||
752 | * | ||
753 | * This driver re-initializes all registers after | ||
754 | * pm_runtime_get_sync() anyway so there is no need | ||
755 | * to save and restore registers here. | ||
756 | */ | ||
757 | return 0; | ||
758 | } | ||
759 | |||
760 | #ifdef CONFIG_OF | 748 | #ifdef CONFIG_OF |
761 | static const struct of_device_id sh_msiof_match[] = { | 749 | static const struct of_device_id sh_msiof_match[] = { |
762 | { .compatible = "renesas,sh-msiof", }, | 750 | { .compatible = "renesas,sh-msiof", }, |
@@ -766,18 +754,12 @@ static const struct of_device_id sh_msiof_match[] = { | |||
766 | MODULE_DEVICE_TABLE(of, sh_msiof_match); | 754 | MODULE_DEVICE_TABLE(of, sh_msiof_match); |
767 | #endif | 755 | #endif |
768 | 756 | ||
769 | static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = { | ||
770 | .runtime_suspend = sh_msiof_spi_runtime_nop, | ||
771 | .runtime_resume = sh_msiof_spi_runtime_nop, | ||
772 | }; | ||
773 | |||
774 | static struct platform_driver sh_msiof_spi_drv = { | 757 | static struct platform_driver sh_msiof_spi_drv = { |
775 | .probe = sh_msiof_spi_probe, | 758 | .probe = sh_msiof_spi_probe, |
776 | .remove = sh_msiof_spi_remove, | 759 | .remove = sh_msiof_spi_remove, |
777 | .driver = { | 760 | .driver = { |
778 | .name = "spi_sh_msiof", | 761 | .name = "spi_sh_msiof", |
779 | .owner = THIS_MODULE, | 762 | .owner = THIS_MODULE, |
780 | .pm = &sh_msiof_spi_dev_pm_ops, | ||
781 | .of_match_table = of_match_ptr(sh_msiof_match), | 763 | .of_match_table = of_match_ptr(sh_msiof_match), |
782 | }, | 764 | }, |
783 | }; | 765 | }; |
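Removing sh-msiof's no-op runtime_suspend/runtime_resume callbacks relies on the generic runtime PM helpers returning success when a driver provides no callback, so a driver with nothing to save or restore can simply leave .pm unset. A bare-bones sketch (stub probe/remove and names are placeholders):

        #include <linux/module.h>
        #include <linux/platform_device.h>

        static int example_probe(struct platform_device *pdev)  { return 0; }
        static int example_remove(struct platform_device *pdev) { return 0; }

        static struct platform_driver example_driver = {
                .probe  = example_probe,
                .remove = example_remove,
                .driver = {
                        .name  = "example-spi",
                        .owner = THIS_MODULE,
                        /* no .pm: missing runtime PM callbacks behave as no-ops */
                },
        };

        module_platform_driver(example_driver);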
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c index 097e506042be..8eefeb6007df 100644 --- a/drivers/spi/spi-sh-sci.c +++ b/drivers/spi/spi-sh-sci.c | |||
@@ -130,7 +130,7 @@ static int sh_sci_spi_probe(struct platform_device *dev) | |||
130 | sp = spi_master_get_devdata(master); | 130 | sp = spi_master_get_devdata(master); |
131 | 131 | ||
132 | platform_set_drvdata(dev, sp); | 132 | platform_set_drvdata(dev, sp); |
133 | sp->info = dev->dev.platform_data; | 133 | sp->info = dev_get_platdata(&dev->dev); |
134 | 134 | ||
135 | /* setup spi bitbang adaptor */ | 135 | /* setup spi bitbang adaptor */ |
136 | sp->bitbang.master = spi_master_get(master); | 136 | sp->bitbang.master = spi_master_get(master); |
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index fc20bcfd90c3..a1f21b747733 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c | |||
@@ -19,6 +19,10 @@ | |||
19 | #include <linux/of_gpio.h> | 19 | #include <linux/of_gpio.h> |
20 | #include <linux/spi/spi.h> | 20 | #include <linux/spi/spi.h> |
21 | #include <linux/spi/spi_bitbang.h> | 21 | #include <linux/spi/spi_bitbang.h> |
22 | #include <linux/dmaengine.h> | ||
23 | #include <linux/dma-direction.h> | ||
24 | #include <linux/dma-mapping.h> | ||
25 | #include <linux/sirfsoc_dma.h> | ||
22 | 26 | ||
23 | #define DRIVER_NAME "sirfsoc_spi" | 27 | #define DRIVER_NAME "sirfsoc_spi" |
24 | 28 | ||
@@ -119,9 +123,19 @@ | |||
119 | #define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20) | 123 | #define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20) |
120 | #define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2) | 124 | #define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2) |
121 | 125 | ||
126 | /* | ||
127 | * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma | ||
128 | * due to the limitation of dma controller | ||
129 | */ | ||
130 | |||
131 | #define ALIGNED(x) (!((u32)x & 0x3)) | ||
132 | #define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \ | ||
133 | ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE)) | ||
134 | |||
122 | struct sirfsoc_spi { | 135 | struct sirfsoc_spi { |
123 | struct spi_bitbang bitbang; | 136 | struct spi_bitbang bitbang; |
124 | struct completion done; | 137 | struct completion rx_done; |
138 | struct completion tx_done; | ||
125 | 139 | ||
126 | void __iomem *base; | 140 | void __iomem *base; |
127 | u32 ctrl_freq; /* SPI controller clock speed */ | 141 | u32 ctrl_freq; /* SPI controller clock speed */ |
@@ -137,8 +151,16 @@ struct sirfsoc_spi { | |||
137 | void (*tx_word) (struct sirfsoc_spi *); | 151 | void (*tx_word) (struct sirfsoc_spi *); |
138 | 152 | ||
139 | /* number of words left to be transmitted/received */ | 153 | /* number of words left to be transmitted/received */ |
140 | unsigned int left_tx_cnt; | 154 | unsigned int left_tx_word; |
141 | unsigned int left_rx_cnt; | 155 | unsigned int left_rx_word; |
156 | |||
157 | /* rx & tx DMA channels */ | ||
158 | struct dma_chan *rx_chan; | ||
159 | struct dma_chan *tx_chan; | ||
160 | dma_addr_t src_start; | ||
161 | dma_addr_t dst_start; | ||
162 | void *dummypage; | ||
163 | int word_width; /* in bytes */ | ||
142 | 164 | ||
143 | int chipselect[0]; | 165 | int chipselect[0]; |
144 | }; | 166 | }; |
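The ALIGNED()/IS_DMA_VALID() macros introduced above gate the new DMA path: the transfer pointer must be non-NULL, both buffers and the length must be 32-bit aligned, and the transfer must fit under 2 * PAGE_SIZE (the size of the dummy page allocated later in probe). A hedged restatement using the generic IS_ALIGNED() helper from <linux/kernel.h>; sirfsoc_can_dma() is illustrative, not part of the patch.

static bool sirfsoc_can_dma(const struct spi_transfer *t)
{
        /* a NULL tx_buf or rx_buf passes, exactly as with ALIGNED();
         * the word-aligned dummy page is substituted for it later */
        return t &&
               IS_ALIGNED((unsigned long)t->tx_buf, 4) &&
               IS_ALIGNED((unsigned long)t->rx_buf, 4) &&
               IS_ALIGNED(t->len, 4) &&
               t->len < 2 * PAGE_SIZE;
}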
@@ -155,7 +177,7 @@ static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi) | |||
155 | sspi->rx = rx; | 177 | sspi->rx = rx; |
156 | } | 178 | } |
157 | 179 | ||
158 | sspi->left_rx_cnt--; | 180 | sspi->left_rx_word--; |
159 | } | 181 | } |
160 | 182 | ||
161 | static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi) | 183 | static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi) |
@@ -169,7 +191,7 @@ static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi) | |||
169 | } | 191 | } |
170 | 192 | ||
171 | writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); | 193 | writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); |
172 | sspi->left_tx_cnt--; | 194 | sspi->left_tx_word--; |
173 | } | 195 | } |
174 | 196 | ||
175 | static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi) | 197 | static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi) |
@@ -184,7 +206,7 @@ static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi) | |||
184 | sspi->rx = rx; | 206 | sspi->rx = rx; |
185 | } | 207 | } |
186 | 208 | ||
187 | sspi->left_rx_cnt--; | 209 | sspi->left_rx_word--; |
188 | } | 210 | } |
189 | 211 | ||
190 | static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi) | 212 | static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi) |
@@ -198,7 +220,7 @@ static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi) | |||
198 | } | 220 | } |
199 | 221 | ||
200 | writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); | 222 | writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); |
201 | sspi->left_tx_cnt--; | 223 | sspi->left_tx_word--; |
202 | } | 224 | } |
203 | 225 | ||
204 | static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi) | 226 | static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi) |
@@ -213,7 +235,7 @@ static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi) | |||
213 | sspi->rx = rx; | 235 | sspi->rx = rx; |
214 | } | 236 | } |
215 | 237 | ||
216 | sspi->left_rx_cnt--; | 238 | sspi->left_rx_word--; |
217 | 239 | ||
218 | } | 240 | } |
219 | 241 | ||
@@ -228,7 +250,7 @@ static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi) | |||
228 | } | 250 | } |
229 | 251 | ||
230 | writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); | 252 | writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); |
231 | sspi->left_tx_cnt--; | 253 | sspi->left_tx_word--; |
232 | } | 254 | } |
233 | 255 | ||
234 | static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id) | 256 | static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id) |
@@ -241,7 +263,7 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id) | |||
241 | /* Error Conditions */ | 263 | /* Error Conditions */ |
242 | if (spi_stat & SIRFSOC_SPI_RX_OFLOW || | 264 | if (spi_stat & SIRFSOC_SPI_RX_OFLOW || |
243 | spi_stat & SIRFSOC_SPI_TX_UFLOW) { | 265 | spi_stat & SIRFSOC_SPI_TX_UFLOW) { |
244 | complete(&sspi->done); | 266 | complete(&sspi->rx_done); |
245 | writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); | 267 | writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); |
246 | } | 268 | } |
247 | 269 | ||
@@ -249,50 +271,61 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id) | |||
249 | | SIRFSOC_SPI_RXFIFO_THD_REACH)) | 271 | | SIRFSOC_SPI_RXFIFO_THD_REACH)) |
250 | while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS) | 272 | while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS) |
251 | & SIRFSOC_SPI_FIFO_EMPTY)) && | 273 | & SIRFSOC_SPI_FIFO_EMPTY)) && |
252 | sspi->left_rx_cnt) | 274 | sspi->left_rx_word) |
253 | sspi->rx_word(sspi); | 275 | sspi->rx_word(sspi); |
254 | 276 | ||
255 | if (spi_stat & (SIRFSOC_SPI_FIFO_EMPTY | 277 | if (spi_stat & (SIRFSOC_SPI_FIFO_EMPTY |
256 | | SIRFSOC_SPI_TXFIFO_THD_REACH)) | 278 | | SIRFSOC_SPI_TXFIFO_THD_REACH)) |
257 | while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS) | 279 | while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS) |
258 | & SIRFSOC_SPI_FIFO_FULL)) && | 280 | & SIRFSOC_SPI_FIFO_FULL)) && |
259 | sspi->left_tx_cnt) | 281 | sspi->left_tx_word) |
260 | sspi->tx_word(sspi); | 282 | sspi->tx_word(sspi); |
261 | 283 | ||
262 | /* Received all words */ | 284 | /* Received all words */ |
263 | if ((sspi->left_rx_cnt == 0) && (sspi->left_tx_cnt == 0)) { | 285 | if ((sspi->left_rx_word == 0) && (sspi->left_tx_word == 0)) { |
264 | complete(&sspi->done); | 286 | complete(&sspi->rx_done); |
265 | writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); | 287 | writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); |
266 | } | 288 | } |
267 | return IRQ_HANDLED; | 289 | return IRQ_HANDLED; |
268 | } | 290 | } |
269 | 291 | ||
292 | static void spi_sirfsoc_dma_fini_callback(void *data) | ||
293 | { | ||
294 | struct completion *dma_complete = data; | ||
295 | |||
296 | complete(dma_complete); | ||
297 | } | ||
298 | |||
270 | static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) | 299 | static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) |
271 | { | 300 | { |
272 | struct sirfsoc_spi *sspi; | 301 | struct sirfsoc_spi *sspi; |
273 | int timeout = t->len * 10; | 302 | int timeout = t->len * 10; |
274 | sspi = spi_master_get_devdata(spi->master); | 303 | sspi = spi_master_get_devdata(spi->master); |
275 | 304 | ||
276 | sspi->tx = t->tx_buf; | 305 | sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage; |
277 | sspi->rx = t->rx_buf; | 306 | sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage; |
278 | sspi->left_tx_cnt = sspi->left_rx_cnt = t->len; | 307 | sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width; |
279 | INIT_COMPLETION(sspi->done); | 308 | INIT_COMPLETION(sspi->rx_done); |
309 | INIT_COMPLETION(sspi->tx_done); | ||
280 | 310 | ||
281 | writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS); | 311 | writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS); |
282 | 312 | ||
283 | if (t->len == 1) { | 313 | if (sspi->left_tx_word == 1) { |
284 | writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | | 314 | writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | |
285 | SIRFSOC_SPI_ENA_AUTO_CLR, | 315 | SIRFSOC_SPI_ENA_AUTO_CLR, |
286 | sspi->base + SIRFSOC_SPI_CTRL); | 316 | sspi->base + SIRFSOC_SPI_CTRL); |
287 | writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); | 317 | writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); |
288 | writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); | 318 | writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); |
289 | } else if ((t->len > 1) && (t->len < SIRFSOC_SPI_DAT_FRM_LEN_MAX)) { | 319 | } else if ((sspi->left_tx_word > 1) && (sspi->left_tx_word < |
320 | SIRFSOC_SPI_DAT_FRM_LEN_MAX)) { | ||
290 | writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | | 321 | writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | |
291 | SIRFSOC_SPI_MUL_DAT_MODE | | 322 | SIRFSOC_SPI_MUL_DAT_MODE | |
292 | SIRFSOC_SPI_ENA_AUTO_CLR, | 323 | SIRFSOC_SPI_ENA_AUTO_CLR, |
293 | sspi->base + SIRFSOC_SPI_CTRL); | 324 | sspi->base + SIRFSOC_SPI_CTRL); |
294 | writel(t->len - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); | 325 | writel(sspi->left_tx_word - 1, |
295 | writel(t->len - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); | 326 | sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); |
327 | writel(sspi->left_tx_word - 1, | ||
328 | sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); | ||
296 | } else { | 329 | } else { |
297 | writel(readl(sspi->base + SIRFSOC_SPI_CTRL), | 330 | writel(readl(sspi->base + SIRFSOC_SPI_CTRL), |
298 | sspi->base + SIRFSOC_SPI_CTRL); | 331 | sspi->base + SIRFSOC_SPI_CTRL); |
@@ -305,17 +338,64 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
305 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 338 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); |
306 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 339 | writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); |
307 | 340 | ||
308 | /* Send the first word to trigger the whole tx/rx process */ | 341 | if (IS_DMA_VALID(t)) { |
309 | sspi->tx_word(sspi); | 342 | struct dma_async_tx_descriptor *rx_desc, *tx_desc; |
343 | |||
344 | sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, DMA_FROM_DEVICE); | ||
345 | rx_desc = dmaengine_prep_slave_single(sspi->rx_chan, | ||
346 | sspi->dst_start, t->len, DMA_DEV_TO_MEM, | ||
347 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
348 | rx_desc->callback = spi_sirfsoc_dma_fini_callback; | ||
349 | rx_desc->callback_param = &sspi->rx_done; | ||
350 | |||
351 | sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, DMA_TO_DEVICE); | ||
352 | tx_desc = dmaengine_prep_slave_single(sspi->tx_chan, | ||
353 | sspi->src_start, t->len, DMA_MEM_TO_DEV, | ||
354 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
355 | tx_desc->callback = spi_sirfsoc_dma_fini_callback; | ||
356 | tx_desc->callback_param = &sspi->tx_done; | ||
357 | |||
358 | dmaengine_submit(tx_desc); | ||
359 | dmaengine_submit(rx_desc); | ||
360 | dma_async_issue_pending(sspi->tx_chan); | ||
361 | dma_async_issue_pending(sspi->rx_chan); | ||
362 | } else { | ||
363 | /* Send the first word to trigger the whole tx/rx process */ | ||
364 | sspi->tx_word(sspi); | ||
365 | |||
366 | writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN | | ||
367 | SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN | | ||
368 | SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN | | ||
369 | SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN); | ||
370 | } | ||
310 | 371 | ||
311 | writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN | | ||
312 | SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN | | ||
313 | SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN | | ||
314 | SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN); | ||
315 | writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN); | 372 | writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN); |
316 | 373 | ||
317 | if (wait_for_completion_timeout(&sspi->done, timeout) == 0) | 374 | if (!IS_DMA_VALID(t)) { /* for PIO */ |
375 | if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) | ||
376 | dev_err(&spi->dev, "transfer timeout\n"); | ||
377 | } else if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) { | ||
318 | dev_err(&spi->dev, "transfer timeout\n"); | 378 | dev_err(&spi->dev, "transfer timeout\n"); |
379 | dmaengine_terminate_all(sspi->rx_chan); | ||
380 | } else | ||
381 | sspi->left_rx_word = 0; | ||
382 | |||
383 | /* | ||
384 | * we only wait tx-done event if transferring by DMA. for PIO, | ||
385 | * we get rx data by writing tx data, so if rx is done, tx has | ||
386 | * done earlier | ||
387 | */ | ||
388 | if (IS_DMA_VALID(t)) { | ||
389 | if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { | ||
390 | dev_err(&spi->dev, "transfer timeout\n"); | ||
391 | dmaengine_terminate_all(sspi->tx_chan); | ||
392 | } | ||
393 | } | ||
394 | |||
395 | if (IS_DMA_VALID(t)) { | ||
396 | dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE); | ||
397 | dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE); | ||
398 | } | ||
319 | 399 | ||
320 | /* TX, RX FIFO stop */ | 400 | /* TX, RX FIFO stop */ |
321 | writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 401 | writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); |
@@ -323,7 +403,7 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
323 | writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN); | 403 | writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN); |
324 | writel(0, sspi->base + SIRFSOC_SPI_INT_EN); | 404 | writel(0, sspi->base + SIRFSOC_SPI_INT_EN); |
325 | 405 | ||
326 | return t->len - sspi->left_rx_cnt; | 406 | return t->len - sspi->left_rx_word * sspi->word_width; |
327 | } | 407 | } |
328 | 408 | ||
329 | static void spi_sirfsoc_chipselect(struct spi_device *spi, int value) | 409 | static void spi_sirfsoc_chipselect(struct spi_device *spi, int value) |
@@ -332,7 +412,6 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value) | |||
332 | 412 | ||
333 | if (sspi->chipselect[spi->chip_select] == 0) { | 413 | if (sspi->chipselect[spi->chip_select] == 0) { |
334 | u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL); | 414 | u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL); |
335 | regval |= SIRFSOC_SPI_CS_IO_OUT; | ||
336 | switch (value) { | 415 | switch (value) { |
337 | case BITBANG_CS_ACTIVE: | 416 | case BITBANG_CS_ACTIVE: |
338 | if (spi->mode & SPI_CS_HIGH) | 417 | if (spi->mode & SPI_CS_HIGH) |
@@ -369,11 +448,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
369 | bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; | 448 | bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; |
370 | hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz; | 449 | hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz; |
371 | 450 | ||
372 | /* Enable IO mode for RX, TX */ | ||
373 | writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); | ||
374 | writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); | ||
375 | regval = (sspi->ctrl_freq / (2 * hz)) - 1; | 451 | regval = (sspi->ctrl_freq / (2 * hz)) - 1; |
376 | |||
377 | if (regval > 0xFFFF || regval < 0) { | 452 | if (regval > 0xFFFF || regval < 0) { |
378 | dev_err(&spi->dev, "Speed %d not supported\n", hz); | 453 | dev_err(&spi->dev, "Speed %d not supported\n", hz); |
379 | return -EINVAL; | 454 | return -EINVAL; |
@@ -388,6 +463,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
388 | SIRFSOC_SPI_FIFO_WIDTH_BYTE; | 463 | SIRFSOC_SPI_FIFO_WIDTH_BYTE; |
389 | rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | | 464 | rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | |
390 | SIRFSOC_SPI_FIFO_WIDTH_BYTE; | 465 | SIRFSOC_SPI_FIFO_WIDTH_BYTE; |
466 | sspi->word_width = 1; | ||
391 | break; | 467 | break; |
392 | case 12: | 468 | case 12: |
393 | case 16: | 469 | case 16: |
@@ -399,6 +475,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
399 | SIRFSOC_SPI_FIFO_WIDTH_WORD; | 475 | SIRFSOC_SPI_FIFO_WIDTH_WORD; |
400 | rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | | 476 | rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | |
401 | SIRFSOC_SPI_FIFO_WIDTH_WORD; | 477 | SIRFSOC_SPI_FIFO_WIDTH_WORD; |
478 | sspi->word_width = 2; | ||
402 | break; | 479 | break; |
403 | case 32: | 480 | case 32: |
404 | regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32; | 481 | regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32; |
@@ -408,6 +485,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
408 | SIRFSOC_SPI_FIFO_WIDTH_DWORD; | 485 | SIRFSOC_SPI_FIFO_WIDTH_DWORD; |
409 | rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | | 486 | rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | |
410 | SIRFSOC_SPI_FIFO_WIDTH_DWORD; | 487 | SIRFSOC_SPI_FIFO_WIDTH_DWORD; |
488 | sspi->word_width = 4; | ||
411 | break; | 489 | break; |
412 | default: | 490 | default: |
413 | BUG(); | 491 | BUG(); |
@@ -442,6 +520,17 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
442 | writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL); | 520 | writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL); |
443 | 521 | ||
444 | writel(regval, sspi->base + SIRFSOC_SPI_CTRL); | 522 | writel(regval, sspi->base + SIRFSOC_SPI_CTRL); |
523 | |||
524 | if (IS_DMA_VALID(t)) { | ||
525 | /* Enable DMA mode for RX, TX */ | ||
526 | writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); | ||
527 | writel(SIRFSOC_SPI_RX_DMA_FLUSH, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); | ||
528 | } else { | ||
529 | /* Enable IO mode for RX, TX */ | ||
530 | writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); | ||
531 | writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); | ||
532 | } | ||
533 | |||
445 | return 0; | 534 | return 0; |
446 | } | 535 | } |
447 | 536 | ||
@@ -466,6 +555,8 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) | |||
466 | struct spi_master *master; | 555 | struct spi_master *master; |
467 | struct resource *mem_res; | 556 | struct resource *mem_res; |
468 | int num_cs, cs_gpio, irq; | 557 | int num_cs, cs_gpio, irq; |
558 | u32 rx_dma_ch, tx_dma_ch; | ||
559 | dma_cap_mask_t dma_cap_mask; | ||
469 | int i; | 560 | int i; |
470 | int ret; | 561 | int ret; |
471 | 562 | ||
@@ -476,6 +567,20 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) | |||
476 | goto err_cs; | 567 | goto err_cs; |
477 | } | 568 | } |
478 | 569 | ||
570 | ret = of_property_read_u32(pdev->dev.of_node, | ||
571 | "sirf,spi-dma-rx-channel", &rx_dma_ch); | ||
572 | if (ret < 0) { | ||
573 | dev_err(&pdev->dev, "Unable to get rx dma channel\n"); | ||
574 | goto err_cs; | ||
575 | } | ||
576 | |||
577 | ret = of_property_read_u32(pdev->dev.of_node, | ||
578 | "sirf,spi-dma-tx-channel", &tx_dma_ch); | ||
579 | if (ret < 0) { | ||
580 | dev_err(&pdev->dev, "Unable to get tx dma channel\n"); | ||
581 | goto err_cs; | ||
582 | } | ||
583 | |||
479 | master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs); | 584 | master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs); |
480 | if (!master) { | 585 | if (!master) { |
481 | dev_err(&pdev->dev, "Unable to allocate SPI master\n"); | 586 | dev_err(&pdev->dev, "Unable to allocate SPI master\n"); |
@@ -484,12 +589,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) | |||
484 | platform_set_drvdata(pdev, master); | 589 | platform_set_drvdata(pdev, master); |
485 | sspi = spi_master_get_devdata(master); | 590 | sspi = spi_master_get_devdata(master); |
486 | 591 | ||
487 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
488 | if (!mem_res) { | ||
489 | dev_err(&pdev->dev, "Unable to get IO resource\n"); | ||
490 | ret = -ENODEV; | ||
491 | goto free_master; | ||
492 | } | ||
493 | master->num_chipselect = num_cs; | 592 | master->num_chipselect = num_cs; |
494 | 593 | ||
495 | for (i = 0; i < master->num_chipselect; i++) { | 594 | for (i = 0; i < master->num_chipselect; i++) { |
@@ -516,6 +615,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) | |||
516 | } | 615 | } |
517 | } | 616 | } |
518 | 617 | ||
618 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
519 | sspi->base = devm_ioremap_resource(&pdev->dev, mem_res); | 619 | sspi->base = devm_ioremap_resource(&pdev->dev, mem_res); |
520 | if (IS_ERR(sspi->base)) { | 620 | if (IS_ERR(sspi->base)) { |
521 | ret = PTR_ERR(sspi->base); | 621 | ret = PTR_ERR(sspi->base); |
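The NULL check on mem_res that this hunk deletes was redundant: devm_ioremap_resource() validates the resource itself, logs an error and returns an ERR_PTR, so probe only needs the IS_ERR() test that already follows. The resulting canonical shape (res and base assumed declared):

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);  /* copes with res == NULL */
        if (IS_ERR(base))
                return PTR_ERR(base);                   /* already logged by the helper */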
@@ -538,19 +638,40 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) | |||
538 | sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer; | 638 | sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer; |
539 | sspi->bitbang.master->setup = spi_sirfsoc_setup; | 639 | sspi->bitbang.master->setup = spi_sirfsoc_setup; |
540 | master->bus_num = pdev->id; | 640 | master->bus_num = pdev->id; |
641 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH; | ||
541 | master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) | | 642 | master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) | |
542 | SPI_BPW_MASK(16) | SPI_BPW_MASK(32); | 643 | SPI_BPW_MASK(16) | SPI_BPW_MASK(32); |
543 | sspi->bitbang.master->dev.of_node = pdev->dev.of_node; | 644 | sspi->bitbang.master->dev.of_node = pdev->dev.of_node; |
544 | 645 | ||
646 | /* request DMA channels */ | ||
647 | dma_cap_zero(dma_cap_mask); | ||
648 | dma_cap_set(DMA_INTERLEAVE, dma_cap_mask); | ||
649 | |||
650 | sspi->rx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id, | ||
651 | (void *)rx_dma_ch); | ||
652 | if (!sspi->rx_chan) { | ||
653 | dev_err(&pdev->dev, "can not allocate rx dma channel\n"); | ||
654 | ret = -ENODEV; | ||
655 | goto free_master; | ||
656 | } | ||
657 | sspi->tx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id, | ||
658 | (void *)tx_dma_ch); | ||
659 | if (!sspi->tx_chan) { | ||
660 | dev_err(&pdev->dev, "can not allocate tx dma channel\n"); | ||
661 | ret = -ENODEV; | ||
662 | goto free_rx_dma; | ||
663 | } | ||
664 | |||
545 | sspi->clk = clk_get(&pdev->dev, NULL); | 665 | sspi->clk = clk_get(&pdev->dev, NULL); |
546 | if (IS_ERR(sspi->clk)) { | 666 | if (IS_ERR(sspi->clk)) { |
547 | ret = -EINVAL; | 667 | ret = PTR_ERR(sspi->clk); |
548 | goto free_master; | 668 | goto free_tx_dma; |
549 | } | 669 | } |
550 | clk_prepare_enable(sspi->clk); | 670 | clk_prepare_enable(sspi->clk); |
551 | sspi->ctrl_freq = clk_get_rate(sspi->clk); | 671 | sspi->ctrl_freq = clk_get_rate(sspi->clk); |
552 | 672 | ||
553 | init_completion(&sspi->done); | 673 | init_completion(&sspi->rx_done); |
674 | init_completion(&sspi->tx_done); | ||
554 | 675 | ||
555 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); | 676 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); |
556 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); | 677 | writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); |
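The channel setup above uses the filter-function form of dma_request_channel(): sirfsoc_dma_filter_id() is provided by the SiRF DMA driver and matches a channel by the number read from the sirf,spi-dma-rx-channel / sirf,spi-dma-tx-channel properties, passed through the filter's opaque void * argument. The generic shape of one such request; channel_no is illustrative.

        dma_cap_mask_t mask;
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_INTERLEAVE, mask);      /* capability requested here */
        chan = dma_request_channel(mask, (dma_filter_fn)sirfsoc_dma_filter_id,
                                   (void *)channel_no);
        if (!chan)
                return -ENODEV;                 /* no matching channel */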
@@ -559,17 +680,28 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) | |||
559 | /* We are not using dummy delay between command and data */ | 680 | /* We are not using dummy delay between command and data */ |
560 | writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL); | 681 | writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL); |
561 | 682 | ||
683 | sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL); | ||
684 | if (!sspi->dummypage) { | ||
685 | ret = -ENOMEM; | ||
686 | goto free_clk; | ||
687 | } | ||
688 | |||
562 | ret = spi_bitbang_start(&sspi->bitbang); | 689 | ret = spi_bitbang_start(&sspi->bitbang); |
563 | if (ret) | 690 | if (ret) |
564 | goto free_clk; | 691 | goto free_dummypage; |
565 | 692 | ||
566 | dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num); | 693 | dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num); |
567 | 694 | ||
568 | return 0; | 695 | return 0; |
569 | 696 | free_dummypage: | |
697 | kfree(sspi->dummypage); | ||
570 | free_clk: | 698 | free_clk: |
571 | clk_disable_unprepare(sspi->clk); | 699 | clk_disable_unprepare(sspi->clk); |
572 | clk_put(sspi->clk); | 700 | clk_put(sspi->clk); |
701 | free_tx_dma: | ||
702 | dma_release_channel(sspi->tx_chan); | ||
703 | free_rx_dma: | ||
704 | dma_release_channel(sspi->rx_chan); | ||
573 | free_master: | 705 | free_master: |
574 | spi_master_put(master); | 706 | spi_master_put(master); |
575 | err_cs: | 707 | err_cs: |
@@ -590,8 +722,11 @@ static int spi_sirfsoc_remove(struct platform_device *pdev) | |||
590 | if (sspi->chipselect[i] > 0) | 722 | if (sspi->chipselect[i] > 0) |
591 | gpio_free(sspi->chipselect[i]); | 723 | gpio_free(sspi->chipselect[i]); |
592 | } | 724 | } |
725 | kfree(sspi->dummypage); | ||
593 | clk_disable_unprepare(sspi->clk); | 726 | clk_disable_unprepare(sspi->clk); |
594 | clk_put(sspi->clk); | 727 | clk_put(sspi->clk); |
728 | dma_release_channel(sspi->rx_chan); | ||
729 | dma_release_channel(sspi->tx_chan); | ||
595 | spi_master_put(master); | 730 | spi_master_put(master); |
596 | return 0; | 731 | return 0; |
597 | } | 732 | } |
@@ -599,8 +734,7 @@ static int spi_sirfsoc_remove(struct platform_device *pdev) | |||
599 | #ifdef CONFIG_PM | 734 | #ifdef CONFIG_PM |
600 | static int spi_sirfsoc_suspend(struct device *dev) | 735 | static int spi_sirfsoc_suspend(struct device *dev) |
601 | { | 736 | { |
602 | struct platform_device *pdev = to_platform_device(dev); | 737 | struct spi_master *master = dev_get_drvdata(dev); |
603 | struct spi_master *master = platform_get_drvdata(pdev); | ||
604 | struct sirfsoc_spi *sspi = spi_master_get_devdata(master); | 738 | struct sirfsoc_spi *sspi = spi_master_get_devdata(master); |
605 | 739 | ||
606 | clk_disable(sspi->clk); | 740 | clk_disable(sspi->clk); |
@@ -609,8 +743,7 @@ static int spi_sirfsoc_suspend(struct device *dev) | |||
609 | 743 | ||
610 | static int spi_sirfsoc_resume(struct device *dev) | 744 | static int spi_sirfsoc_resume(struct device *dev) |
611 | { | 745 | { |
612 | struct platform_device *pdev = to_platform_device(dev); | 746 | struct spi_master *master = dev_get_drvdata(dev); |
613 | struct spi_master *master = platform_get_drvdata(pdev); | ||
614 | struct sirfsoc_spi *sspi = spi_master_get_devdata(master); | 747 | struct sirfsoc_spi *sspi = spi_master_get_devdata(master); |
615 | 748 | ||
616 | clk_enable(sspi->clk); | 749 | clk_enable(sspi->clk); |
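The suspend/resume simplification works because platform_set_drvdata() stores its pointer in the embedded struct device, so a dev_pm_ops callback, which only receives a struct device *, can fetch it directly. Both forms below yield the same spi_master; the shorter one is what the patch switches to.

        struct spi_master *master = dev_get_drvdata(dev);
        /* equivalent to the longer, now removed form: */
        struct spi_master *same = platform_get_drvdata(to_platform_device(dev));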
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index e8f542ab8935..145dd435483b 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c | |||
@@ -816,14 +816,6 @@ static int tegra_spi_transfer_one_message(struct spi_master *master, | |||
816 | msg->status = 0; | 816 | msg->status = 0; |
817 | msg->actual_length = 0; | 817 | msg->actual_length = 0; |
818 | 818 | ||
819 | ret = pm_runtime_get_sync(tspi->dev); | ||
820 | if (ret < 0) { | ||
821 | dev_err(tspi->dev, "runtime PM get failed: %d\n", ret); | ||
822 | msg->status = ret; | ||
823 | spi_finalize_current_message(master); | ||
824 | return ret; | ||
825 | } | ||
826 | |||
827 | single_xfer = list_is_singular(&msg->transfers); | 819 | single_xfer = list_is_singular(&msg->transfers); |
828 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 820 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
829 | INIT_COMPLETION(tspi->xfer_completion); | 821 | INIT_COMPLETION(tspi->xfer_completion); |
@@ -859,7 +851,6 @@ static int tegra_spi_transfer_one_message(struct spi_master *master, | |||
859 | ret = 0; | 851 | ret = 0; |
860 | exit: | 852 | exit: |
861 | tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); | 853 | tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); |
862 | pm_runtime_put(tspi->dev); | ||
863 | msg->status = ret; | 854 | msg->status = ret; |
864 | spi_finalize_current_message(master); | 855 | spi_finalize_current_message(master); |
865 | return ret; | 856 | return ret; |
@@ -1053,24 +1044,19 @@ static int tegra_spi_probe(struct platform_device *pdev) | |||
1053 | master->transfer_one_message = tegra_spi_transfer_one_message; | 1044 | master->transfer_one_message = tegra_spi_transfer_one_message; |
1054 | master->num_chipselect = MAX_CHIP_SELECT; | 1045 | master->num_chipselect = MAX_CHIP_SELECT; |
1055 | master->bus_num = -1; | 1046 | master->bus_num = -1; |
1047 | master->auto_runtime_pm = true; | ||
1056 | 1048 | ||
1057 | tspi->master = master; | 1049 | tspi->master = master; |
1058 | tspi->dev = &pdev->dev; | 1050 | tspi->dev = &pdev->dev; |
1059 | spin_lock_init(&tspi->lock); | 1051 | spin_lock_init(&tspi->lock); |
1060 | 1052 | ||
1061 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1053 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1062 | if (!r) { | ||
1063 | dev_err(&pdev->dev, "No IO memory resource\n"); | ||
1064 | ret = -ENODEV; | ||
1065 | goto exit_free_master; | ||
1066 | } | ||
1067 | tspi->phys = r->start; | ||
1068 | tspi->base = devm_ioremap_resource(&pdev->dev, r); | 1054 | tspi->base = devm_ioremap_resource(&pdev->dev, r); |
1069 | if (IS_ERR(tspi->base)) { | 1055 | if (IS_ERR(tspi->base)) { |
1070 | ret = PTR_ERR(tspi->base); | 1056 | ret = PTR_ERR(tspi->base); |
1071 | dev_err(&pdev->dev, "ioremap failed: err = %d\n", ret); | ||
1072 | goto exit_free_master; | 1057 | goto exit_free_master; |
1073 | } | 1058 | } |
1059 | tspi->phys = r->start; | ||
1074 | 1060 | ||
1075 | spi_irq = platform_get_irq(pdev, 0); | 1061 | spi_irq = platform_get_irq(pdev, 0); |
1076 | tspi->irq = spi_irq; | 1062 | tspi->irq = spi_irq; |
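The tegra114 hunks trade the open-coded pm_runtime_get_sync()/pm_runtime_put() in transfer_one_message() for master->auto_runtime_pm: with that flag set, the SPI core takes the runtime-PM reference on the controller's parent device before pumping messages and drops it when the queue goes idle, so per-message handling in the driver becomes unnecessary. The opt-in from the driver's side reduces to a few lines (hedged sketch, declarations assumed):

        master->auto_runtime_pm = true;         /* SPI core holds the runtime-PM reference */
        pm_runtime_enable(&pdev->dev);          /* enabling runtime PM stays the driver's job */
        ret = spi_register_master(master);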
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index c1d5d95e70ea..1d814dc6e000 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ b/drivers/spi/spi-tegra20-sflash.c | |||
@@ -335,12 +335,6 @@ static int tegra_sflash_transfer_one_message(struct spi_master *master, | |||
335 | struct spi_device *spi = msg->spi; | 335 | struct spi_device *spi = msg->spi; |
336 | int ret; | 336 | int ret; |
337 | 337 | ||
338 | ret = pm_runtime_get_sync(tsd->dev); | ||
339 | if (ret < 0) { | ||
340 | dev_err(tsd->dev, "pm_runtime_get() failed, err = %d\n", ret); | ||
341 | return ret; | ||
342 | } | ||
343 | |||
344 | msg->status = 0; | 338 | msg->status = 0; |
345 | msg->actual_length = 0; | 339 | msg->actual_length = 0; |
346 | single_xfer = list_is_singular(&msg->transfers); | 340 | single_xfer = list_is_singular(&msg->transfers); |
@@ -380,7 +374,6 @@ exit: | |||
380 | tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND); | 374 | tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND); |
381 | msg->status = ret; | 375 | msg->status = ret; |
382 | spi_finalize_current_message(master); | 376 | spi_finalize_current_message(master); |
383 | pm_runtime_put(tsd->dev); | ||
384 | return ret; | 377 | return ret; |
385 | } | 378 | } |
386 | 379 | ||
@@ -477,6 +470,7 @@ static int tegra_sflash_probe(struct platform_device *pdev) | |||
477 | master->mode_bits = SPI_CPOL | SPI_CPHA; | 470 | master->mode_bits = SPI_CPOL | SPI_CPHA; |
478 | master->setup = tegra_sflash_setup; | 471 | master->setup = tegra_sflash_setup; |
479 | master->transfer_one_message = tegra_sflash_transfer_one_message; | 472 | master->transfer_one_message = tegra_sflash_transfer_one_message; |
473 | master->auto_runtime_pm = true; | ||
480 | master->num_chipselect = MAX_CHIP_SELECT; | 474 | master->num_chipselect = MAX_CHIP_SELECT; |
481 | master->bus_num = -1; | 475 | master->bus_num = -1; |
482 | 476 | ||
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 80490cc11ce5..c70353672a23 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c | |||
@@ -836,11 +836,6 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, | |||
836 | 836 | ||
837 | msg->status = 0; | 837 | msg->status = 0; |
838 | msg->actual_length = 0; | 838 | msg->actual_length = 0; |
839 | ret = pm_runtime_get_sync(tspi->dev); | ||
840 | if (ret < 0) { | ||
841 | dev_err(tspi->dev, "runtime get failed: %d\n", ret); | ||
842 | goto done; | ||
843 | } | ||
844 | 839 | ||
845 | single_xfer = list_is_singular(&msg->transfers); | 840 | single_xfer = list_is_singular(&msg->transfers); |
846 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 841 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
@@ -878,8 +873,6 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, | |||
878 | exit: | 873 | exit: |
879 | tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); | 874 | tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); |
880 | tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); | 875 | tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); |
881 | pm_runtime_put(tspi->dev); | ||
882 | done: | ||
883 | msg->status = ret; | 876 | msg->status = ret; |
884 | spi_finalize_current_message(master); | 877 | spi_finalize_current_message(master); |
885 | return ret; | 878 | return ret; |
@@ -1086,6 +1079,7 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
1086 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 1079 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
1087 | master->setup = tegra_slink_setup; | 1080 | master->setup = tegra_slink_setup; |
1088 | master->transfer_one_message = tegra_slink_transfer_one_message; | 1081 | master->transfer_one_message = tegra_slink_transfer_one_message; |
1082 | master->auto_runtime_pm = true; | ||
1089 | master->num_chipselect = MAX_CHIP_SELECT; | 1083 | master->num_chipselect = MAX_CHIP_SELECT; |
1090 | master->bus_num = -1; | 1084 | master->bus_num = -1; |
1091 | 1085 | ||
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c new file mode 100644 index 000000000000..e12d962a289f --- /dev/null +++ b/drivers/spi/spi-ti-qspi.c | |||
@@ -0,0 +1,574 @@ | |||
1 | /* | ||
2 | * TI QSPI driver | ||
3 | * | ||
4 | * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com | ||
5 | * Author: Sourav Poddar <sourav.poddar@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GPLv2. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/device.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/dmaengine.h> | ||
24 | #include <linux/omap-dma.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <linux/clk.h> | ||
28 | #include <linux/io.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/pm_runtime.h> | ||
31 | #include <linux/of.h> | ||
32 | #include <linux/of_device.h> | ||
33 | #include <linux/pinctrl/consumer.h> | ||
34 | |||
35 | #include <linux/spi/spi.h> | ||
36 | |||
37 | struct ti_qspi_regs { | ||
38 | u32 clkctrl; | ||
39 | }; | ||
40 | |||
41 | struct ti_qspi { | ||
42 | struct completion transfer_complete; | ||
43 | |||
44 | /* IRQ synchronization */ | ||
45 | spinlock_t lock; | ||
46 | |||
47 | /* list synchronization */ | ||
48 | struct mutex list_lock; | ||
49 | |||
50 | struct spi_master *master; | ||
51 | void __iomem *base; | ||
52 | struct clk *fclk; | ||
53 | struct device *dev; | ||
54 | |||
55 | struct ti_qspi_regs ctx_reg; | ||
56 | |||
57 | u32 spi_max_frequency; | ||
58 | u32 cmd; | ||
59 | u32 dc; | ||
60 | u32 stat; | ||
61 | }; | ||
62 | |||
63 | #define QSPI_PID (0x0) | ||
64 | #define QSPI_SYSCONFIG (0x10) | ||
65 | #define QSPI_INTR_STATUS_RAW_SET (0x20) | ||
66 | #define QSPI_INTR_STATUS_ENABLED_CLEAR (0x24) | ||
67 | #define QSPI_INTR_ENABLE_SET_REG (0x28) | ||
68 | #define QSPI_INTR_ENABLE_CLEAR_REG (0x2c) | ||
69 | #define QSPI_SPI_CLOCK_CNTRL_REG (0x40) | ||
70 | #define QSPI_SPI_DC_REG (0x44) | ||
71 | #define QSPI_SPI_CMD_REG (0x48) | ||
72 | #define QSPI_SPI_STATUS_REG (0x4c) | ||
73 | #define QSPI_SPI_DATA_REG (0x50) | ||
74 | #define QSPI_SPI_SETUP0_REG (0x54) | ||
75 | #define QSPI_SPI_SWITCH_REG (0x64) | ||
76 | #define QSPI_SPI_SETUP1_REG (0x58) | ||
77 | #define QSPI_SPI_SETUP2_REG (0x5c) | ||
78 | #define QSPI_SPI_SETUP3_REG (0x60) | ||
79 | #define QSPI_SPI_DATA_REG_1 (0x68) | ||
80 | #define QSPI_SPI_DATA_REG_2 (0x6c) | ||
81 | #define QSPI_SPI_DATA_REG_3 (0x70) | ||
82 | |||
83 | #define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000) | ||
84 | |||
85 | #define QSPI_FCLK 192000000 | ||
86 | |||
87 | /* Clock Control */ | ||
88 | #define QSPI_CLK_EN (1 << 31) | ||
89 | #define QSPI_CLK_DIV_MAX 0xffff | ||
90 | |||
91 | /* Command */ | ||
92 | #define QSPI_EN_CS(n) (n << 28) | ||
93 | #define QSPI_WLEN(n) ((n - 1) << 19) | ||
94 | #define QSPI_3_PIN (1 << 18) | ||
95 | #define QSPI_RD_SNGL (1 << 16) | ||
96 | #define QSPI_WR_SNGL (2 << 16) | ||
97 | #define QSPI_RD_DUAL (3 << 16) | ||
98 | #define QSPI_RD_QUAD (7 << 16) | ||
99 | #define QSPI_INVAL (4 << 16) | ||
100 | #define QSPI_WC_CMD_INT_EN (1 << 14) | ||
101 | #define QSPI_FLEN(n) ((n - 1) << 0) | ||
102 | |||
103 | /* STATUS REGISTER */ | ||
104 | #define WC 0x02 | ||
105 | |||
106 | /* INTERRUPT REGISTER */ | ||
107 | #define QSPI_WC_INT_EN (1 << 1) | ||
108 | #define QSPI_WC_INT_DISABLE (1 << 1) | ||
109 | |||
110 | /* Device Control */ | ||
111 | #define QSPI_DD(m, n) (m << (3 + n * 8)) | ||
112 | #define QSPI_CKPHA(n) (1 << (2 + n * 8)) | ||
113 | #define QSPI_CSPOL(n) (1 << (1 + n * 8)) | ||
114 | #define QSPI_CKPOL(n) (1 << (n * 8)) | ||
115 | |||
116 | #define QSPI_FRAME 4096 | ||
117 | |||
118 | #define QSPI_AUTOSUSPEND_TIMEOUT 2000 | ||
119 | |||
120 | static inline unsigned long ti_qspi_read(struct ti_qspi *qspi, | ||
121 | unsigned long reg) | ||
122 | { | ||
123 | return readl(qspi->base + reg); | ||
124 | } | ||
125 | |||
126 | static inline void ti_qspi_write(struct ti_qspi *qspi, | ||
127 | unsigned long val, unsigned long reg) | ||
128 | { | ||
129 | writel(val, qspi->base + reg); | ||
130 | } | ||
131 | |||
132 | static int ti_qspi_setup(struct spi_device *spi) | ||
133 | { | ||
134 | struct ti_qspi *qspi = spi_master_get_devdata(spi->master); | ||
135 | struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg; | ||
136 | int clk_div = 0, ret; | ||
137 | u32 clk_ctrl_reg, clk_rate, clk_mask; | ||
138 | |||
139 | if (spi->master->busy) { | ||
140 | dev_dbg(qspi->dev, "master busy doing other trasnfers\n"); | ||
141 | return -EBUSY; | ||
142 | } | ||
143 | |||
144 | if (!qspi->spi_max_frequency) { | ||
145 | dev_err(qspi->dev, "spi max frequency not defined\n"); | ||
146 | return -EINVAL; | ||
147 | } | ||
148 | |||
149 | clk_rate = clk_get_rate(qspi->fclk); | ||
150 | |||
151 | clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1; | ||
152 | |||
153 | if (clk_div < 0) { | ||
154 | dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n"); | ||
155 | return -EINVAL; | ||
156 | } | ||
157 | |||
158 | if (clk_div > QSPI_CLK_DIV_MAX) { | ||
159 | dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n", | ||
160 | QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1); | ||
161 | return -EINVAL; | ||
162 | } | ||
163 | |||
164 | dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", | ||
165 | qspi->spi_max_frequency, clk_div); | ||
166 | |||
167 | ret = pm_runtime_get_sync(qspi->dev); | ||
168 | if (ret) { | ||
169 | dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); | ||
170 | return ret; | ||
171 | } | ||
172 | |||
173 | clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG); | ||
174 | |||
175 | clk_ctrl_reg &= ~QSPI_CLK_EN; | ||
176 | |||
177 | /* disable SCLK */ | ||
178 | ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG); | ||
179 | |||
180 | /* enable SCLK */ | ||
181 | clk_mask = QSPI_CLK_EN | clk_div; | ||
182 | ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG); | ||
183 | ctx_reg->clkctrl = clk_mask; | ||
184 | |||
185 | pm_runtime_mark_last_busy(qspi->dev); | ||
186 | ret = pm_runtime_put_autosuspend(qspi->dev); | ||
187 | if (ret < 0) { | ||
188 | dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n"); | ||
189 | return ret; | ||
190 | } | ||
191 | |||
192 | return 0; | ||
193 | } | ||
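ti_qspi_setup() derives the serial-clock divider from the functional clock, then rewrites the clock-control register with the enable bit cleared and set again together with the divider. The subtraction of 1 suggests the hardware divides by clk_div + 1, so a 192 MHz functional clock with a 48 MHz spi-max-frequency gives DIV_ROUND_UP(192 MHz, 48 MHz) - 1 = 3 and an effective 48 MHz SCLK. A hedged helper restating only the arithmetic:

/* Illustrative only; the driver additionally range-checks the divider
 * against QSPI_CLK_DIV_MAX before using it. */
static u32 qspi_clkctrl_value(u32 clk_rate, u32 max_hz)
{
        u32 clk_div = DIV_ROUND_UP(clk_rate, max_hz) - 1;

        return QSPI_CLK_EN | clk_div;
}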
194 | |||
195 | static void ti_qspi_restore_ctx(struct ti_qspi *qspi) | ||
196 | { | ||
197 | struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg; | ||
198 | |||
199 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); | ||
200 | } | ||
201 | |||
202 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | ||
203 | { | ||
204 | int wlen, count, ret; | ||
205 | unsigned int cmd; | ||
206 | const u8 *txbuf; | ||
207 | |||
208 | txbuf = t->tx_buf; | ||
209 | cmd = qspi->cmd | QSPI_WR_SNGL; | ||
210 | count = t->len; | ||
211 | wlen = t->bits_per_word; | ||
212 | |||
213 | while (count) { | ||
214 | switch (wlen) { | ||
215 | case 8: | ||
216 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", | ||
217 | cmd, qspi->dc, *txbuf); | ||
218 | writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG); | ||
219 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | ||
220 | ret = wait_for_completion_timeout(&qspi->transfer_complete, | ||
221 | QSPI_COMPLETION_TIMEOUT); | ||
222 | if (ret == 0) { | ||
223 | dev_err(qspi->dev, "write timed out\n"); | ||
224 | return -ETIMEDOUT; | ||
225 | } | ||
226 | txbuf += 1; | ||
227 | count -= 1; | ||
228 | break; | ||
229 | case 16: | ||
230 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n", | ||
231 | cmd, qspi->dc, *txbuf); | ||
232 | writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG); | ||
233 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | ||
234 | ret = wait_for_completion_timeout(&qspi->transfer_complete, | ||
235 | QSPI_COMPLETION_TIMEOUT); | ||
236 | if (ret == 0) { | ||
237 | dev_err(qspi->dev, "write timed out\n"); | ||
238 | return -ETIMEDOUT; | ||
239 | } | ||
240 | txbuf += 2; | ||
241 | count -= 2; | ||
242 | break; | ||
243 | case 32: | ||
244 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n", | ||
245 | cmd, qspi->dc, *txbuf); | ||
246 | writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG); | ||
247 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | ||
248 | ret = wait_for_completion_timeout(&qspi->transfer_complete, | ||
249 | QSPI_COMPLETION_TIMEOUT); | ||
250 | if (ret == 0) { | ||
251 | dev_err(qspi->dev, "write timed out\n"); | ||
252 | return -ETIMEDOUT; | ||
253 | } | ||
254 | txbuf += 4; | ||
255 | count -= 4; | ||
256 | break; | ||
257 | } | ||
258 | } | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) | ||
264 | { | ||
265 | int wlen, count, ret; | ||
266 | unsigned int cmd; | ||
267 | u8 *rxbuf; | ||
268 | |||
269 | rxbuf = t->rx_buf; | ||
270 | cmd = qspi->cmd; | ||
271 | switch (t->rx_nbits) { | ||
272 | case SPI_NBITS_DUAL: | ||
273 | cmd |= QSPI_RD_DUAL; | ||
274 | break; | ||
275 | case SPI_NBITS_QUAD: | ||
276 | cmd |= QSPI_RD_QUAD; | ||
277 | break; | ||
278 | default: | ||
279 | cmd |= QSPI_RD_SNGL; | ||
280 | break; | ||
281 | } | ||
282 | count = t->len; | ||
283 | wlen = t->bits_per_word; | ||
284 | |||
285 | while (count) { | ||
286 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); | ||
287 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | ||
288 | ret = wait_for_completion_timeout(&qspi->transfer_complete, | ||
289 | QSPI_COMPLETION_TIMEOUT); | ||
290 | if (ret == 0) { | ||
291 | dev_err(qspi->dev, "read timed out\n"); | ||
292 | return -ETIMEDOUT; | ||
293 | } | ||
294 | switch (wlen) { | ||
295 | case 8: | ||
296 | *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG); | ||
297 | rxbuf += 1; | ||
298 | count -= 1; | ||
299 | break; | ||
300 | case 16: | ||
301 | *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG); | ||
302 | rxbuf += 2; | ||
303 | count -= 2; | ||
304 | break; | ||
305 | case 32: | ||
306 | *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG); | ||
307 | rxbuf += 4; | ||
308 | count -= 4; | ||
309 | break; | ||
310 | } | ||
311 | } | ||
312 | |||
313 | return 0; | ||
314 | } | ||
315 | |||
316 | static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t) | ||
317 | { | ||
318 | int ret; | ||
319 | |||
320 | if (t->tx_buf) { | ||
321 | ret = qspi_write_msg(qspi, t); | ||
322 | if (ret) { | ||
323 | dev_dbg(qspi->dev, "Error while writing\n"); | ||
324 | return ret; | ||
325 | } | ||
326 | } | ||
327 | |||
328 | if (t->rx_buf) { | ||
329 | ret = qspi_read_msg(qspi, t); | ||
330 | if (ret) { | ||
331 | dev_dbg(qspi->dev, "Error while reading\n"); | ||
332 | return ret; | ||
333 | } | ||
334 | } | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | static int ti_qspi_start_transfer_one(struct spi_master *master, | ||
340 | struct spi_message *m) | ||
341 | { | ||
342 | struct ti_qspi *qspi = spi_master_get_devdata(master); | ||
343 | struct spi_device *spi = m->spi; | ||
344 | struct spi_transfer *t; | ||
345 | int status = 0, ret; | ||
346 | int frame_length; | ||
347 | |||
348 | /* setup device control reg */ | ||
349 | qspi->dc = 0; | ||
350 | |||
351 | if (spi->mode & SPI_CPHA) | ||
352 | qspi->dc |= QSPI_CKPHA(spi->chip_select); | ||
353 | if (spi->mode & SPI_CPOL) | ||
354 | qspi->dc |= QSPI_CKPOL(spi->chip_select); | ||
355 | if (spi->mode & SPI_CS_HIGH) | ||
356 | qspi->dc |= QSPI_CSPOL(spi->chip_select); | ||
357 | |||
358 | frame_length = (m->frame_length << 3) / spi->bits_per_word; | ||
359 | |||
360 | frame_length = clamp(frame_length, 0, QSPI_FRAME); | ||
361 | |||
362 | /* setup command reg */ | ||
363 | qspi->cmd = 0; | ||
364 | qspi->cmd |= QSPI_EN_CS(spi->chip_select); | ||
365 | qspi->cmd |= QSPI_FLEN(frame_length); | ||
366 | qspi->cmd |= QSPI_WC_CMD_INT_EN; | ||
367 | |||
368 | ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG); | ||
369 | ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG); | ||
370 | |||
371 | mutex_lock(&qspi->list_lock); | ||
372 | |||
373 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
374 | qspi->cmd |= QSPI_WLEN(t->bits_per_word); | ||
375 | |||
376 | ret = qspi_transfer_msg(qspi, t); | ||
377 | if (ret) { | ||
378 | dev_dbg(qspi->dev, "transfer message failed\n"); | ||
379 | mutex_unlock(&qspi->list_lock); | ||
380 | return -EINVAL; | ||
381 | } | ||
382 | |||
383 | m->actual_length += t->len; | ||
384 | } | ||
385 | |||
386 | mutex_unlock(&qspi->list_lock); | ||
387 | |||
388 | m->status = status; | ||
389 | spi_finalize_current_message(master); | ||
390 | |||
391 | ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG); | ||
392 | |||
393 | return status; | ||
394 | } | ||
395 | |||
396 | static irqreturn_t ti_qspi_isr(int irq, void *dev_id) | ||
397 | { | ||
398 | struct ti_qspi *qspi = dev_id; | ||
399 | u16 int_stat; | ||
400 | |||
401 | irqreturn_t ret = IRQ_HANDLED; | ||
402 | |||
403 | spin_lock(&qspi->lock); | ||
404 | |||
405 | int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR); | ||
406 | qspi->stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
407 | |||
408 | if (!int_stat) { | ||
409 | dev_dbg(qspi->dev, "No IRQ triggered\n"); | ||
410 | ret = IRQ_NONE; | ||
411 | goto out; | ||
412 | } | ||
413 | |||
414 | ret = IRQ_WAKE_THREAD; | ||
415 | |||
416 | ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG); | ||
417 | ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, | ||
418 | QSPI_INTR_STATUS_ENABLED_CLEAR); | ||
419 | |||
420 | out: | ||
421 | spin_unlock(&qspi->lock); | ||
422 | |||
423 | return ret; | ||
424 | } | ||
425 | |||
426 | static irqreturn_t ti_qspi_threaded_isr(int this_irq, void *dev_id) | ||
427 | { | ||
428 | struct ti_qspi *qspi = dev_id; | ||
429 | unsigned long flags; | ||
430 | |||
431 | spin_lock_irqsave(&qspi->lock, flags); | ||
432 | |||
433 | if (qspi->stat & WC) | ||
434 | complete(&qspi->transfer_complete); | ||
435 | |||
436 | spin_unlock_irqrestore(&qspi->lock, flags); | ||
437 | |||
438 | ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG); | ||
439 | |||
440 | return IRQ_HANDLED; | ||
441 | } | ||
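The interrupt path is split in two above: ti_qspi_isr() runs in hard-IRQ context, latches the status, masks the word-complete interrupt and returns IRQ_WAKE_THREAD; ti_qspi_threaded_isr() then completes transfer_complete and re-enables the interrupt. The contract in miniature, with hedged demo handlers that are not part of the patch; they would be wired up with devm_request_threaded_irq(dev, irq, demo_hardirq, demo_thread, 0, name, &done), just as probe does below for the real handlers.

static irqreturn_t demo_hardirq(int irq, void *data)
{
        /* ack/mask the source here; no sleeping allowed in this context */
        return IRQ_WAKE_THREAD;                 /* run demo_thread() next */
}

static irqreturn_t demo_thread(int irq, void *data)
{
        complete(data);                         /* wakes wait_for_completion_timeout() */
        return IRQ_HANDLED;
}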
442 | |||
443 | static int ti_qspi_runtime_resume(struct device *dev) | ||
444 | { | ||
445 | struct ti_qspi *qspi; | ||
446 | struct spi_master *master; | ||
447 | |||
448 | master = dev_get_drvdata(dev); | ||
449 | qspi = spi_master_get_devdata(master); | ||
450 | ti_qspi_restore_ctx(qspi); | ||
451 | |||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static const struct of_device_id ti_qspi_match[] = { | ||
456 | {.compatible = "ti,dra7xxx-qspi" }, | ||
457 | {.compatible = "ti,am4372-qspi" }, | ||
458 | {}, | ||
459 | }; | ||
460 | MODULE_DEVICE_TABLE(of, ti_qspi_match); | ||
461 | |||
462 | static int ti_qspi_probe(struct platform_device *pdev) | ||
463 | { | ||
464 | struct ti_qspi *qspi; | ||
465 | struct spi_master *master; | ||
466 | struct resource *r; | ||
467 | struct device_node *np = pdev->dev.of_node; | ||
468 | u32 max_freq; | ||
469 | int ret = 0, num_cs, irq; | ||
470 | |||
471 | master = spi_alloc_master(&pdev->dev, sizeof(*qspi)); | ||
472 | if (!master) | ||
473 | return -ENOMEM; | ||
474 | |||
475 | master->mode_bits = SPI_CPOL | SPI_CPHA; | ||
476 | |||
477 | master->bus_num = -1; | ||
478 | master->flags = SPI_MASTER_HALF_DUPLEX; | ||
479 | master->setup = ti_qspi_setup; | ||
480 | master->auto_runtime_pm = true; | ||
481 | master->transfer_one_message = ti_qspi_start_transfer_one; | ||
482 | master->dev.of_node = pdev->dev.of_node; | ||
483 | master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1); | ||
484 | |||
485 | if (!of_property_read_u32(np, "num-cs", &num_cs)) | ||
486 | master->num_chipselect = num_cs; | ||
487 | |||
488 | platform_set_drvdata(pdev, master); | ||
489 | |||
490 | qspi = spi_master_get_devdata(master); | ||
491 | qspi->master = master; | ||
492 | qspi->dev = &pdev->dev; | ||
493 | |||
494 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
495 | |||
496 | irq = platform_get_irq(pdev, 0); | ||
497 | if (irq < 0) { | ||
498 | dev_err(&pdev->dev, "no irq resource?\n"); | ||
499 | return irq; | ||
500 | } | ||
501 | |||
502 | spin_lock_init(&qspi->lock); | ||
503 | mutex_init(&qspi->list_lock); | ||
504 | |||
505 | qspi->base = devm_ioremap_resource(&pdev->dev, r); | ||
506 | if (IS_ERR(qspi->base)) { | ||
507 | ret = PTR_ERR(qspi->base); | ||
508 | goto free_master; | ||
509 | } | ||
510 | |||
511 | ret = devm_request_threaded_irq(&pdev->dev, irq, ti_qspi_isr, | ||
512 | ti_qspi_threaded_isr, 0, | ||
513 | dev_name(&pdev->dev), qspi); | ||
514 | if (ret < 0) { | ||
515 | dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", | ||
516 | irq); | ||
517 | goto free_master; | ||
518 | } | ||
519 | |||
520 | qspi->fclk = devm_clk_get(&pdev->dev, "fck"); | ||
521 | if (IS_ERR(qspi->fclk)) { | ||
522 | ret = PTR_ERR(qspi->fclk); | ||
523 | dev_err(&pdev->dev, "could not get clk: %d\n", ret); | ||
524 | } | ||
525 | |||
526 | init_completion(&qspi->transfer_complete); | ||
527 | |||
528 | pm_runtime_use_autosuspend(&pdev->dev); | ||
529 | pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT); | ||
530 | pm_runtime_enable(&pdev->dev); | ||
531 | |||
532 | if (!of_property_read_u32(np, "spi-max-frequency", &max_freq)) | ||
533 | qspi->spi_max_frequency = max_freq; | ||
534 | |||
535 | ret = spi_register_master(master); | ||
536 | if (ret) | ||
537 | goto free_master; | ||
538 | |||
539 | return 0; | ||
540 | |||
541 | free_master: | ||
542 | spi_master_put(master); | ||
543 | return ret; | ||
544 | } | ||
545 | |||
546 | static int ti_qspi_remove(struct platform_device *pdev) | ||
547 | { | ||
548 | struct ti_qspi *qspi = platform_get_drvdata(pdev); | ||
549 | |||
550 | spi_unregister_master(qspi->master); | ||
551 | |||
552 | return 0; | ||
553 | } | ||
554 | |||
555 | static const struct dev_pm_ops ti_qspi_pm_ops = { | ||
556 | .runtime_resume = ti_qspi_runtime_resume, | ||
557 | }; | ||
558 | |||
559 | static struct platform_driver ti_qspi_driver = { | ||
560 | .probe = ti_qspi_probe, | ||
561 | .remove = ti_qspi_remove, | ||
562 | .driver = { | ||
563 | .name = "ti,dra7xxx-qspi", | ||
564 | .owner = THIS_MODULE, | ||
565 | .pm = &ti_qspi_pm_ops, | ||
566 | .of_match_table = ti_qspi_match, | ||
567 | } | ||
568 | }; | ||
569 | |||
570 | module_platform_driver(ti_qspi_driver); | ||
571 | |||
572 | MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>"); | ||
573 | MODULE_LICENSE("GPL v2"); | ||
574 | MODULE_DESCRIPTION("TI QSPI controller driver"); | ||
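One readability note on the probe above: the open-coded bits_per_word_mask of BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1) is the same value other drivers in this series spell with the helper macro, which may read more clearly:

        master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
                                     SPI_BPW_MASK(8);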
diff --git a/drivers/spi/spi-ti-ssp.c b/drivers/spi/spi-ti-ssp.c index 10606fcc6efc..7d20e121e4c1 100644 --- a/drivers/spi/spi-ti-ssp.c +++ b/drivers/spi/spi-ti-ssp.c | |||
@@ -283,7 +283,7 @@ static int ti_ssp_spi_probe(struct platform_device *pdev) | |||
283 | struct device *dev = &pdev->dev; | 283 | struct device *dev = &pdev->dev; |
284 | int error = 0; | 284 | int error = 0; |
285 | 285 | ||
286 | pdata = dev->platform_data; | 286 | pdata = dev_get_platdata(dev); |
287 | if (!pdata) { | 287 | if (!pdata) { |
288 | dev_err(dev, "platform data not found\n"); | 288 | dev_err(dev, "platform data not found\n"); |
289 | return -EINVAL; | 289 | return -EINVAL; |
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c index 6b0874d782ed..2d4010d80824 100644 --- a/drivers/spi/spi-tle62x0.c +++ b/drivers/spi/spi-tle62x0.c | |||
@@ -52,8 +52,7 @@ static inline int tle62x0_write(struct tle62x0_state *st) | |||
52 | buff[1] = gpio_state; | 52 | buff[1] = gpio_state; |
53 | } | 53 | } |
54 | 54 | ||
55 | dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n", | 55 | dev_dbg(&st->us->dev, "buff %3ph\n", buff); |
56 | buff[0], buff[1], buff[2]); | ||
57 | 56 | ||
58 | return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2); | 57 | return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2); |
59 | } | 58 | } |
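The tle62x0 hunk switches the debug print to the kernel's %*ph extension, which dumps a small buffer as space-separated hex bytes, so "%3ph" prints the first three bytes of buff with one specifier (spaces instead of the commas of the old format). Roughly equivalent forms:

        dev_dbg(dev, "buff %3ph\n", buff);              /* e.g. "buff 12 34 56" */
        dev_dbg(dev, "buff %02x %02x %02x\n",
                buff[0], buff[1], buff[2]);             /* same bytes, spelled out */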
@@ -247,7 +246,7 @@ static int tle62x0_probe(struct spi_device *spi) | |||
247 | int ptr; | 246 | int ptr; |
248 | int ret; | 247 | int ret; |
249 | 248 | ||
250 | pdata = spi->dev.platform_data; | 249 | pdata = dev_get_platdata(&spi->dev); |
251 | if (pdata == NULL) { | 250 | if (pdata == NULL) { |
252 | dev_err(&spi->dev, "no device data specified\n"); | 251 | dev_err(&spi->dev, "no device data specified\n"); |
253 | return -EINVAL; | 252 | return -EINVAL; |
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index 09a942852593..0bf1b2c457a1 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c | |||
@@ -80,10 +80,9 @@ struct xilinx_spi { | |||
80 | /* bitbang has to be first */ | 80 | /* bitbang has to be first */ |
81 | struct spi_bitbang bitbang; | 81 | struct spi_bitbang bitbang; |
82 | struct completion done; | 82 | struct completion done; |
83 | struct resource mem; /* phys mem */ | ||
84 | void __iomem *regs; /* virt. address of the control registers */ | 83 | void __iomem *regs; /* virt. address of the control registers */ |
85 | 84 | ||
86 | u32 irq; | 85 | int irq; |
87 | 86 | ||
88 | u8 *rx_ptr; /* pointer in the Tx buffer */ | 87 | u8 *rx_ptr; /* pointer in the Tx buffer */ |
89 | const u8 *tx_ptr; /* pointer in the Rx buffer */ | 88 | const u8 *tx_ptr; /* pointer in the Rx buffer */ |
@@ -233,21 +232,6 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi, | |||
233 | return 0; | 232 | return 0; |
234 | } | 233 | } |
235 | 234 | ||
236 | static int xilinx_spi_setup(struct spi_device *spi) | ||
237 | { | ||
238 | /* always return 0, we can not check the number of bits. | ||
239 | * There are cases when SPI setup is called before any driver is | ||
240 | * there, in that case the SPI core defaults to 8 bits, which we | ||
241 | * do not support in some cases. But if we return an error, the | ||
242 | * SPI device would not be registered and no driver can get hold of it | ||
243 | * When the driver is there, it will call SPI setup again with the | ||
244 | * correct number of bits per transfer. | ||
245 | * If a driver setups with the wrong bit number, it will fail when | ||
246 | * it tries to do a transfer | ||
247 | */ | ||
248 | return 0; | ||
249 | } | ||
250 | |||
251 | static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) | 235 | static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) |
252 | { | 236 | { |
253 | u8 sr; | 237 | u8 sr; |
@@ -355,17 +339,34 @@ static const struct of_device_id xilinx_spi_of_match[] = { | |||
355 | }; | 339 | }; |
356 | MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); | 340 | MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); |
357 | 341 | ||
358 | struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, | 342 | static int xilinx_spi_probe(struct platform_device *pdev) |
359 | u32 irq, s16 bus_num, int num_cs, int bits_per_word) | ||
360 | { | 343 | { |
361 | struct spi_master *master; | ||
362 | struct xilinx_spi *xspi; | 344 | struct xilinx_spi *xspi; |
363 | int ret; | 345 | struct xspi_platform_data *pdata; |
346 | struct resource *res; | ||
347 | int ret, num_cs = 0, bits_per_word = 8; | ||
348 | struct spi_master *master; | ||
364 | u32 tmp; | 349 | u32 tmp; |
350 | u8 i; | ||
351 | |||
352 | pdata = dev_get_platdata(&pdev->dev); | ||
353 | if (pdata) { | ||
354 | num_cs = pdata->num_chipselect; | ||
355 | bits_per_word = pdata->bits_per_word; | ||
356 | } else { | ||
357 | of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits", | ||
358 | &num_cs); | ||
359 | } | ||
360 | |||
361 | if (!num_cs) { | ||
362 | dev_err(&pdev->dev, | ||
363 | "Missing slave select configuration data\n"); | ||
364 | return -EINVAL; | ||
365 | } | ||
365 | 366 | ||
366 | master = spi_alloc_master(dev, sizeof(struct xilinx_spi)); | 367 | master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi)); |
367 | if (!master) | 368 | if (!master) |
368 | return NULL; | 369 | return -ENODEV; |
369 | 370 | ||
370 | /* the spi->mode bits understood by this driver: */ | 371 | /* the spi->mode bits understood by this driver: */ |
371 | master->mode_bits = SPI_CPOL | SPI_CPHA; | 372 | master->mode_bits = SPI_CPOL | SPI_CPHA; |
@@ -375,25 +376,18 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, | |||
375 | xspi->bitbang.chipselect = xilinx_spi_chipselect; | 376 | xspi->bitbang.chipselect = xilinx_spi_chipselect; |
376 | xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; | 377 | xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; |
377 | xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs; | 378 | xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs; |
378 | xspi->bitbang.master->setup = xilinx_spi_setup; | ||
379 | init_completion(&xspi->done); | 379 | init_completion(&xspi->done); |
380 | 380 | ||
381 | if (!request_mem_region(mem->start, resource_size(mem), | 381 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
382 | XILINX_SPI_NAME)) | 382 | xspi->regs = devm_ioremap_resource(&pdev->dev, res); |
383 | if (IS_ERR(xspi->regs)) { | ||
384 | ret = PTR_ERR(xspi->regs); | ||
383 | goto put_master; | 385 | goto put_master; |
384 | |||
385 | xspi->regs = ioremap(mem->start, resource_size(mem)); | ||
386 | if (xspi->regs == NULL) { | ||
387 | dev_warn(dev, "ioremap failure\n"); | ||
388 | goto map_failed; | ||
389 | } | 386 | } |
390 | 387 | ||
391 | master->bus_num = bus_num; | 388 | master->bus_num = pdev->dev.id; |
392 | master->num_chipselect = num_cs; | 389 | master->num_chipselect = num_cs; |
393 | master->dev.of_node = dev->of_node; | 390 | master->dev.of_node = pdev->dev.of_node; |
394 | |||
395 | xspi->mem = *mem; | ||
396 | xspi->irq = irq; | ||
397 | 391 | ||
398 | /* | 392 | /* |
399 | * Detect endianness on the IP via loop bit in CR. Detection | 393 | * Detect endianness on the IP via loop bit in CR. Detection |
@@ -423,113 +417,63 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, | |||
423 | } else if (xspi->bits_per_word == 32) { | 417 | } else if (xspi->bits_per_word == 32) { |
424 | xspi->tx_fn = xspi_tx32; | 418 | xspi->tx_fn = xspi_tx32; |
425 | xspi->rx_fn = xspi_rx32; | 419 | xspi->rx_fn = xspi_rx32; |
426 | } else | 420 | } else { |
427 | goto unmap_io; | 421 | ret = -EINVAL; |
428 | 422 | goto put_master; | |
423 | } | ||
429 | 424 | ||
430 | /* SPI controller initializations */ | 425 | /* SPI controller initializations */ |
431 | xspi_init_hw(xspi); | 426 | xspi_init_hw(xspi); |
432 | 427 | ||
428 | xspi->irq = platform_get_irq(pdev, 0); | ||
429 | if (xspi->irq < 0) { | ||
430 | ret = xspi->irq; | ||
431 | goto put_master; | ||
432 | } | ||
433 | |||
433 | /* Register for SPI Interrupt */ | 434 | /* Register for SPI Interrupt */ |
434 | ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi); | 435 | ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0, |
436 | dev_name(&pdev->dev), xspi); | ||
435 | if (ret) | 437 | if (ret) |
436 | goto unmap_io; | 438 | goto put_master; |
437 | 439 | ||
438 | ret = spi_bitbang_start(&xspi->bitbang); | 440 | ret = spi_bitbang_start(&xspi->bitbang); |
439 | if (ret) { | 441 | if (ret) { |
440 | dev_err(dev, "spi_bitbang_start FAILED\n"); | 442 | dev_err(&pdev->dev, "spi_bitbang_start FAILED\n"); |
441 | goto free_irq; | 443 | goto put_master; |
442 | } | ||
443 | |||
444 | dev_info(dev, "at 0x%08llX mapped to 0x%p, irq=%d\n", | ||
445 | (unsigned long long)mem->start, xspi->regs, xspi->irq); | ||
446 | return master; | ||
447 | |||
448 | free_irq: | ||
449 | free_irq(xspi->irq, xspi); | ||
450 | unmap_io: | ||
451 | iounmap(xspi->regs); | ||
452 | map_failed: | ||
453 | release_mem_region(mem->start, resource_size(mem)); | ||
454 | put_master: | ||
455 | spi_master_put(master); | ||
456 | return NULL; | ||
457 | } | ||
458 | EXPORT_SYMBOL(xilinx_spi_init); | ||
459 | |||
460 | void xilinx_spi_deinit(struct spi_master *master) | ||
461 | { | ||
462 | struct xilinx_spi *xspi; | ||
463 | |||
464 | xspi = spi_master_get_devdata(master); | ||
465 | |||
466 | spi_bitbang_stop(&xspi->bitbang); | ||
467 | free_irq(xspi->irq, xspi); | ||
468 | iounmap(xspi->regs); | ||
469 | |||
470 | release_mem_region(xspi->mem.start, resource_size(&xspi->mem)); | ||
471 | spi_master_put(xspi->bitbang.master); | ||
472 | } | ||
473 | EXPORT_SYMBOL(xilinx_spi_deinit); | ||
474 | |||
475 | static int xilinx_spi_probe(struct platform_device *dev) | ||
476 | { | ||
477 | struct xspi_platform_data *pdata; | ||
478 | struct resource *r; | ||
479 | int irq, num_cs = 0, bits_per_word = 8; | ||
480 | struct spi_master *master; | ||
481 | u8 i; | ||
482 | |||
483 | pdata = dev->dev.platform_data; | ||
484 | if (pdata) { | ||
485 | num_cs = pdata->num_chipselect; | ||
486 | bits_per_word = pdata->bits_per_word; | ||
487 | } | ||
488 | |||
489 | #ifdef CONFIG_OF | ||
490 | if (dev->dev.of_node) { | ||
491 | const __be32 *prop; | ||
492 | int len; | ||
493 | |||
494 | /* number of slave select bits is required */ | ||
495 | prop = of_get_property(dev->dev.of_node, "xlnx,num-ss-bits", | ||
496 | &len); | ||
497 | if (prop && len >= sizeof(*prop)) | ||
498 | num_cs = __be32_to_cpup(prop); | ||
499 | } | ||
500 | #endif | ||
501 | |||
502 | if (!num_cs) { | ||
503 | dev_err(&dev->dev, "Missing slave select configuration data\n"); | ||
504 | return -EINVAL; | ||
505 | } | 444 | } |
506 | 445 | ||
507 | 446 | dev_info(&pdev->dev, "at 0x%08llX mapped to 0x%p, irq=%d\n", | |
508 | r = platform_get_resource(dev, IORESOURCE_MEM, 0); | 447 | (unsigned long long)res->start, xspi->regs, xspi->irq); |
509 | if (!r) | ||
510 | return -ENODEV; | ||
511 | |||
512 | irq = platform_get_irq(dev, 0); | ||
513 | if (irq < 0) | ||
514 | return -ENXIO; | ||
515 | |||
516 | master = xilinx_spi_init(&dev->dev, r, irq, dev->id, num_cs, | ||
517 | bits_per_word); | ||
518 | if (!master) | ||
519 | return -ENODEV; | ||
520 | 448 | ||
521 | if (pdata) { | 449 | if (pdata) { |
522 | for (i = 0; i < pdata->num_devices; i++) | 450 | for (i = 0; i < pdata->num_devices; i++) |
523 | spi_new_device(master, pdata->devices + i); | 451 | spi_new_device(master, pdata->devices + i); |
524 | } | 452 | } |
525 | 453 | ||
526 | platform_set_drvdata(dev, master); | 454 | platform_set_drvdata(pdev, master); |
527 | return 0; | 455 | return 0; |
456 | |||
457 | put_master: | ||
458 | spi_master_put(master); | ||
459 | |||
460 | return ret; | ||
528 | } | 461 | } |
529 | 462 | ||
530 | static int xilinx_spi_remove(struct platform_device *dev) | 463 | static int xilinx_spi_remove(struct platform_device *pdev) |
531 | { | 464 | { |
532 | xilinx_spi_deinit(platform_get_drvdata(dev)); | 465 | struct spi_master *master = platform_get_drvdata(pdev); |
466 | struct xilinx_spi *xspi = spi_master_get_devdata(master); | ||
467 | void __iomem *regs_base = xspi->regs; | ||
468 | |||
469 | spi_bitbang_stop(&xspi->bitbang); | ||
470 | |||
471 | /* Disable all the interrupts just in case */ | ||
472 | xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET); | ||
473 | /* Disable the global IPIF interrupt */ | ||
474 | xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET); | ||
475 | |||
476 | spi_master_put(xspi->bitbang.master); | ||
533 | 477 | ||
534 | return 0; | 478 | return 0; |
535 | } | 479 | } |
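The spi-xilinx conversion above is the standard devm-managed probe pattern: MMIO mappings and IRQs obtained through devm_* helpers are released automatically when probe fails or the device is unbound, which is why the request_mem_region/ioremap/free_irq unwinding and the separate deinit path can be deleted. A rough sketch of the pattern for a hypothetical "foo" driver (names and handler are made up, not taken from this patch):

	#include <linux/err.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static irqreturn_t foo_irq(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;
		int irq, ret;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		regs = devm_ioremap_resource(&pdev->dev, res);	/* also handles res == NULL */
		if (IS_ERR(regs))
			return PTR_ERR(regs);

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				       dev_name(&pdev->dev), NULL);
		if (ret)
			return ret;

		/* no iounmap()/free_irq()/release_mem_region() needed on any exit path */
		return 0;
	}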
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 978dda2c5239..9e039c60c068 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -553,6 +553,10 @@ static void spi_pump_messages(struct kthread_work *work) | |||
553 | master->unprepare_transfer_hardware(master)) | 553 | master->unprepare_transfer_hardware(master)) |
554 | dev_err(&master->dev, | 554 | dev_err(&master->dev, |
555 | "failed to unprepare transfer hardware\n"); | 555 | "failed to unprepare transfer hardware\n"); |
556 | if (master->auto_runtime_pm) { | ||
557 | pm_runtime_mark_last_busy(master->dev.parent); | ||
558 | pm_runtime_put_autosuspend(master->dev.parent); | ||
559 | } | ||
556 | return; | 560 | return; |
557 | } | 561 | } |
558 | 562 | ||
@@ -572,11 +576,23 @@ static void spi_pump_messages(struct kthread_work *work) | |||
572 | master->busy = true; | 576 | master->busy = true; |
573 | spin_unlock_irqrestore(&master->queue_lock, flags); | 577 | spin_unlock_irqrestore(&master->queue_lock, flags); |
574 | 578 | ||
579 | if (!was_busy && master->auto_runtime_pm) { | ||
580 | ret = pm_runtime_get_sync(master->dev.parent); | ||
581 | if (ret < 0) { | ||
582 | dev_err(&master->dev, "Failed to power device: %d\n", | ||
583 | ret); | ||
584 | return; | ||
585 | } | ||
586 | } | ||
587 | |||
575 | if (!was_busy && master->prepare_transfer_hardware) { | 588 | if (!was_busy && master->prepare_transfer_hardware) { |
576 | ret = master->prepare_transfer_hardware(master); | 589 | ret = master->prepare_transfer_hardware(master); |
577 | if (ret) { | 590 | if (ret) { |
578 | dev_err(&master->dev, | 591 | dev_err(&master->dev, |
579 | "failed to prepare transfer hardware\n"); | 592 | "failed to prepare transfer hardware\n"); |
593 | |||
594 | if (master->auto_runtime_pm) | ||
595 | pm_runtime_put(master->dev.parent); | ||
580 | return; | 596 | return; |
581 | } | 597 | } |
582 | } | 598 | } |
@@ -774,7 +790,7 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) | |||
774 | msg->status = -EINPROGRESS; | 790 | msg->status = -EINPROGRESS; |
775 | 791 | ||
776 | list_add_tail(&msg->queue, &master->queue); | 792 | list_add_tail(&msg->queue, &master->queue); |
777 | if (master->running && !master->busy) | 793 | if (!master->busy) |
778 | queue_kthread_work(&master->kworker, &master->pump_messages); | 794 | queue_kthread_work(&master->kworker, &master->pump_messages); |
779 | 795 | ||
780 | spin_unlock_irqrestore(&master->queue_lock, flags); | 796 | spin_unlock_irqrestore(&master->queue_lock, flags); |
@@ -869,6 +885,47 @@ static void of_register_spi_devices(struct spi_master *master) | |||
869 | if (of_find_property(nc, "spi-3wire", NULL)) | 885 | if (of_find_property(nc, "spi-3wire", NULL)) |
870 | spi->mode |= SPI_3WIRE; | 886 | spi->mode |= SPI_3WIRE; |
871 | 887 | ||
888 | /* Device DUAL/QUAD mode */ | ||
889 | prop = of_get_property(nc, "spi-tx-bus-width", &len); | ||
890 | if (prop && len == sizeof(*prop)) { | ||
891 | switch (be32_to_cpup(prop)) { | ||
892 | case SPI_NBITS_SINGLE: | ||
893 | break; | ||
894 | case SPI_NBITS_DUAL: | ||
895 | spi->mode |= SPI_TX_DUAL; | ||
896 | break; | ||
897 | case SPI_NBITS_QUAD: | ||
898 | spi->mode |= SPI_TX_QUAD; | ||
899 | break; | ||
900 | default: | ||
901 | dev_err(&master->dev, | ||
902 | "spi-tx-bus-width %d not supported\n", | ||
903 | be32_to_cpup(prop)); | ||
904 | spi_dev_put(spi); | ||
905 | continue; | ||
906 | } | ||
907 | } | ||
908 | |||
909 | prop = of_get_property(nc, "spi-rx-bus-width", &len); | ||
910 | if (prop && len == sizeof(*prop)) { | ||
911 | switch (be32_to_cpup(prop)) { | ||
912 | case SPI_NBITS_SINGLE: | ||
913 | break; | ||
914 | case SPI_NBITS_DUAL: | ||
915 | spi->mode |= SPI_RX_DUAL; | ||
916 | break; | ||
917 | case SPI_NBITS_QUAD: | ||
918 | spi->mode |= SPI_RX_QUAD; | ||
919 | break; | ||
920 | default: | ||
921 | dev_err(&master->dev, | ||
922 | "spi-rx-bus-width %d not supported\n", | ||
923 | be32_to_cpup(prop)); | ||
924 | spi_dev_put(spi); | ||
925 | continue; | ||
926 | } | ||
927 | } | ||
928 | |||
872 | /* Device speed */ | 929 | /* Device speed */ |
873 | prop = of_get_property(nc, "spi-max-frequency", &len); | 930 | prop = of_get_property(nc, "spi-max-frequency", &len); |
874 | if (!prop || len < sizeof(*prop)) { | 931 | if (!prop || len < sizeof(*prop)) { |
@@ -1169,7 +1226,7 @@ int spi_register_master(struct spi_master *master) | |||
1169 | else { | 1226 | else { |
1170 | status = spi_master_initialize_queue(master); | 1227 | status = spi_master_initialize_queue(master); |
1171 | if (status) { | 1228 | if (status) { |
1172 | device_unregister(&master->dev); | 1229 | device_del(&master->dev); |
1173 | goto done; | 1230 | goto done; |
1174 | } | 1231 | } |
1175 | } | 1232 | } |
@@ -1316,6 +1373,19 @@ int spi_setup(struct spi_device *spi) | |||
1316 | unsigned bad_bits; | 1373 | unsigned bad_bits; |
1317 | int status = 0; | 1374 | int status = 0; |
1318 | 1375 | ||
1376 | /* check mode to prevent DUAL and QUAD from being set at the same time | ||
1377 | */ | ||
1378 | if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || | ||
1379 | ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { | ||
1380 | dev_err(&spi->dev, | ||
1381 | "setup: can not select dual and quad at the same time\n"); | ||
1382 | return -EINVAL; | ||
1383 | } | ||
1384 | /* in SPI_3WIRE mode, DUAL and QUAD are not allowed | ||
1385 | */ | ||
1386 | if ((spi->mode & SPI_3WIRE) && (spi->mode & | ||
1387 | (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) | ||
1388 | return -EINVAL; | ||
1319 | /* help drivers fail *cleanly* when they need options | 1389 | /* help drivers fail *cleanly* when they need options |
1320 | * that aren't supported with their current master | 1390 | * that aren't supported with their current master |
1321 | */ | 1391 | */ |
@@ -1351,6 +1421,11 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) | |||
1351 | struct spi_master *master = spi->master; | 1421 | struct spi_master *master = spi->master; |
1352 | struct spi_transfer *xfer; | 1422 | struct spi_transfer *xfer; |
1353 | 1423 | ||
1424 | if (list_empty(&message->transfers)) | ||
1425 | return -EINVAL; | ||
1426 | if (!message->complete) | ||
1427 | return -EINVAL; | ||
1428 | |||
1354 | /* Half-duplex links include original MicroWire, and ones with | 1429 | /* Half-duplex links include original MicroWire, and ones with |
1355 | * only one data pin like SPI_3WIRE (switches direction) or where | 1430 | * only one data pin like SPI_3WIRE (switches direction) or where |
1356 | * either MOSI or MISO is missing. They can also be caused by | 1431 | * either MOSI or MISO is missing. They can also be caused by |
@@ -1373,12 +1448,20 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) | |||
1373 | /** | 1448 | /** |
1374 | * Set transfer bits_per_word and max speed as spi device default if | 1449 | * Set transfer bits_per_word and max speed as spi device default if |
1375 | * it is not set for this transfer. | 1450 | * it is not set for this transfer. |
1451 | * Set transfer tx_nbits and rx_nbits as single transfer default | ||
1452 | * (SPI_NBITS_SINGLE) if they are not set for this transfer. | ||
1376 | */ | 1453 | */ |
1377 | list_for_each_entry(xfer, &message->transfers, transfer_list) { | 1454 | list_for_each_entry(xfer, &message->transfers, transfer_list) { |
1455 | message->frame_length += xfer->len; | ||
1378 | if (!xfer->bits_per_word) | 1456 | if (!xfer->bits_per_word) |
1379 | xfer->bits_per_word = spi->bits_per_word; | 1457 | xfer->bits_per_word = spi->bits_per_word; |
1380 | if (!xfer->speed_hz) | 1458 | if (!xfer->speed_hz) { |
1381 | xfer->speed_hz = spi->max_speed_hz; | 1459 | xfer->speed_hz = spi->max_speed_hz; |
1460 | if (master->max_speed_hz && | ||
1461 | xfer->speed_hz > master->max_speed_hz) | ||
1462 | xfer->speed_hz = master->max_speed_hz; | ||
1463 | } | ||
1464 | |||
1382 | if (master->bits_per_word_mask) { | 1465 | if (master->bits_per_word_mask) { |
1383 | /* Only 32 bits fit in the mask */ | 1466 | /* Only 32 bits fit in the mask */ |
1384 | if (xfer->bits_per_word > 32) | 1467 | if (xfer->bits_per_word > 32) |
@@ -1387,6 +1470,54 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) | |||
1387 | BIT(xfer->bits_per_word - 1))) | 1470 | BIT(xfer->bits_per_word - 1))) |
1388 | return -EINVAL; | 1471 | return -EINVAL; |
1389 | } | 1472 | } |
1473 | |||
1474 | if (xfer->speed_hz && master->min_speed_hz && | ||
1475 | xfer->speed_hz < master->min_speed_hz) | ||
1476 | return -EINVAL; | ||
1477 | if (xfer->speed_hz && master->max_speed_hz && | ||
1478 | xfer->speed_hz > master->max_speed_hz) | ||
1479 | return -EINVAL; | ||
1480 | |||
1481 | if (xfer->tx_buf && !xfer->tx_nbits) | ||
1482 | xfer->tx_nbits = SPI_NBITS_SINGLE; | ||
1483 | if (xfer->rx_buf && !xfer->rx_nbits) | ||
1484 | xfer->rx_nbits = SPI_NBITS_SINGLE; | ||
1485 | /* check transfer tx/rx_nbits: | ||
1486 | * 1. the value must be one of single, dual or quad | ||
1487 | * 2. tx/rx_nbits must be allowed by the mode set in spi_device | ||
1488 | * 3. with SPI_3WIRE, tx/rx_nbits must be single | ||
1489 | */ | ||
1490 | if (xfer->tx_buf) { | ||
1491 | if (xfer->tx_nbits != SPI_NBITS_SINGLE && | ||
1492 | xfer->tx_nbits != SPI_NBITS_DUAL && | ||
1493 | xfer->tx_nbits != SPI_NBITS_QUAD) | ||
1494 | return -EINVAL; | ||
1495 | if ((xfer->tx_nbits == SPI_NBITS_DUAL) && | ||
1496 | !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) | ||
1497 | return -EINVAL; | ||
1498 | if ((xfer->tx_nbits == SPI_NBITS_QUAD) && | ||
1499 | !(spi->mode & SPI_TX_QUAD)) | ||
1500 | return -EINVAL; | ||
1501 | if ((spi->mode & SPI_3WIRE) && | ||
1502 | (xfer->tx_nbits != SPI_NBITS_SINGLE)) | ||
1503 | return -EINVAL; | ||
1504 | } | ||
1505 | /* check transfer rx_nbits */ | ||
1506 | if (xfer->rx_buf) { | ||
1507 | if (xfer->rx_nbits != SPI_NBITS_SINGLE && | ||
1508 | xfer->rx_nbits != SPI_NBITS_DUAL && | ||
1509 | xfer->rx_nbits != SPI_NBITS_QUAD) | ||
1510 | return -EINVAL; | ||
1511 | if ((xfer->rx_nbits == SPI_NBITS_DUAL) && | ||
1512 | !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) | ||
1513 | return -EINVAL; | ||
1514 | if ((xfer->rx_nbits == SPI_NBITS_QUAD) && | ||
1515 | !(spi->mode & SPI_RX_QUAD)) | ||
1516 | return -EINVAL; | ||
1517 | if ((spi->mode & SPI_3WIRE) && | ||
1518 | (xfer->rx_nbits != SPI_NBITS_SINGLE)) | ||
1519 | return -EINVAL; | ||
1520 | } | ||
1390 | } | 1521 | } |
1391 | 1522 | ||
1392 | message->spi = spi; | 1523 | message->spi = spi; |
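The tx_nbits/rx_nbits checks added to __spi_async() above are what a dual/quad-capable client exercises. A hedged sketch of a quad-read message (the spi device handle, buffers and lengths are assumed to exist; the controller must advertise SPI_RX_QUAD and the slave's DT node must carry a matching spi-rx-bus-width = <4>):

	struct spi_transfer xfers[2] = {
		{
			/* command phase goes out on a single line */
			.tx_buf   = cmd_buf,
			.len      = cmd_len,
			.tx_nbits = SPI_NBITS_SINGLE,
		},
		{
			/* data phase comes back on four lines */
			.rx_buf   = data_buf,
			.len      = data_len,
			.rx_nbits = SPI_NBITS_QUAD,
		},
	};
	struct spi_message msg;
	int ret;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);

	/* rejected with -EINVAL unless spi->mode contains SPI_RX_QUAD */
	ret = spi_sync(spi, &msg);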
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index e25eba5713c1..b3b5125faa72 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c | |||
@@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it) | |||
482 | ret = comedi_device_postconfig(dev); | 482 | ret = comedi_device_postconfig(dev); |
483 | if (ret < 0) { | 483 | if (ret < 0) { |
484 | comedi_device_detach(dev); | 484 | comedi_device_detach(dev); |
485 | module_put(dev->driver->module); | 485 | module_put(driv->module); |
486 | } | 486 | } |
487 | /* On success, the driver module count has been incremented. */ | 487 | /* On success, the driver module count has been incremented. */ |
488 | return ret; | 488 | return ret; |
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 609dbc2f7151..83b4ef4dfcf8 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c | |||
@@ -1119,11 +1119,11 @@ static int usbtmc_probe(struct usb_interface *intf, | |||
1119 | /* Determine if it is a Rigol or not */ | 1119 | /* Determine if it is a Rigol or not */ |
1120 | data->rigol_quirk = 0; | 1120 | data->rigol_quirk = 0; |
1121 | dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", | 1121 | dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", |
1122 | data->usb_dev->descriptor.idVendor, | 1122 | le16_to_cpu(data->usb_dev->descriptor.idVendor), |
1123 | data->usb_dev->descriptor.idProduct); | 1123 | le16_to_cpu(data->usb_dev->descriptor.idProduct)); |
1124 | for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { | 1124 | for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { |
1125 | if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) && | 1125 | if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) && |
1126 | (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) { | 1126 | (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) { |
1127 | dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); | 1127 | dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); |
1128 | data->rigol_quirk = 1; | 1128 | data->rigol_quirk = 1; |
1129 | break; | 1129 | break; |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a63598895077..5b44cd47da5b 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
78 | { USB_DEVICE(0x04d8, 0x000c), .driver_info = | 78 | { USB_DEVICE(0x04d8, 0x000c), .driver_info = |
79 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 79 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
80 | 80 | ||
81 | /* CarrolTouch 4000U */ | ||
82 | { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
83 | |||
84 | /* CarrolTouch 4500U */ | ||
85 | { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
86 | |||
81 | /* Samsung Android phone modem - ID conflict with SPH-I500 */ | 87 | /* Samsung Android phone modem - ID conflict with SPH-I500 */ |
82 | { USB_DEVICE(0x04e8, 0x6601), .driver_info = | 88 | { USB_DEVICE(0x04e8, 0x6601), .driver_info = |
83 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 89 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index f80d0330d548..8e3c878f38cf 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -1391,21 +1391,20 @@ iso_stream_schedule ( | |||
1391 | 1391 | ||
1392 | /* Behind the scheduling threshold? */ | 1392 | /* Behind the scheduling threshold? */ |
1393 | if (unlikely(start < next)) { | 1393 | if (unlikely(start < next)) { |
1394 | unsigned now2 = (now - base) & (mod - 1); | ||
1394 | 1395 | ||
1395 | /* USB_ISO_ASAP: Round up to the first available slot */ | 1396 | /* USB_ISO_ASAP: Round up to the first available slot */ |
1396 | if (urb->transfer_flags & URB_ISO_ASAP) | 1397 | if (urb->transfer_flags & URB_ISO_ASAP) |
1397 | start += (next - start + period - 1) & -period; | 1398 | start += (next - start + period - 1) & -period; |
1398 | 1399 | ||
1399 | /* | 1400 | /* |
1400 | * Not ASAP: Use the next slot in the stream. If | 1401 | * Not ASAP: Use the next slot in the stream, |
1401 | * the entire URB falls before the threshold, fail. | 1402 | * no matter what. |
1402 | */ | 1403 | */ |
1403 | else if (start + span - period < next) { | 1404 | else if (start + span - period < now2) { |
1404 | ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n", | 1405 | ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n", |
1405 | urb, start + base, | 1406 | urb, start + base, |
1406 | span - period, next + base); | 1407 | span - period, now2 + base); |
1407 | status = -EXDEV; | ||
1408 | goto fail; | ||
1409 | } | 1408 | } |
1410 | } | 1409 | } |
1411 | 1410 | ||
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 08613e241894..0f1d193fef02 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c | |||
@@ -304,6 +304,11 @@ static int __init ohci_pci_init(void) | |||
304 | pr_info("%s: " DRIVER_DESC "\n", hcd_name); | 304 | pr_info("%s: " DRIVER_DESC "\n", hcd_name); |
305 | 305 | ||
306 | ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); | 306 | ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); |
307 | |||
308 | /* Entries for the PCI suspend/resume callbacks are special */ | ||
309 | ohci_pci_hc_driver.pci_suspend = ohci_suspend; | ||
310 | ohci_pci_hc_driver.pci_resume = ohci_resume; | ||
311 | |||
307 | return pci_register_driver(&ohci_pci_driver); | 312 | return pci_register_driver(&ohci_pci_driver); |
308 | } | 313 | } |
309 | module_init(ohci_pci_init); | 314 | module_init(ohci_pci_init); |
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index eb3c8c142fa9..eeb27208c0d1 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c | |||
@@ -830,7 +830,7 @@ static int adu_probe(struct usb_interface *interface, | |||
830 | 830 | ||
831 | /* let the user know what node this device is now attached to */ | 831 | /* let the user know what node this device is now attached to */ |
832 | dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", | 832 | dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", |
833 | udev->descriptor.idProduct, dev->serial_number, | 833 | le16_to_cpu(udev->descriptor.idProduct), dev->serial_number, |
834 | (dev->minor - ADU_MINOR_BASE)); | 834 | (dev->minor - ADU_MINOR_BASE)); |
835 | exit: | 835 | exit: |
836 | dbg(2, " %s : leave, return value %p (dev)", __func__, dev); | 836 | dbg(2, " %s : leave, return value %p (dev)", __func__, dev); |
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h index ca266280895d..e1859b8ef567 100644 --- a/drivers/usb/phy/phy-fsl-usb.h +++ b/drivers/usb/phy/phy-fsl-usb.h | |||
@@ -15,7 +15,7 @@ | |||
15 | * 675 Mass Ave, Cambridge, MA 02139, USA. | 15 | * 675 Mass Ave, Cambridge, MA 02139, USA. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "otg_fsm.h" | 18 | #include "phy-fsm-usb.h" |
19 | #include <linux/usb/otg.h> | 19 | #include <linux/usb/otg.h> |
20 | #include <linux/ioctl.h> | 20 | #include <linux/ioctl.h> |
21 | 21 | ||
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c index c520b3548e7c..7f4596606e18 100644 --- a/drivers/usb/phy/phy-fsm-usb.c +++ b/drivers/usb/phy/phy-fsm-usb.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/usb/gadget.h> | 29 | #include <linux/usb/gadget.h> |
30 | #include <linux/usb/otg.h> | 30 | #include <linux/usb/otg.h> |
31 | 31 | ||
32 | #include "phy-otg-fsm.h" | 32 | #include "phy-fsm-usb.h" |
33 | 33 | ||
34 | /* Change USB protocol when there is a protocol change */ | 34 | /* Change USB protocol when there is a protocol change */ |
35 | static int otg_set_protocol(struct otg_fsm *fsm, int protocol) | 35 | static int otg_set_protocol(struct otg_fsm *fsm, int protocol) |
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 5a979729f8ec..58c17fdc85eb 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c | |||
@@ -2303,7 +2303,7 @@ static int keyspan_startup(struct usb_serial *serial) | |||
2303 | if (d_details == NULL) { | 2303 | if (d_details == NULL) { |
2304 | dev_err(&serial->dev->dev, "%s - unknown product id %x\n", | 2304 | dev_err(&serial->dev->dev, "%s - unknown product id %x\n", |
2305 | __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); | 2305 | __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); |
2306 | return 1; | 2306 | return -ENODEV; |
2307 | } | 2307 | } |
2308 | 2308 | ||
2309 | /* Setup private data for serial driver */ | 2309 | /* Setup private data for serial driver */ |
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 51da424327b0..b01300164fc0 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c | |||
@@ -90,6 +90,7 @@ struct urbtracker { | |||
90 | struct list_head urblist_entry; | 90 | struct list_head urblist_entry; |
91 | struct kref ref_count; | 91 | struct kref ref_count; |
92 | struct urb *urb; | 92 | struct urb *urb; |
93 | struct usb_ctrlrequest *setup; | ||
93 | }; | 94 | }; |
94 | 95 | ||
95 | enum mos7715_pp_modes { | 96 | enum mos7715_pp_modes { |
@@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref) | |||
271 | struct mos7715_parport *mos_parport = urbtrack->mos_parport; | 272 | struct mos7715_parport *mos_parport = urbtrack->mos_parport; |
272 | 273 | ||
273 | usb_free_urb(urbtrack->urb); | 274 | usb_free_urb(urbtrack->urb); |
275 | kfree(urbtrack->setup); | ||
274 | kfree(urbtrack); | 276 | kfree(urbtrack); |
275 | kref_put(&mos_parport->ref_count, destroy_mos_parport); | 277 | kref_put(&mos_parport->ref_count, destroy_mos_parport); |
276 | } | 278 | } |
@@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, | |||
355 | struct urbtracker *urbtrack; | 357 | struct urbtracker *urbtrack; |
356 | int ret_val; | 358 | int ret_val; |
357 | unsigned long flags; | 359 | unsigned long flags; |
358 | struct usb_ctrlrequest setup; | ||
359 | struct usb_serial *serial = mos_parport->serial; | 360 | struct usb_serial *serial = mos_parport->serial; |
360 | struct usb_device *usbdev = serial->dev; | 361 | struct usb_device *usbdev = serial->dev; |
361 | 362 | ||
@@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, | |||
373 | kfree(urbtrack); | 374 | kfree(urbtrack); |
374 | return -ENOMEM; | 375 | return -ENOMEM; |
375 | } | 376 | } |
376 | setup.bRequestType = (__u8)0x40; | 377 | urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL); |
377 | setup.bRequest = (__u8)0x0e; | 378 | if (!urbtrack->setup) { |
378 | setup.wValue = get_reg_value(reg, dummy); | 379 | usb_free_urb(urbtrack->urb); |
379 | setup.wIndex = get_reg_index(reg); | 380 | kfree(urbtrack); |
380 | setup.wLength = 0; | 381 | return -ENOMEM; |
382 | } | ||
383 | urbtrack->setup->bRequestType = (__u8)0x40; | ||
384 | urbtrack->setup->bRequest = (__u8)0x0e; | ||
385 | urbtrack->setup->wValue = get_reg_value(reg, dummy); | ||
386 | urbtrack->setup->wIndex = get_reg_index(reg); | ||
387 | urbtrack->setup->wLength = 0; | ||
381 | usb_fill_control_urb(urbtrack->urb, usbdev, | 388 | usb_fill_control_urb(urbtrack->urb, usbdev, |
382 | usb_sndctrlpipe(usbdev, 0), | 389 | usb_sndctrlpipe(usbdev, 0), |
383 | (unsigned char *)&setup, | 390 | (unsigned char *)urbtrack->setup, |
384 | NULL, 0, async_complete, urbtrack); | 391 | NULL, 0, async_complete, urbtrack); |
385 | kref_init(&urbtrack->ref_count); | 392 | kref_init(&urbtrack->ref_count); |
386 | INIT_LIST_HEAD(&urbtrack->urblist_entry); | 393 | INIT_LIST_HEAD(&urbtrack->urblist_entry); |
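The mos7720 fix above illustrates a general rule for asynchronous control URBs: the struct usb_ctrlrequest handed to usb_fill_control_urb() must stay valid until the completion handler has run, so it cannot live on the submitter's stack. A minimal sketch of the pattern (urb, udev, the wValue/wIndex values and the completion handler are assumptions for illustration, not the driver's exact code):

	struct usb_ctrlrequest *setup;
	int ret;

	setup = kmalloc(sizeof(*setup), GFP_KERNEL);
	if (!setup)
		return -ENOMEM;

	setup->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE; /* 0x40 */
	setup->bRequest     = 0x0e;
	setup->wValue       = cpu_to_le16(value);
	setup->wIndex       = cpu_to_le16(index);
	setup->wLength      = 0;

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)setup, NULL, 0,
			     my_complete, setup);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		kfree(setup);		/* never submitted, free it here */
		return ret;
	}
	/* on success the completion handler owns and frees 'setup' */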
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index d953d674f222..3bac4693c038 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -2193,7 +2193,7 @@ static int mos7810_check(struct usb_serial *serial) | |||
2193 | static int mos7840_probe(struct usb_serial *serial, | 2193 | static int mos7840_probe(struct usb_serial *serial, |
2194 | const struct usb_device_id *id) | 2194 | const struct usb_device_id *id) |
2195 | { | 2195 | { |
2196 | u16 product = serial->dev->descriptor.idProduct; | 2196 | u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); |
2197 | u8 *buf; | 2197 | u8 *buf; |
2198 | int device_type; | 2198 | int device_type; |
2199 | 2199 | ||
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 375b5a400b6f..5c9f9b1d7736 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1536 | char buf[32]; | 1536 | char buf[32]; |
1537 | 1537 | ||
1538 | /* try ID specific firmware first, then try generic firmware */ | 1538 | /* try ID specific firmware first, then try generic firmware */ |
1539 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, | 1539 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", |
1540 | dev->descriptor.idProduct); | 1540 | le16_to_cpu(dev->descriptor.idVendor), |
1541 | le16_to_cpu(dev->descriptor.idProduct)); | ||
1541 | status = request_firmware(&fw_p, buf, &dev->dev); | 1542 | status = request_firmware(&fw_p, buf, &dev->dev); |
1542 | 1543 | ||
1543 | if (status != 0) { | 1544 | if (status != 0) { |
1544 | buf[0] = '\0'; | 1545 | buf[0] = '\0'; |
1545 | if (dev->descriptor.idVendor == MTS_VENDOR_ID) { | 1546 | if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) { |
1546 | switch (dev->descriptor.idProduct) { | 1547 | switch (le16_to_cpu(dev->descriptor.idProduct)) { |
1547 | case MTS_CDMA_PRODUCT_ID: | 1548 | case MTS_CDMA_PRODUCT_ID: |
1548 | strcpy(buf, "mts_cdma.fw"); | 1549 | strcpy(buf, "mts_cdma.fw"); |
1549 | break; | 1550 | break; |
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 8257d30c4072..85365784040b 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c | |||
@@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb) | |||
291 | tty_flip_buffer_push(&port->port); | 291 | tty_flip_buffer_push(&port->port); |
292 | } else | 292 | } else |
293 | dev_dbg(dev, "%s: empty read urb received\n", __func__); | 293 | dev_dbg(dev, "%s: empty read urb received\n", __func__); |
294 | 294 | } | |
295 | /* Resubmit urb so we continue receiving */ | 295 | /* Resubmit urb so we continue receiving */ |
296 | err = usb_submit_urb(urb, GFP_ATOMIC); | 296 | err = usb_submit_urb(urb, GFP_ATOMIC); |
297 | if (err) { | 297 | if (err) { |
298 | if (err != -EPERM) { | 298 | if (err != -EPERM) { |
299 | dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err); | 299 | dev_err(dev, "%s: resubmit read urb failed. (%d)\n", |
300 | /* busy also in error unless we are killed */ | 300 | __func__, err); |
301 | usb_mark_last_busy(port->serial->dev); | 301 | /* busy also in error unless we are killed */ |
302 | } | ||
303 | } else { | ||
304 | usb_mark_last_busy(port->serial->dev); | 302 | usb_mark_last_busy(port->serial->dev); |
305 | } | 303 | } |
304 | } else { | ||
305 | usb_mark_last_busy(port->serial->dev); | ||
306 | } | 306 | } |
307 | } | 307 | } |
308 | 308 | ||
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 16968c899493..d3493ca0525d 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c | |||
@@ -1226,6 +1226,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb) | |||
1226 | } | 1226 | } |
1227 | spin_lock_irqsave(&xfer->lock, flags); | 1227 | spin_lock_irqsave(&xfer->lock, flags); |
1228 | rpipe = xfer->ep->hcpriv; | 1228 | rpipe = xfer->ep->hcpriv; |
1229 | if (rpipe == NULL) { | ||
1230 | pr_debug("%s: xfer id 0x%08X has no RPIPE. %s", | ||
1231 | __func__, wa_xfer_id(xfer), | ||
1232 | "Probably already aborted.\n" ); | ||
1233 | goto out_unlock; | ||
1234 | } | ||
1229 | /* Check the delayed list -> if there, release and complete */ | 1235 | /* Check the delayed list -> if there, release and complete */ |
1230 | spin_lock_irqsave(&wa->xfer_list_lock, flags2); | 1236 | spin_lock_irqsave(&wa->xfer_list_lock, flags2); |
1231 | if (!list_empty(&xfer->list_node) && xfer->seg == NULL) | 1237 | if (!list_empty(&xfer->list_node) && xfer->seg == NULL) |
@@ -1644,8 +1650,7 @@ static void wa_xfer_result_cb(struct urb *urb) | |||
1644 | break; | 1650 | break; |
1645 | } | 1651 | } |
1646 | usb_status = xfer_result->bTransferStatus & 0x3f; | 1652 | usb_status = xfer_result->bTransferStatus & 0x3f; |
1647 | if (usb_status == WA_XFER_STATUS_ABORTED | 1653 | if (usb_status == WA_XFER_STATUS_NOT_FOUND) |
1648 | || usb_status == WA_XFER_STATUS_NOT_FOUND) | ||
1649 | /* taken care of already */ | 1654 | /* taken care of already */ |
1650 | break; | 1655 | break; |
1651 | xfer_id = xfer_result->dwTransferID; | 1656 | xfer_id = xfer_result->dwTransferID; |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index a58ac435a9a4..5e8be462aed5 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void) | |||
348 | 348 | ||
349 | for_each_possible_cpu(i) | 349 | for_each_possible_cpu(i) |
350 | memset(per_cpu(cpu_evtchn_mask, i), | 350 | memset(per_cpu(cpu_evtchn_mask, i), |
351 | (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); | 351 | (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8); |
352 | } | 352 | } |
353 | 353 | ||
354 | static inline void clear_evtchn(int port) | 354 | static inline void clear_evtchn(int port) |
@@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
1493 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ | 1493 | /* Rebind an evtchn so that it gets delivered to a specific cpu */ |
1494 | static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | 1494 | static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) |
1495 | { | 1495 | { |
1496 | struct shared_info *s = HYPERVISOR_shared_info; | ||
1496 | struct evtchn_bind_vcpu bind_vcpu; | 1497 | struct evtchn_bind_vcpu bind_vcpu; |
1497 | int evtchn = evtchn_from_irq(irq); | 1498 | int evtchn = evtchn_from_irq(irq); |
1499 | int masked; | ||
1498 | 1500 | ||
1499 | if (!VALID_EVTCHN(evtchn)) | 1501 | if (!VALID_EVTCHN(evtchn)) |
1500 | return -1; | 1502 | return -1; |
@@ -1511,6 +1513,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
1511 | bind_vcpu.vcpu = tcpu; | 1513 | bind_vcpu.vcpu = tcpu; |
1512 | 1514 | ||
1513 | /* | 1515 | /* |
1516 | * Mask the event while changing the VCPU binding to prevent | ||
1517 | * it from being delivered on an unexpected VCPU. | ||
1518 | */ | ||
1519 | masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask)); | ||
1520 | |||
1521 | /* | ||
1514 | * If this fails, it usually just indicates that we're dealing with a | 1522 | * If this fails, it usually just indicates that we're dealing with a |
1515 | * virq or IPI channel, which don't actually need to be rebound. Ignore | 1523 | * virq or IPI channel, which don't actually need to be rebound. Ignore |
1516 | * it, but don't do the xenlinux-level rebind in that case. | 1524 | * it, but don't do the xenlinux-level rebind in that case. |
@@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
1518 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) | 1526 | if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) |
1519 | bind_evtchn_to_cpu(evtchn, tcpu); | 1527 | bind_evtchn_to_cpu(evtchn, tcpu); |
1520 | 1528 | ||
1529 | if (!masked) | ||
1530 | unmask_evtchn(evtchn); | ||
1531 | |||
1521 | return 0; | 1532 | return 0; |
1522 | } | 1533 | } |
1523 | 1534 | ||
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index 5e376bb93419..8defc6b3f9a2 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c | |||
@@ -40,7 +40,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino) | |||
40 | int block, off; | 40 | int block, off; |
41 | 41 | ||
42 | inode = iget_locked(sb, ino); | 42 | inode = iget_locked(sb, ino); |
43 | if (IS_ERR(inode)) | 43 | if (!inode) |
44 | return ERR_PTR(-ENOMEM); | 44 | return ERR_PTR(-ENOMEM); |
45 | if (!(inode->i_state & I_NEW)) | 45 | if (!(inode->i_state & I_NEW)) |
46 | return inode; | 46 | return inode; |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs, |||
1045 | int bio_uncopy_user(struct bio *bio) | 1045 | int bio_uncopy_user(struct bio *bio) |
1046 | { | 1046 | { |
1047 | struct bio_map_data *bmd = bio->bi_private; | 1047 | struct bio_map_data *bmd = bio->bi_private; |
1048 | int ret = 0; | 1048 | struct bio_vec *bvec; |
1049 | int ret = 0, i; | ||
1049 | 1050 | ||
1050 | if (!bio_flagged(bio, BIO_NULL_MAPPED)) | 1051 | if (!bio_flagged(bio, BIO_NULL_MAPPED)) { |
1051 | ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, | 1052 | /* |
1052 | bmd->nr_sgvecs, bio_data_dir(bio) == READ, | 1053 | * if we're in a workqueue, the request is orphaned, so |
1053 | 0, bmd->is_our_pages); | 1054 | * don't copy into a random user address space, just free. |
1055 | */ | ||
1056 | if (current->mm) | ||
1057 | ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, | ||
1058 | bmd->nr_sgvecs, bio_data_dir(bio) == READ, | ||
1059 | 0, bmd->is_our_pages); | ||
1060 | else if (bmd->is_our_pages) | ||
1061 | bio_for_each_segment_all(bvec, bio, i) | ||
1062 | __free_page(bvec->bv_page); | ||
1063 | } | ||
1054 | bio_free_map_data(bmd); | 1064 | bio_free_map_data(bmd); |
1055 | bio_put(bio); | 1065 | bio_put(bio); |
1056 | return ret; | 1066 | return ret; |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 45e57cc38200..fc6f4f3a1a9d 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server) | |||
43 | server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); | 43 | server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); |
44 | if (IS_ERR(server->secmech.md5)) { | 44 | if (IS_ERR(server->secmech.md5)) { |
45 | cifs_dbg(VFS, "could not allocate crypto md5\n"); | 45 | cifs_dbg(VFS, "could not allocate crypto md5\n"); |
46 | return PTR_ERR(server->secmech.md5); | 46 | rc = PTR_ERR(server->secmech.md5); |
47 | server->secmech.md5 = NULL; | ||
48 | return rc; | ||
47 | } | 49 | } |
48 | 50 | ||
49 | size = sizeof(struct shash_desc) + | 51 | size = sizeof(struct shash_desc) + |
50 | crypto_shash_descsize(server->secmech.md5); | 52 | crypto_shash_descsize(server->secmech.md5); |
51 | server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL); | 53 | server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL); |
52 | if (!server->secmech.sdescmd5) { | 54 | if (!server->secmech.sdescmd5) { |
53 | rc = -ENOMEM; | ||
54 | crypto_free_shash(server->secmech.md5); | 55 | crypto_free_shash(server->secmech.md5); |
55 | server->secmech.md5 = NULL; | 56 | server->secmech.md5 = NULL; |
56 | return rc; | 57 | return -ENOMEM; |
57 | } | 58 | } |
58 | server->secmech.sdescmd5->shash.tfm = server->secmech.md5; | 59 | server->secmech.sdescmd5->shash.tfm = server->secmech.md5; |
59 | server->secmech.sdescmd5->shash.flags = 0x0; | 60 | server->secmech.sdescmd5->shash.flags = 0x0; |
@@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) | |||
421 | if (blobptr + attrsize > blobend) | 422 | if (blobptr + attrsize > blobend) |
422 | break; | 423 | break; |
423 | if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { | 424 | if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { |
424 | if (!attrsize) | 425 | if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN) |
425 | break; | 426 | break; |
426 | if (!ses->domainName) { | 427 | if (!ses->domainName) { |
427 | ses->domainName = | 428 | ses->domainName = |
@@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash) | |||
591 | 592 | ||
592 | static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) | 593 | static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) |
593 | { | 594 | { |
595 | int rc; | ||
594 | unsigned int size; | 596 | unsigned int size; |
595 | 597 | ||
596 | /* check if already allocated */ | 598 | /* check if already allocated */ |
@@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) | |||
600 | server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); | 602 | server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); |
601 | if (IS_ERR(server->secmech.hmacmd5)) { | 603 | if (IS_ERR(server->secmech.hmacmd5)) { |
602 | cifs_dbg(VFS, "could not allocate crypto hmacmd5\n"); | 604 | cifs_dbg(VFS, "could not allocate crypto hmacmd5\n"); |
603 | return PTR_ERR(server->secmech.hmacmd5); | 605 | rc = PTR_ERR(server->secmech.hmacmd5); |
606 | server->secmech.hmacmd5 = NULL; | ||
607 | return rc; | ||
604 | } | 608 | } |
605 | 609 | ||
606 | size = sizeof(struct shash_desc) + | 610 | size = sizeof(struct shash_desc) + |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 4bdd547dbf6f..85ea98d139fc 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb) | |||
147 | goto out_no_root; | 147 | goto out_no_root; |
148 | } | 148 | } |
149 | 149 | ||
150 | if (cifs_sb_master_tcon(cifs_sb)->nocase) | ||
151 | sb->s_d_op = &cifs_ci_dentry_ops; | ||
152 | else | ||
153 | sb->s_d_op = &cifs_dentry_ops; | ||
154 | |||
150 | sb->s_root = d_make_root(inode); | 155 | sb->s_root = d_make_root(inode); |
151 | if (!sb->s_root) { | 156 | if (!sb->s_root) { |
152 | rc = -ENOMEM; | 157 | rc = -ENOMEM; |
153 | goto out_no_root; | 158 | goto out_no_root; |
154 | } | 159 | } |
155 | 160 | ||
156 | /* do that *after* d_make_root() - we want NULL ->d_op for root here */ | ||
157 | if (cifs_sb_master_tcon(cifs_sb)->nocase) | ||
158 | sb->s_d_op = &cifs_ci_dentry_ops; | ||
159 | else | ||
160 | sb->s_d_op = &cifs_dentry_ops; | ||
161 | |||
162 | #ifdef CONFIG_CIFS_NFSD_EXPORT | 161 | #ifdef CONFIG_CIFS_NFSD_EXPORT |
163 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { | 162 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { |
164 | cifs_dbg(FYI, "export ops supported\n"); | 163 | cifs_dbg(FYI, "export ops supported\n"); |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 1fdc37041057..52ca861ed35e 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -44,6 +44,7 @@ | |||
44 | #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) | 44 | #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) |
45 | #define MAX_SERVER_SIZE 15 | 45 | #define MAX_SERVER_SIZE 15 |
46 | #define MAX_SHARE_SIZE 80 | 46 | #define MAX_SHARE_SIZE 80 |
47 | #define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */ | ||
47 | #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ | 48 | #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ |
48 | #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ | 49 | #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ |
49 | 50 | ||
@@ -369,6 +370,9 @@ struct smb_version_operations { | |||
369 | void (*generate_signingkey)(struct TCP_Server_Info *server); | 370 | void (*generate_signingkey)(struct TCP_Server_Info *server); |
370 | int (*calc_signature)(struct smb_rqst *rqst, | 371 | int (*calc_signature)(struct smb_rqst *rqst, |
371 | struct TCP_Server_Info *server); | 372 | struct TCP_Server_Info *server); |
373 | int (*query_mf_symlink)(const unsigned char *path, char *pbuf, | ||
374 | unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, | ||
375 | unsigned int xid); | ||
372 | }; | 376 | }; |
373 | 377 | ||
374 | struct smb_version_values { | 378 | struct smb_version_values { |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index f7e584d047e2..b29a012bed33 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work); | |||
497 | struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages, | 497 | struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages, |
498 | work_func_t complete); | 498 | work_func_t complete); |
499 | void cifs_writedata_release(struct kref *refcount); | 499 | void cifs_writedata_release(struct kref *refcount); |
500 | 500 | int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, | |
501 | unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, | ||
502 | unsigned int xid); | ||
501 | #endif /* _CIFSPROTO_H */ | 503 | #endif /* _CIFSPROTO_H */ |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fa68813396b5..d67c550c4980 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1675 | if (string == NULL) | 1675 | if (string == NULL) |
1676 | goto out_nomem; | 1676 | goto out_nomem; |
1677 | 1677 | ||
1678 | if (strnlen(string, 256) == 256) { | 1678 | if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN) |
1679 | == CIFS_MAX_DOMAINNAME_LEN) { | ||
1679 | printk(KERN_WARNING "CIFS: domain name too" | 1680 | printk(KERN_WARNING "CIFS: domain name too" |
1680 | " long\n"); | 1681 | " long\n"); |
1681 | goto cifs_parse_mount_err; | 1682 | goto cifs_parse_mount_err; |
@@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses) | |||
2276 | 2277 | ||
2277 | #ifdef CONFIG_KEYS | 2278 | #ifdef CONFIG_KEYS |
2278 | 2279 | ||
2279 | /* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */ | 2280 | /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */ |
2280 | #define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1) | 2281 | #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1) |
2281 | 2282 | ||
2282 | /* Populate username and pw fields from keyring if possible */ | 2283 | /* Populate username and pw fields from keyring if possible */ |
2283 | static int | 2284 | static int |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 1e57f36ea1b2..7e36ae34e947 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) | |||
647 | oflags, &oplock, &cfile->fid.netfid, xid); | 647 | oflags, &oplock, &cfile->fid.netfid, xid); |
648 | if (rc == 0) { | 648 | if (rc == 0) { |
649 | cifs_dbg(FYI, "posix reopen succeeded\n"); | 649 | cifs_dbg(FYI, "posix reopen succeeded\n"); |
650 | oparms.reconnect = true; | ||
650 | goto reopen_success; | 651 | goto reopen_success; |
651 | } | 652 | } |
652 | /* | 653 | /* |
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index b83c3f5646bd..562044f700e5 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
@@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr) | |||
305 | } | 305 | } |
306 | 306 | ||
307 | int | 307 | int |
308 | CIFSCheckMFSymlink(struct cifs_fattr *fattr, | 308 | open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, |
309 | const unsigned char *path, | 309 | unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, |
310 | struct cifs_sb_info *cifs_sb, unsigned int xid) | 310 | unsigned int xid) |
311 | { | 311 | { |
312 | int rc; | 312 | int rc; |
313 | int oplock = 0; | 313 | int oplock = 0; |
314 | __u16 netfid = 0; | 314 | __u16 netfid = 0; |
315 | struct tcon_link *tlink; | 315 | struct tcon_link *tlink; |
316 | struct cifs_tcon *pTcon; | 316 | struct cifs_tcon *ptcon; |
317 | struct cifs_io_parms io_parms; | 317 | struct cifs_io_parms io_parms; |
318 | u8 *buf; | ||
319 | char *pbuf; | ||
320 | unsigned int bytes_read = 0; | ||
321 | int buf_type = CIFS_NO_BUFFER; | 318 | int buf_type = CIFS_NO_BUFFER; |
322 | unsigned int link_len = 0; | ||
323 | FILE_ALL_INFO file_info; | 319 | FILE_ALL_INFO file_info; |
324 | 320 | ||
325 | if (!CIFSCouldBeMFSymlink(fattr)) | ||
326 | /* it's not a symlink */ | ||
327 | return 0; | ||
328 | |||
329 | tlink = cifs_sb_tlink(cifs_sb); | 321 | tlink = cifs_sb_tlink(cifs_sb); |
330 | if (IS_ERR(tlink)) | 322 | if (IS_ERR(tlink)) |
331 | return PTR_ERR(tlink); | 323 | return PTR_ERR(tlink); |
332 | pTcon = tlink_tcon(tlink); | 324 | ptcon = tlink_tcon(tlink); |
333 | 325 | ||
334 | rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ, | 326 | rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ, |
335 | CREATE_NOT_DIR, &netfid, &oplock, &file_info, | 327 | CREATE_NOT_DIR, &netfid, &oplock, &file_info, |
336 | cifs_sb->local_nls, | 328 | cifs_sb->local_nls, |
337 | cifs_sb->mnt_cifs_flags & | 329 | cifs_sb->mnt_cifs_flags & |
338 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 330 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
339 | if (rc != 0) | 331 | if (rc != 0) { |
340 | goto out; | 332 | cifs_put_tlink(tlink); |
333 | return rc; | ||
334 | } | ||
341 | 335 | ||
342 | if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { | 336 | if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { |
343 | CIFSSMBClose(xid, pTcon, netfid); | 337 | CIFSSMBClose(xid, ptcon, netfid); |
338 | cifs_put_tlink(tlink); | ||
344 | /* it's not a symlink */ | 339 | /* it's not a symlink */ |
345 | goto out; | 340 | return rc; |
346 | } | 341 | } |
347 | 342 | ||
348 | buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); | ||
349 | if (!buf) { | ||
350 | rc = -ENOMEM; | ||
351 | goto out; | ||
352 | } | ||
353 | pbuf = buf; | ||
354 | io_parms.netfid = netfid; | 343 | io_parms.netfid = netfid; |
355 | io_parms.pid = current->tgid; | 344 | io_parms.pid = current->tgid; |
356 | io_parms.tcon = pTcon; | 345 | io_parms.tcon = ptcon; |
357 | io_parms.offset = 0; | 346 | io_parms.offset = 0; |
358 | io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; | 347 | io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; |
359 | 348 | ||
360 | rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type); | 349 | rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type); |
361 | CIFSSMBClose(xid, pTcon, netfid); | 350 | CIFSSMBClose(xid, ptcon, netfid); |
362 | if (rc != 0) { | 351 | cifs_put_tlink(tlink); |
363 | kfree(buf); | 352 | return rc; |
353 | } | ||
354 | |||
355 | |||
356 | int | ||
357 | CIFSCheckMFSymlink(struct cifs_fattr *fattr, | ||
358 | const unsigned char *path, | ||
359 | struct cifs_sb_info *cifs_sb, unsigned int xid) | ||
360 | { | ||
361 | int rc = 0; | ||
362 | u8 *buf = NULL; | ||
363 | unsigned int link_len = 0; | ||
364 | unsigned int bytes_read = 0; | ||
365 | struct cifs_tcon *ptcon; | ||
366 | |||
367 | if (!CIFSCouldBeMFSymlink(fattr)) | ||
368 | /* it's not a symlink */ | ||
369 | return 0; | ||
370 | |||
371 | buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); | ||
372 | if (!buf) { | ||
373 | rc = -ENOMEM; | ||
364 | goto out; | 374 | goto out; |
365 | } | 375 | } |
366 | 376 | ||
377 | ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb)); | ||
378 | if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink)) | ||
379 | rc = ptcon->ses->server->ops->query_mf_symlink(path, buf, | ||
380 | &bytes_read, cifs_sb, xid); | ||
381 | else | ||
382 | goto out; | ||
383 | |||
384 | if (rc != 0) | ||
385 | goto out; | ||
386 | |||
387 | if (bytes_read == 0) /* not a symlink */ | ||
388 | goto out; | ||
389 | |||
367 | rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL); | 390 | rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL); |
368 | kfree(buf); | ||
369 | if (rc == -EINVAL) { | 391 | if (rc == -EINVAL) { |
370 | /* it's not a symlink */ | 392 | /* it's not a symlink */ |
371 | rc = 0; | 393 | rc = 0; |
@@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr, | |||
381 | fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO; | 403 | fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO; |
382 | fattr->cf_dtype = DT_LNK; | 404 | fattr->cf_dtype = DT_LNK; |
383 | out: | 405 | out: |
384 | cifs_put_tlink(tlink); | 406 | kfree(buf); |
385 | return rc; | 407 | return rc; |
386 | } | 408 | } |
387 | 409 | ||
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index ab8778469394..69d2c826a23b 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, | |||
111 | return; | 111 | return; |
112 | } | 112 | } |
113 | 113 | ||
114 | /* | ||
115 | * If we know that the inode will need to be revalidated immediately, | ||
116 | * then don't create a new dentry for it. We'll end up doing an on | ||
117 | * the wire call either way and this spares us an invalidation. | ||
118 | */ | ||
119 | if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) | ||
120 | return; | ||
121 | |||
114 | dentry = d_alloc(parent, name); | 122 | dentry = d_alloc(parent, name); |
115 | if (!dentry) | 123 | if (!dentry) |
116 | return; | 124 | return; |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 79358e341fd2..08dd37bb23aa 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, | |||
197 | bytes_ret = 0; | 197 | bytes_ret = 0; |
198 | } else | 198 | } else |
199 | bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, | 199 | bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, |
200 | 256, nls_cp); | 200 | CIFS_MAX_DOMAINNAME_LEN, nls_cp); |
201 | bcc_ptr += 2 * bytes_ret; | 201 | bcc_ptr += 2 * bytes_ret; |
202 | bcc_ptr += 2; /* account for null terminator */ | 202 | bcc_ptr += 2; /* account for null terminator */ |
203 | 203 | ||
@@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, | |||
255 | 255 | ||
256 | /* copy domain */ | 256 | /* copy domain */ |
257 | if (ses->domainName != NULL) { | 257 | if (ses->domainName != NULL) { |
258 | strncpy(bcc_ptr, ses->domainName, 256); | 258 | strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); |
259 | bcc_ptr += strnlen(ses->domainName, 256); | 259 | bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); |
260 | } /* else we will send a null domain name | 260 | } /* else we will send a null domain name |
261 | so the server will default to its own domain */ | 261 | so the server will default to its own domain */ |
262 | *bcc_ptr = 0; | 262 | *bcc_ptr = 0; |
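Both sess.c hunks above replace a repeated literal 256 with CIFS_MAX_DOMAINNAME_LEN so the UTF-16 and ASCII paths share one authoritative bound. A tiny userspace model of the ASCII path (the constant's value here is only a stand-in) shows why the copy and the pointer advance must use the same limit:

	#include <stdio.h>
	#include <string.h>

	#define MAX_DOMAINNAME_LEN 256	/* stands in for CIFS_MAX_DOMAINNAME_LEN */

	static char *append_domain(char *dst, const char *domain)
	{
		strncpy(dst, domain, MAX_DOMAINNAME_LEN);
		dst += strnlen(domain, MAX_DOMAINNAME_LEN);
		*dst++ = '\0';		/* null terminator, as in the SMB blob */
		return dst;
	}

	int main(void)
	{
		char blob[MAX_DOMAINNAME_LEN + 2];
		char *end = append_domain(blob, "EXAMPLE.ORG");

		printf("wrote %zu bytes\n", (size_t)(end - blob));
		return 0;
	}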
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 6457690731a2..60943978aec3 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
@@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = { | |||
944 | .mand_lock = cifs_mand_lock, | 944 | .mand_lock = cifs_mand_lock, |
945 | .mand_unlock_range = cifs_unlock_range, | 945 | .mand_unlock_range = cifs_unlock_range, |
946 | .push_mand_locks = cifs_push_mandatory_locks, | 946 | .push_mand_locks = cifs_push_mandatory_locks, |
947 | .query_mf_symlink = open_query_close_cifs_symlink, | ||
947 | }; | 948 | }; |
948 | 949 | ||
949 | struct smb_version_values smb1_values = { | 950 | struct smb_version_values smb1_values = { |
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 301b191270b9..4f2300d020c7 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c | |||
@@ -42,6 +42,7 @@ | |||
42 | static int | 42 | static int |
43 | smb2_crypto_shash_allocate(struct TCP_Server_Info *server) | 43 | smb2_crypto_shash_allocate(struct TCP_Server_Info *server) |
44 | { | 44 | { |
45 | int rc; | ||
45 | unsigned int size; | 46 | unsigned int size; |
46 | 47 | ||
47 | if (server->secmech.sdeschmacsha256 != NULL) | 48 | if (server->secmech.sdeschmacsha256 != NULL) |
@@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server) | |||
50 | server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0); | 51 | server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0); |
51 | if (IS_ERR(server->secmech.hmacsha256)) { | 52 | if (IS_ERR(server->secmech.hmacsha256)) { |
52 | cifs_dbg(VFS, "could not allocate crypto hmacsha256\n"); | 53 | cifs_dbg(VFS, "could not allocate crypto hmacsha256\n"); |
53 | return PTR_ERR(server->secmech.hmacsha256); | 54 | rc = PTR_ERR(server->secmech.hmacsha256); |
55 | server->secmech.hmacsha256 = NULL; | ||
56 | return rc; | ||
54 | } | 57 | } |
55 | 58 | ||
56 | size = sizeof(struct shash_desc) + | 59 | size = sizeof(struct shash_desc) + |
@@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server) | |||
87 | server->secmech.sdeschmacsha256 = NULL; | 90 | server->secmech.sdeschmacsha256 = NULL; |
88 | crypto_free_shash(server->secmech.hmacsha256); | 91 | crypto_free_shash(server->secmech.hmacsha256); |
89 | server->secmech.hmacsha256 = NULL; | 92 | server->secmech.hmacsha256 = NULL; |
90 | return PTR_ERR(server->secmech.cmacaes); | 93 | rc = PTR_ERR(server->secmech.cmacaes); |
94 | server->secmech.cmacaes = NULL; | ||
95 | return rc; | ||
91 | } | 96 | } |
92 | 97 | ||
93 | size = sizeof(struct shash_desc) + | 98 | size = sizeof(struct shash_desc) + |
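Both smb2transport.c fixes above capture PTR_ERR() and then overwrite the cached pointer with NULL before returning. The reason is the shared teardown path: code that frees "whatever is non-NULL" must never be handed an ERR_PTR. A hedged sketch of such a teardown helper (the function name is illustrative; only the field names come from the diff):

	static void free_secmech(struct TCP_Server_Info *server)
	{
		if (server->secmech.hmacsha256) {
			crypto_free_shash(server->secmech.hmacsha256);
			server->secmech.hmacsha256 = NULL;
		}
		if (server->secmech.cmacaes) {
			crypto_free_shash(server->secmech.cmacaes);
			server->secmech.cmacaes = NULL;
		}
	}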
diff --git a/fs/dcache.c b/fs/dcache.c index 87bdb5329c3c..83cfb834db03 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -2724,6 +2724,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, | |||
2724 | return memcpy(buffer, temp, sz); | 2724 | return memcpy(buffer, temp, sz); |
2725 | } | 2725 | } |
2726 | 2726 | ||
2727 | char *simple_dname(struct dentry *dentry, char *buffer, int buflen) | ||
2728 | { | ||
2729 | char *end = buffer + buflen; | ||
2730 | /* these dentries are never renamed, so d_lock is not needed */ | ||
2731 | if (prepend(&end, &buflen, " (deleted)", 11) || | ||
2732 | prepend_name(&end, &buflen, &dentry->d_name) || | ||
2733 | prepend(&end, &buflen, "/", 1)) | ||
2734 | end = ERR_PTR(-ENAMETOOLONG); | ||
2735 | return end; | ||
2736 | } | ||
2737 | |||
2727 | /* | 2738 | /* |
2728 | * Write full pathname from the root of the filesystem into the buffer. | 2739 | * Write full pathname from the root of the filesystem into the buffer. |
2729 | */ | 2740 | */ |
diff --git a/fs/efs/inode.c b/fs/efs/inode.c index f3913eb2c474..d15ccf20f1b3 100644 --- a/fs/efs/inode.c +++ b/fs/efs/inode.c | |||
@@ -57,7 +57,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino) | |||
57 | struct inode *inode; | 57 | struct inode *inode; |
58 | 58 | ||
59 | inode = iget_locked(super, ino); | 59 | inode = iget_locked(super, ino); |
60 | if (IS_ERR(inode)) | 60 | if (!inode) |
61 | return ERR_PTR(-ENOMEM); | 61 | return ERR_PTR(-ENOMEM); |
62 | if (!(inode->i_state & I_NEW)) | 62 | if (!(inode->i_state & I_NEW)) |
63 | return inode; | 63 | return inode; |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) | |||
608 | return -ENOMEM; | 608 | return -ENOMEM; |
609 | 609 | ||
610 | lru_add_drain(); | 610 | lru_add_drain(); |
611 | tlb_gather_mmu(&tlb, mm, 0); | 611 | tlb_gather_mmu(&tlb, mm, old_start, old_end); |
612 | if (new_end > old_start) { | 612 | if (new_end > old_start) { |
613 | /* | 613 | /* |
614 | * when the old and new regions overlap clear from new_end. | 614 | * when the old and new regions overlap clear from new_end. |
@@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) | |||
625 | free_pgd_range(&tlb, old_start, old_end, new_end, | 625 | free_pgd_range(&tlb, old_start, old_end, new_end, |
626 | vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); | 626 | vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); |
627 | } | 627 | } |
628 | tlb_finish_mmu(&tlb, new_end, old_end); | 628 | tlb_finish_mmu(&tlb, old_start, old_end); |
629 | 629 | ||
630 | /* | 630 | /* |
631 | * Shrink the vma to just the new range. Always succeeds. | 631 | * Shrink the vma to just the new range. Always succeeds. |
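The shift_arg_pages() hunk above is a caller-side view of the mmu_gather interface change declared further down in include/asm-generic/tlb.h: the gather now takes the affected address range instead of a fullmm flag, and the finish call is handed the same range. A minimal sketch of the calling convention (the unmap work in the middle is elided):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* was tlb_gather_mmu(&tlb, mm, fullmm) */
	/* ... free_pgd_range()/unmap work covering [start, end) ... */
	tlb_finish_mmu(&tlb, start, end);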
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index b577e45425b0..0ab26fbf3380 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -2086,6 +2086,7 @@ extern int ext4_sync_inode(handle_t *, struct inode *); | |||
2086 | extern void ext4_dirty_inode(struct inode *, int); | 2086 | extern void ext4_dirty_inode(struct inode *, int); |
2087 | extern int ext4_change_inode_journal_flag(struct inode *, int); | 2087 | extern int ext4_change_inode_journal_flag(struct inode *, int); |
2088 | extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); | 2088 | extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); |
2089 | extern int ext4_inode_attach_jinode(struct inode *inode); | ||
2089 | extern int ext4_can_truncate(struct inode *inode); | 2090 | extern int ext4_can_truncate(struct inode *inode); |
2090 | extern void ext4_truncate(struct inode *); | 2091 | extern void ext4_truncate(struct inode *); |
2091 | extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); | 2092 | extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); |
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 72a3600aedbd..17ac112ab101 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c | |||
@@ -255,10 +255,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, | |||
255 | set_buffer_prio(bh); | 255 | set_buffer_prio(bh); |
256 | if (ext4_handle_valid(handle)) { | 256 | if (ext4_handle_valid(handle)) { |
257 | err = jbd2_journal_dirty_metadata(handle, bh); | 257 | err = jbd2_journal_dirty_metadata(handle, bh); |
258 | if (err) { | 258 | /* Errors can only happen if there is a bug */ |
259 | /* Errors can only happen if there is a bug */ | 259 | if (WARN_ON_ONCE(err)) { |
260 | handle->h_err = err; | 260 | ext4_journal_abort_handle(where, line, __func__, bh, |
261 | __ext4_journal_stop(where, line, handle); | 261 | handle, err); |
262 | } | 262 | } |
263 | } else { | 263 | } else { |
264 | if (inode) | 264 | if (inode) |
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 6f4cc567c382..319c9d26279a 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
@@ -219,7 +219,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp) | |||
219 | { | 219 | { |
220 | struct super_block *sb = inode->i_sb; | 220 | struct super_block *sb = inode->i_sb; |
221 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 221 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
222 | struct ext4_inode_info *ei = EXT4_I(inode); | ||
223 | struct vfsmount *mnt = filp->f_path.mnt; | 222 | struct vfsmount *mnt = filp->f_path.mnt; |
224 | struct path path; | 223 | struct path path; |
225 | char buf[64], *cp; | 224 | char buf[64], *cp; |
@@ -259,22 +258,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp) | |||
259 | * Set up the jbd2_inode if we are opening the inode for | 258 | * Set up the jbd2_inode if we are opening the inode for |
260 | * writing and the journal is present | 259 | * writing and the journal is present |
261 | */ | 260 | */ |
262 | if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) { | 261 | if (filp->f_mode & FMODE_WRITE) { |
263 | struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL); | 262 | int ret = ext4_inode_attach_jinode(inode); |
264 | 263 | if (ret < 0) | |
265 | spin_lock(&inode->i_lock); | 264 | return ret; |
266 | if (!ei->jinode) { | ||
267 | if (!jinode) { | ||
268 | spin_unlock(&inode->i_lock); | ||
269 | return -ENOMEM; | ||
270 | } | ||
271 | ei->jinode = jinode; | ||
272 | jbd2_journal_init_jbd_inode(ei->jinode, inode); | ||
273 | jinode = NULL; | ||
274 | } | ||
275 | spin_unlock(&inode->i_lock); | ||
276 | if (unlikely(jinode != NULL)) | ||
277 | jbd2_free_inode(jinode); | ||
278 | } | 265 | } |
279 | return dquot_file_open(inode, filp); | 266 | return dquot_file_open(inode, filp); |
280 | } | 267 | } |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index dd32a2eacd0d..c2ca04e67a4f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -3533,6 +3533,18 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) | |||
3533 | offset; | 3533 | offset; |
3534 | } | 3534 | } |
3535 | 3535 | ||
3536 | if (offset & (sb->s_blocksize - 1) || | ||
3537 | (offset + length) & (sb->s_blocksize - 1)) { | ||
3538 | /* | ||
3539 | * Attach jinode to inode for jbd2 if we do any zeroing of | ||
3540 | * partial block | ||
3541 | */ | ||
3542 | ret = ext4_inode_attach_jinode(inode); | ||
3543 | if (ret < 0) | ||
3544 | goto out_mutex; | ||
3545 | |||
3546 | } | ||
3547 | |||
3536 | first_block_offset = round_up(offset, sb->s_blocksize); | 3548 | first_block_offset = round_up(offset, sb->s_blocksize); |
3537 | last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; | 3549 | last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; |
3538 | 3550 | ||
@@ -3601,6 +3613,31 @@ out_mutex: | |||
3601 | return ret; | 3613 | return ret; |
3602 | } | 3614 | } |
3603 | 3615 | ||
3616 | int ext4_inode_attach_jinode(struct inode *inode) | ||
3617 | { | ||
3618 | struct ext4_inode_info *ei = EXT4_I(inode); | ||
3619 | struct jbd2_inode *jinode; | ||
3620 | |||
3621 | if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) | ||
3622 | return 0; | ||
3623 | |||
3624 | jinode = jbd2_alloc_inode(GFP_KERNEL); | ||
3625 | spin_lock(&inode->i_lock); | ||
3626 | if (!ei->jinode) { | ||
3627 | if (!jinode) { | ||
3628 | spin_unlock(&inode->i_lock); | ||
3629 | return -ENOMEM; | ||
3630 | } | ||
3631 | ei->jinode = jinode; | ||
3632 | jbd2_journal_init_jbd_inode(ei->jinode, inode); | ||
3633 | jinode = NULL; | ||
3634 | } | ||
3635 | spin_unlock(&inode->i_lock); | ||
3636 | if (unlikely(jinode != NULL)) | ||
3637 | jbd2_free_inode(jinode); | ||
3638 | return 0; | ||
3639 | } | ||
3640 | |||
3604 | /* | 3641 | /* |
3605 | * ext4_truncate() | 3642 | * ext4_truncate() |
3606 | * | 3643 | * |
@@ -3661,6 +3698,12 @@ void ext4_truncate(struct inode *inode) | |||
3661 | return; | 3698 | return; |
3662 | } | 3699 | } |
3663 | 3700 | ||
3701 | /* If we zero-out tail of the page, we have to create jinode for jbd2 */ | ||
3702 | if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { | ||
3703 | if (ext4_inode_attach_jinode(inode) < 0) | ||
3704 | return; | ||
3705 | } | ||
3706 | |||
3664 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | 3707 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) |
3665 | credits = ext4_writepage_trans_blocks(inode); | 3708 | credits = ext4_writepage_trans_blocks(inode); |
3666 | else | 3709 | else |
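ext4_inode_attach_jinode() above is the classic "allocate outside the lock, recheck under it" idiom: the jbd2_inode is allocated without i_lock held, the field is tested again once the spinlock is taken, and the spare allocation is dropped if another opener won the race. The same idiom in plain, runnable userspace C (names and sizes are illustrative):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct owner {
		pthread_mutex_t lock;
		void *attached;
	};

	/* Attach a lazily allocated object exactly once, ext4_inode_attach_jinode()-style. */
	static int attach_once(struct owner *o)
	{
		void *obj;

		if (o->attached)
			return 0;		/* fast path: already attached */

		obj = malloc(64);		/* may fail; done outside the lock */
		pthread_mutex_lock(&o->lock);
		if (!o->attached) {
			if (!obj) {
				pthread_mutex_unlock(&o->lock);
				return -1;	/* the kernel version returns -ENOMEM */
			}
			o->attached = obj;
			obj = NULL;		/* ownership handed over */
		}
		pthread_mutex_unlock(&o->lock);
		free(obj);			/* lost the race: free the spare */
		return 0;
	}

	int main(void)
	{
		struct owner o = { PTHREAD_MUTEX_INITIALIZER, NULL };

		printf("first:  %d\n", attach_once(&o));
		printf("second: %d\n", attach_once(&o));
		free(o.attached);
		return 0;
	}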
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 9491ac0590f7..c0427e2f6648 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c | |||
@@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2) | |||
77 | memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data)); | 77 | memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data)); |
78 | memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags)); | 78 | memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags)); |
79 | memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); | 79 | memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); |
80 | memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree)); | 80 | ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS); |
81 | memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr)); | 81 | ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS); |
82 | ext4_es_lru_del(inode1); | ||
83 | ext4_es_lru_del(inode2); | ||
82 | 84 | ||
83 | isize = i_size_read(inode1); | 85 | isize = i_size_read(inode1); |
84 | i_size_write(inode1, i_size_read(inode2)); | 86 | i_size_write(inode1, i_size_read(inode2)); |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 36b141e420b7..b59373b625e9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -1359,7 +1359,7 @@ static const struct mount_opts { | |||
1359 | {Opt_delalloc, EXT4_MOUNT_DELALLOC, | 1359 | {Opt_delalloc, EXT4_MOUNT_DELALLOC, |
1360 | MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, | 1360 | MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, |
1361 | {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, | 1361 | {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, |
1362 | MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT}, | 1362 | MOPT_EXT4_ONLY | MOPT_CLEAR}, |
1363 | {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, | 1363 | {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, |
1364 | MOPT_EXT4_ONLY | MOPT_SET}, | 1364 | MOPT_EXT4_ONLY | MOPT_SET}, |
1365 | {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | | 1365 | {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | |
@@ -3483,7 +3483,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3483 | } | 3483 | } |
3484 | if (test_opt(sb, DIOREAD_NOLOCK)) { | 3484 | if (test_opt(sb, DIOREAD_NOLOCK)) { |
3485 | ext4_msg(sb, KERN_ERR, "can't mount with " | 3485 | ext4_msg(sb, KERN_ERR, "can't mount with " |
3486 | "both data=journal and delalloc"); | 3486 | "both data=journal and dioread_nolock"); |
3487 | goto failed_mount; | 3487 | goto failed_mount; |
3488 | } | 3488 | } |
3489 | if (test_opt(sb, DELALLOC)) | 3489 | if (test_opt(sb, DELALLOC)) |
@@ -4727,6 +4727,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) | |||
4727 | goto restore_opts; | 4727 | goto restore_opts; |
4728 | } | 4728 | } |
4729 | 4729 | ||
4730 | if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { | ||
4731 | if (test_opt2(sb, EXPLICIT_DELALLOC)) { | ||
4732 | ext4_msg(sb, KERN_ERR, "can't mount with " | ||
4733 | "both data=journal and delalloc"); | ||
4734 | err = -EINVAL; | ||
4735 | goto restore_opts; | ||
4736 | } | ||
4737 | if (test_opt(sb, DIOREAD_NOLOCK)) { | ||
4738 | ext4_msg(sb, KERN_ERR, "can't mount with " | ||
4739 | "both data=journal and dioread_nolock"); | ||
4740 | err = -EINVAL; | ||
4741 | goto restore_opts; | ||
4742 | } | ||
4743 | } | ||
4744 | |||
4730 | if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) | 4745 | if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) |
4731 | ext4_abort(sb, "Abort forced by user"); | 4746 | ext4_abort(sb, "Abort forced by user"); |
4732 | 4747 | ||
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 9435384562a2..544a809819c3 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -1838,14 +1838,14 @@ int __init gfs2_glock_init(void) | |||
1838 | 1838 | ||
1839 | glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | | 1839 | glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | |
1840 | WQ_HIGHPRI | WQ_FREEZABLE, 0); | 1840 | WQ_HIGHPRI | WQ_FREEZABLE, 0); |
1841 | if (IS_ERR(glock_workqueue)) | 1841 | if (!glock_workqueue) |
1842 | return PTR_ERR(glock_workqueue); | 1842 | return -ENOMEM; |
1843 | gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", | 1843 | gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", |
1844 | WQ_MEM_RECLAIM | WQ_FREEZABLE, | 1844 | WQ_MEM_RECLAIM | WQ_FREEZABLE, |
1845 | 0); | 1845 | 0); |
1846 | if (IS_ERR(gfs2_delete_workqueue)) { | 1846 | if (!gfs2_delete_workqueue) { |
1847 | destroy_workqueue(glock_workqueue); | 1847 | destroy_workqueue(glock_workqueue); |
1848 | return PTR_ERR(gfs2_delete_workqueue); | 1848 | return -ENOMEM; |
1849 | } | 1849 | } |
1850 | 1850 | ||
1851 | register_shrinker(&glock_shrinker); | 1851 | register_shrinker(&glock_shrinker); |
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 5f2e5224c51c..e2e0a90396e7 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
@@ -47,7 +47,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) | |||
47 | * None of the buffers should be dirty, locked, or pinned. | 47 | * None of the buffers should be dirty, locked, or pinned. |
48 | */ | 48 | */ |
49 | 49 | ||
50 | static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) | 50 | static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, |
51 | unsigned int nr_revokes) | ||
51 | { | 52 | { |
52 | struct gfs2_sbd *sdp = gl->gl_sbd; | 53 | struct gfs2_sbd *sdp = gl->gl_sbd; |
53 | struct list_head *head = &gl->gl_ail_list; | 54 | struct list_head *head = &gl->gl_ail_list; |
@@ -57,7 +58,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) | |||
57 | 58 | ||
58 | gfs2_log_lock(sdp); | 59 | gfs2_log_lock(sdp); |
59 | spin_lock(&sdp->sd_ail_lock); | 60 | spin_lock(&sdp->sd_ail_lock); |
60 | list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) { | 61 | list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) { |
62 | if (nr_revokes == 0) | ||
63 | break; | ||
61 | bh = bd->bd_bh; | 64 | bh = bd->bd_bh; |
62 | if (bh->b_state & b_state) { | 65 | if (bh->b_state & b_state) { |
63 | if (fsync) | 66 | if (fsync) |
@@ -65,6 +68,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) | |||
65 | gfs2_ail_error(gl, bh); | 68 | gfs2_ail_error(gl, bh); |
66 | } | 69 | } |
67 | gfs2_trans_add_revoke(sdp, bd); | 70 | gfs2_trans_add_revoke(sdp, bd); |
71 | nr_revokes--; | ||
68 | } | 72 | } |
69 | GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); | 73 | GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); |
70 | spin_unlock(&sdp->sd_ail_lock); | 74 | spin_unlock(&sdp->sd_ail_lock); |
@@ -91,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) | |||
91 | WARN_ON_ONCE(current->journal_info); | 95 | WARN_ON_ONCE(current->journal_info); |
92 | current->journal_info = &tr; | 96 | current->journal_info = &tr; |
93 | 97 | ||
94 | __gfs2_ail_flush(gl, 0); | 98 | __gfs2_ail_flush(gl, 0, tr.tr_revokes); |
95 | 99 | ||
96 | gfs2_trans_end(sdp); | 100 | gfs2_trans_end(sdp); |
97 | gfs2_log_flush(sdp, NULL); | 101 | gfs2_log_flush(sdp, NULL); |
@@ -101,15 +105,19 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) | |||
101 | { | 105 | { |
102 | struct gfs2_sbd *sdp = gl->gl_sbd; | 106 | struct gfs2_sbd *sdp = gl->gl_sbd; |
103 | unsigned int revokes = atomic_read(&gl->gl_ail_count); | 107 | unsigned int revokes = atomic_read(&gl->gl_ail_count); |
108 | unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64); | ||
104 | int ret; | 109 | int ret; |
105 | 110 | ||
106 | if (!revokes) | 111 | if (!revokes) |
107 | return; | 112 | return; |
108 | 113 | ||
109 | ret = gfs2_trans_begin(sdp, 0, revokes); | 114 | while (revokes > max_revokes) |
115 | max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64); | ||
116 | |||
117 | ret = gfs2_trans_begin(sdp, 0, max_revokes); | ||
110 | if (ret) | 118 | if (ret) |
111 | return; | 119 | return; |
112 | __gfs2_ail_flush(gl, fsync); | 120 | __gfs2_ail_flush(gl, fsync, max_revokes); |
113 | gfs2_trans_end(sdp); | 121 | gfs2_trans_end(sdp); |
114 | gfs2_log_flush(sdp, NULL); | 122 | gfs2_log_flush(sdp, NULL); |
115 | } | 123 | } |
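gfs2_ail_flush() above stops sizing the transaction by the raw AIL count and instead reserves whole revoke blocks: the first block loses space to a gfs2_log_descriptor, every further block only to a gfs2_meta_header, and blocks are added until the pending revokes fit; __gfs2_ail_flush() is then capped to that budget. The arithmetic on its own, as a runnable sketch (the 64- and 32-byte header sizes and the unsigned long long stand-in for u64 are placeholders, not the real struct sizes):

	#include <stdio.h>

	static unsigned int revoke_capacity(unsigned int bsize, unsigned int desc_sz,
					    unsigned int hdr_sz, unsigned int revokes)
	{
		/* first log block: descriptor header, then one u64 per revoke */
		unsigned int max = (bsize - desc_sz) / sizeof(unsigned long long);

		/* add continuation blocks until the pending revokes fit */
		while (revokes > max)
			max += (bsize - hdr_sz) / sizeof(unsigned long long);
		return max;
	}

	int main(void)
	{
		/* e.g. 4 KiB blocks, hypothetical 64/32-byte headers, 1200 revokes */
		printf("%u revoke slots reserved\n",
		       revoke_capacity(4096, 64, 32, 1200));
		return 0;
	}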
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index bbb2715171cd..64915eeae5a7 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
@@ -594,7 +594,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, | |||
594 | } | 594 | } |
595 | gfs2_glock_dq_uninit(ghs); | 595 | gfs2_glock_dq_uninit(ghs); |
596 | if (IS_ERR(d)) | 596 | if (IS_ERR(d)) |
597 | return PTR_RET(d); | 597 | return PTR_ERR(d); |
598 | return error; | 598 | return error; |
599 | } else if (error != -ENOENT) { | 599 | } else if (error != -ENOENT) { |
600 | goto fail_gunlock; | 600 | goto fail_gunlock; |
@@ -1750,6 +1750,10 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name, | |||
1750 | struct gfs2_holder gh; | 1750 | struct gfs2_holder gh; |
1751 | int ret; | 1751 | int ret; |
1752 | 1752 | ||
1753 | /* For selinux during lookup */ | ||
1754 | if (gfs2_glock_is_locked_by_me(ip->i_gl)) | ||
1755 | return generic_getxattr(dentry, name, data, size); | ||
1756 | |||
1753 | gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); | 1757 | gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); |
1754 | ret = gfs2_glock_nq(&gh); | 1758 | ret = gfs2_glock_nq(&gh); |
1755 | if (ret == 0) { | 1759 | if (ret == 0) { |
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index e04d0e09ee7b..7b0f5043cf24 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c | |||
@@ -155,7 +155,7 @@ static int __init init_gfs2_fs(void) | |||
155 | goto fail_wq; | 155 | goto fail_wq; |
156 | 156 | ||
157 | gfs2_control_wq = alloc_workqueue("gfs2_control", | 157 | gfs2_control_wq = alloc_workqueue("gfs2_control", |
158 | WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0); | 158 | WQ_UNBOUND | WQ_FREEZABLE, 0); |
159 | if (!gfs2_control_wq) | 159 | if (!gfs2_control_wq) |
160 | goto fail_recovery; | 160 | goto fail_recovery; |
161 | 161 | ||
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a3f868ae3fd4..d19b30ababf1 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb, | |||
463 | return inode; | 463 | return inode; |
464 | } | 464 | } |
465 | 465 | ||
466 | /* | ||
467 | * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never | ||
468 | * be taken from reclaim -- unlike regular filesystems. This needs an | ||
469 | * annotation because huge_pmd_share() does an allocation under | ||
470 | * i_mmap_mutex. | ||
471 | */ | ||
472 | struct lock_class_key hugetlbfs_i_mmap_mutex_key; | ||
473 | |||
466 | static struct inode *hugetlbfs_get_inode(struct super_block *sb, | 474 | static struct inode *hugetlbfs_get_inode(struct super_block *sb, |
467 | struct inode *dir, | 475 | struct inode *dir, |
468 | umode_t mode, dev_t dev) | 476 | umode_t mode, dev_t dev) |
@@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, | |||
474 | struct hugetlbfs_inode_info *info; | 482 | struct hugetlbfs_inode_info *info; |
475 | inode->i_ino = get_next_ino(); | 483 | inode->i_ino = get_next_ino(); |
476 | inode_init_owner(inode, dir, mode); | 484 | inode_init_owner(inode, dir, mode); |
485 | lockdep_set_class(&inode->i_mapping->i_mmap_mutex, | ||
486 | &hugetlbfs_i_mmap_mutex_key); | ||
477 | inode->i_mapping->a_ops = &hugetlbfs_aops; | 487 | inode->i_mapping->a_ops = &hugetlbfs_aops; |
478 | inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; | 488 | inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; |
479 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 489 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
@@ -916,14 +926,8 @@ static int get_hstate_idx(int page_size_log) | |||
916 | return h - hstates; | 926 | return h - hstates; |
917 | } | 927 | } |
918 | 928 | ||
919 | static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen) | ||
920 | { | ||
921 | return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)", | ||
922 | dentry->d_name.name); | ||
923 | } | ||
924 | |||
925 | static struct dentry_operations anon_ops = { | 929 | static struct dentry_operations anon_ops = { |
926 | .d_dname = hugetlb_dname | 930 | .d_dname = simple_dname |
927 | }; | 931 | }; |
928 | 932 | ||
929 | /* | 933 | /* |
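The hugetlbfs hunks above make two independent moves: the i_mmap_mutex gets its own lockdep class via the usual one-static-key-per-lock-role pattern, and the hand-rolled hugetlb_dname() gives way to the simple_dname() helper added in the fs/dcache.c hunk earlier. A sketch of how another pseudo filesystem would apply the same two moves (the my_fs_* names are illustrative):

	/* one class key per distinct locking role of i_mmap_mutex */
	static struct lock_class_key my_fs_i_mmap_mutex_key;

	static struct dentry_operations my_fs_anon_ops = {
		.d_dname = simple_dname,	/* "/name (deleted)" without taking d_lock */
	};

	static void my_fs_setup_inode(struct inode *inode)
	{
		/* keeps lockdep from conflating this mutex with ones taken in reclaim */
		lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
				  &my_fs_i_mmap_mutex_key);
	}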
diff --git a/fs/namespace.c b/fs/namespace.c index 7b1ca9ba0b0a..a45ba4f267fe 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path) | |||
1429 | CL_COPY_ALL | CL_PRIVATE); | 1429 | CL_COPY_ALL | CL_PRIVATE); |
1430 | namespace_unlock(); | 1430 | namespace_unlock(); |
1431 | if (IS_ERR(tree)) | 1431 | if (IS_ERR(tree)) |
1432 | return NULL; | 1432 | return ERR_CAST(tree); |
1433 | return &tree->mnt; | 1433 | return &tree->mnt; |
1434 | } | 1434 | } |
1435 | 1435 | ||
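collect_mounts() now propagates the copy_tree() error instead of flattening it to NULL, so its callers are expected to test with IS_ERR()/PTR_ERR() rather than a NULL check. Illustrative caller shape (not a specific call site from this diff):

	struct vfsmount *mnt = collect_mounts(&path);

	if (IS_ERR(mnt))
		return PTR_ERR(mnt);	/* previously this case surfaced as NULL */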
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index dc9a913784ab..2d8be51f90dc 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c | |||
@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err) | |||
345 | 345 | ||
346 | if (err == -EOPNOTSUPP) { | 346 | if (err == -EOPNOTSUPP) { |
347 | set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); | 347 | set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); |
348 | bio_put(bio); | 348 | /* to be detected by nilfs_segbuf_submit_bio() */ |
349 | /* to be detected by submit_seg_bio() */ | ||
350 | } | 349 | } |
351 | 350 | ||
352 | if (!uptodate) | 351 | if (!uptodate) |
@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, | |||
377 | bio->bi_private = segbuf; | 376 | bio->bi_private = segbuf; |
378 | bio_get(bio); | 377 | bio_get(bio); |
379 | submit_bio(mode, bio); | 378 | submit_bio(mode, bio); |
379 | segbuf->sb_nbio++; | ||
380 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { | 380 | if (bio_flagged(bio, BIO_EOPNOTSUPP)) { |
381 | bio_put(bio); | 381 | bio_put(bio); |
382 | err = -EOPNOTSUPP; | 382 | err = -EOPNOTSUPP; |
383 | goto failed; | 383 | goto failed; |
384 | } | 384 | } |
385 | segbuf->sb_nbio++; | ||
386 | bio_put(bio); | 385 | bio_put(bio); |
387 | 386 | ||
388 | wi->bio = NULL; | 387 | wi->bio = NULL; |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 79736a28d84f..2abf97b2a592 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -1757,7 +1757,7 @@ try_again: | |||
1757 | goto out; | 1757 | goto out; |
1758 | } else if (ret == 1) { | 1758 | } else if (ret == 1) { |
1759 | clusters_need = wc->w_clen; | 1759 | clusters_need = wc->w_clen; |
1760 | ret = ocfs2_refcount_cow(inode, filp, di_bh, | 1760 | ret = ocfs2_refcount_cow(inode, di_bh, |
1761 | wc->w_cpos, wc->w_clen, UINT_MAX); | 1761 | wc->w_cpos, wc->w_clen, UINT_MAX); |
1762 | if (ret) { | 1762 | if (ret) { |
1763 | mlog_errno(ret); | 1763 | mlog_errno(ret); |
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index eb760d8acd50..30544ce8e9f7 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode) | |||
2153 | { | 2153 | { |
2154 | int ret; | 2154 | int ret; |
2155 | struct ocfs2_empty_dir_priv priv = { | 2155 | struct ocfs2_empty_dir_priv priv = { |
2156 | .ctx.actor = ocfs2_empty_dir_filldir | 2156 | .ctx.actor = ocfs2_empty_dir_filldir, |
2157 | }; | 2157 | }; |
2158 | 2158 | ||
2159 | memset(&priv, 0, sizeof(priv)); | ||
2160 | |||
2161 | if (ocfs2_dir_indexed(inode)) { | 2159 | if (ocfs2_dir_indexed(inode)) { |
2162 | ret = ocfs2_empty_dir_dx(inode, &priv); | 2160 | ret = ocfs2_empty_dir_dx(inode, &priv); |
2163 | if (ret) | 2161 | if (ret) |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 41000f223ca4..3261d71319ee 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode, | |||
370 | if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) | 370 | if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) |
371 | goto out; | 371 | goto out; |
372 | 372 | ||
373 | return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1); | 373 | return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1); |
374 | 374 | ||
375 | out: | 375 | out: |
376 | return status; | 376 | return status; |
@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode, | |||
899 | zero_clusters = last_cpos - zero_cpos; | 899 | zero_clusters = last_cpos - zero_cpos; |
900 | 900 | ||
901 | if (needs_cow) { | 901 | if (needs_cow) { |
902 | rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos, | 902 | rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, |
903 | zero_clusters, UINT_MAX); | 903 | zero_clusters, UINT_MAX); |
904 | if (rc) { | 904 | if (rc) { |
905 | mlog_errno(rc); | 905 | mlog_errno(rc); |
@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode, | |||
2078 | 2078 | ||
2079 | *meta_level = 1; | 2079 | *meta_level = 1; |
2080 | 2080 | ||
2081 | ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX); | 2081 | ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX); |
2082 | if (ret) | 2082 | if (ret) |
2083 | mlog_errno(ret); | 2083 | mlog_errno(ret); |
2084 | out: | 2084 | out: |
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 96f9ac237e86..0a992737dcaf 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb, | |||
537 | extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); | 537 | extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); |
538 | 538 | ||
539 | return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + | 539 | return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + |
540 | ocfs2_quota_trans_credits(sb) + bits_wanted; | 540 | ocfs2_quota_trans_credits(sb); |
541 | } | 541 | } |
542 | 542 | ||
543 | static inline int ocfs2_calc_symlink_credits(struct super_block *sb) | 543 | static inline int ocfs2_calc_symlink_credits(struct super_block *sb) |
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index f1fc172175b6..452068b45749 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c | |||
@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle, | |||
69 | u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); | 69 | u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); |
70 | u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); | 70 | u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); |
71 | 71 | ||
72 | ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos, | 72 | ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos, |
73 | p_cpos, new_p_cpos, len); | 73 | p_cpos, new_p_cpos, len); |
74 | if (ret) { | 74 | if (ret) { |
75 | mlog_errno(ret); | 75 | mlog_errno(ret); |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 9f6b96a09615..a70d604593b6 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -49,7 +49,6 @@ | |||
49 | 49 | ||
50 | struct ocfs2_cow_context { | 50 | struct ocfs2_cow_context { |
51 | struct inode *inode; | 51 | struct inode *inode; |
52 | struct file *file; | ||
53 | u32 cow_start; | 52 | u32 cow_start; |
54 | u32 cow_len; | 53 | u32 cow_len; |
55 | struct ocfs2_extent_tree data_et; | 54 | struct ocfs2_extent_tree data_et; |
@@ -66,7 +65,7 @@ struct ocfs2_cow_context { | |||
66 | u32 *num_clusters, | 65 | u32 *num_clusters, |
67 | unsigned int *extent_flags); | 66 | unsigned int *extent_flags); |
68 | int (*cow_duplicate_clusters)(handle_t *handle, | 67 | int (*cow_duplicate_clusters)(handle_t *handle, |
69 | struct file *file, | 68 | struct inode *inode, |
70 | u32 cpos, u32 old_cluster, | 69 | u32 cpos, u32 old_cluster, |
71 | u32 new_cluster, u32 new_len); | 70 | u32 new_cluster, u32 new_len); |
72 | }; | 71 | }; |
@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh) | |||
2922 | } | 2921 | } |
2923 | 2922 | ||
2924 | int ocfs2_duplicate_clusters_by_page(handle_t *handle, | 2923 | int ocfs2_duplicate_clusters_by_page(handle_t *handle, |
2925 | struct file *file, | 2924 | struct inode *inode, |
2926 | u32 cpos, u32 old_cluster, | 2925 | u32 cpos, u32 old_cluster, |
2927 | u32 new_cluster, u32 new_len) | 2926 | u32 new_cluster, u32 new_len) |
2928 | { | 2927 | { |
2929 | int ret = 0, partial; | 2928 | int ret = 0, partial; |
2930 | struct inode *inode = file_inode(file); | 2929 | struct super_block *sb = inode->i_sb; |
2931 | struct ocfs2_caching_info *ci = INODE_CACHE(inode); | ||
2932 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); | ||
2933 | u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); | 2930 | u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); |
2934 | struct page *page; | 2931 | struct page *page; |
2935 | pgoff_t page_index; | 2932 | pgoff_t page_index; |
@@ -2978,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
2978 | if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) | 2975 | if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) |
2979 | BUG_ON(PageDirty(page)); | 2976 | BUG_ON(PageDirty(page)); |
2980 | 2977 | ||
2981 | if (PageReadahead(page)) { | ||
2982 | page_cache_async_readahead(mapping, | ||
2983 | &file->f_ra, file, | ||
2984 | page, page_index, | ||
2985 | readahead_pages); | ||
2986 | } | ||
2987 | |||
2988 | if (!PageUptodate(page)) { | 2978 | if (!PageUptodate(page)) { |
2989 | ret = block_read_full_page(page, ocfs2_get_block); | 2979 | ret = block_read_full_page(page, ocfs2_get_block); |
2990 | if (ret) { | 2980 | if (ret) { |
@@ -3004,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
3004 | } | 2994 | } |
3005 | } | 2995 | } |
3006 | 2996 | ||
3007 | ocfs2_map_and_dirty_page(inode, handle, from, to, | 2997 | ocfs2_map_and_dirty_page(inode, |
2998 | handle, from, to, | ||
3008 | page, 0, &new_block); | 2999 | page, 0, &new_block); |
3009 | mark_page_accessed(page); | 3000 | mark_page_accessed(page); |
3010 | unlock: | 3001 | unlock: |
@@ -3020,12 +3011,11 @@ unlock: | |||
3020 | } | 3011 | } |
3021 | 3012 | ||
3022 | int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, | 3013 | int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, |
3023 | struct file *file, | 3014 | struct inode *inode, |
3024 | u32 cpos, u32 old_cluster, | 3015 | u32 cpos, u32 old_cluster, |
3025 | u32 new_cluster, u32 new_len) | 3016 | u32 new_cluster, u32 new_len) |
3026 | { | 3017 | { |
3027 | int ret = 0; | 3018 | int ret = 0; |
3028 | struct inode *inode = file_inode(file); | ||
3029 | struct super_block *sb = inode->i_sb; | 3019 | struct super_block *sb = inode->i_sb; |
3030 | struct ocfs2_caching_info *ci = INODE_CACHE(inode); | 3020 | struct ocfs2_caching_info *ci = INODE_CACHE(inode); |
3031 | int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); | 3021 | int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); |
@@ -3150,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle, | |||
3150 | 3140 | ||
3151 | /*If the old clusters is unwritten, no need to duplicate. */ | 3141 | /*If the old clusters is unwritten, no need to duplicate. */ |
3152 | if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { | 3142 | if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { |
3153 | ret = context->cow_duplicate_clusters(handle, context->file, | 3143 | ret = context->cow_duplicate_clusters(handle, context->inode, |
3154 | cpos, old, new, len); | 3144 | cpos, old, new, len); |
3155 | if (ret) { | 3145 | if (ret) { |
3156 | mlog_errno(ret); | 3146 | mlog_errno(ret); |
@@ -3428,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context) | |||
3428 | return ret; | 3418 | return ret; |
3429 | } | 3419 | } |
3430 | 3420 | ||
3431 | static void ocfs2_readahead_for_cow(struct inode *inode, | ||
3432 | struct file *file, | ||
3433 | u32 start, u32 len) | ||
3434 | { | ||
3435 | struct address_space *mapping; | ||
3436 | pgoff_t index; | ||
3437 | unsigned long num_pages; | ||
3438 | int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits; | ||
3439 | |||
3440 | if (!file) | ||
3441 | return; | ||
3442 | |||
3443 | mapping = file->f_mapping; | ||
3444 | num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT; | ||
3445 | if (!num_pages) | ||
3446 | num_pages = 1; | ||
3447 | |||
3448 | index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT; | ||
3449 | page_cache_sync_readahead(mapping, &file->f_ra, file, | ||
3450 | index, num_pages); | ||
3451 | } | ||
3452 | |||
3453 | /* | 3421 | /* |
3454 | * Starting at cpos, try to CoW write_len clusters. Don't CoW | 3422 | * Starting at cpos, try to CoW write_len clusters. Don't CoW |
3455 | * past max_cpos. This will stop when it runs into a hole or an | 3423 | * past max_cpos. This will stop when it runs into a hole or an |
3456 | * unrefcounted extent. | 3424 | * unrefcounted extent. |
3457 | */ | 3425 | */ |
3458 | static int ocfs2_refcount_cow_hunk(struct inode *inode, | 3426 | static int ocfs2_refcount_cow_hunk(struct inode *inode, |
3459 | struct file *file, | ||
3460 | struct buffer_head *di_bh, | 3427 | struct buffer_head *di_bh, |
3461 | u32 cpos, u32 write_len, u32 max_cpos) | 3428 | u32 cpos, u32 write_len, u32 max_cpos) |
3462 | { | 3429 | { |
@@ -3485,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode, | |||
3485 | 3452 | ||
3486 | BUG_ON(cow_len == 0); | 3453 | BUG_ON(cow_len == 0); |
3487 | 3454 | ||
3488 | ocfs2_readahead_for_cow(inode, file, cow_start, cow_len); | ||
3489 | |||
3490 | context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); | 3455 | context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); |
3491 | if (!context) { | 3456 | if (!context) { |
3492 | ret = -ENOMEM; | 3457 | ret = -ENOMEM; |
@@ -3508,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode, | |||
3508 | context->ref_root_bh = ref_root_bh; | 3473 | context->ref_root_bh = ref_root_bh; |
3509 | context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; | 3474 | context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; |
3510 | context->get_clusters = ocfs2_di_get_clusters; | 3475 | context->get_clusters = ocfs2_di_get_clusters; |
3511 | context->file = file; | ||
3512 | 3476 | ||
3513 | ocfs2_init_dinode_extent_tree(&context->data_et, | 3477 | ocfs2_init_dinode_extent_tree(&context->data_et, |
3514 | INODE_CACHE(inode), di_bh); | 3478 | INODE_CACHE(inode), di_bh); |
@@ -3537,7 +3501,6 @@ out: | |||
3537 | * clusters between cpos and cpos+write_len are safe to modify. | 3501 | * clusters between cpos and cpos+write_len are safe to modify. |
3538 | */ | 3502 | */ |
3539 | int ocfs2_refcount_cow(struct inode *inode, | 3503 | int ocfs2_refcount_cow(struct inode *inode, |
3540 | struct file *file, | ||
3541 | struct buffer_head *di_bh, | 3504 | struct buffer_head *di_bh, |
3542 | u32 cpos, u32 write_len, u32 max_cpos) | 3505 | u32 cpos, u32 write_len, u32 max_cpos) |
3543 | { | 3506 | { |
@@ -3557,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode, | |||
3557 | num_clusters = write_len; | 3520 | num_clusters = write_len; |
3558 | 3521 | ||
3559 | if (ext_flags & OCFS2_EXT_REFCOUNTED) { | 3522 | if (ext_flags & OCFS2_EXT_REFCOUNTED) { |
3560 | ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos, | 3523 | ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos, |
3561 | num_clusters, max_cpos); | 3524 | num_clusters, max_cpos); |
3562 | if (ret) { | 3525 | if (ret) { |
3563 | mlog_errno(ret); | 3526 | mlog_errno(ret); |
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h index 7754608c83a4..6422bbcdb525 100644 --- a/fs/ocfs2/refcounttree.h +++ b/fs/ocfs2/refcounttree.h | |||
@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode, | |||
53 | int *credits, | 53 | int *credits, |
54 | int *ref_blocks); | 54 | int *ref_blocks); |
55 | int ocfs2_refcount_cow(struct inode *inode, | 55 | int ocfs2_refcount_cow(struct inode *inode, |
56 | struct file *filep, struct buffer_head *di_bh, | 56 | struct buffer_head *di_bh, |
57 | u32 cpos, u32 write_len, u32 max_cpos); | 57 | u32 cpos, u32 write_len, u32 max_cpos); |
58 | 58 | ||
59 | typedef int (ocfs2_post_refcount_func)(struct inode *inode, | 59 | typedef int (ocfs2_post_refcount_func)(struct inode *inode, |
@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode, | |||
85 | u32 cpos, u32 write_len, | 85 | u32 cpos, u32 write_len, |
86 | struct ocfs2_post_refcount *post); | 86 | struct ocfs2_post_refcount *post); |
87 | int ocfs2_duplicate_clusters_by_page(handle_t *handle, | 87 | int ocfs2_duplicate_clusters_by_page(handle_t *handle, |
88 | struct file *file, | 88 | struct inode *inode, |
89 | u32 cpos, u32 old_cluster, | 89 | u32 cpos, u32 old_cluster, |
90 | u32 new_cluster, u32 new_len); | 90 | u32 new_cluster, u32 new_len); |
91 | int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, | 91 | int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, |
92 | struct file *file, | 92 | struct inode *inode, |
93 | u32 cpos, u32 old_cluster, | 93 | u32 cpos, u32 old_cluster, |
94 | u32 new_cluster, u32 new_len); | 94 | u32 new_cluster, u32 new_len); |
95 | int ocfs2_cow_sync_writeback(struct super_block *sb, | 95 | int ocfs2_cow_sync_writeback(struct super_block *sb, |
diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 75f2890abbd8..0ff80f9b930f 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c | |||
@@ -230,8 +230,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx, | |||
230 | 230 | ||
231 | if (!dir_emit_dots(file, ctx)) | 231 | if (!dir_emit_dots(file, ctx)) |
232 | goto out; | 232 | goto out; |
233 | if (!dir_emit_dots(file, ctx)) | ||
234 | goto out; | ||
235 | files = get_files_struct(p); | 233 | files = get_files_struct(p); |
236 | if (!files) | 234 | if (!files) |
237 | goto out; | 235 | goto out; |
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 94441a407337..737e15615b04 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
@@ -271,7 +271,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file, | |||
271 | de = next; | 271 | de = next; |
272 | } while (de); | 272 | } while (de); |
273 | spin_unlock(&proc_subdir_lock); | 273 | spin_unlock(&proc_subdir_lock); |
274 | return 0; | 274 | return 1; |
275 | } | 275 | } |
276 | 276 | ||
277 | int proc_readdir(struct file *file, struct dir_context *ctx) | 277 | int proc_readdir(struct file *file, struct dir_context *ctx) |
diff --git a/fs/proc/root.c b/fs/proc/root.c index 229e366598da..e0a790da726d 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c | |||
@@ -205,7 +205,9 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr | |||
205 | static int proc_root_readdir(struct file *file, struct dir_context *ctx) | 205 | static int proc_root_readdir(struct file *file, struct dir_context *ctx) |
206 | { | 206 | { |
207 | if (ctx->pos < FIRST_PROCESS_ENTRY) { | 207 | if (ctx->pos < FIRST_PROCESS_ENTRY) { |
208 | proc_readdir(file, ctx); | 208 | int error = proc_readdir(file, ctx); |
209 | if (unlikely(error <= 0)) | ||
210 | return error; | ||
209 | ctx->pos = FIRST_PROCESS_ENTRY; | 211 | ctx->pos = FIRST_PROCESS_ENTRY; |
210 | } | 212 | } |
211 | 213 | ||
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index dbf61f6174f0..107d026f5d6e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, | |||
730 | * of how soft-dirty works. | 730 | * of how soft-dirty works. |
731 | */ | 731 | */ |
732 | pte_t ptent = *pte; | 732 | pte_t ptent = *pte; |
733 | ptent = pte_wrprotect(ptent); | 733 | |
734 | ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); | 734 | if (pte_present(ptent)) { |
735 | ptent = pte_wrprotect(ptent); | ||
736 | ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); | ||
737 | } else if (is_swap_pte(ptent)) { | ||
738 | ptent = pte_swp_clear_soft_dirty(ptent); | ||
739 | } else if (pte_file(ptent)) { | ||
740 | ptent = pte_file_clear_soft_dirty(ptent); | ||
741 | } | ||
742 | |||
735 | set_pte_at(vma->vm_mm, addr, pte, ptent); | 743 | set_pte_at(vma->vm_mm, addr, pte, ptent); |
736 | #endif | 744 | #endif |
737 | } | 745 | } |
@@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, | |||
752 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 760 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
753 | for (; addr != end; pte++, addr += PAGE_SIZE) { | 761 | for (; addr != end; pte++, addr += PAGE_SIZE) { |
754 | ptent = *pte; | 762 | ptent = *pte; |
755 | if (!pte_present(ptent)) | ||
756 | continue; | ||
757 | 763 | ||
758 | if (cp->type == CLEAR_REFS_SOFT_DIRTY) { | 764 | if (cp->type == CLEAR_REFS_SOFT_DIRTY) { |
759 | clear_soft_dirty(vma, addr, pte); | 765 | clear_soft_dirty(vma, addr, pte); |
760 | continue; | 766 | continue; |
761 | } | 767 | } |
762 | 768 | ||
769 | if (!pte_present(ptent)) | ||
770 | continue; | ||
771 | |||
763 | page = vm_normal_page(vma, addr, ptent); | 772 | page = vm_normal_page(vma, addr, ptent); |
764 | if (!page) | 773 | if (!page) |
765 | continue; | 774 | continue; |
@@ -859,7 +868,7 @@ typedef struct { | |||
859 | } pagemap_entry_t; | 868 | } pagemap_entry_t; |
860 | 869 | ||
861 | struct pagemapread { | 870 | struct pagemapread { |
862 | int pos, len; | 871 | int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ |
863 | pagemap_entry_t *buffer; | 872 | pagemap_entry_t *buffer; |
864 | bool v2; | 873 | bool v2; |
865 | }; | 874 | }; |
@@ -867,7 +876,7 @@ struct pagemapread { | |||
867 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) | 876 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) |
868 | #define PAGEMAP_WALK_MASK (PMD_MASK) | 877 | #define PAGEMAP_WALK_MASK (PMD_MASK) |
869 | 878 | ||
870 | #define PM_ENTRY_BYTES sizeof(u64) | 879 | #define PM_ENTRY_BYTES sizeof(pagemap_entry_t) |
871 | #define PM_STATUS_BITS 3 | 880 | #define PM_STATUS_BITS 3 |
872 | #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) | 881 | #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) |
873 | #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) | 882 | #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) |
@@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, | |||
930 | flags = PM_PRESENT; | 939 | flags = PM_PRESENT; |
931 | page = vm_normal_page(vma, addr, pte); | 940 | page = vm_normal_page(vma, addr, pte); |
932 | } else if (is_swap_pte(pte)) { | 941 | } else if (is_swap_pte(pte)) { |
933 | swp_entry_t entry = pte_to_swp_entry(pte); | 942 | swp_entry_t entry; |
934 | 943 | if (pte_swp_soft_dirty(pte)) | |
944 | flags2 |= __PM_SOFT_DIRTY; | ||
945 | entry = pte_to_swp_entry(pte); | ||
935 | frame = swp_type(entry) | | 946 | frame = swp_type(entry) | |
936 | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); | 947 | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); |
937 | flags = PM_SWAP; | 948 | flags = PM_SWAP; |
@@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
1116 | goto out_task; | 1127 | goto out_task; |
1117 | 1128 | ||
1118 | pm.v2 = soft_dirty_cleared; | 1129 | pm.v2 = soft_dirty_cleared; |
1119 | pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); | 1130 | pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); |
1120 | pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); | 1131 | pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY); |
1121 | ret = -ENOMEM; | 1132 | ret = -ENOMEM; |
1122 | if (!pm.buffer) | 1133 | if (!pm.buffer) |
1123 | goto out_task; | 1134 | goto out_task; |
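Two independent fixes sit in fs/proc/task_mmu.c above: soft-dirty clearing now also handles swap and file ptes (using the pte_swp_*/pte_file_* helpers stubbed out in asm-generic/pgtable.h below), and pagemap's pm.len bookkeeping switches from bytes to entries, with PM_ENTRY_BYTES applied only where byte counts are actually needed. A minimal userspace model of the corrected units (the 512-entry walk size is what a 2 MiB PMD with 4 KiB pages would give; values are illustrative):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	typedef struct { uint64_t pme; } pagemap_entry_t;
	#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)

	int main(void)
	{
		size_t walk_pages = 512;		/* PAGEMAP_WALK_SIZE >> PAGE_SHIFT */
		size_t len = walk_pages;		/* units: entries, not bytes */
		pagemap_entry_t *buffer = malloc(len * PM_ENTRY_BYTES);

		if (!buffer)
			return 1;
		printf("%zu entries, %zu bytes\n", len, len * PM_ENTRY_BYTES);
		free(buffer);
		return 0;
	}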
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 2f47ade1b567..0807ddf97b05 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) | |||
417 | { | 417 | { |
418 | return pmd; | 418 | return pmd; |
419 | } | 419 | } |
420 | |||
421 | static inline pte_t pte_swp_mksoft_dirty(pte_t pte) | ||
422 | { | ||
423 | return pte; | ||
424 | } | ||
425 | |||
426 | static inline int pte_swp_soft_dirty(pte_t pte) | ||
427 | { | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) | ||
432 | { | ||
433 | return pte; | ||
434 | } | ||
435 | |||
436 | static inline pte_t pte_file_clear_soft_dirty(pte_t pte) | ||
437 | { | ||
438 | return pte; | ||
439 | } | ||
440 | |||
441 | static inline pte_t pte_file_mksoft_dirty(pte_t pte) | ||
442 | { | ||
443 | return pte; | ||
444 | } | ||
445 | |||
446 | static inline int pte_file_soft_dirty(pte_t pte) | ||
447 | { | ||
448 | return 0; | ||
449 | } | ||
420 | #endif | 450 | #endif |
421 | 451 | ||
422 | #ifndef __HAVE_PFNMAP_TRACKING | 452 | #ifndef __HAVE_PFNMAP_TRACKING |
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 13821c339a41..5672d7ea1fa0 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
@@ -112,7 +112,7 @@ struct mmu_gather { | |||
112 | 112 | ||
113 | #define HAVE_GENERIC_MMU_GATHER | 113 | #define HAVE_GENERIC_MMU_GATHER |
114 | 114 | ||
115 | void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm); | 115 | void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); |
116 | void tlb_flush_mmu(struct mmu_gather *tlb); | 116 | void tlb_flush_mmu(struct mmu_gather *tlb); |
117 | void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, | 117 | void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, |
118 | unsigned long end); | 118 | unsigned long end); |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index b90337c9d468..4a12532da8c4 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -336,6 +336,7 @@ extern int d_validate(struct dentry *, struct dentry *); | |||
336 | * helper function for dentry_operations.d_dname() members | 336 | * helper function for dentry_operations.d_dname() members |
337 | */ | 337 | */ |
338 | extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); | 338 | extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); |
339 | extern char *simple_dname(struct dentry *, char *, int); | ||
339 | 340 | ||
340 | extern char *__d_path(const struct path *, const struct path *, char *, int); | 341 | extern char *__d_path(const struct path *, const struct path *, char *, int); |
341 | extern char *d_absolute_path(const struct path *, char *, int); | 342 | extern char *d_absolute_path(const struct path *, char *, int); |
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index b99cd23f3474..79640e015a86 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
@@ -5,45 +5,13 @@ | |||
5 | 5 | ||
6 | #include <linux/bitmap.h> | 6 | #include <linux/bitmap.h> |
7 | #include <linux/if.h> | 7 | #include <linux/if.h> |
8 | #include <linux/ip.h> | ||
8 | #include <linux/netdevice.h> | 9 | #include <linux/netdevice.h> |
9 | #include <linux/rcupdate.h> | 10 | #include <linux/rcupdate.h> |
10 | #include <linux/timer.h> | 11 | #include <linux/timer.h> |
11 | #include <linux/sysctl.h> | 12 | #include <linux/sysctl.h> |
12 | #include <linux/rtnetlink.h> | 13 | #include <linux/rtnetlink.h> |
13 | 14 | ||
14 | enum | ||
15 | { | ||
16 | IPV4_DEVCONF_FORWARDING=1, | ||
17 | IPV4_DEVCONF_MC_FORWARDING, | ||
18 | IPV4_DEVCONF_PROXY_ARP, | ||
19 | IPV4_DEVCONF_ACCEPT_REDIRECTS, | ||
20 | IPV4_DEVCONF_SECURE_REDIRECTS, | ||
21 | IPV4_DEVCONF_SEND_REDIRECTS, | ||
22 | IPV4_DEVCONF_SHARED_MEDIA, | ||
23 | IPV4_DEVCONF_RP_FILTER, | ||
24 | IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE, | ||
25 | IPV4_DEVCONF_BOOTP_RELAY, | ||
26 | IPV4_DEVCONF_LOG_MARTIANS, | ||
27 | IPV4_DEVCONF_TAG, | ||
28 | IPV4_DEVCONF_ARPFILTER, | ||
29 | IPV4_DEVCONF_MEDIUM_ID, | ||
30 | IPV4_DEVCONF_NOXFRM, | ||
31 | IPV4_DEVCONF_NOPOLICY, | ||
32 | IPV4_DEVCONF_FORCE_IGMP_VERSION, | ||
33 | IPV4_DEVCONF_ARP_ANNOUNCE, | ||
34 | IPV4_DEVCONF_ARP_IGNORE, | ||
35 | IPV4_DEVCONF_PROMOTE_SECONDARIES, | ||
36 | IPV4_DEVCONF_ARP_ACCEPT, | ||
37 | IPV4_DEVCONF_ARP_NOTIFY, | ||
38 | IPV4_DEVCONF_ACCEPT_LOCAL, | ||
39 | IPV4_DEVCONF_SRC_VMARK, | ||
40 | IPV4_DEVCONF_PROXY_ARP_PVLAN, | ||
41 | IPV4_DEVCONF_ROUTE_LOCALNET, | ||
42 | __IPV4_DEVCONF_MAX | ||
43 | }; | ||
44 | |||
45 | #define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1) | ||
46 | |||
47 | struct ipv4_devconf { | 15 | struct ipv4_devconf { |
48 | void *sysctl; | 16 | void *sysctl; |
49 | int data[IPV4_DEVCONF_MAX]; | 17 | int data[IPV4_DEVCONF_MAX]; |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 850e95bc766c..b8b7dc755752 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -101,6 +101,7 @@ struct inet6_skb_parm { | |||
101 | #define IP6SKB_FORWARDED 2 | 101 | #define IP6SKB_FORWARDED 2 |
102 | #define IP6SKB_REROUTED 4 | 102 | #define IP6SKB_REROUTED 4 |
103 | #define IP6SKB_ROUTERALERT 8 | 103 | #define IP6SKB_ROUTERALERT 8 |
104 | #define IP6SKB_FRAGMENTED 16 | ||
104 | }; | 105 | }; |
105 | 106 | ||
106 | #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) | 107 | #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 737685e9e852..68029b30c3dc 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -309,21 +309,20 @@ struct mlx5_hca_cap { | |||
309 | __be16 max_desc_sz_rq; | 309 | __be16 max_desc_sz_rq; |
310 | u8 rsvd21[2]; | 310 | u8 rsvd21[2]; |
311 | __be16 max_desc_sz_sq_dc; | 311 | __be16 max_desc_sz_sq_dc; |
312 | u8 rsvd22[4]; | 312 | __be32 max_qp_mcg; |
313 | __be16 max_qp_mcg; | 313 | u8 rsvd22[3]; |
314 | u8 rsvd23; | ||
315 | u8 log_max_mcg; | 314 | u8 log_max_mcg; |
316 | u8 rsvd24; | 315 | u8 rsvd23; |
317 | u8 log_max_pd; | 316 | u8 log_max_pd; |
318 | u8 rsvd25; | 317 | u8 rsvd24; |
319 | u8 log_max_xrcd; | 318 | u8 log_max_xrcd; |
320 | u8 rsvd26[42]; | 319 | u8 rsvd25[42]; |
321 | __be16 log_uar_page_sz; | 320 | __be16 log_uar_page_sz; |
322 | u8 rsvd27[28]; | 321 | u8 rsvd26[28]; |
323 | u8 log_msx_atomic_size_qp; | 322 | u8 log_msx_atomic_size_qp; |
324 | u8 rsvd28[2]; | 323 | u8 rsvd27[2]; |
325 | u8 log_msx_atomic_size_dc; | 324 | u8 log_msx_atomic_size_dc; |
326 | u8 rsvd29[76]; | 325 | u8 rsvd28[76]; |
327 | }; | 326 | }; |
328 | 327 | ||
329 | 328 | ||
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd { | |||
472 | struct mlx5_eqe_page_req { | 471 | struct mlx5_eqe_page_req { |
473 | u8 rsvd0[2]; | 472 | u8 rsvd0[2]; |
474 | __be16 func_id; | 473 | __be16 func_id; |
475 | u8 rsvd1[2]; | 474 | __be32 num_pages; |
476 | __be16 num_pages; | 475 | __be32 rsvd1[5]; |
477 | __be32 rsvd2[5]; | ||
478 | }; | 476 | }; |
479 | 477 | ||
480 | union ev_data { | 478 | union ev_data { |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 2aa258b0ced1..8888381fc150 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -358,7 +358,7 @@ struct mlx5_caps { | |||
358 | u32 reserved_lkey; | 358 | u32 reserved_lkey; |
359 | u8 local_ca_ack_delay; | 359 | u8 local_ca_ack_delay; |
360 | u8 log_max_mcg; | 360 | u8 log_max_mcg; |
361 | u16 max_qp_mcg; | 361 | u32 max_qp_mcg; |
362 | int min_page_sz; | 362 | int min_page_sz; |
363 | }; | 363 | }; |
364 | 364 | ||
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); | |||
691 | int mlx5_pagealloc_start(struct mlx5_core_dev *dev); | 691 | int mlx5_pagealloc_start(struct mlx5_core_dev *dev); |
692 | void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); | 692 | void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); |
693 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | 693 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, |
694 | s16 npages); | 694 | s32 npages); |
695 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); | 695 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); |
696 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); | 696 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); |
697 | void mlx5_register_debugfs(void); | 697 | void mlx5_register_debugfs(void); |
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); | |||
731 | int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); | 731 | int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); |
732 | void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); | 732 | void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); |
733 | 733 | ||
734 | typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size); | ||
735 | int mlx5_register_health_report_handler(health_handler_t handler); | ||
736 | void mlx5_unregister_health_report_handler(void); | ||
737 | const char *mlx5_command_str(int command); | 734 | const char *mlx5_command_str(int command); |
738 | int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); | 735 | int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); |
739 | void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); | 736 | void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fb425aa16c01..faf4b7c1ad12 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -332,6 +332,7 @@ struct mm_struct { | |||
332 | unsigned long pgoff, unsigned long flags); | 332 | unsigned long pgoff, unsigned long flags); |
333 | #endif | 333 | #endif |
334 | unsigned long mmap_base; /* base of mmap area */ | 334 | unsigned long mmap_base; /* base of mmap area */ |
335 | unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ | ||
335 | unsigned long task_size; /* size of task vm space */ | 336 | unsigned long task_size; /* size of task vm space */ |
336 | unsigned long highest_vm_end; /* highest vma end address */ | 337 | unsigned long highest_vm_end; /* highest vma end address */ |
337 | pgd_t * pgd; | 338 | pgd_t * pgd; |
diff --git a/include/linux/platform_data/efm32-spi.h b/include/linux/platform_data/efm32-spi.h new file mode 100644 index 000000000000..31b19ca1d73a --- /dev/null +++ b/include/linux/platform_data/efm32-spi.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ | ||
2 | #define __LINUX_PLATFORM_DATA_EFM32_SPI_H__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /** | ||
7 | * struct efm32_spi_pdata | ||
8 | * @location: pinmux location for the I/O pins (to be written to the ROUTE | ||
9 | * register) | ||
10 | */ | ||
11 | struct efm32_spi_pdata { | ||
12 | u8 location; | ||
13 | }; | ||
14 | #endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */ | ||
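For boards that do not use device tree, the header above is the glue that carries the pinmux choice: a board file would hand the ROUTE location to the driver through platform data. A minimal sketch, assuming a platform device and the driver name "efm32-spi" (the name, device id and location value here are illustrative, not taken from the patch):

#include <linux/platform_device.h>
#include <linux/platform_data/efm32-spi.h>

/* Value written into the ROUTE register's LOCATION bitfield, per datasheet. */
static struct efm32_spi_pdata efm32_spi1_pdata = {
	.location = 1,
};

static struct platform_device efm32_spi1_device = {
	.name	= "efm32-spi",			/* assumed driver name */
	.id	= 1,
	.dev	= {
		.platform_data = &efm32_spi1_pdata,
	},
};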
diff --git a/include/linux/sched.h b/include/linux/sched.h index d722490da030..078066daffd4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1532,6 +1532,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) | |||
1532 | * Test if a process is not yet dead (at most zombie state) | 1532 | * Test if a process is not yet dead (at most zombie state) |
1533 | * If pid_alive fails, then pointers within the task structure | 1533 | * If pid_alive fails, then pointers within the task structure |
1534 | * can be stale and must not be dereferenced. | 1534 | * can be stale and must not be dereferenced. |
1535 | * | ||
1536 | * Return: 1 if the process is alive. 0 otherwise. | ||
1535 | */ | 1537 | */ |
1536 | static inline int pid_alive(struct task_struct *p) | 1538 | static inline int pid_alive(struct task_struct *p) |
1537 | { | 1539 | { |
@@ -1543,6 +1545,8 @@ static inline int pid_alive(struct task_struct *p) | |||
1543 | * @tsk: Task structure to be checked. | 1545 | * @tsk: Task structure to be checked. |
1544 | * | 1546 | * |
1545 | * Check if a task structure is the first user space task the kernel created. | 1547 | * Check if a task structure is the first user space task the kernel created. |
1548 | * | ||
1549 | * Return: 1 if the task structure is init. 0 otherwise. | ||
1546 | */ | 1550 | */ |
1547 | static inline int is_global_init(struct task_struct *tsk) | 1551 | static inline int is_global_init(struct task_struct *tsk) |
1548 | { | 1552 | { |
@@ -1894,6 +1898,8 @@ extern struct task_struct *idle_task(int cpu); | |||
1894 | /** | 1898 | /** |
1895 | * is_idle_task - is the specified task an idle task? | 1899 | * is_idle_task - is the specified task an idle task? |
1896 | * @p: the task in question. | 1900 | * @p: the task in question. |
1901 | * | ||
1902 | * Return: 1 if @p is an idle task. 0 otherwise. | ||
1897 | */ | 1903 | */ |
1898 | static inline bool is_idle_task(const struct task_struct *p) | 1904 | static inline bool is_idle_task(const struct task_struct *p) |
1899 | { | 1905 | { |
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 28e440be1c07..887116dbce2c 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -74,7 +74,7 @@ struct spi_device { | |||
74 | struct spi_master *master; | 74 | struct spi_master *master; |
75 | u32 max_speed_hz; | 75 | u32 max_speed_hz; |
76 | u8 chip_select; | 76 | u8 chip_select; |
77 | u8 mode; | 77 | u16 mode; |
78 | #define SPI_CPHA 0x01 /* clock phase */ | 78 | #define SPI_CPHA 0x01 /* clock phase */ |
79 | #define SPI_CPOL 0x02 /* clock polarity */ | 79 | #define SPI_CPOL 0x02 /* clock polarity */ |
80 | #define SPI_MODE_0 (0|0) /* (original MicroWire) */ | 80 | #define SPI_MODE_0 (0|0) /* (original MicroWire) */ |
@@ -87,6 +87,10 @@ struct spi_device { | |||
87 | #define SPI_LOOP 0x20 /* loopback mode */ | 87 | #define SPI_LOOP 0x20 /* loopback mode */ |
88 | #define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */ | 88 | #define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */ |
89 | #define SPI_READY 0x80 /* slave pulls low to pause */ | 89 | #define SPI_READY 0x80 /* slave pulls low to pause */ |
90 | #define SPI_TX_DUAL 0x100 /* transmit with 2 wires */ | ||
91 | #define SPI_TX_QUAD 0x200 /* transmit with 4 wires */ | ||
92 | #define SPI_RX_DUAL 0x400 /* receive with 2 wires */ | ||
93 | #define SPI_RX_QUAD 0x800 /* receive with 4 wires */ | ||
90 | u8 bits_per_word; | 94 | u8 bits_per_word; |
91 | int irq; | 95 | int irq; |
92 | void *controller_state; | 96 | void *controller_state; |
@@ -233,6 +237,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
233 | * supported. If set, the SPI core will reject any transfer with an | 237 | * supported. If set, the SPI core will reject any transfer with an |
234 | * unsupported bits_per_word. If not set, this value is simply ignored, | 238 | * unsupported bits_per_word. If not set, this value is simply ignored, |
235 | * and it's up to the individual driver to perform any validation. | 239 | * and it's up to the individual driver to perform any validation. |
240 | * @min_speed_hz: Lowest supported transfer speed | ||
241 | * @max_speed_hz: Highest supported transfer speed | ||
236 | * @flags: other constraints relevant to this driver | 242 | * @flags: other constraints relevant to this driver |
237 | * @bus_lock_spinlock: spinlock for SPI bus locking | 243 | * @bus_lock_spinlock: spinlock for SPI bus locking |
238 | * @bus_lock_mutex: mutex for SPI bus locking | 244 | * @bus_lock_mutex: mutex for SPI bus locking |
@@ -254,6 +260,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
254 | * @busy: message pump is busy | 260 | * @busy: message pump is busy |
255 | * @running: message pump is running | 261 | * @running: message pump is running |
256 | * @rt: whether this queue is set to run as a realtime task | 262 | * @rt: whether this queue is set to run as a realtime task |
263 | * @auto_runtime_pm: the core should ensure a runtime PM reference is held | ||
264 | * while the hardware is prepared, using the parent | ||
265 | * device for the spidev | ||
257 | * @prepare_transfer_hardware: a message will soon arrive from the queue | 266 | * @prepare_transfer_hardware: a message will soon arrive from the queue |
258 | * so the subsystem requests the driver to prepare the transfer hardware | 267 | * so the subsystem requests the driver to prepare the transfer hardware |
259 | * by issuing this call | 268 | * by issuing this call |
@@ -309,9 +318,13 @@ struct spi_master { | |||
309 | /* bitmask of supported bits_per_word for transfers */ | 318 | /* bitmask of supported bits_per_word for transfers */ |
310 | u32 bits_per_word_mask; | 319 | u32 bits_per_word_mask; |
311 | #define SPI_BPW_MASK(bits) BIT((bits) - 1) | 320 | #define SPI_BPW_MASK(bits) BIT((bits) - 1) |
312 | #define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0UL : (BIT(bits) - 1)) | 321 | #define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1)) |
313 | #define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1)) | 322 | #define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1)) |
314 | 323 | ||
324 | /* limits on transfer speed */ | ||
325 | u32 min_speed_hz; | ||
326 | u32 max_speed_hz; | ||
327 | |||
315 | /* other constraints relevant to this driver */ | 328 | /* other constraints relevant to this driver */ |
316 | u16 flags; | 329 | u16 flags; |
317 | #define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ | 330 | #define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ |
@@ -374,11 +387,13 @@ struct spi_master { | |||
374 | bool busy; | 387 | bool busy; |
375 | bool running; | 388 | bool running; |
376 | bool rt; | 389 | bool rt; |
390 | bool auto_runtime_pm; | ||
377 | 391 | ||
378 | int (*prepare_transfer_hardware)(struct spi_master *master); | 392 | int (*prepare_transfer_hardware)(struct spi_master *master); |
379 | int (*transfer_one_message)(struct spi_master *master, | 393 | int (*transfer_one_message)(struct spi_master *master, |
380 | struct spi_message *mesg); | 394 | struct spi_message *mesg); |
381 | int (*unprepare_transfer_hardware)(struct spi_master *master); | 395 | int (*unprepare_transfer_hardware)(struct spi_master *master); |
396 | |||
382 | /* gpio chip select */ | 397 | /* gpio chip select */ |
383 | int *cs_gpios; | 398 | int *cs_gpios; |
384 | }; | 399 | }; |
@@ -448,6 +463,10 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum); | |||
448 | * @rx_buf: data to be read (dma-safe memory), or NULL | 463 | * @rx_buf: data to be read (dma-safe memory), or NULL |
449 | * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped | 464 | * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped |
450 | * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped | 465 | * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped |
466 | * @tx_nbits: number of bits used for writing. If 0 the default | ||
467 | * (SPI_NBITS_SINGLE) is used. | ||
468 | * @rx_nbits: number of bits used for reading. If 0 the default | ||
469 | * (SPI_NBITS_SINGLE) is used. | ||
451 | * @len: size of rx and tx buffers (in bytes) | 470 | * @len: size of rx and tx buffers (in bytes) |
452 | * @speed_hz: Select a speed other than the device default for this | 471 | * @speed_hz: Select a speed other than the device default for this |
453 | * transfer. If 0 the default (from @spi_device) is used. | 472 | * transfer. If 0 the default (from @spi_device) is used. |
@@ -502,6 +521,11 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum); | |||
502 | * by the results of previous messages and where the whole transaction | 521 | * by the results of previous messages and where the whole transaction |
503 | * ends when the chipselect goes inactive. | 522 | * ends when the chipselect goes inactive. |
504 | * | 523 | * |
524 | * When the SPI bus can transfer in 1x, 2x or 4x mode, the width used for a | ||
525 | * transfer is taken from the device through @tx_nbits and @rx_nbits. For | ||
526 | * bidirectional transfers both must be set. The width is selected with | ||
527 | * SPI_NBITS_SINGLE (1x), SPI_NBITS_DUAL (2x) or SPI_NBITS_QUAD (4x). | ||
528 | * | ||
505 | * The code that submits an spi_message (and its spi_transfers) | 529 | * The code that submits an spi_message (and its spi_transfers) |
506 | * to the lower layers is responsible for managing its memory. | 530 | * to the lower layers is responsible for managing its memory. |
507 | * Zero-initialize every field you don't set up explicitly, to | 531 | * Zero-initialize every field you don't set up explicitly, to |
@@ -522,6 +546,11 @@ struct spi_transfer { | |||
522 | dma_addr_t rx_dma; | 546 | dma_addr_t rx_dma; |
523 | 547 | ||
524 | unsigned cs_change:1; | 548 | unsigned cs_change:1; |
549 | u8 tx_nbits; | ||
550 | u8 rx_nbits; | ||
551 | #define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */ | ||
552 | #define SPI_NBITS_DUAL 0x02 /* 2bits transfer */ | ||
553 | #define SPI_NBITS_QUAD 0x04 /* 4bits transfer */ | ||
525 | u8 bits_per_word; | 554 | u8 bits_per_word; |
526 | u16 delay_usecs; | 555 | u16 delay_usecs; |
527 | u32 speed_hz; | 556 | u32 speed_hz; |
@@ -578,6 +607,7 @@ struct spi_message { | |||
578 | /* completion is reported through a callback */ | 607 | /* completion is reported through a callback */ |
579 | void (*complete)(void *context); | 608 | void (*complete)(void *context); |
580 | void *context; | 609 | void *context; |
610 | unsigned frame_length; | ||
581 | unsigned actual_length; | 611 | unsigned actual_length; |
582 | int status; | 612 | int status; |
583 | 613 | ||
@@ -869,7 +899,7 @@ struct spi_board_info { | |||
869 | /* mode becomes spi_device.mode, and is essential for chips | 899 | /* mode becomes spi_device.mode, and is essential for chips |
870 | * where the default of SPI_CS_HIGH = 0 is wrong. | 900 | * where the default of SPI_CS_HIGH = 0 is wrong. |
871 | */ | 901 | */ |
872 | u8 mode; | 902 | u16 mode; |
873 | 903 | ||
874 | /* ... may need additional spi_device chip config data here. | 904 | /* ... may need additional spi_device chip config data here. |
875 | * avoid stuff protocol drivers can set; but include stuff | 905 | * avoid stuff protocol drivers can set; but include stuff |
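A sketch of how a protocol driver is expected to use the new tx_nbits/rx_nbits transfer fields together with the SPI_TX_*/SPI_RX_* mode bits introduced above; the function, opcode handling and buffers are invented for illustration (a real driver must use DMA-safe buffers):

static int example_quad_read(struct spi_device *spi, u8 *cmd,
			     void *buf, size_t len)
{
	struct spi_transfer xfers[2] = {
		{
			.tx_buf   = cmd,		/* one-byte opcode */
			.len      = 1,
			.tx_nbits = SPI_NBITS_SINGLE,	/* command on one wire */
		}, {
			.rx_buf   = buf,
			.len      = len,
			.rx_nbits = SPI_NBITS_QUAD,	/* data on four wires */
		},
	};
	struct spi_message msg;

	if (!(spi->mode & SPI_RX_QUAD))
		return -EINVAL;		/* controller or device can't do 4x */

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	return spi_sync(spi, &msg);
}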
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index f987a2bee16a..daebaba886aa 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h | |||
@@ -4,11 +4,7 @@ | |||
4 | #include <linux/workqueue.h> | 4 | #include <linux/workqueue.h> |
5 | 5 | ||
6 | struct spi_bitbang { | 6 | struct spi_bitbang { |
7 | struct workqueue_struct *workqueue; | ||
8 | struct work_struct work; | ||
9 | |||
10 | spinlock_t lock; | 7 | spinlock_t lock; |
11 | struct list_head queue; | ||
12 | u8 busy; | 8 | u8 busy; |
13 | u8 use_dma; | 9 | u8 use_dma; |
14 | u8 flags; /* extra spi->mode support */ | 10 | u8 flags; /* extra spi->mode support */ |
@@ -41,7 +37,6 @@ struct spi_bitbang { | |||
41 | */ | 37 | */ |
42 | extern int spi_bitbang_setup(struct spi_device *spi); | 38 | extern int spi_bitbang_setup(struct spi_device *spi); |
43 | extern void spi_bitbang_cleanup(struct spi_device *spi); | 39 | extern void spi_bitbang_cleanup(struct spi_device *spi); |
44 | extern int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m); | ||
45 | extern int spi_bitbang_setup_transfer(struct spi_device *spi, | 40 | extern int spi_bitbang_setup_transfer(struct spi_device *spi, |
46 | struct spi_transfer *t); | 41 | struct spi_transfer *t); |
47 | 42 | ||
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 7d537ced949a..75f34949d9ab 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -117,9 +117,17 @@ do { \ | |||
117 | #endif /*arch_spin_is_contended*/ | 117 | #endif /*arch_spin_is_contended*/ |
118 | #endif | 118 | #endif |
119 | 119 | ||
120 | /* The lock does not imply full memory barrier. */ | 120 | /* |
121 | #ifndef ARCH_HAS_SMP_MB_AFTER_LOCK | 121 | * Despite its name it doesn't necessarily have to be a full barrier.
122 | static inline void smp_mb__after_lock(void) { smp_mb(); } | 122 | * It should only guarantee that a STORE before the critical section |
123 | * can not be reordered with a LOAD inside this section. | ||
124 | * spin_lock() is the one-way barrier, this LOAD can not escape out | ||
125 | * of the region. So the default implementation simply ensures that | ||
126 | * a STORE can not move into the critical section, smp_wmb() should | ||
127 | * serialize it with another STORE done by spin_lock(). | ||
128 | */ | ||
129 | #ifndef smp_mb__before_spinlock | ||
130 | #define smp_mb__before_spinlock() smp_wmb() | ||
123 | #endif | 131 | #endif |
124 | 132 | ||
125 | /** | 133 | /** |
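A minimal sketch of the ordering problem the new helper addresses (names and data are invented; the real users in this series are the try_to_wake_up() and schedule() paths further below): the wakeup side publishes a flag before taking the lock, and the barrier keeps that STORE from being reordered with a LOAD done inside the critical section.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int data_ready;
static int waiter_sleeping;

static void example_publish(void)
{
	data_ready = 1;			/* STORE before the critical section */
	smp_mb__before_spinlock();	/* keep it ordered with the LOAD below */
	spin_lock(&example_lock);
	if (waiter_sleeping)		/* LOAD inside the locked region */
		waiter_sleeping = 0;	/* e.g. kick the waiter here */
	spin_unlock(&example_lock);
}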
diff --git a/include/linux/swapops.h b/include/linux/swapops.h index c5fd30d2a415..8d4fa82bfb91 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h | |||
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte) | |||
67 | swp_entry_t arch_entry; | 67 | swp_entry_t arch_entry; |
68 | 68 | ||
69 | BUG_ON(pte_file(pte)); | 69 | BUG_ON(pte_file(pte)); |
70 | if (pte_swp_soft_dirty(pte)) | ||
71 | pte = pte_swp_clear_soft_dirty(pte); | ||
70 | arch_entry = __pte_to_swp_entry(pte); | 72 | arch_entry = __pte_to_swp_entry(pte); |
71 | return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); | 73 | return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); |
72 | } | 74 | } |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 4147d700a293..84662ecc7b51 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void); | |||
802 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, | 802 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, |
803 | int __user *); | 803 | int __user *); |
804 | #else | 804 | #else |
805 | #ifdef CONFIG_CLONE_BACKWARDS3 | ||
806 | asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, | ||
807 | int __user *, int); | ||
808 | #else | ||
805 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, | 809 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, |
806 | int __user *, int); | 810 | int __user *, int); |
807 | #endif | 811 | #endif |
812 | #endif | ||
808 | 813 | ||
809 | asmlinkage long sys_execve(const char __user *filename, | 814 | asmlinkage long sys_execve(const char __user *filename, |
810 | const char __user *const __user *argv, | 815 | const char __user *const __user *argv, |
diff --git a/include/linux/wait.h b/include/linux/wait.h index f487a4750b7f..a67fc1635592 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -811,6 +811,63 @@ do { \ | |||
811 | __ret; \ | 811 | __ret; \ |
812 | }) | 812 | }) |
813 | 813 | ||
814 | #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \ | ||
815 | lock, ret) \ | ||
816 | do { \ | ||
817 | DEFINE_WAIT(__wait); \ | ||
818 | \ | ||
819 | for (;;) { \ | ||
820 | prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ | ||
821 | if (condition) \ | ||
822 | break; \ | ||
823 | if (signal_pending(current)) { \ | ||
824 | ret = -ERESTARTSYS; \ | ||
825 | break; \ | ||
826 | } \ | ||
827 | spin_unlock_irq(&lock); \ | ||
828 | ret = schedule_timeout(ret); \ | ||
829 | spin_lock_irq(&lock); \ | ||
830 | if (!ret) \ | ||
831 | break; \ | ||
832 | } \ | ||
833 | finish_wait(&wq, &__wait); \ | ||
834 | } while (0) | ||
835 | |||
836 | /** | ||
837 | * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes true or a timeout elapses. | ||
838 | * The condition is checked under the lock. This is expected | ||
839 | * to be called with the lock taken. | ||
840 | * @wq: the waitqueue to wait on | ||
841 | * @condition: a C expression for the event to wait for | ||
842 | * @lock: a locked spinlock_t, which will be released before schedule() | ||
843 | * and reacquired afterwards. | ||
844 | * @timeout: timeout, in jiffies | ||
845 | * | ||
846 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | ||
847 | * @condition evaluates to true or a signal is received. The @condition is | ||
848 | * checked each time the waitqueue @wq is woken up. | ||
849 | * | ||
850 | * wake_up() has to be called after changing any variable that could | ||
851 | * change the result of the wait condition. | ||
852 | * | ||
853 | * This is supposed to be called while holding the lock. The lock is | ||
854 | * dropped before going to sleep and is reacquired afterwards. | ||
855 | * | ||
856 | * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it | ||
857 | * was interrupted by a signal, and otherwise the remaining jiffies | ||
858 | * if the condition evaluated to true before the timeout elapsed. | ||
859 | */ | ||
860 | #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \ | ||
861 | timeout) \ | ||
862 | ({ \ | ||
863 | int __ret = timeout; \ | ||
864 | \ | ||
865 | if (!(condition)) \ | ||
866 | __wait_event_interruptible_lock_irq_timeout( \ | ||
867 | wq, condition, lock, __ret); \ | ||
868 | __ret; \ | ||
869 | }) | ||
870 | |||
814 | 871 | ||
815 | /* | 872 | /* |
816 | * These are the old interfaces to sleep waiting for an event. | 873 | * These are the old interfaces to sleep waiting for an event. |
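A caller-side sketch of the new macro (the example_dev structure with its lock, waitqueue and done flag is hypothetical): the lock must already be held, the macro drops it around schedule_timeout() and reacquires it, and the three return cases map directly onto the kernel-doc above.

struct example_dev {
	spinlock_t		lock;
	wait_queue_head_t	wait;
	bool			done;
};

static int example_wait_for_done(struct example_dev *dev)
{
	long ret;

	spin_lock_irq(&dev->lock);
	ret = wait_event_interruptible_lock_irq_timeout(dev->wait, dev->done,
							dev->lock, HZ);
	spin_unlock_irq(&dev->lock);

	if (ret == 0)
		return -ETIMEDOUT;	/* timeout elapsed */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	return 0;			/* condition became true, ret = jiffies left */
}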
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index f18b91966d3d..8a358a2c97e6 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h | |||
@@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock) | |||
122 | if (rc > 0) | 122 | if (rc > 0) |
123 | /* local bh are disabled so it is ok to use _BH */ | 123 | /* local bh are disabled so it is ok to use _BH */ |
124 | NET_ADD_STATS_BH(sock_net(sk), | 124 | NET_ADD_STATS_BH(sock_net(sk), |
125 | LINUX_MIB_LOWLATENCYRXPACKETS, rc); | 125 | LINUX_MIB_BUSYPOLLRXPACKETS, rc); |
126 | 126 | ||
127 | } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && | 127 | } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && |
128 | !need_resched() && !busy_loop_timeout(end_time)); | 128 | !need_resched() && !busy_loop_timeout(end_time)); |
@@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk) | |||
162 | return false; | 162 | return false; |
163 | } | 163 | } |
164 | 164 | ||
165 | static inline bool sk_busy_poll(struct sock *sk, int nonblock) | ||
166 | { | ||
167 | return false; | ||
168 | } | ||
169 | |||
170 | static inline void skb_mark_napi_id(struct sk_buff *skb, | 165 | static inline void skb_mark_napi_id(struct sk_buff *skb, |
171 | struct napi_struct *napi) | 166 | struct napi_struct *napi) |
172 | { | 167 | { |
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 260f83f16bcf..f667248202b6 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h | |||
@@ -135,6 +135,8 @@ extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, | |||
135 | extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, | 135 | extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, |
136 | __be32 mtu); | 136 | __be32 mtu); |
137 | extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark); | 137 | extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark); |
138 | extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif, | ||
139 | u32 mark); | ||
138 | extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); | 140 | extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); |
139 | 141 | ||
140 | struct netlink_callback; | 142 | struct netlink_callback; |
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 781b3cf86a2f..a354db5b7662 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h | |||
@@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, | |||
145 | return INET_ECN_encapsulate(tos, inner); | 145 | return INET_ECN_encapsulate(tos, inner); |
146 | } | 146 | } |
147 | 147 | ||
148 | static inline void tunnel_ip_select_ident(struct sk_buff *skb, | ||
149 | const struct iphdr *old_iph, | ||
150 | struct dst_entry *dst) | ||
151 | { | ||
152 | struct iphdr *iph = ip_hdr(skb); | ||
153 | |||
154 | /* Use inner packet iph-id if possible. */ | ||
155 | if (skb->protocol == htons(ETH_P_IP) && old_iph->id) | ||
156 | iph->id = old_iph->id; | ||
157 | else | ||
158 | __ip_select_ident(iph, dst, | ||
159 | (skb_shinfo(skb)->gso_segs ?: 1) - 1); | ||
160 | } | ||
161 | |||
162 | int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); | 148 | int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); |
163 | int iptunnel_xmit(struct net *net, struct rtable *rt, | 149 | int iptunnel_xmit(struct net *net, struct rtable *rt, |
164 | struct sk_buff *skb, | 150 | struct sk_buff *skb, |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 6eab63363e59..e5ae0c50fa9c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -683,13 +683,19 @@ struct psched_ratecfg { | |||
683 | u64 rate_bytes_ps; /* bytes per second */ | 683 | u64 rate_bytes_ps; /* bytes per second */ |
684 | u32 mult; | 684 | u32 mult; |
685 | u16 overhead; | 685 | u16 overhead; |
686 | u8 linklayer; | ||
686 | u8 shift; | 687 | u8 shift; |
687 | }; | 688 | }; |
688 | 689 | ||
689 | static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, | 690 | static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, |
690 | unsigned int len) | 691 | unsigned int len) |
691 | { | 692 | { |
692 | return ((u64)(len + r->overhead) * r->mult) >> r->shift; | 693 | len += r->overhead; |
694 | |||
695 | if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) | ||
696 | return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift; | ||
697 | |||
698 | return ((u64)len * r->mult) >> r->shift; | ||
693 | } | 699 | } |
694 | 700 | ||
695 | extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); | 701 | extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); |
@@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res, | |||
700 | memset(res, 0, sizeof(*res)); | 706 | memset(res, 0, sizeof(*res)); |
701 | res->rate = r->rate_bytes_ps; | 707 | res->rate = r->rate_bytes_ps; |
702 | res->overhead = r->overhead; | 708 | res->overhead = r->overhead; |
709 | res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); | ||
703 | } | 710 | } |
704 | 711 | ||
705 | #endif | 712 | #endif |
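The ATM branch above accounts for cell padding: every 48 bytes of payload occupy one 53-byte cell on the wire, so the shaper must bill DIV_ROUND_UP(len, 48) * 53 bytes rather than len. A stand-alone illustration of the arithmetic (ordinary userspace C, not kernel code):

#include <stdio.h>

#define ATM_CELL_PAYLOAD 48
#define ATM_CELL_SIZE    53

int main(void)
{
	unsigned int len = 1500;	/* packet length plus overhead, in bytes */
	unsigned int cells = (len + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD;
	unsigned int wire = cells * ATM_CELL_SIZE;

	/* 1500 bytes -> 32 cells -> 1696 bytes actually sent on the wire */
	printf("%u bytes -> %u cells -> %u wire bytes\n", len, cells, wire);
	return 0;
}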
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h index 6cf06bfd841b..2fee45bdec0a 100644 --- a/include/uapi/linux/ip.h +++ b/include/uapi/linux/ip.h | |||
@@ -133,4 +133,38 @@ struct ip_beet_phdr { | |||
133 | __u8 reserved; | 133 | __u8 reserved; |
134 | }; | 134 | }; |
135 | 135 | ||
136 | /* index values for the variables in ipv4_devconf */ | ||
137 | enum | ||
138 | { | ||
139 | IPV4_DEVCONF_FORWARDING=1, | ||
140 | IPV4_DEVCONF_MC_FORWARDING, | ||
141 | IPV4_DEVCONF_PROXY_ARP, | ||
142 | IPV4_DEVCONF_ACCEPT_REDIRECTS, | ||
143 | IPV4_DEVCONF_SECURE_REDIRECTS, | ||
144 | IPV4_DEVCONF_SEND_REDIRECTS, | ||
145 | IPV4_DEVCONF_SHARED_MEDIA, | ||
146 | IPV4_DEVCONF_RP_FILTER, | ||
147 | IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE, | ||
148 | IPV4_DEVCONF_BOOTP_RELAY, | ||
149 | IPV4_DEVCONF_LOG_MARTIANS, | ||
150 | IPV4_DEVCONF_TAG, | ||
151 | IPV4_DEVCONF_ARPFILTER, | ||
152 | IPV4_DEVCONF_MEDIUM_ID, | ||
153 | IPV4_DEVCONF_NOXFRM, | ||
154 | IPV4_DEVCONF_NOPOLICY, | ||
155 | IPV4_DEVCONF_FORCE_IGMP_VERSION, | ||
156 | IPV4_DEVCONF_ARP_ANNOUNCE, | ||
157 | IPV4_DEVCONF_ARP_IGNORE, | ||
158 | IPV4_DEVCONF_PROMOTE_SECONDARIES, | ||
159 | IPV4_DEVCONF_ARP_ACCEPT, | ||
160 | IPV4_DEVCONF_ARP_NOTIFY, | ||
161 | IPV4_DEVCONF_ACCEPT_LOCAL, | ||
162 | IPV4_DEVCONF_SRC_VMARK, | ||
163 | IPV4_DEVCONF_PROXY_ARP_PVLAN, | ||
164 | IPV4_DEVCONF_ROUTE_LOCALNET, | ||
165 | __IPV4_DEVCONF_MAX | ||
166 | }; | ||
167 | |||
168 | #define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1) | ||
169 | |||
136 | #endif /* _UAPI_LINUX_IP_H */ | 170 | #endif /* _UAPI_LINUX_IP_H */ |
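The enum is 1-based while the data[] array in struct ipv4_devconf is 0-based, so accessors subtract one from the index; a small sketch of the indexing (mirroring the IPV4_DEVCONF() helper in include/linux/inetdevice.h):

/* Returns the per-device forwarding setting from a struct ipv4_devconf. */
static inline int example_get_forwarding(const struct ipv4_devconf *cnf)
{
	return cnf->data[IPV4_DEVCONF_FORWARDING - 1];
}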
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index dbd71b0c7d8c..09d62b9228ff 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h | |||
@@ -73,9 +73,17 @@ struct tc_estimator { | |||
73 | #define TC_H_ROOT (0xFFFFFFFFU) | 73 | #define TC_H_ROOT (0xFFFFFFFFU) |
74 | #define TC_H_INGRESS (0xFFFFFFF1U) | 74 | #define TC_H_INGRESS (0xFFFFFFF1U) |
75 | 75 | ||
76 | /* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */ | ||
77 | enum tc_link_layer { | ||
78 | TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ | ||
79 | TC_LINKLAYER_ETHERNET, | ||
80 | TC_LINKLAYER_ATM, | ||
81 | }; | ||
82 | #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ | ||
83 | |||
76 | struct tc_ratespec { | 84 | struct tc_ratespec { |
77 | unsigned char cell_log; | 85 | unsigned char cell_log; |
78 | unsigned char __reserved; | 86 | __u8 linklayer; /* lower 4 bits */ |
79 | unsigned short overhead; | 87 | unsigned short overhead; |
80 | short cell_align; | 88 | short cell_align; |
81 | unsigned short mpu; | 89 | unsigned short mpu; |
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index af0a674cc677..a1356d3b54df 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h | |||
@@ -253,7 +253,7 @@ enum | |||
253 | LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ | 253 | LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ |
254 | LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ | 254 | LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ |
255 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ | 255 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ |
256 | LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */ | 256 | LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */ |
257 | __LINUX_MIB_MAX | 257 | __LINUX_MIB_MAX |
258 | }; | 258 | }; |
259 | 259 | ||
diff --git a/init/Kconfig b/init/Kconfig index 247084be0590..fed81b576f29 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -955,7 +955,7 @@ config MEMCG_SWAP_ENABLED | |||
955 | Memory Resource Controller Swap Extension comes with its price in | 955 | Memory Resource Controller Swap Extension comes with its price in |
956 | a bigger memory consumption. General purpose distribution kernels | 956 | a bigger memory consumption. General purpose distribution kernels |
957 | which want to enable the feature but keep it disabled by default | 957 | which want to enable the feature but keep it disabled by default |
958 | and let the user enable it by swapaccount boot command line | 958 | and let the user enable it by swapaccount=1 boot command line |
959 | parameter should have this option unselected. | 959 | parameter should have this option unselected. |
960 | Those who want the feature enabled by default should | 960 | Those who want the feature enabled by default should |
961 | select this option (if, for some reason, they need to disable it | 961 | select this option (if, for some reason, they need to disable it |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index e5657788fedd..ea1966db34f2 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) | |||
475 | 475 | ||
476 | /* | 476 | /* |
477 | * Cpusets with tasks - existing or newly being attached - can't | 477 | * Cpusets with tasks - existing or newly being attached - can't |
478 | * have empty cpus_allowed or mems_allowed. | 478 | * be changed to have empty cpus_allowed or mems_allowed. |
479 | */ | 479 | */ |
480 | ret = -ENOSPC; | 480 | ret = -ENOSPC; |
481 | if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) && | 481 | if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) { |
482 | (cpumask_empty(trial->cpus_allowed) && | 482 | if (!cpumask_empty(cur->cpus_allowed) && |
483 | nodes_empty(trial->mems_allowed))) | 483 | cpumask_empty(trial->cpus_allowed)) |
484 | goto out; | 484 | goto out; |
485 | if (!nodes_empty(cur->mems_allowed) && | ||
486 | nodes_empty(trial->mems_allowed)) | ||
487 | goto out; | ||
488 | } | ||
485 | 489 | ||
486 | ret = 0; | 490 | ret = 0; |
487 | out: | 491 | out: |
@@ -1608,11 +1612,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) | |||
1608 | { | 1612 | { |
1609 | struct cpuset *cs = cgroup_cs(cgrp); | 1613 | struct cpuset *cs = cgroup_cs(cgrp); |
1610 | cpuset_filetype_t type = cft->private; | 1614 | cpuset_filetype_t type = cft->private; |
1611 | int retval = -ENODEV; | 1615 | int retval = 0; |
1612 | 1616 | ||
1613 | mutex_lock(&cpuset_mutex); | 1617 | mutex_lock(&cpuset_mutex); |
1614 | if (!is_cpuset_online(cs)) | 1618 | if (!is_cpuset_online(cs)) { |
1619 | retval = -ENODEV; | ||
1615 | goto out_unlock; | 1620 | goto out_unlock; |
1621 | } | ||
1616 | 1622 | ||
1617 | switch (type) { | 1623 | switch (type) { |
1618 | case FILE_CPU_EXCLUSIVE: | 1624 | case FILE_CPU_EXCLUSIVE: |
diff --git a/kernel/fork.c b/kernel/fork.c index 403d2bb8a968..e23bb19e2a3e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, | |||
1679 | int __user *, parent_tidptr, | 1679 | int __user *, parent_tidptr, |
1680 | int __user *, child_tidptr, | 1680 | int __user *, child_tidptr, |
1681 | int, tls_val) | 1681 | int, tls_val) |
1682 | #elif defined(CONFIG_CLONE_BACKWARDS3) | ||
1683 | SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, | ||
1684 | int, stack_size, | ||
1685 | int __user *, parent_tidptr, | ||
1686 | int __user *, child_tidptr, | ||
1687 | int, tls_val) | ||
1682 | #else | 1688 | #else |
1683 | SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, | 1689 | SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, |
1684 | int __user *, parent_tidptr, | 1690 | int __user *, parent_tidptr, |
diff --git a/kernel/mutex.c b/kernel/mutex.c index ff05f4bd86eb..a52ee7bb830d 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c | |||
@@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) | |||
686 | might_sleep(); | 686 | might_sleep(); |
687 | ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, | 687 | ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, |
688 | 0, &ctx->dep_map, _RET_IP_, ctx); | 688 | 0, &ctx->dep_map, _RET_IP_, ctx); |
689 | if (!ret && ctx->acquired > 0) | 689 | if (!ret && ctx->acquired > 1) |
690 | return ww_mutex_deadlock_injection(lock, ctx); | 690 | return ww_mutex_deadlock_injection(lock, ctx); |
691 | 691 | ||
692 | return ret; | 692 | return ret; |
@@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) | |||
702 | ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, | 702 | ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, |
703 | 0, &ctx->dep_map, _RET_IP_, ctx); | 703 | 0, &ctx->dep_map, _RET_IP_, ctx); |
704 | 704 | ||
705 | if (!ret && ctx->acquired > 0) | 705 | if (!ret && ctx->acquired > 1) |
706 | return ww_mutex_deadlock_injection(lock, ctx); | 706 | return ww_mutex_deadlock_injection(lock, ctx); |
707 | 707 | ||
708 | return ret; | 708 | return ret; |
diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 06fe28589e9c..a394297f8b2f 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c | |||
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req) | |||
296 | } | 296 | } |
297 | EXPORT_SYMBOL_GPL(pm_qos_request_active); | 297 | EXPORT_SYMBOL_GPL(pm_qos_request_active); |
298 | 298 | ||
299 | static void __pm_qos_update_request(struct pm_qos_request *req, | ||
300 | s32 new_value) | ||
301 | { | ||
302 | trace_pm_qos_update_request(req->pm_qos_class, new_value); | ||
303 | |||
304 | if (new_value != req->node.prio) | ||
305 | pm_qos_update_target( | ||
306 | pm_qos_array[req->pm_qos_class]->constraints, | ||
307 | &req->node, PM_QOS_UPDATE_REQ, new_value); | ||
308 | } | ||
309 | |||
299 | /** | 310 | /** |
300 | * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout | 311 | * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout |
301 | * @work: work struct for the delayed work (timeout) | 312 | * @work: work struct for the delayed work (timeout) |
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work) | |||
308 | struct pm_qos_request, | 319 | struct pm_qos_request, |
309 | work); | 320 | work); |
310 | 321 | ||
311 | pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); | 322 | __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); |
312 | } | 323 | } |
313 | 324 | ||
314 | /** | 325 | /** |
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req, | |||
364 | } | 375 | } |
365 | 376 | ||
366 | cancel_delayed_work_sync(&req->work); | 377 | cancel_delayed_work_sync(&req->work); |
367 | 378 | __pm_qos_update_request(req, new_value); | |
368 | trace_pm_qos_update_request(req->pm_qos_class, new_value); | ||
369 | if (new_value != req->node.prio) | ||
370 | pm_qos_update_target( | ||
371 | pm_qos_array[req->pm_qos_class]->constraints, | ||
372 | &req->node, PM_QOS_UPDATE_REQ, new_value); | ||
373 | } | 379 | } |
374 | EXPORT_SYMBOL_GPL(pm_qos_update_request); | 380 | EXPORT_SYMBOL_GPL(pm_qos_update_request); |
375 | 381 | ||
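For context, a driver-side sketch of the request-with-timeout path that now ends up in the new __pm_qos_update_request() helper when the delayed work fires; the QoS class and the latency/timeout values are arbitrary examples, not taken from the patch:

#include <linux/pm_qos.h>

static struct pm_qos_request example_req;

static void example_enter_low_latency(void)
{
	/* Cap CPU/DMA latency at 20 us while a latency-sensitive burst runs. */
	pm_qos_add_request(&example_req, PM_QOS_CPU_DMA_LATENCY, 20);

	/* Let the constraint drop back to the default value after 500 us. */
	pm_qos_update_request_timeout(&example_req, 20, 500);
}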
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b7c32cb7bfeb..05c39f030314 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p) | |||
933 | /** | 933 | /** |
934 | * task_curr - is this task currently executing on a CPU? | 934 | * task_curr - is this task currently executing on a CPU? |
935 | * @p: the task in question. | 935 | * @p: the task in question. |
936 | * | ||
937 | * Return: 1 if the task is currently executing. 0 otherwise. | ||
936 | */ | 938 | */ |
937 | inline int task_curr(const struct task_struct *p) | 939 | inline int task_curr(const struct task_struct *p) |
938 | { | 940 | { |
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu) | |||
1482 | * the simpler "current->state = TASK_RUNNING" to mark yourself | 1484 | * the simpler "current->state = TASK_RUNNING" to mark yourself |
1483 | * runnable without the overhead of this. | 1485 | * runnable without the overhead of this. |
1484 | * | 1486 | * |
1485 | * Returns %true if @p was woken up, %false if it was already running | 1487 | * Return: %true if @p was woken up, %false if it was already running. |
1486 | * or @state didn't match @p's state. | 1488 | * or @state didn't match @p's state. |
1487 | */ | 1489 | */ |
1488 | static int | 1490 | static int |
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) | |||
1491 | unsigned long flags; | 1493 | unsigned long flags; |
1492 | int cpu, success = 0; | 1494 | int cpu, success = 0; |
1493 | 1495 | ||
1494 | smp_wmb(); | 1496 | /* |
1497 | * If we are going to wake up a thread waiting for CONDITION we | ||
1498 | * need to ensure that CONDITION=1 done by the caller can not be | ||
1499 | * reordered with p->state check below. This pairs with mb() in | ||
1500 | * set_current_state() the waiting thread does. | ||
1501 | */ | ||
1502 | smp_mb__before_spinlock(); | ||
1495 | raw_spin_lock_irqsave(&p->pi_lock, flags); | 1503 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
1496 | if (!(p->state & state)) | 1504 | if (!(p->state & state)) |
1497 | goto out; | 1505 | goto out; |
@@ -1577,8 +1585,9 @@ out: | |||
1577 | * @p: The process to be woken up. | 1585 | * @p: The process to be woken up. |
1578 | * | 1586 | * |
1579 | * Attempt to wake up the nominated process and move it to the set of runnable | 1587 | * Attempt to wake up the nominated process and move it to the set of runnable |
1580 | * processes. Returns 1 if the process was woken up, 0 if it was already | 1588 | * processes. |
1581 | * running. | 1589 | * |
1590 | * Return: 1 if the process was woken up, 0 if it was already running. | ||
1582 | * | 1591 | * |
1583 | * It may be assumed that this function implies a write memory barrier before | 1592 | * It may be assumed that this function implies a write memory barrier before |
1584 | * changing the task state if and only if any tasks are woken up. | 1593 | * changing the task state if and only if any tasks are woken up. |
@@ -2191,6 +2200,8 @@ void scheduler_tick(void) | |||
2191 | * This makes sure that uptime, CFS vruntime, load | 2200 | * This makes sure that uptime, CFS vruntime, load |
2192 | * balancing, etc... continue to move forward, even | 2201 | * balancing, etc... continue to move forward, even |
2193 | * with a very low granularity. | 2202 | * with a very low granularity. |
2203 | * | ||
2204 | * Return: Maximum deferment in nanoseconds. | ||
2194 | */ | 2205 | */ |
2195 | u64 scheduler_tick_max_deferment(void) | 2206 | u64 scheduler_tick_max_deferment(void) |
2196 | { | 2207 | { |
@@ -2394,6 +2405,12 @@ need_resched: | |||
2394 | if (sched_feat(HRTICK)) | 2405 | if (sched_feat(HRTICK)) |
2395 | hrtick_clear(rq); | 2406 | hrtick_clear(rq); |
2396 | 2407 | ||
2408 | /* | ||
2409 | * Make sure that signal_pending_state()->signal_pending() below | ||
2410 | * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) | ||
2411 | * done by the caller to avoid the race with signal_wake_up(). | ||
2412 | */ | ||
2413 | smp_mb__before_spinlock(); | ||
2397 | raw_spin_lock_irq(&rq->lock); | 2414 | raw_spin_lock_irq(&rq->lock); |
2398 | 2415 | ||
2399 | switch_count = &prev->nivcsw; | 2416 | switch_count = &prev->nivcsw; |
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion); | |||
2796 | * specified timeout to expire. The timeout is in jiffies. It is not | 2813 | * specified timeout to expire. The timeout is in jiffies. It is not |
2797 | * interruptible. | 2814 | * interruptible. |
2798 | * | 2815 | * |
2799 | * The return value is 0 if timed out, and positive (at least 1, or number of | 2816 | * Return: 0 if timed out, and positive (at least 1, or number of jiffies left |
2800 | * jiffies left till timeout) if completed. | 2817 | * till timeout) if completed. |
2801 | */ | 2818 | */ |
2802 | unsigned long __sched | 2819 | unsigned long __sched |
2803 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) | 2820 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) |
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io); | |||
2829 | * specified timeout to expire. The timeout is in jiffies. It is not | 2846 | * specified timeout to expire. The timeout is in jiffies. It is not |
2830 | * interruptible. The caller is accounted as waiting for IO. | 2847 | * interruptible. The caller is accounted as waiting for IO. |
2831 | * | 2848 | * |
2832 | * The return value is 0 if timed out, and positive (at least 1, or number of | 2849 | * Return: 0 if timed out, and positive (at least 1, or number of jiffies left |
2833 | * jiffies left till timeout) if completed. | 2850 | * till timeout) if completed. |
2834 | */ | 2851 | */ |
2835 | unsigned long __sched | 2852 | unsigned long __sched |
2836 | wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) | 2853 | wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) |
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout); | |||
2846 | * This waits for completion of a specific task to be signaled. It is | 2863 | * This waits for completion of a specific task to be signaled. It is |
2847 | * interruptible. | 2864 | * interruptible. |
2848 | * | 2865 | * |
2849 | * The return value is -ERESTARTSYS if interrupted, 0 if completed. | 2866 | * Return: -ERESTARTSYS if interrupted, 0 if completed. |
2850 | */ | 2867 | */ |
2851 | int __sched wait_for_completion_interruptible(struct completion *x) | 2868 | int __sched wait_for_completion_interruptible(struct completion *x) |
2852 | { | 2869 | { |
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible); | |||
2865 | * This waits for either a completion of a specific task to be signaled or for a | 2882 | * This waits for either a completion of a specific task to be signaled or for a |
2866 | * specified timeout to expire. It is interruptible. The timeout is in jiffies. | 2883 | * specified timeout to expire. It is interruptible. The timeout is in jiffies. |
2867 | * | 2884 | * |
2868 | * The return value is -ERESTARTSYS if interrupted, 0 if timed out, | 2885 | * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, |
2869 | * positive (at least 1, or number of jiffies left till timeout) if completed. | 2886 | * or number of jiffies left till timeout) if completed. |
2870 | */ | 2887 | */ |
2871 | long __sched | 2888 | long __sched |
2872 | wait_for_completion_interruptible_timeout(struct completion *x, | 2889 | wait_for_completion_interruptible_timeout(struct completion *x, |
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); | |||
2883 | * This waits to be signaled for completion of a specific task. It can be | 2900 | * This waits to be signaled for completion of a specific task. It can be |
2884 | * interrupted by a kill signal. | 2901 | * interrupted by a kill signal. |
2885 | * | 2902 | * |
2886 | * The return value is -ERESTARTSYS if interrupted, 0 if completed. | 2903 | * Return: -ERESTARTSYS if interrupted, 0 if completed. |
2887 | */ | 2904 | */ |
2888 | int __sched wait_for_completion_killable(struct completion *x) | 2905 | int __sched wait_for_completion_killable(struct completion *x) |
2889 | { | 2906 | { |
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable); | |||
2903 | * signaled or for a specified timeout to expire. It can be | 2920 | * signaled or for a specified timeout to expire. It can be |
2904 | * interrupted by a kill signal. The timeout is in jiffies. | 2921 | * interrupted by a kill signal. The timeout is in jiffies. |
2905 | * | 2922 | * |
2906 | * The return value is -ERESTARTSYS if interrupted, 0 if timed out, | 2923 | * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, |
2907 | * positive (at least 1, or number of jiffies left till timeout) if completed. | 2924 | * or number of jiffies left till timeout) if completed. |
2908 | */ | 2925 | */ |
2909 | long __sched | 2926 | long __sched |
2910 | wait_for_completion_killable_timeout(struct completion *x, | 2927 | wait_for_completion_killable_timeout(struct completion *x, |
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout); | |||
2918 | * try_wait_for_completion - try to decrement a completion without blocking | 2935 | * try_wait_for_completion - try to decrement a completion without blocking |
2919 | * @x: completion structure | 2936 | * @x: completion structure |
2920 | * | 2937 | * |
2921 | * Returns: 0 if a decrement cannot be done without blocking | 2938 | * Return: 0 if a decrement cannot be done without blocking |
2922 | * 1 if a decrement succeeded. | 2939 | * 1 if a decrement succeeded. |
2923 | * | 2940 | * |
2924 | * If a completion is being used as a counting completion, | 2941 | * If a completion is being used as a counting completion, |
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion); | |||
2945 | * completion_done - Test to see if a completion has any waiters | 2962 | * completion_done - Test to see if a completion has any waiters |
2946 | * @x: completion structure | 2963 | * @x: completion structure |
2947 | * | 2964 | * |
2948 | * Returns: 0 if there are waiters (wait_for_completion() in progress) | 2965 | * Return: 0 if there are waiters (wait_for_completion() in progress) |
2949 | * 1 if there are no waiters. | 2966 | * 1 if there are no waiters. |
2950 | * | 2967 | * |
2951 | */ | 2968 | */ |
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment) | |||
3182 | * task_prio - return the priority value of a given task. | 3199 | * task_prio - return the priority value of a given task. |
3183 | * @p: the task in question. | 3200 | * @p: the task in question. |
3184 | * | 3201 | * |
3185 | * This is the priority value as seen by users in /proc. | 3202 | * Return: The priority value as seen by users in /proc. |
3186 | * RT tasks are offset by -200. Normal tasks are centered | 3203 | * RT tasks are offset by -200. Normal tasks are centered |
3187 | * around 0, value goes from -16 to +15. | 3204 | * around 0, value goes from -16 to +15. |
3188 | */ | 3205 | */ |
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p) | |||
3194 | /** | 3211 | /** |
3195 | * task_nice - return the nice value of a given task. | 3212 | * task_nice - return the nice value of a given task. |
3196 | * @p: the task in question. | 3213 | * @p: the task in question. |
3214 | * | ||
3215 | * Return: The nice value [ -20 ... 0 ... 19 ]. | ||
3197 | */ | 3216 | */ |
3198 | int task_nice(const struct task_struct *p) | 3217 | int task_nice(const struct task_struct *p) |
3199 | { | 3218 | { |
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice); | |||
3204 | /** | 3223 | /** |
3205 | * idle_cpu - is a given cpu idle currently? | 3224 | * idle_cpu - is a given cpu idle currently? |
3206 | * @cpu: the processor in question. | 3225 | * @cpu: the processor in question. |
3226 | * | ||
3227 | * Return: 1 if the CPU is currently idle. 0 otherwise. | ||
3207 | */ | 3228 | */ |
3208 | int idle_cpu(int cpu) | 3229 | int idle_cpu(int cpu) |
3209 | { | 3230 | { |
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu) | |||
3226 | /** | 3247 | /** |
3227 | * idle_task - return the idle task for a given cpu. | 3248 | * idle_task - return the idle task for a given cpu. |
3228 | * @cpu: the processor in question. | 3249 | * @cpu: the processor in question. |
3250 | * | ||
3251 | * Return: The idle task for the cpu @cpu. | ||
3229 | */ | 3252 | */ |
3230 | struct task_struct *idle_task(int cpu) | 3253 | struct task_struct *idle_task(int cpu) |
3231 | { | 3254 | { |
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu) | |||
3235 | /** | 3258 | /** |
3236 | * find_process_by_pid - find a process with a matching PID value. | 3259 | * find_process_by_pid - find a process with a matching PID value. |
3237 | * @pid: the pid in question. | 3260 | * @pid: the pid in question. |
3261 | * | ||
3262 | * The task of @pid, if found. %NULL otherwise. | ||
3238 | */ | 3263 | */ |
3239 | static struct task_struct *find_process_by_pid(pid_t pid) | 3264 | static struct task_struct *find_process_by_pid(pid_t pid) |
3240 | { | 3265 | { |
@@ -3432,6 +3457,8 @@ recheck: | |||
3432 | * @policy: new policy. | 3457 | * @policy: new policy. |
3433 | * @param: structure containing the new RT priority. | 3458 | * @param: structure containing the new RT priority. |
3434 | * | 3459 | * |
3460 | * Return: 0 on success. An error code otherwise. | ||
3461 | * | ||
3435 | * NOTE that the task may be already dead. | 3462 | * NOTE that the task may be already dead. |
3436 | */ | 3463 | */ |
3437 | int sched_setscheduler(struct task_struct *p, int policy, | 3464 | int sched_setscheduler(struct task_struct *p, int policy, |
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler); | |||
3451 | * current context has permission. For example, this is needed in | 3478 | * current context has permission. For example, this is needed in |
3452 | * stop_machine(): we create temporary high priority worker threads, | 3479 | * stop_machine(): we create temporary high priority worker threads, |
3453 | * but our caller might not have that capability. | 3480 | * but our caller might not have that capability. |
3481 | * | ||
3482 | * Return: 0 on success. An error code otherwise. | ||
3454 | */ | 3483 | */ |
3455 | int sched_setscheduler_nocheck(struct task_struct *p, int policy, | 3484 | int sched_setscheduler_nocheck(struct task_struct *p, int policy, |
3456 | const struct sched_param *param) | 3485 | const struct sched_param *param) |
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | |||
3485 | * @pid: the pid in question. | 3514 | * @pid: the pid in question. |
3486 | * @policy: new policy. | 3515 | * @policy: new policy. |
3487 | * @param: structure containing the new RT priority. | 3516 | * @param: structure containing the new RT priority. |
3517 | * | ||
3518 | * Return: 0 on success. An error code otherwise. | ||
3488 | */ | 3519 | */ |
3489 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, | 3520 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, |
3490 | struct sched_param __user *, param) | 3521 | struct sched_param __user *, param) |
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, | |||
3500 | * sys_sched_setparam - set/change the RT priority of a thread | 3531 | * sys_sched_setparam - set/change the RT priority of a thread |
3501 | * @pid: the pid in question. | 3532 | * @pid: the pid in question. |
3502 | * @param: structure containing the new RT priority. | 3533 | * @param: structure containing the new RT priority. |
3534 | * | ||
3535 | * Return: 0 on success. An error code otherwise. | ||
3503 | */ | 3536 | */ |
3504 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) | 3537 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) |
3505 | { | 3538 | { |
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) | |||
3509 | /** | 3542 | /** |
3510 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread | 3543 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread |
3511 | * @pid: the pid in question. | 3544 | * @pid: the pid in question. |
3545 | * | ||
3546 | * Return: On success, the policy of the thread. Otherwise, a negative error | ||
3547 | * code. | ||
3512 | */ | 3548 | */ |
3513 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) | 3549 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) |
3514 | { | 3550 | { |
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) | |||
3535 | * sys_sched_getparam - get the RT priority of a thread | 3571 | * sys_sched_getparam - get the RT priority of a thread |
3536 | * @pid: the pid in question. | 3572 | * @pid: the pid in question. |
3537 | * @param: structure containing the RT priority. | 3573 | * @param: structure containing the RT priority. |
3574 | * | ||
3575 | * Return: On success, 0 and the RT priority is in @param. Otherwise, an error | ||
3576 | * code. | ||
3538 | */ | 3577 | */ |
3539 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) | 3578 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) |
3540 | { | 3579 | { |
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | |||
3659 | * @pid: pid of the process | 3698 | * @pid: pid of the process |
3660 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr | 3699 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
3661 | * @user_mask_ptr: user-space pointer to the new cpu mask | 3700 | * @user_mask_ptr: user-space pointer to the new cpu mask |
3701 | * | ||
3702 | * Return: 0 on success. An error code otherwise. | ||
3662 | */ | 3703 | */ |
3663 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, | 3704 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, |
3664 | unsigned long __user *, user_mask_ptr) | 3705 | unsigned long __user *, user_mask_ptr) |
@@ -3710,6 +3751,8 @@ out_unlock: | |||
3710 | * @pid: pid of the process | 3751 | * @pid: pid of the process |
3711 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr | 3752 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
3712 | * @user_mask_ptr: user-space pointer to hold the current cpu mask | 3753 | * @user_mask_ptr: user-space pointer to hold the current cpu mask |
3754 | * | ||
3755 | * Return: 0 on success. An error code otherwise. | ||
3713 | */ | 3756 | */ |
3714 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | 3757 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, |
3715 | unsigned long __user *, user_mask_ptr) | 3758 | unsigned long __user *, user_mask_ptr) |
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | |||
3744 | * | 3787 | * |
3745 | * This function yields the current CPU to other tasks. If there are no | 3788 | * This function yields the current CPU to other tasks. If there are no |
3746 | * other threads running on this CPU then this function will return. | 3789 | * other threads running on this CPU then this function will return. |
3790 | * | ||
3791 | * Return: 0. | ||
3747 | */ | 3792 | */ |
3748 | SYSCALL_DEFINE0(sched_yield) | 3793 | SYSCALL_DEFINE0(sched_yield) |
3749 | { | 3794 | { |
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield); | |||
3869 | * It's the caller's job to ensure that the target task struct | 3914 | * It's the caller's job to ensure that the target task struct |
3870 | * can't go away on us before we can do any checks. | 3915 | * can't go away on us before we can do any checks. |
3871 | * | 3916 | * |
3872 | * Returns: | 3917 | * Return: |
3873 | * true (>0) if we indeed boosted the target task. | 3918 | * true (>0) if we indeed boosted the target task. |
3874 | * false (0) if we failed to boost the target. | 3919 | * false (0) if we failed to boost the target. |
3875 | * -ESRCH if there's no task to yield to. | 3920 | * -ESRCH if there's no task to yield to. |
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout) | |||
3972 | * sys_sched_get_priority_max - return maximum RT priority. | 4017 | * sys_sched_get_priority_max - return maximum RT priority. |
3973 | * @policy: scheduling class. | 4018 | * @policy: scheduling class. |
3974 | * | 4019 | * |
3975 | * this syscall returns the maximum rt_priority that can be used | 4020 | * Return: On success, this syscall returns the maximum |
3976 | * by a given scheduling class. | 4021 | * rt_priority that can be used by a given scheduling class. |
4022 | * On failure, a negative error code is returned. | ||
3977 | */ | 4023 | */ |
3978 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) | 4024 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) |
3979 | { | 4025 | { |
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy) | |||
3997 | * sys_sched_get_priority_min - return minimum RT priority. | 4043 | * sys_sched_get_priority_min - return minimum RT priority. |
3998 | * @policy: scheduling class. | 4044 | * @policy: scheduling class. |
3999 | * | 4045 | * |
4000 | * this syscall returns the minimum rt_priority that can be used | 4046 | * Return: On success, this syscall returns the minimum |
4001 | * by a given scheduling class. | 4047 | * rt_priority that can be used by a given scheduling class. |
4048 | * On failure, a negative error code is returned. | ||
4002 | */ | 4049 | */ |
4003 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) | 4050 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) |
4004 | { | 4051 | { |
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) | |||
4024 | * | 4071 | * |
4025 | * this syscall writes the default timeslice value of a given process | 4072 | * this syscall writes the default timeslice value of a given process |
4026 | * into the user-space timespec buffer. A value of '0' means infinity. | 4073 | * into the user-space timespec buffer. A value of '0' means infinity. |
4074 | * | ||
4075 | * Return: On success, 0 and the timeslice is in @interval. Otherwise, | ||
4076 | * an error code. | ||
4027 | */ | 4077 | */ |
4028 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | 4078 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
4029 | struct timespec __user *, interval) | 4079 | struct timespec __user *, interval) |
@@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void) | |||
6632 | * @cpu: the processor in question. | 6682 | * @cpu: the processor in question. |
6633 | * | 6683 | * |
6634 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! | 6684 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
6685 | * | ||
6686 | * Return: The current task for @cpu. | ||
6635 | */ | 6687 | */ |
6636 | struct task_struct *curr_task(int cpu) | 6688 | struct task_struct *curr_task(int cpu) |
6637 | { | 6689 | { |
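The kernel/sched/core.c hunks above convert free-form "Returns ..." text into the structured kernel-doc "Return:" section. For reference, a complete kernel-doc block in that style is sketched below; the function and its body are illustrative only and not part of the patch.

        #include <errno.h>
        #include <string.h>

        /**
         * example_parse_policy - map a scheduler policy name to its numeric id
         * @name: policy name, e.g. "fifo" or "rr".
         *
         * Return: The policy id on success. -EINVAL if @name is not recognised.
         */
        static int example_parse_policy(const char *name)
        {
                if (!strcmp(name, "fifo"))
                        return 1;       /* SCHED_FIFO */
                if (!strcmp(name, "rr"))
                        return 2;       /* SCHED_RR */
                return -EINVAL;
        }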
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 1095e878a46f..8b836b376d91 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c | |||
@@ -62,7 +62,7 @@ static int convert_prio(int prio) | |||
62 | * any discrepancies created by racing against the uncertainty of the current | 62 | * any discrepancies created by racing against the uncertainty of the current |
63 | * priority configuration. | 63 | * priority configuration. |
64 | * | 64 | * |
65 | * Returns: (int)bool - CPUs were found | 65 | * Return: (int)bool - CPUs were found |
66 | */ | 66 | */ |
67 | int cpupri_find(struct cpupri *cp, struct task_struct *p, | 67 | int cpupri_find(struct cpupri *cp, struct task_struct *p, |
68 | struct cpumask *lowest_mask) | 68 | struct cpumask *lowest_mask) |
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
203 | * cpupri_init - initialize the cpupri structure | 203 | * cpupri_init - initialize the cpupri structure |
204 | * @cp: The cpupri context | 204 | * @cp: The cpupri context |
205 | * | 205 | * |
206 | * Returns: -ENOMEM if memory fails. | 206 | * Return: -ENOMEM on memory allocation failure. |
207 | */ | 207 | */ |
208 | int cpupri_init(struct cpupri *cp) | 208 | int cpupri_init(struct cpupri *cp) |
209 | { | 209 | { |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 9565645e3202..68f1609ca149 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) | |||
2032 | */ | 2032 | */ |
2033 | update_entity_load_avg(curr, 1); | 2033 | update_entity_load_avg(curr, 1); |
2034 | update_cfs_rq_blocked_load(cfs_rq, 1); | 2034 | update_cfs_rq_blocked_load(cfs_rq, 1); |
2035 | update_cfs_shares(cfs_rq); | ||
2035 | 2036 | ||
2036 | #ifdef CONFIG_SCHED_HRTICK | 2037 | #ifdef CONFIG_SCHED_HRTICK |
2037 | /* | 2038 | /* |
@@ -4280,6 +4281,8 @@ struct sg_lb_stats { | |||
4280 | * get_sd_load_idx - Obtain the load index for a given sched domain. | 4281 | * get_sd_load_idx - Obtain the load index for a given sched domain. |
4281 | * @sd: The sched_domain whose load_idx is to be obtained. | 4282 | * @sd: The sched_domain whose load_idx is to be obtained. |
4282 | * @idle: The Idle status of the CPU for whose sd load_icx is obtained. | 4283 | * @idle: The Idle status of the CPU for whose sd load_icx is obtained. |
4284 | * | ||
4285 | * Return: The load index. | ||
4283 | */ | 4286 | */ |
4284 | static inline int get_sd_load_idx(struct sched_domain *sd, | 4287 | static inline int get_sd_load_idx(struct sched_domain *sd, |
4285 | enum cpu_idle_type idle) | 4288 | enum cpu_idle_type idle) |
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
4574 | * | 4577 | * |
4575 | * Determine if @sg is a busier group than the previously selected | 4578 | * Determine if @sg is a busier group than the previously selected |
4576 | * busiest group. | 4579 | * busiest group. |
4580 | * | ||
4581 | * Return: %true if @sg is a busier group than the previously selected | ||
4582 | * busiest group. %false otherwise. | ||
4577 | */ | 4583 | */ |
4578 | static bool update_sd_pick_busiest(struct lb_env *env, | 4584 | static bool update_sd_pick_busiest(struct lb_env *env, |
4579 | struct sd_lb_stats *sds, | 4585 | struct sd_lb_stats *sds, |
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, | |||
4691 | * assuming lower CPU number will be equivalent to lower a SMT thread | 4697 | * assuming lower CPU number will be equivalent to lower a SMT thread |
4692 | * number. | 4698 | * number. |
4693 | * | 4699 | * |
4694 | * Returns 1 when packing is required and a task should be moved to | 4700 | * Return: 1 when packing is required and a task should be moved to |
4695 | * this CPU. The amount of the imbalance is returned in *imbalance. | 4701 | * this CPU. The amount of the imbalance is returned in *imbalance. |
4696 | * | 4702 | * |
4697 | * @env: The load balancing environment. | 4703 | * @env: The load balancing environment. |
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s | |||
4869 | * @balance: Pointer to a variable indicating if this_cpu | 4875 | * @balance: Pointer to a variable indicating if this_cpu |
4870 | * is the appropriate cpu to perform load balancing at this_level. | 4876 | * is the appropriate cpu to perform load balancing at this_level. |
4871 | * | 4877 | * |
4872 | * Returns: - the busiest group if imbalance exists. | 4878 | * Return: - The busiest group if imbalance exists. |
4873 | * - If no imbalance and user has opted for power-savings balance, | 4879 | * - If no imbalance and user has opted for power-savings balance, |
4874 | * return the least loaded group whose CPUs can be | 4880 | * return the least loaded group whose CPUs can be |
4875 | * put to idle by rebalancing its tasks onto our group. | 4881 | * put to idle by rebalancing its tasks onto our group. |
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index a326f27d7f09..0b479a6a22bb 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c | |||
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) | |||
121 | BUG_ON(bits > 32); | 121 | BUG_ON(bits > 32); |
122 | WARN_ON(!irqs_disabled()); | 122 | WARN_ON(!irqs_disabled()); |
123 | read_sched_clock = read; | 123 | read_sched_clock = read; |
124 | sched_clock_mask = (1 << bits) - 1; | 124 | sched_clock_mask = (1ULL << bits) - 1; |
125 | cd.rate = rate; | 125 | cd.rate = rate; |
126 | 126 | ||
127 | /* calculate the mult/shift to convert counter ticks to ns. */ | 127 | /* calculate the mult/shift to convert counter ticks to ns. */ |
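The kernel/time/sched_clock.c change above widens the mask computation: with a plain int constant, 1 << bits is undefined for bits == 32 and does not produce the intended all-ones mask. A small standalone C illustration of the difference (not kernel code):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                int bits = 32;

                /* (1 << bits) would shift a 32-bit int by its full width:
                 * undefined behaviour, and in practice not the all-ones
                 * mask that is wanted. Widening the constant first keeps
                 * the shift well defined for bits up to 63. */
                uint64_t mask = (1ULL << bits) - 1;

                printf("mask for %d bits: %#llx\n", bits,
                       (unsigned long long)mask);
                return 0;
        }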
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index e77edc97e036..e8a1516cc0a3 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -182,7 +182,8 @@ static bool can_stop_full_tick(void) | |||
182 | * Don't allow the user to think they can get | 182 | * Don't allow the user to think they can get |
183 | * full NO_HZ with this machine. | 183 | * full NO_HZ with this machine. |
184 | */ | 184 | */ |
185 | WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock"); | 185 | WARN_ONCE(have_nohz_full_mask, |
186 | "NO_HZ FULL will not work with unstable sched clock"); | ||
186 | return false; | 187 | return false; |
187 | } | 188 | } |
188 | #endif | 189 | #endif |
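For context on the tick-sched hunk above: WARN_ONCE() takes the trigger condition as its first argument, so replacing the constant 1 with have_nohz_full_mask means the warning only fires on kernels where full dynticks was actually requested, instead of on every machine with an unstable sched clock. A minimal sketch of the pattern, using only the symbols visible in the hunk:

        /* warn at most once, and only if nohz_full was requested at boot */
        WARN_ONCE(have_nohz_full_mask,
                  "NO_HZ FULL will not work with unstable sched clock");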
@@ -343,8 +344,6 @@ static int tick_nohz_init_all(void) | |||
343 | 344 | ||
344 | void __init tick_nohz_init(void) | 345 | void __init tick_nohz_init(void) |
345 | { | 346 | { |
346 | int cpu; | ||
347 | |||
348 | if (!have_nohz_full_mask) { | 347 | if (!have_nohz_full_mask) { |
349 | if (tick_nohz_init_all() < 0) | 348 | if (tick_nohz_init_all() < 0) |
350 | return; | 349 | return; |
diff --git a/kernel/wait.c b/kernel/wait.c index dec68bd4e9d8..d550920e040c 100644 --- a/kernel/wait.c +++ b/kernel/wait.c | |||
@@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t); | |||
363 | 363 | ||
364 | /** | 364 | /** |
365 | * wake_up_atomic_t - Wake up a waiter on a atomic_t | 365 | * wake_up_atomic_t - Wake up a waiter on a atomic_t |
366 | * @word: The word being waited on, a kernel virtual address | 366 | * @p: The atomic_t being waited on, a kernel virtual address |
367 | * @bit: The bit of the word being waited on | ||
368 | * | 367 | * |
369 | * Wake up anyone waiting for the atomic_t to go to zero. | 368 | * Wake up anyone waiting for the atomic_t to go to zero. |
370 | * | 369 | * |
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c index fd94058bd7f9..28321d8f75ef 100644 --- a/lib/lz4/lz4_compress.c +++ b/lib/lz4/lz4_compress.c | |||
@@ -437,7 +437,7 @@ int lz4_compress(const unsigned char *src, size_t src_len, | |||
437 | exit: | 437 | exit: |
438 | return ret; | 438 | return ret; |
439 | } | 439 | } |
440 | EXPORT_SYMBOL_GPL(lz4_compress); | 440 | EXPORT_SYMBOL(lz4_compress); |
441 | 441 | ||
442 | MODULE_LICENSE("GPL"); | 442 | MODULE_LICENSE("Dual BSD/GPL"); |
443 | MODULE_DESCRIPTION("LZ4 compressor"); | 443 | MODULE_DESCRIPTION("LZ4 compressor"); |
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index d3414eae73a1..411be80ddb46 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c | |||
@@ -299,7 +299,7 @@ exit_0: | |||
299 | return ret; | 299 | return ret; |
300 | } | 300 | } |
301 | #ifndef STATIC | 301 | #ifndef STATIC |
302 | EXPORT_SYMBOL_GPL(lz4_decompress); | 302 | EXPORT_SYMBOL(lz4_decompress); |
303 | #endif | 303 | #endif |
304 | 304 | ||
305 | int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, | 305 | int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, |
@@ -319,8 +319,8 @@ exit_0: | |||
319 | return ret; | 319 | return ret; |
320 | } | 320 | } |
321 | #ifndef STATIC | 321 | #ifndef STATIC |
322 | EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize); | 322 | EXPORT_SYMBOL(lz4_decompress_unknownoutputsize); |
323 | 323 | ||
324 | MODULE_LICENSE("GPL"); | 324 | MODULE_LICENSE("Dual BSD/GPL"); |
325 | MODULE_DESCRIPTION("LZ4 Decompressor"); | 325 | MODULE_DESCRIPTION("LZ4 Decompressor"); |
326 | #endif | 326 | #endif |
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c index eb1a74f5e368..f344f76b6559 100644 --- a/lib/lz4/lz4hc_compress.c +++ b/lib/lz4/lz4hc_compress.c | |||
@@ -533,7 +533,7 @@ int lz4hc_compress(const unsigned char *src, size_t src_len, | |||
533 | exit: | 533 | exit: |
534 | return ret; | 534 | return ret; |
535 | } | 535 | } |
536 | EXPORT_SYMBOL_GPL(lz4hc_compress); | 536 | EXPORT_SYMBOL(lz4hc_compress); |
537 | 537 | ||
538 | MODULE_LICENSE("GPL"); | 538 | MODULE_LICENSE("Dual BSD/GPL"); |
539 | MODULE_DESCRIPTION("LZ4HC compressor"); | 539 | MODULE_DESCRIPTION("LZ4HC compressor"); |
diff --git a/mm/fremap.c b/mm/fremap.c index 87da3590c61e..5bff08147768 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, | |||
57 | unsigned long addr, unsigned long pgoff, pgprot_t prot) | 57 | unsigned long addr, unsigned long pgoff, pgprot_t prot) |
58 | { | 58 | { |
59 | int err = -ENOMEM; | 59 | int err = -ENOMEM; |
60 | pte_t *pte; | 60 | pte_t *pte, ptfile; |
61 | spinlock_t *ptl; | 61 | spinlock_t *ptl; |
62 | 62 | ||
63 | pte = get_locked_pte(mm, addr, &ptl); | 63 | pte = get_locked_pte(mm, addr, &ptl); |
64 | if (!pte) | 64 | if (!pte) |
65 | goto out; | 65 | goto out; |
66 | 66 | ||
67 | if (!pte_none(*pte)) | 67 | ptfile = pgoff_to_pte(pgoff); |
68 | |||
69 | if (!pte_none(*pte)) { | ||
70 | if (pte_present(*pte) && pte_soft_dirty(*pte)) | ||
71 | pte_file_mksoft_dirty(ptfile); | ||
68 | zap_pte(mm, vma, addr, pte); | 72 | zap_pte(mm, vma, addr, pte); |
73 | } | ||
69 | 74 | ||
70 | set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); | 75 | set_pte_at(mm, addr, pte, ptfile); |
71 | /* | 76 | /* |
72 | * We don't need to run update_mmu_cache() here because the "file pte" | 77 | * We don't need to run update_mmu_cache() here because the "file pte" |
73 | * being installed by install_file_pte() is not a real pte - it's a | 78 | * being installed by install_file_pte() is not a real pte - it's a |
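The fremap hunk above is one instance of a pattern repeated later in this diff (memory.c, rmap.c, swapfile.c): when a present pte is replaced by a file or swap pte, the soft-dirty bit is carried over so userspace dirty-page tracking does not lose information. A hedged sketch of that pattern, reusing the helpers visible in the hunk; note that this sketch assigns the result of pte_file_mksoft_dirty(), which returns the updated pte value:

        pte_t ptfile = pgoff_to_pte(pgoff);

        if (!pte_none(*pte)) {
                /* preserve soft-dirty from the pte being torn down */
                if (pte_present(*pte) && pte_soft_dirty(*pte))
                        ptfile = pte_file_mksoft_dirty(ptfile);
                zap_pte(mm, vma, addr, pte);
        }

        set_pte_at(mm, addr, pte, ptfile);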
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 83aff0a4d093..b60f33080a28 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, | |||
2490 | 2490 | ||
2491 | mm = vma->vm_mm; | 2491 | mm = vma->vm_mm; |
2492 | 2492 | ||
2493 | tlb_gather_mmu(&tlb, mm, 0); | 2493 | tlb_gather_mmu(&tlb, mm, start, end); |
2494 | __unmap_hugepage_range(&tlb, vma, start, end, ref_page); | 2494 | __unmap_hugepage_range(&tlb, vma, start, end, ref_page); |
2495 | tlb_finish_mmu(&tlb, start, end); | 2495 | tlb_finish_mmu(&tlb, start, end); |
2496 | } | 2496 | } |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c290a1cf3862..0878ff7c26a9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | |||
3195 | if (!s->memcg_params) | 3195 | if (!s->memcg_params) |
3196 | return -ENOMEM; | 3196 | return -ENOMEM; |
3197 | 3197 | ||
3198 | INIT_WORK(&s->memcg_params->destroy, | ||
3199 | kmem_cache_destroy_work_func); | ||
3200 | if (memcg) { | 3198 | if (memcg) { |
3201 | s->memcg_params->memcg = memcg; | 3199 | s->memcg_params->memcg = memcg; |
3202 | s->memcg_params->root_cache = root_cache; | 3200 | s->memcg_params->root_cache = root_cache; |
3201 | INIT_WORK(&s->memcg_params->destroy, | ||
3202 | kmem_cache_destroy_work_func); | ||
3203 | } else | 3203 | } else |
3204 | s->memcg_params->is_root_cache = true; | 3204 | s->memcg_params->is_root_cache = true; |
3205 | 3205 | ||
@@ -6969,7 +6969,6 @@ struct cgroup_subsys mem_cgroup_subsys = { | |||
6969 | #ifdef CONFIG_MEMCG_SWAP | 6969 | #ifdef CONFIG_MEMCG_SWAP |
6970 | static int __init enable_swap_account(char *s) | 6970 | static int __init enable_swap_account(char *s) |
6971 | { | 6971 | { |
6972 | /* consider enabled if no parameter or 1 is given */ | ||
6973 | if (!strcmp(s, "1")) | 6972 | if (!strcmp(s, "1")) |
6974 | really_do_swap_account = 1; | 6973 | really_do_swap_account = 1; |
6975 | else if (!strcmp(s, "0")) | 6974 | else if (!strcmp(s, "0")) |
diff --git a/mm/memory.c b/mm/memory.c index 1ce2e2a734fc..af84bc0ec17c 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb) | |||
209 | * tear-down from @mm. The @fullmm argument is used when @mm is without | 209 | * tear-down from @mm. The @fullmm argument is used when @mm is without |
210 | * users and we're going to destroy the full address space (exit/execve). | 210 | * users and we're going to destroy the full address space (exit/execve). |
211 | */ | 211 | */ |
212 | void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) | 212 | void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
213 | { | 213 | { |
214 | tlb->mm = mm; | 214 | tlb->mm = mm; |
215 | 215 | ||
216 | tlb->fullmm = fullmm; | 216 | /* Is it from 0 to ~0? */ |
217 | tlb->fullmm = !(start | (end+1)); | ||
217 | tlb->need_flush_all = 0; | 218 | tlb->need_flush_all = 0; |
218 | tlb->start = -1UL; | 219 | tlb->start = start; |
219 | tlb->end = 0; | 220 | tlb->end = end; |
220 | tlb->need_flush = 0; | 221 | tlb->need_flush = 0; |
221 | tlb->local.next = NULL; | 222 | tlb->local.next = NULL; |
222 | tlb->local.nr = 0; | 223 | tlb->local.nr = 0; |
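With the signature change above, callers of tlb_gather_mmu() describe the address range being torn down instead of passing a full-mm flag; the hugetlb, zap_page_range and unmap_region/exit_mmap hunks in this diff all follow the same convention. A sketch of the two call shapes (illustrative caller code, not a specific in-tree function):

        struct mmu_gather tlb;

        /* partial teardown: hand the exact range to the gatherer */
        tlb_gather_mmu(&tlb, mm, start, end);
        unmap_vmas(&tlb, vma, start, end);
        tlb_finish_mmu(&tlb, start, end);

        /* whole address space (exit/execve): 0..-1 means "everything" */
        tlb_gather_mmu(&tlb, mm, 0, -1);
        unmap_vmas(&tlb, vma, 0, -1);
        tlb_finish_mmu(&tlb, 0, -1);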
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e | |||
256 | { | 257 | { |
257 | struct mmu_gather_batch *batch, *next; | 258 | struct mmu_gather_batch *batch, *next; |
258 | 259 | ||
259 | tlb->start = start; | ||
260 | tlb->end = end; | ||
261 | tlb_flush_mmu(tlb); | 260 | tlb_flush_mmu(tlb); |
262 | 261 | ||
263 | /* keep the page table cache within bounds */ | 262 | /* keep the page table cache within bounds */ |
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, | |||
1099 | spinlock_t *ptl; | 1098 | spinlock_t *ptl; |
1100 | pte_t *start_pte; | 1099 | pte_t *start_pte; |
1101 | pte_t *pte; | 1100 | pte_t *pte; |
1102 | unsigned long range_start = addr; | ||
1103 | 1101 | ||
1104 | again: | 1102 | again: |
1105 | init_rss_vec(rss); | 1103 | init_rss_vec(rss); |
@@ -1141,9 +1139,12 @@ again: | |||
1141 | continue; | 1139 | continue; |
1142 | if (unlikely(details) && details->nonlinear_vma | 1140 | if (unlikely(details) && details->nonlinear_vma |
1143 | && linear_page_index(details->nonlinear_vma, | 1141 | && linear_page_index(details->nonlinear_vma, |
1144 | addr) != page->index) | 1142 | addr) != page->index) { |
1145 | set_pte_at(mm, addr, pte, | 1143 | pte_t ptfile = pgoff_to_pte(page->index); |
1146 | pgoff_to_pte(page->index)); | 1144 | if (pte_soft_dirty(ptent)) |
1145 | pte_file_mksoft_dirty(ptfile); | ||
1146 | set_pte_at(mm, addr, pte, ptfile); | ||
1147 | } | ||
1147 | if (PageAnon(page)) | 1148 | if (PageAnon(page)) |
1148 | rss[MM_ANONPAGES]--; | 1149 | rss[MM_ANONPAGES]--; |
1149 | else { | 1150 | else { |
@@ -1202,17 +1203,25 @@ again: | |||
1202 | * and page-free while holding it. | 1203 | * and page-free while holding it. |
1203 | */ | 1204 | */ |
1204 | if (force_flush) { | 1205 | if (force_flush) { |
1206 | unsigned long old_end; | ||
1207 | |||
1205 | force_flush = 0; | 1208 | force_flush = 0; |
1206 | 1209 | ||
1207 | #ifdef HAVE_GENERIC_MMU_GATHER | 1210 | /* |
1208 | tlb->start = range_start; | 1211 | * Flush the TLB just for the previous segment, |
1212 | * then update the range to be the remaining | ||
1213 | * TLB range. | ||
1214 | */ | ||
1215 | old_end = tlb->end; | ||
1209 | tlb->end = addr; | 1216 | tlb->end = addr; |
1210 | #endif | 1217 | |
1211 | tlb_flush_mmu(tlb); | 1218 | tlb_flush_mmu(tlb); |
1212 | if (addr != end) { | 1219 | |
1213 | range_start = addr; | 1220 | tlb->start = addr; |
1221 | tlb->end = old_end; | ||
1222 | |||
1223 | if (addr != end) | ||
1214 | goto again; | 1224 | goto again; |
1215 | } | ||
1216 | } | 1225 | } |
1217 | 1226 | ||
1218 | return addr; | 1227 | return addr; |
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start, | |||
1397 | unsigned long end = start + size; | 1406 | unsigned long end = start + size; |
1398 | 1407 | ||
1399 | lru_add_drain(); | 1408 | lru_add_drain(); |
1400 | tlb_gather_mmu(&tlb, mm, 0); | 1409 | tlb_gather_mmu(&tlb, mm, start, end); |
1401 | update_hiwater_rss(mm); | 1410 | update_hiwater_rss(mm); |
1402 | mmu_notifier_invalidate_range_start(mm, start, end); | 1411 | mmu_notifier_invalidate_range_start(mm, start, end); |
1403 | for ( ; vma && vma->vm_start < end; vma = vma->vm_next) | 1412 | for ( ; vma && vma->vm_start < end; vma = vma->vm_next) |
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr | |||
1423 | unsigned long end = address + size; | 1432 | unsigned long end = address + size; |
1424 | 1433 | ||
1425 | lru_add_drain(); | 1434 | lru_add_drain(); |
1426 | tlb_gather_mmu(&tlb, mm, 0); | 1435 | tlb_gather_mmu(&tlb, mm, address, end); |
1427 | update_hiwater_rss(mm); | 1436 | update_hiwater_rss(mm); |
1428 | mmu_notifier_invalidate_range_start(mm, address, end); | 1437 | mmu_notifier_invalidate_range_start(mm, address, end); |
1429 | unmap_single_vma(&tlb, vma, address, end, details); | 1438 | unmap_single_vma(&tlb, vma, address, end, details); |
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3115 | exclusive = 1; | 3124 | exclusive = 1; |
3116 | } | 3125 | } |
3117 | flush_icache_page(vma, page); | 3126 | flush_icache_page(vma, page); |
3127 | if (pte_swp_soft_dirty(orig_pte)) | ||
3128 | pte = pte_mksoft_dirty(pte); | ||
3118 | set_pte_at(mm, address, page_table, pte); | 3129 | set_pte_at(mm, address, page_table, pte); |
3119 | if (page == swapcache) | 3130 | if (page == swapcache) |
3120 | do_page_add_anon_rmap(page, vma, address, exclusive); | 3131 | do_page_add_anon_rmap(page, vma, address, exclusive); |
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3408 | entry = mk_pte(page, vma->vm_page_prot); | 3419 | entry = mk_pte(page, vma->vm_page_prot); |
3409 | if (flags & FAULT_FLAG_WRITE) | 3420 | if (flags & FAULT_FLAG_WRITE) |
3410 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | 3421 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
3422 | else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte)) | ||
3423 | pte_mksoft_dirty(entry); | ||
3411 | if (anon) { | 3424 | if (anon) { |
3412 | inc_mm_counter_fast(mm, MM_ANONPAGES); | 3425 | inc_mm_counter_fast(mm, MM_ANONPAGES); |
3413 | page_add_new_anon_rmap(page, vma, address); | 3426 | page_add_new_anon_rmap(page, vma, address); |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm, | |||
2336 | struct mmu_gather tlb; | 2336 | struct mmu_gather tlb; |
2337 | 2337 | ||
2338 | lru_add_drain(); | 2338 | lru_add_drain(); |
2339 | tlb_gather_mmu(&tlb, mm, 0); | 2339 | tlb_gather_mmu(&tlb, mm, start, end); |
2340 | update_hiwater_rss(mm); | 2340 | update_hiwater_rss(mm); |
2341 | unmap_vmas(&tlb, vma, start, end); | 2341 | unmap_vmas(&tlb, vma, start, end); |
2342 | free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, | 2342 | free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, |
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm) | |||
2709 | 2709 | ||
2710 | lru_add_drain(); | 2710 | lru_add_drain(); |
2711 | flush_cache_mm(mm); | 2711 | flush_cache_mm(mm); |
2712 | tlb_gather_mmu(&tlb, mm, 1); | 2712 | tlb_gather_mmu(&tlb, mm, 0, -1); |
2713 | /* update_hiwater_rss(mm) here? but nobody should be looking */ | 2713 | /* update_hiwater_rss(mm) here? but nobody should be looking */ |
2714 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ | 2714 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ |
2715 | unmap_vmas(&tlb, vma, 0, -1); | 2715 | unmap_vmas(&tlb, vma, 0, -1); |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
1236 | swp_entry_to_pte(make_hwpoison_entry(page))); | 1236 | swp_entry_to_pte(make_hwpoison_entry(page))); |
1237 | } else if (PageAnon(page)) { | 1237 | } else if (PageAnon(page)) { |
1238 | swp_entry_t entry = { .val = page_private(page) }; | 1238 | swp_entry_t entry = { .val = page_private(page) }; |
1239 | pte_t swp_pte; | ||
1239 | 1240 | ||
1240 | if (PageSwapCache(page)) { | 1241 | if (PageSwapCache(page)) { |
1241 | /* | 1242 | /* |
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
1264 | BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); | 1265 | BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); |
1265 | entry = make_migration_entry(page, pte_write(pteval)); | 1266 | entry = make_migration_entry(page, pte_write(pteval)); |
1266 | } | 1267 | } |
1267 | set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); | 1268 | swp_pte = swp_entry_to_pte(entry); |
1269 | if (pte_soft_dirty(pteval)) | ||
1270 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | ||
1271 | set_pte_at(mm, address, pte, swp_pte); | ||
1268 | BUG_ON(pte_file(*pte)); | 1272 | BUG_ON(pte_file(*pte)); |
1269 | } else if (IS_ENABLED(CONFIG_MIGRATION) && | 1273 | } else if (IS_ENABLED(CONFIG_MIGRATION) && |
1270 | (TTU_ACTION(flags) == TTU_MIGRATION)) { | 1274 | (TTU_ACTION(flags) == TTU_MIGRATION)) { |
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, | |||
1401 | pteval = ptep_clear_flush(vma, address, pte); | 1405 | pteval = ptep_clear_flush(vma, address, pte); |
1402 | 1406 | ||
1403 | /* If nonlinear, store the file page offset in the pte. */ | 1407 | /* If nonlinear, store the file page offset in the pte. */ |
1404 | if (page->index != linear_page_index(vma, address)) | 1408 | if (page->index != linear_page_index(vma, address)) { |
1405 | set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); | 1409 | pte_t ptfile = pgoff_to_pte(page->index); |
1410 | if (pte_soft_dirty(pteval)) | ||
1411 | pte_file_mksoft_dirty(ptfile); | ||
1412 | set_pte_at(mm, address, pte, ptfile); | ||
1413 | } | ||
1406 | 1414 | ||
1407 | /* Move the dirty bit to the physical page now the pte is gone. */ | 1415 | /* Move the dirty bit to the physical page now the pte is gone. */ |
1408 | if (pte_dirty(pteval)) | 1416 | if (pte_dirty(pteval)) |
diff --git a/mm/shmem.c b/mm/shmem.c index 8335dbd3fc35..e43dc555069d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -2909,14 +2909,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range); | |||
2909 | 2909 | ||
2910 | /* common code */ | 2910 | /* common code */ |
2911 | 2911 | ||
2912 | static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen) | ||
2913 | { | ||
2914 | return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)", | ||
2915 | dentry->d_name.name); | ||
2916 | } | ||
2917 | |||
2918 | static struct dentry_operations anon_ops = { | 2912 | static struct dentry_operations anon_ops = { |
2919 | .d_dname = shmem_dname | 2913 | .d_dname = simple_dname |
2920 | }; | 2914 | }; |
2921 | 2915 | ||
2922 | /** | 2916 | /** |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 36af6eeaa67e..6cf2e60983b7 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free) | |||
866 | } | 866 | } |
867 | #endif /* CONFIG_HIBERNATION */ | 867 | #endif /* CONFIG_HIBERNATION */ |
868 | 868 | ||
869 | static inline int maybe_same_pte(pte_t pte, pte_t swp_pte) | ||
870 | { | ||
871 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
872 | /* | ||
873 | * When pte keeps soft dirty bit the pte generated | ||
874 | * from swap entry does not has it, still it's same | ||
875 | * pte from logical point of view. | ||
876 | */ | ||
877 | pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte); | ||
878 | return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty); | ||
879 | #else | ||
880 | return pte_same(pte, swp_pte); | ||
881 | #endif | ||
882 | } | ||
883 | |||
869 | /* | 884 | /* |
870 | * No need to decide whether this PTE shares the swap entry with others, | 885 | * No need to decide whether this PTE shares the swap entry with others, |
871 | * just let do_wp_page work it out if a write is requested later - to | 886 | * just let do_wp_page work it out if a write is requested later - to |
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, | |||
892 | } | 907 | } |
893 | 908 | ||
894 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 909 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
895 | if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { | 910 | if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) { |
896 | mem_cgroup_cancel_charge_swapin(memcg); | 911 | mem_cgroup_cancel_charge_swapin(memcg); |
897 | ret = 0; | 912 | ret = 0; |
898 | goto out; | 913 | goto out; |
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
947 | * swapoff spends a _lot_ of time in this loop! | 962 | * swapoff spends a _lot_ of time in this loop! |
948 | * Test inline before going to call unuse_pte. | 963 | * Test inline before going to call unuse_pte. |
949 | */ | 964 | */ |
950 | if (unlikely(pte_same(*pte, swp_pte))) { | 965 | if (unlikely(maybe_same_pte(*pte, swp_pte))) { |
951 | pte_unmap(pte); | 966 | pte_unmap(pte); |
952 | ret = unuse_pte(vma, pmd, addr, entry, page); | 967 | ret = unuse_pte(vma, pmd, addr, entry, page); |
953 | if (ret) | 968 | if (ret) |
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 4a78c4de9f20..6ee48aac776f 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep); | |||
91 | 91 | ||
92 | struct net_device *vlan_dev_real_dev(const struct net_device *dev) | 92 | struct net_device *vlan_dev_real_dev(const struct net_device *dev) |
93 | { | 93 | { |
94 | return vlan_dev_priv(dev)->real_dev; | 94 | struct net_device *ret = vlan_dev_priv(dev)->real_dev; |
95 | |||
96 | while (is_vlan_dev(ret)) | ||
97 | ret = vlan_dev_priv(ret)->real_dev; | ||
98 | |||
99 | return ret; | ||
95 | } | 100 | } |
96 | EXPORT_SYMBOL(vlan_dev_real_dev); | 101 | EXPORT_SYMBOL(vlan_dev_real_dev); |
97 | 102 | ||
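The vlan_core change walks up through stacked (Q-in-Q) VLAN devices instead of stopping after a single dereference, so vlan_dev_real_dev() returns the underlying physical device even for a VLAN created on top of another VLAN. The generic shape of the fix, with illustrative types rather than the in-tree API:

        struct dev {
                struct dev *parent;     /* NULL for a real (non-VLAN) device */
        };

        /* follow the parent chain until the device is no longer a wrapper */
        static struct dev *real_dev(struct dev *d)
        {
                while (d->parent)       /* the old code dereferenced once */
                        d = d->parent;
                return d;
        }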
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index e14531f1ce1c..264de88db320 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c | |||
@@ -1529,6 +1529,8 @@ out: | |||
1529 | * in these cases, the skb is further handled by this function and | 1529 | * in these cases, the skb is further handled by this function and |
1530 | * returns 1, otherwise it returns 0 and the caller shall further | 1530 | * returns 1, otherwise it returns 0 and the caller shall further |
1531 | * process the skb. | 1531 | * process the skb. |
1532 | * | ||
1533 | * This call might reallocate skb data. | ||
1532 | */ | 1534 | */ |
1533 | int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, | 1535 | int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, |
1534 | unsigned short vid) | 1536 | unsigned short vid) |
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index f105219f4a4b..7614af31daff 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c | |||
@@ -508,6 +508,7 @@ out: | |||
508 | return 0; | 508 | return 0; |
509 | } | 509 | } |
510 | 510 | ||
511 | /* this call might reallocate skb data */ | ||
511 | static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) | 512 | static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) |
512 | { | 513 | { |
513 | int ret = false; | 514 | int ret = false; |
@@ -568,6 +569,7 @@ out: | |||
568 | return ret; | 569 | return ret; |
569 | } | 570 | } |
570 | 571 | ||
572 | /* this call might reallocate skb data */ | ||
571 | bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) | 573 | bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) |
572 | { | 574 | { |
573 | struct ethhdr *ethhdr; | 575 | struct ethhdr *ethhdr; |
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) | |||
619 | 621 | ||
620 | if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) | 622 | if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) |
621 | return false; | 623 | return false; |
624 | |||
625 | /* skb->data might have been reallocated by pskb_may_pull() */ | ||
626 | ethhdr = (struct ethhdr *)skb->data; | ||
627 | if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) | ||
628 | ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); | ||
629 | |||
622 | udphdr = (struct udphdr *)(skb->data + *header_len); | 630 | udphdr = (struct udphdr *)(skb->data + *header_len); |
623 | *header_len += sizeof(*udphdr); | 631 | *header_len += sizeof(*udphdr); |
624 | 632 | ||
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) | |||
634 | return true; | 642 | return true; |
635 | } | 643 | } |
636 | 644 | ||
645 | /* this call might reallocate skb data */ | ||
637 | bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, | 646 | bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, |
638 | struct sk_buff *skb, struct ethhdr *ethhdr) | 647 | struct sk_buff *skb) |
639 | { | 648 | { |
640 | struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; | 649 | struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; |
641 | struct batadv_orig_node *orig_dst_node = NULL; | 650 | struct batadv_orig_node *orig_dst_node = NULL; |
642 | struct batadv_gw_node *curr_gw = NULL; | 651 | struct batadv_gw_node *curr_gw = NULL; |
652 | struct ethhdr *ethhdr; | ||
643 | bool ret, out_of_range = false; | 653 | bool ret, out_of_range = false; |
644 | unsigned int header_len = 0; | 654 | unsigned int header_len = 0; |
645 | uint8_t curr_tq_avg; | 655 | uint8_t curr_tq_avg; |
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, | |||
648 | if (!ret) | 658 | if (!ret) |
649 | goto out; | 659 | goto out; |
650 | 660 | ||
661 | ethhdr = (struct ethhdr *)skb->data; | ||
651 | orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, | 662 | orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, |
652 | ethhdr->h_dest); | 663 | ethhdr->h_dest); |
653 | if (!orig_dst_node) | 664 | if (!orig_dst_node) |
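Several batman-adv hunks in this file revolve around one rule: pskb_may_pull() may reallocate skb->data, so any header pointer computed before the call is potentially stale afterwards. A condensed sketch of the pattern the patch applies, using the fields and helpers that appear in the hunks:

        struct ethhdr *ethhdr = (struct ethhdr *)skb->data;

        if (!pskb_may_pull(skb, *header_len + sizeof(struct udphdr)))
                return false;

        /* skb->data might have been reallocated by pskb_may_pull() */
        ethhdr = (struct ethhdr *)skb->data;
        if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
                ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);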
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 039902dca4a6..1037d75da51f 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h | |||
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv, | |||
34 | void batadv_gw_node_purge(struct batadv_priv *bat_priv); | 34 | void batadv_gw_node_purge(struct batadv_priv *bat_priv); |
35 | int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); | 35 | int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); |
36 | bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); | 36 | bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); |
37 | bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, | 37 | bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb); |
38 | struct sk_buff *skb, struct ethhdr *ethhdr); | ||
39 | 38 | ||
40 | #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ | 39 | #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 700d0b49742d..0f04e1c302b4 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
180 | if (batadv_bla_tx(bat_priv, skb, vid)) | 180 | if (batadv_bla_tx(bat_priv, skb, vid)) |
181 | goto dropped; | 181 | goto dropped; |
182 | 182 | ||
183 | /* skb->data might have been reallocated by batadv_bla_tx() */ | ||
184 | ethhdr = (struct ethhdr *)skb->data; | ||
185 | |||
183 | /* Register the client MAC in the transtable */ | 186 | /* Register the client MAC in the transtable */ |
184 | if (!is_multicast_ether_addr(ethhdr->h_source)) | 187 | if (!is_multicast_ether_addr(ethhdr->h_source)) |
185 | batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); | 188 | batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); |
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
220 | default: | 223 | default: |
221 | break; | 224 | break; |
222 | } | 225 | } |
226 | |||
227 | /* reminder: ethhdr might have become unusable from here on | ||
228 | * (batadv_gw_is_dhcp_target() might have reallocated skb data) | ||
229 | */ | ||
223 | } | 230 | } |
224 | 231 | ||
225 | /* ethernet packet should be broadcasted */ | 232 | /* ethernet packet should be broadcasted */ |
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
266 | /* unicast packet */ | 273 | /* unicast packet */ |
267 | } else { | 274 | } else { |
268 | if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { | 275 | if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { |
269 | ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr); | 276 | ret = batadv_gw_out_of_range(bat_priv, skb); |
270 | if (ret) | 277 | if (ret) |
271 | goto dropped; | 278 | goto dropped; |
272 | } | 279 | } |
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index dc8b5d4dd636..857e1b8349ee 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c | |||
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size, | |||
326 | * @skb: the skb containing the payload to encapsulate | 326 | * @skb: the skb containing the payload to encapsulate |
327 | * @orig_node: the destination node | 327 | * @orig_node: the destination node |
328 | * | 328 | * |
329 | * Returns false if the payload could not be encapsulated or true otherwise | 329 | * Returns false if the payload could not be encapsulated or true otherwise. |
330 | * | ||
331 | * This call might reallocate skb data. | ||
330 | */ | 332 | */ |
331 | static bool batadv_unicast_prepare_skb(struct sk_buff *skb, | 333 | static bool batadv_unicast_prepare_skb(struct sk_buff *skb, |
332 | struct batadv_orig_node *orig_node) | 334 | struct batadv_orig_node *orig_node) |
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb, | |||
343 | * @orig_node: the destination node | 345 | * @orig_node: the destination node |
344 | * @packet_subtype: the batman 4addr packet subtype to use | 346 | * @packet_subtype: the batman 4addr packet subtype to use |
345 | * | 347 | * |
346 | * Returns false if the payload could not be encapsulated or true otherwise | 348 | * Returns false if the payload could not be encapsulated or true otherwise. |
349 | * | ||
350 | * This call might reallocate skb data. | ||
347 | */ | 351 | */ |
348 | bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, | 352 | bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, |
349 | struct sk_buff *skb, | 353 | struct sk_buff *skb, |
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv, | |||
401 | struct batadv_neigh_node *neigh_node; | 405 | struct batadv_neigh_node *neigh_node; |
402 | int data_len = skb->len; | 406 | int data_len = skb->len; |
403 | int ret = NET_RX_DROP; | 407 | int ret = NET_RX_DROP; |
404 | unsigned int dev_mtu; | 408 | unsigned int dev_mtu, header_len; |
405 | 409 | ||
406 | /* get routing information */ | 410 | /* get routing information */ |
407 | if (is_multicast_ether_addr(ethhdr->h_dest)) { | 411 | if (is_multicast_ether_addr(ethhdr->h_dest)) { |
@@ -428,11 +432,17 @@ find_router: | |||
428 | 432 | ||
429 | switch (packet_type) { | 433 | switch (packet_type) { |
430 | case BATADV_UNICAST: | 434 | case BATADV_UNICAST: |
431 | batadv_unicast_prepare_skb(skb, orig_node); | 435 | if (!batadv_unicast_prepare_skb(skb, orig_node)) |
436 | goto out; | ||
437 | |||
438 | header_len = sizeof(struct batadv_unicast_packet); | ||
432 | break; | 439 | break; |
433 | case BATADV_UNICAST_4ADDR: | 440 | case BATADV_UNICAST_4ADDR: |
434 | batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, | 441 | if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, |
435 | packet_subtype); | 442 | packet_subtype)) |
443 | goto out; | ||
444 | |||
445 | header_len = sizeof(struct batadv_unicast_4addr_packet); | ||
436 | break; | 446 | break; |
437 | default: | 447 | default: |
438 | /* this function supports UNICAST and UNICAST_4ADDR only. It | 448 | /* this function supports UNICAST and UNICAST_4ADDR only. It |
@@ -441,6 +451,7 @@ find_router: | |||
441 | goto out; | 451 | goto out; |
442 | } | 452 | } |
443 | 453 | ||
454 | ethhdr = (struct ethhdr *)(skb->data + header_len); | ||
444 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 455 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
445 | 456 | ||
446 | /* inform the destination node that we are still missing a correct route | 457 | /* inform the destination node that we are still missing a correct route |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 60aca9109a50..ffd5874f2592 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
@@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) | |||
161 | if (!pv) | 161 | if (!pv) |
162 | return; | 162 | return; |
163 | 163 | ||
164 | for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { | 164 | for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) { |
165 | f = __br_fdb_get(br, br->dev->dev_addr, vid); | 165 | f = __br_fdb_get(br, br->dev->dev_addr, vid); |
166 | if (f && f->is_local && !f->dst) | 166 | if (f && f->is_local && !f->dst) |
167 | fdb_delete(br, f); | 167 | fdb_delete(br, f); |
@@ -730,7 +730,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
730 | /* VID was specified, so use it. */ | 730 | /* VID was specified, so use it. */ |
731 | err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); | 731 | err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); |
732 | } else { | 732 | } else { |
733 | if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { | 733 | if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) { |
734 | err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); | 734 | err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); |
735 | goto out; | 735 | goto out; |
736 | } | 736 | } |
@@ -739,7 +739,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
739 | * specify a VLAN. To be nice, add/update entry for every | 739 | * specify a VLAN. To be nice, add/update entry for every |
740 | * vlan on this port. | 740 | * vlan on this port. |
741 | */ | 741 | */ |
742 | for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { | 742 | for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) { |
743 | err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); | 743 | err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); |
744 | if (err) | 744 | if (err) |
745 | goto out; | 745 | goto out; |
@@ -817,7 +817,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], | |||
817 | 817 | ||
818 | err = __br_fdb_delete(p, addr, vid); | 818 | err = __br_fdb_delete(p, addr, vid); |
819 | } else { | 819 | } else { |
820 | if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { | 820 | if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) { |
821 | err = __br_fdb_delete(p, addr, 0); | 821 | err = __br_fdb_delete(p, addr, 0); |
822 | goto out; | 822 | goto out; |
823 | } | 823 | } |
@@ -827,7 +827,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], | |||
827 | * vlan on this port. | 827 | * vlan on this port. |
828 | */ | 828 | */ |
829 | err = -ENOENT; | 829 | err = -ENOENT; |
830 | for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { | 830 | for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) { |
831 | err &= __br_fdb_delete(p, addr, vid); | 831 | err &= __br_fdb_delete(p, addr, vid); |
832 | } | 832 | } |
833 | } | 833 | } |
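The bridge changes in this and the following files swap BR_VLAN_BITMAP_LEN for VLAN_N_VID in the bitmap helpers: those helpers take a size in bits, while BR_VLAN_BITMAP_LEN is a size in longs (BITS_TO_LONGS(VLAN_N_VID)), so the old calls only scanned the first word of the VLAN bitmap. A sketch of the corrected usage; handle_vlan() is a placeholder, not a kernel function:

        unsigned long vlan_bitmap[BITS_TO_LONGS(VLAN_N_VID)];
        u16 vid;

        if (bitmap_empty(vlan_bitmap, VLAN_N_VID))      /* nbits, not nlongs */
                return;

        for_each_set_bit(vid, vlan_bitmap, VLAN_N_VID)  /* all 4096 VLAN ids */
                handle_vlan(vid);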
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 61c5e819380e..08e576ada0b2 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -1195,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1195 | max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); | 1195 | max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); |
1196 | if (max_delay) | 1196 | if (max_delay) |
1197 | group = &mld->mld_mca; | 1197 | group = &mld->mld_mca; |
1198 | } else if (skb->len >= sizeof(*mld2q)) { | 1198 | } else { |
1199 | if (!pskb_may_pull(skb, sizeof(*mld2q))) { | 1199 | if (!pskb_may_pull(skb, sizeof(*mld2q))) { |
1200 | err = -EINVAL; | 1200 | err = -EINVAL; |
1201 | goto out; | 1201 | goto out; |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 1fc30abd3a52..b9259efa636e 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -132,7 +132,7 @@ static int br_fill_ifinfo(struct sk_buff *skb, | |||
132 | else | 132 | else |
133 | pv = br_get_vlan_info(br); | 133 | pv = br_get_vlan_info(br); |
134 | 134 | ||
135 | if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) | 135 | if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) |
136 | goto done; | 136 | goto done; |
137 | 137 | ||
138 | af = nla_nest_start(skb, IFLA_AF_SPEC); | 138 | af = nla_nest_start(skb, IFLA_AF_SPEC); |
@@ -140,7 +140,7 @@ static int br_fill_ifinfo(struct sk_buff *skb, | |||
140 | goto nla_put_failure; | 140 | goto nla_put_failure; |
141 | 141 | ||
142 | pvid = br_get_pvid(pv); | 142 | pvid = br_get_pvid(pv); |
143 | for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { | 143 | for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) { |
144 | vinfo.vid = vid; | 144 | vinfo.vid = vid; |
145 | vinfo.flags = 0; | 145 | vinfo.flags = 0; |
146 | if (vid == pvid) | 146 | if (vid == pvid) |
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 394bb96b6087..3b9637fb7939 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Sysfs attributes of bridge ports | 2 | * Sysfs attributes of bridge |
3 | * Linux ethernet bridge | 3 | * Linux ethernet bridge |
4 | * | 4 | * |
5 | * Authors: | 5 | * Authors: |
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index bd58b45f5f90..9a9ffe7e4019 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c | |||
@@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid) | |||
108 | 108 | ||
109 | clear_bit(vid, v->vlan_bitmap); | 109 | clear_bit(vid, v->vlan_bitmap); |
110 | v->num_vlans--; | 110 | v->num_vlans--; |
111 | if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { | 111 | if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) { |
112 | if (v->port_idx) | 112 | if (v->port_idx) |
113 | rcu_assign_pointer(v->parent.port->vlan_info, NULL); | 113 | rcu_assign_pointer(v->parent.port->vlan_info, NULL); |
114 | else | 114 | else |
@@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v) | |||
122 | { | 122 | { |
123 | smp_wmb(); | 123 | smp_wmb(); |
124 | v->pvid = 0; | 124 | v->pvid = 0; |
125 | bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN); | 125 | bitmap_zero(v->vlan_bitmap, VLAN_N_VID); |
126 | if (v->port_idx) | 126 | if (v->port_idx) |
127 | rcu_assign_pointer(v->parent.port->vlan_info, NULL); | 127 | rcu_assign_pointer(v->parent.port->vlan_info, NULL); |
128 | else | 128 | else |
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 00ee068efc1c..b84a1b155bc1 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -65,6 +65,7 @@ ipv6: | |||
65 | nhoff += sizeof(struct ipv6hdr); | 65 | nhoff += sizeof(struct ipv6hdr); |
66 | break; | 66 | break; |
67 | } | 67 | } |
68 | case __constant_htons(ETH_P_8021AD): | ||
68 | case __constant_htons(ETH_P_8021Q): { | 69 | case __constant_htons(ETH_P_8021Q): { |
69 | const struct vlan_hdr *vlan; | 70 | const struct vlan_hdr *vlan; |
70 | struct vlan_hdr _vlan; | 71 | struct vlan_hdr _vlan; |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 9232c68941ab..60533db8b72d 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, | |||
1441 | atomic_set(&p->refcnt, 1); | 1441 | atomic_set(&p->refcnt, 1); |
1442 | p->reachable_time = | 1442 | p->reachable_time = |
1443 | neigh_rand_reach_time(p->base_reachable_time); | 1443 | neigh_rand_reach_time(p->base_reachable_time); |
1444 | dev_hold(dev); | ||
1445 | p->dev = dev; | ||
1446 | write_pnet(&p->net, hold_net(net)); | ||
1447 | p->sysctl_table = NULL; | ||
1444 | 1448 | ||
1445 | if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { | 1449 | if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { |
1450 | release_net(net); | ||
1451 | dev_put(dev); | ||
1446 | kfree(p); | 1452 | kfree(p); |
1447 | return NULL; | 1453 | return NULL; |
1448 | } | 1454 | } |
1449 | 1455 | ||
1450 | dev_hold(dev); | ||
1451 | p->dev = dev; | ||
1452 | write_pnet(&p->net, hold_net(net)); | ||
1453 | p->sysctl_table = NULL; | ||
1454 | write_lock_bh(&tbl->lock); | 1456 | write_lock_bh(&tbl->lock); |
1455 | p->next = tbl->parms.next; | 1457 | p->next = tbl->parms.next; |
1456 | tbl->parms.next = p; | 1458 | tbl->parms.next = p; |
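The neigh_parms_alloc change moves the device and namespace setup ahead of the driver's ndo_neigh_setup() callback, so the callback sees a fully initialised parms structure, and the failure path now drops the references that were taken. The general shape, condensed from the hunk:

        dev_hold(dev);
        p->dev = dev;
        write_pnet(&p->net, hold_net(net));
        p->sysctl_table = NULL;

        if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
                release_net(net);       /* undo everything taken above */
                dev_put(dev);
                kfree(p);
                return NULL;
        }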
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 3de740834d1f..ca198c1d1d30 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm, | |||
2156 | /* If aging addresses are supported device will need to | 2156 | /* If aging addresses are supported device will need to |
2157 | * implement its own handler for this. | 2157 | * implement its own handler for this. |
2158 | */ | 2158 | */ |
2159 | if (ndm->ndm_state & NUD_PERMANENT) { | 2159 | if (!(ndm->ndm_state & NUD_PERMANENT)) { |
2160 | pr_info("%s: FDB only supports static addresses\n", dev->name); | 2160 | pr_info("%s: FDB only supports static addresses\n", dev->name); |
2161 | return -EINVAL; | 2161 | return -EINVAL; |
2162 | } | 2162 | } |
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) | |||
2384 | struct nlattr *extfilt; | 2384 | struct nlattr *extfilt; |
2385 | u32 filter_mask = 0; | 2385 | u32 filter_mask = 0; |
2386 | 2386 | ||
2387 | extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg), | 2387 | extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), |
2388 | IFLA_EXT_MASK); | 2388 | IFLA_EXT_MASK); |
2389 | if (extfilt) | 2389 | if (extfilt) |
2390 | filter_mask = nla_get_u32(extfilt); | 2390 | filter_mask = nla_get_u32(extfilt); |
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index ab3d814bc80a..109ee89f123e 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) | |||
477 | } | 477 | } |
478 | 478 | ||
479 | return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - | 479 | return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - |
480 | net_adj) & ~(align - 1)) + (net_adj - 2); | 480 | net_adj) & ~(align - 1)) + net_adj - 2; |
481 | } | 481 | } |
482 | 482 | ||
483 | static void esp4_err(struct sk_buff *skb, u32 info) | 483 | static void esp4_err(struct sk_buff *skb, u32 info) |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 108a1e9c9eac..3df6d3edb2a1 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -71,7 +71,6 @@ | |||
71 | #include <linux/init.h> | 71 | #include <linux/init.h> |
72 | #include <linux/list.h> | 72 | #include <linux/list.h> |
73 | #include <linux/slab.h> | 73 | #include <linux/slab.h> |
74 | #include <linux/prefetch.h> | ||
75 | #include <linux/export.h> | 74 | #include <linux/export.h> |
76 | #include <net/net_namespace.h> | 75 | #include <net/net_namespace.h> |
77 | #include <net/ip.h> | 76 | #include <net/ip.h> |
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c) | |||
1761 | if (!c) | 1760 | if (!c) |
1762 | continue; | 1761 | continue; |
1763 | 1762 | ||
1764 | if (IS_LEAF(c)) { | 1763 | if (IS_LEAF(c)) |
1765 | prefetch(rcu_dereference_rtnl(p->child[idx])); | ||
1766 | return (struct leaf *) c; | 1764 | return (struct leaf *) c; |
1767 | } | ||
1768 | 1765 | ||
1769 | /* Rescan start scanning in new node */ | 1766 | /* Rescan start scanning in new node */ |
1770 | p = (struct tnode *) c; | 1767 | p = (struct tnode *) c; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 1f6eab66f7ce..8d6939eeb492 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, | |||
383 | if (daddr) | 383 | if (daddr) |
384 | memcpy(&iph->daddr, daddr, 4); | 384 | memcpy(&iph->daddr, daddr, 4); |
385 | if (iph->daddr) | 385 | if (iph->daddr) |
386 | return t->hlen; | 386 | return t->hlen + sizeof(*iph); |
387 | 387 | ||
388 | return -(t->hlen + sizeof(*iph)); | 388 | return -(t->hlen + sizeof(*iph)); |
389 | } | 389 | } |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 7167b08977df..850525b34899 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt, | |||
76 | iph->daddr = dst; | 76 | iph->daddr = dst; |
77 | iph->saddr = src; | 77 | iph->saddr = src; |
78 | iph->ttl = ttl; | 78 | iph->ttl = ttl; |
79 | tunnel_ip_select_ident(skb, | 79 | __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); |
80 | (const struct iphdr *)skb_inner_network_header(skb), | ||
81 | &rt->dst); | ||
82 | 80 | ||
83 | err = ip_local_out(skb); | 81 | err = ip_local_out(skb); |
84 | if (unlikely(net_xmit_eval(err))) | 82 | if (unlikely(net_xmit_eval(err))) |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 6577a1149a47..463bd1273346 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = { | |||
273 | SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), | 273 | SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), |
274 | SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), | 274 | SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), |
275 | SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), | 275 | SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), |
276 | SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), | 276 | SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS), |
277 | SNMP_MIB_SENTINEL | 277 | SNMP_MIB_SENTINEL |
278 | }; | 278 | }; |
279 | 279 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5423223e93c2..b2f6c74861af 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1121,6 +1121,13 @@ new_segment: | |||
1121 | goto wait_for_memory; | 1121 | goto wait_for_memory; |
1122 | 1122 | ||
1123 | /* | 1123 | /* |
1124 | * All packets are restored as if they have | ||
1125 | * already been sent. | ||
1126 | */ | ||
1127 | if (tp->repair) | ||
1128 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | ||
1129 | |||
1130 | /* | ||
1124 | * Check whether we can use HW checksum. | 1131 | * Check whether we can use HW checksum. |
1125 | */ | 1132 | */ |
1126 | if (sk->sk_route_caps & NETIF_F_ALL_CSUM) | 1133 | if (sk->sk_route_caps & NETIF_F_ALL_CSUM) |
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index a9077f441cb2..b6ae92a51f58 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a) | |||
206 | */ | 206 | */ |
207 | static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | 207 | static inline void bictcp_update(struct bictcp *ca, u32 cwnd) |
208 | { | 208 | { |
209 | u64 offs; | 209 | u32 delta, bic_target, max_cnt; |
210 | u32 delta, t, bic_target, max_cnt; | 210 | u64 offs, t; |
211 | 211 | ||
212 | ca->ack_cnt++; /* count the number of ACKs */ | 212 | ca->ack_cnt++; /* count the number of ACKs */ |
213 | 213 | ||
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | |||
250 | * if the cwnd < 1 million packets !!! | 250 | * if the cwnd < 1 million packets !!! |
251 | */ | 251 | */ |
252 | 252 | ||
253 | t = (s32)(tcp_time_stamp - ca->epoch_start); | ||
254 | t += msecs_to_jiffies(ca->delay_min >> 3); | ||
253 | /* change the unit from HZ to bictcp_HZ */ | 255 | /* change the unit from HZ to bictcp_HZ */ |
254 | t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) | 256 | t <<= BICTCP_HZ; |
255 | - ca->epoch_start) << BICTCP_HZ) / HZ; | 257 | do_div(t, HZ); |
256 | 258 | ||
257 | if (t < ca->bic_K) /* t - K */ | 259 | if (t < ca->bic_K) /* t - K */ |
258 | offs = ca->bic_K - t; | 260 | offs = ca->bic_K - t; |
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) | |||
414 | return; | 416 | return; |
415 | 417 | ||
416 | /* Discard delay samples right after fast recovery */ | 418 | /* Discard delay samples right after fast recovery */ |
417 | if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) | 419 | if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ) |
418 | return; | 420 | return; |
419 | 421 | ||
420 | delay = (rtt_us << 3) / USEC_PER_MSEC; | 422 | delay = (rtt_us << 3) / USEC_PER_MSEC; |
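The bictcp_update() hunk above widens the elapsed-time variable to 64 bits before converting from jiffies to CUBIC's internal clock, so the shift by BICTCP_HZ cannot wrap for long-lived epochs. A minimal userspace sketch of the same conversion, assuming BICTCP_HZ is 10 and HZ is 1000 (illustrative values; the helper name is invented):

#include <stdint.h>
#include <stdio.h>

#define BICTCP_HZ 10          /* CUBIC clock: 2^10 = 1024 ticks/s (assumed) */
#define HZ        1000        /* assumed jiffies rate */

/* Convert elapsed jiffies to BICTCP_HZ units without overflow:
 * do the shift in 64 bits, then divide by HZ. */
static uint64_t jiffies_to_bictcp(uint32_t jiffies_elapsed)
{
        uint64_t t = jiffies_elapsed;

        t <<= BICTCP_HZ;
        return t / HZ;
}

int main(void)
{
        /* ~48 days of jiffies at HZ=1000; a 32-bit shift would wrap. */
        uint32_t elapsed = 4200000000u;

        printf("32-bit shift: %u\n", (elapsed << BICTCP_HZ) / HZ);
        printf("64-bit shift: %llu\n",
               (unsigned long long)jiffies_to_bictcp(elapsed));
        return 0;
}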
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index da4241c8c7da..498ea99194af 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1126,12 +1126,10 @@ retry: | |||
1126 | if (ifp->flags & IFA_F_OPTIMISTIC) | 1126 | if (ifp->flags & IFA_F_OPTIMISTIC) |
1127 | addr_flags |= IFA_F_OPTIMISTIC; | 1127 | addr_flags |= IFA_F_OPTIMISTIC; |
1128 | 1128 | ||
1129 | ift = !max_addresses || | 1129 | ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen, |
1130 | ipv6_count_addresses(idev) < max_addresses ? | 1130 | ipv6_addr_scope(&addr), addr_flags, |
1131 | ipv6_add_addr(idev, &addr, NULL, tmp_plen, | 1131 | tmp_valid_lft, tmp_prefered_lft); |
1132 | ipv6_addr_scope(&addr), addr_flags, | 1132 | if (IS_ERR(ift)) { |
1133 | tmp_valid_lft, tmp_prefered_lft) : NULL; | ||
1134 | if (IS_ERR_OR_NULL(ift)) { | ||
1135 | in6_ifa_put(ifp); | 1133 | in6_ifa_put(ifp); |
1136 | in6_dev_put(idev); | 1134 | in6_dev_put(idev); |
1137 | pr_info("%s: retry temporary address regeneration\n", __func__); | 1135 | pr_info("%s: retry temporary address regeneration\n", __func__); |
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 40ffd72243a4..aeac0dc3635d 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu) | |||
425 | net_adj = 0; | 425 | net_adj = 0; |
426 | 426 | ||
427 | return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - | 427 | return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - |
428 | net_adj) & ~(align - 1)) + (net_adj - 2); | 428 | net_adj) & ~(align - 1)) + net_adj - 2; |
429 | } | 429 | } |
430 | 430 | ||
431 | static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 431 | static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index bff3d821c7eb..c4ff5bbb45c4 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root, | |||
993 | 993 | ||
994 | if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { | 994 | if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { |
995 | #ifdef CONFIG_IPV6_SUBTREES | 995 | #ifdef CONFIG_IPV6_SUBTREES |
996 | if (fn->subtree) | 996 | if (fn->subtree) { |
997 | fn = fib6_lookup_1(fn->subtree, args + 1); | 997 | struct fib6_node *sfn; |
998 | sfn = fib6_lookup_1(fn->subtree, | ||
999 | args + 1); | ||
1000 | if (!sfn) | ||
1001 | goto backtrack; | ||
1002 | fn = sfn; | ||
1003 | } | ||
998 | #endif | 1004 | #endif |
999 | if (!fn || fn->fn_flags & RTN_RTINFO) | 1005 | if (fn->fn_flags & RTN_RTINFO) |
1000 | return fn; | 1006 | return fn; |
1001 | } | 1007 | } |
1002 | } | 1008 | } |
1003 | 1009 | #ifdef CONFIG_IPV6_SUBTREES | |
1010 | backtrack: | ||
1011 | #endif | ||
1004 | if (fn->fn_flags & RTN_ROOT) | 1012 | if (fn->fn_flags & RTN_ROOT) |
1005 | break; | 1013 | break; |
1006 | 1014 | ||
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 79aa9652ed86..04d31c2fbef1 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -1369,8 +1369,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) | |||
1369 | if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) | 1369 | if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) |
1370 | return; | 1370 | return; |
1371 | 1371 | ||
1372 | if (!ndopts.nd_opts_rh) | 1372 | if (!ndopts.nd_opts_rh) { |
1373 | ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0); | ||
1373 | return; | 1374 | return; |
1375 | } | ||
1374 | 1376 | ||
1375 | hdr = (u8 *)ndopts.nd_opts_rh; | 1377 | hdr = (u8 *)ndopts.nd_opts_rh; |
1376 | hdr += 8; | 1378 | hdr += 8; |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 790d9f4b8b0b..1aeb473b2cc6 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, | |||
490 | ipv6_hdr(head)->payload_len = htons(payload_len); | 490 | ipv6_hdr(head)->payload_len = htons(payload_len); |
491 | ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); | 491 | ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); |
492 | IP6CB(head)->nhoff = nhoff; | 492 | IP6CB(head)->nhoff = nhoff; |
493 | IP6CB(head)->flags |= IP6SKB_FRAGMENTED; | ||
493 | 494 | ||
494 | /* Yes, and fold redundant checksum back. 8) */ | 495 | /* Yes, and fold redundant checksum back. 8) */ |
495 | if (head->ip_summed == CHECKSUM_COMPLETE) | 496 | if (head->ip_summed == CHECKSUM_COMPLETE) |
@@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb) | |||
524 | struct net *net = dev_net(skb_dst(skb)->dev); | 525 | struct net *net = dev_net(skb_dst(skb)->dev); |
525 | int evicted; | 526 | int evicted; |
526 | 527 | ||
528 | if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) | ||
529 | goto fail_hdr; | ||
530 | |||
527 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); | 531 | IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); |
528 | 532 | ||
529 | /* Jumbo payload inhibits frag. header */ | 533 | /* Jumbo payload inhibits frag. header */ |
@@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) | |||
544 | ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); | 548 | ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); |
545 | 549 | ||
546 | IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); | 550 | IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); |
551 | IP6CB(skb)->flags |= IP6SKB_FRAGMENTED; | ||
547 | return 1; | 552 | return 1; |
548 | } | 553 | } |
549 | 554 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b70f8979003b..8d9a93ed9c59 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1178,6 +1178,27 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark) | |||
1178 | } | 1178 | } |
1179 | EXPORT_SYMBOL_GPL(ip6_redirect); | 1179 | EXPORT_SYMBOL_GPL(ip6_redirect); |
1180 | 1180 | ||
1181 | void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif, | ||
1182 | u32 mark) | ||
1183 | { | ||
1184 | const struct ipv6hdr *iph = ipv6_hdr(skb); | ||
1185 | const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb); | ||
1186 | struct dst_entry *dst; | ||
1187 | struct flowi6 fl6; | ||
1188 | |||
1189 | memset(&fl6, 0, sizeof(fl6)); | ||
1190 | fl6.flowi6_oif = oif; | ||
1191 | fl6.flowi6_mark = mark; | ||
1192 | fl6.flowi6_flags = 0; | ||
1193 | fl6.daddr = msg->dest; | ||
1194 | fl6.saddr = iph->daddr; | ||
1195 | |||
1196 | dst = ip6_route_output(net, NULL, &fl6); | ||
1197 | if (!dst->error) | ||
1198 | rt6_do_redirect(dst, NULL, skb); | ||
1199 | dst_release(dst); | ||
1200 | } | ||
1201 | |||
1181 | void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) | 1202 | void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) |
1182 | { | 1203 | { |
1183 | ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark); | 1204 | ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark); |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index ae31968d42d3..cc9e02d79b55 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -31,10 +31,12 @@ | |||
31 | #include "led.h" | 31 | #include "led.h" |
32 | 32 | ||
33 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) | 33 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) |
34 | #define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) | ||
34 | #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) | 35 | #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) |
35 | #define IEEE80211_AUTH_MAX_TRIES 3 | 36 | #define IEEE80211_AUTH_MAX_TRIES 3 |
36 | #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) | 37 | #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) |
37 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) | 38 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) |
39 | #define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2) | ||
38 | #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) | 40 | #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) |
39 | #define IEEE80211_ASSOC_MAX_TRIES 3 | 41 | #define IEEE80211_ASSOC_MAX_TRIES 3 |
40 | 42 | ||
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
209 | struct ieee80211_channel *channel, | 211 | struct ieee80211_channel *channel, |
210 | const struct ieee80211_ht_operation *ht_oper, | 212 | const struct ieee80211_ht_operation *ht_oper, |
211 | const struct ieee80211_vht_operation *vht_oper, | 213 | const struct ieee80211_vht_operation *vht_oper, |
212 | struct cfg80211_chan_def *chandef, bool verbose) | 214 | struct cfg80211_chan_def *chandef, bool tracking) |
213 | { | 215 | { |
216 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
214 | struct cfg80211_chan_def vht_chandef; | 217 | struct cfg80211_chan_def vht_chandef; |
215 | u32 ht_cfreq, ret; | 218 | u32 ht_cfreq, ret; |
216 | 219 | ||
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
229 | ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, | 232 | ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, |
230 | channel->band); | 233 | channel->band); |
231 | /* check that channel matches the right operating channel */ | 234 | /* check that channel matches the right operating channel */ |
232 | if (channel->center_freq != ht_cfreq) { | 235 | if (!tracking && channel->center_freq != ht_cfreq) { |
233 | /* | 236 | /* |
234 | * It's possible that some APs are confused here; | 237 | * It's possible that some APs are confused here; |
235 | * Netgear WNDR3700 sometimes reports 4 higher than | 238 | * Netgear WNDR3700 sometimes reports 4 higher than |
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
237 | * since we look at probe response/beacon data here | 240 | * since we look at probe response/beacon data here |
238 | * it should be OK. | 241 | * it should be OK. |
239 | */ | 242 | */ |
240 | if (verbose) | 243 | sdata_info(sdata, |
241 | sdata_info(sdata, | 244 | "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", |
242 | "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", | 245 | channel->center_freq, ht_cfreq, |
243 | channel->center_freq, ht_cfreq, | 246 | ht_oper->primary_chan, channel->band); |
244 | ht_oper->primary_chan, channel->band); | ||
245 | ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; | 247 | ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; |
246 | goto out; | 248 | goto out; |
247 | } | 249 | } |
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
295 | channel->band); | 297 | channel->band); |
296 | break; | 298 | break; |
297 | default: | 299 | default: |
298 | if (verbose) | 300 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) |
299 | sdata_info(sdata, | 301 | sdata_info(sdata, |
300 | "AP VHT operation IE has invalid channel width (%d), disable VHT\n", | 302 | "AP VHT operation IE has invalid channel width (%d), disable VHT\n", |
301 | vht_oper->chan_width); | 303 | vht_oper->chan_width); |
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
304 | } | 306 | } |
305 | 307 | ||
306 | if (!cfg80211_chandef_valid(&vht_chandef)) { | 308 | if (!cfg80211_chandef_valid(&vht_chandef)) { |
307 | if (verbose) | 309 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) |
308 | sdata_info(sdata, | 310 | sdata_info(sdata, |
309 | "AP VHT information is invalid, disable VHT\n"); | 311 | "AP VHT information is invalid, disable VHT\n"); |
310 | ret = IEEE80211_STA_DISABLE_VHT; | 312 | ret = IEEE80211_STA_DISABLE_VHT; |
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
317 | } | 319 | } |
318 | 320 | ||
319 | if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { | 321 | if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { |
320 | if (verbose) | 322 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) |
321 | sdata_info(sdata, | 323 | sdata_info(sdata, |
322 | "AP VHT information doesn't match HT, disable VHT\n"); | 324 | "AP VHT information doesn't match HT, disable VHT\n"); |
323 | ret = IEEE80211_STA_DISABLE_VHT; | 325 | ret = IEEE80211_STA_DISABLE_VHT; |
@@ -333,18 +335,27 @@ out: | |||
333 | if (ret & IEEE80211_STA_DISABLE_VHT) | 335 | if (ret & IEEE80211_STA_DISABLE_VHT) |
334 | vht_chandef = *chandef; | 336 | vht_chandef = *chandef; |
335 | 337 | ||
338 | /* | ||
339 | * Ignore the DISABLED flag when we're already connected and only | ||
340 | * tracking the AP's beacon for bandwidth changes - otherwise we | ||
341 | * might get disconnected here if we connect to an AP, update our | ||
342 | * regulatory information based on the AP's country IE, and the | ||
343 | * information we have is wrong or outdated and disables the channel | ||
344 | * that we're actually using for the connection to the AP. | ||
345 | */ | ||
336 | while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, | 346 | while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, |
337 | IEEE80211_CHAN_DISABLED)) { | 347 | tracking ? 0 : |
348 | IEEE80211_CHAN_DISABLED)) { | ||
338 | if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { | 349 | if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { |
339 | ret = IEEE80211_STA_DISABLE_HT | | 350 | ret = IEEE80211_STA_DISABLE_HT | |
340 | IEEE80211_STA_DISABLE_VHT; | 351 | IEEE80211_STA_DISABLE_VHT; |
341 | goto out; | 352 | break; |
342 | } | 353 | } |
343 | 354 | ||
344 | ret |= chandef_downgrade(chandef); | 355 | ret |= chandef_downgrade(chandef); |
345 | } | 356 | } |
346 | 357 | ||
347 | if (chandef->width != vht_chandef.width && verbose) | 358 | if (chandef->width != vht_chandef.width && !tracking) |
348 | sdata_info(sdata, | 359 | sdata_info(sdata, |
349 | "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); | 360 | "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); |
350 | 361 | ||
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, | |||
384 | 395 | ||
385 | /* calculate new channel (type) based on HT/VHT operation IEs */ | 396 | /* calculate new channel (type) based on HT/VHT operation IEs */ |
386 | flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, | 397 | flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, |
387 | vht_oper, &chandef, false); | 398 | vht_oper, &chandef, true); |
388 | 399 | ||
389 | /* | 400 | /* |
390 | * Downgrade the new channel if we associated with restricted | 401 | * Downgrade the new channel if we associated with restricted |
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) | |||
3394 | 3405 | ||
3395 | if (tx_flags == 0) { | 3406 | if (tx_flags == 0) { |
3396 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; | 3407 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; |
3397 | ifmgd->auth_data->timeout_started = true; | 3408 | auth_data->timeout_started = true; |
3398 | run_again(sdata, auth_data->timeout); | 3409 | run_again(sdata, auth_data->timeout); |
3399 | } else { | 3410 | } else { |
3400 | auth_data->timeout_started = false; | 3411 | auth_data->timeout = |
3412 | round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); | ||
3413 | auth_data->timeout_started = true; | ||
3414 | run_again(sdata, auth_data->timeout); | ||
3401 | } | 3415 | } |
3402 | 3416 | ||
3403 | return 0; | 3417 | return 0; |
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) | |||
3434 | assoc_data->timeout_started = true; | 3448 | assoc_data->timeout_started = true; |
3435 | run_again(sdata, assoc_data->timeout); | 3449 | run_again(sdata, assoc_data->timeout); |
3436 | } else { | 3450 | } else { |
3437 | assoc_data->timeout_started = false; | 3451 | assoc_data->timeout = |
3452 | round_jiffies_up(jiffies + | ||
3453 | IEEE80211_ASSOC_TIMEOUT_LONG); | ||
3454 | assoc_data->timeout_started = true; | ||
3455 | run_again(sdata, assoc_data->timeout); | ||
3438 | } | 3456 | } |
3439 | 3457 | ||
3440 | return 0; | 3458 | return 0; |
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, | |||
3829 | ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, | 3847 | ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, |
3830 | cbss->channel, | 3848 | cbss->channel, |
3831 | ht_oper, vht_oper, | 3849 | ht_oper, vht_oper, |
3832 | &chandef, true); | 3850 | &chandef, false); |
3833 | 3851 | ||
3834 | sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), | 3852 | sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), |
3835 | local->rx_chains); | 3853 | local->rx_chains); |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 7dcc376eea5f..2f8010707d01 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
526 | const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; | 526 | const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; |
527 | __u32 seq, ack, sack, end, win, swin; | 527 | __u32 seq, ack, sack, end, win, swin; |
528 | s16 receiver_offset; | 528 | s16 receiver_offset; |
529 | bool res; | 529 | bool res, in_recv_win; |
530 | 530 | ||
531 | /* | 531 | /* |
532 | * Get the required data from the packet. | 532 | * Get the required data from the packet. |
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
649 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 649 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
650 | receiver->td_scale); | 650 | receiver->td_scale); |
651 | 651 | ||
652 | /* Is the ending sequence in the receive window (if available)? */ | ||
653 | in_recv_win = !receiver->td_maxwin || | ||
654 | after(end, sender->td_end - receiver->td_maxwin - 1); | ||
655 | |||
652 | pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", | 656 | pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", |
653 | before(seq, sender->td_maxend + 1), | 657 | before(seq, sender->td_maxend + 1), |
654 | after(end, sender->td_end - receiver->td_maxwin - 1), | 658 | (in_recv_win ? 1 : 0), |
655 | before(sack, receiver->td_end + 1), | 659 | before(sack, receiver->td_end + 1), |
656 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); | 660 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); |
657 | 661 | ||
658 | if (before(seq, sender->td_maxend + 1) && | 662 | if (before(seq, sender->td_maxend + 1) && |
659 | after(end, sender->td_end - receiver->td_maxwin - 1) && | 663 | in_recv_win && |
660 | before(sack, receiver->td_end + 1) && | 664 | before(sack, receiver->td_end + 1) && |
661 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { | 665 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { |
662 | /* | 666 | /* |
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
725 | nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, | 729 | nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, |
726 | "nf_ct_tcp: %s ", | 730 | "nf_ct_tcp: %s ", |
727 | before(seq, sender->td_maxend + 1) ? | 731 | before(seq, sender->td_maxend + 1) ? |
728 | after(end, sender->td_end - receiver->td_maxwin - 1) ? | 732 | in_recv_win ? |
729 | before(sack, receiver->td_end + 1) ? | 733 | before(sack, receiver->td_end + 1) ? |
730 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" | 734 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" |
731 | : "ACK is under the lower bound (possible overly delayed ACK)" | 735 | : "ACK is under the lower bound (possible overly delayed ACK)" |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 962e9792e317..d92cc317bf8b 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log, | |||
419 | nfmsg->version = NFNETLINK_V0; | 419 | nfmsg->version = NFNETLINK_V0; |
420 | nfmsg->res_id = htons(inst->group_num); | 420 | nfmsg->res_id = htons(inst->group_num); |
421 | 421 | ||
422 | memset(&pmsg, 0, sizeof(pmsg)); | ||
422 | pmsg.hw_protocol = skb->protocol; | 423 | pmsg.hw_protocol = skb->protocol; |
423 | pmsg.hook = hooknum; | 424 | pmsg.hook = hooknum; |
424 | 425 | ||
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log, | |||
498 | if (indev && skb->dev && | 499 | if (indev && skb->dev && |
499 | skb->mac_header != skb->network_header) { | 500 | skb->mac_header != skb->network_header) { |
500 | struct nfulnl_msg_packet_hw phw; | 501 | struct nfulnl_msg_packet_hw phw; |
501 | int len = dev_parse_header(skb, phw.hw_addr); | 502 | int len; |
503 | |||
504 | memset(&phw, 0, sizeof(phw)); | ||
505 | len = dev_parse_header(skb, phw.hw_addr); | ||
502 | if (len > 0) { | 506 | if (len > 0) { |
503 | phw.hw_addrlen = htons(len); | 507 | phw.hw_addrlen = htons(len); |
504 | if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) | 508 | if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 971ea145ab3e..8a703c3dd318 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
463 | if (indev && entskb->dev && | 463 | if (indev && entskb->dev && |
464 | entskb->mac_header != entskb->network_header) { | 464 | entskb->mac_header != entskb->network_header) { |
465 | struct nfqnl_msg_packet_hw phw; | 465 | struct nfqnl_msg_packet_hw phw; |
466 | int len = dev_parse_header(entskb, phw.hw_addr); | 466 | int len; |
467 | |||
468 | memset(&phw, 0, sizeof(phw)); | ||
469 | len = dev_parse_header(entskb, phw.hw_addr); | ||
467 | if (len) { | 470 | if (len) { |
468 | phw.hw_addrlen = htons(len); | 471 | phw.hw_addrlen = htons(len); |
469 | if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) | 472 | if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) |
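Both nfnetlink hunks above zero the on-stack struct before filling it, since copying a partially initialized struct into a netlink message can leak stale stack bytes through padding or unused tail space. A minimal userspace sketch of the pattern (the struct layout is an illustrative stand-in, not the real nfulnl/nfqnl message format):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Illustrative stand-in for a message struct with padding and tail space. */
struct packet_hw {
        uint8_t  hw_addr[6];    /* only the first hw_addrlen bytes are set */
        /* two bytes of compiler padding typically live here */
        uint32_t hw_addrlen;
};

int main(void)
{
        struct packet_hw phw;

        /* Zero everything first so bytes we never write (padding, unused
         * parts of hw_addr) are not whatever the stack happened to hold. */
        memset(&phw, 0, sizeof(phw));

        memcpy(phw.hw_addr, "\xaa\xbb\xcc\xdd\xee\xff", 6);
        phw.hw_addrlen = 6;

        /* The whole struct is now safe to copy out verbatim. */
        for (size_t i = 0; i < sizeof(phw); i++)
                printf("%02x ", ((unsigned char *)&phw)[i]);
        printf("\n");
        return 0;
}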
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 7011c71646f0..6113cc7efffc 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
52 | { | 52 | { |
53 | const struct xt_tcpmss_info *info = par->targinfo; | 53 | const struct xt_tcpmss_info *info = par->targinfo; |
54 | struct tcphdr *tcph; | 54 | struct tcphdr *tcph; |
55 | unsigned int tcplen, i; | 55 | int len, tcp_hdrlen; |
56 | unsigned int i; | ||
56 | __be16 oldval; | 57 | __be16 oldval; |
57 | u16 newmss; | 58 | u16 newmss; |
58 | u8 *opt; | 59 | u8 *opt; |
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
64 | if (!skb_make_writable(skb, skb->len)) | 65 | if (!skb_make_writable(skb, skb->len)) |
65 | return -1; | 66 | return -1; |
66 | 67 | ||
67 | tcplen = skb->len - tcphoff; | 68 | len = skb->len - tcphoff; |
69 | if (len < (int)sizeof(struct tcphdr)) | ||
70 | return -1; | ||
71 | |||
68 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); | 72 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); |
73 | tcp_hdrlen = tcph->doff * 4; | ||
69 | 74 | ||
70 | /* Header cannot be larger than the packet */ | 75 | if (len < tcp_hdrlen) |
71 | if (tcplen < tcph->doff*4) | ||
72 | return -1; | 76 | return -1; |
73 | 77 | ||
74 | if (info->mss == XT_TCPMSS_CLAMP_PMTU) { | 78 | if (info->mss == XT_TCPMSS_CLAMP_PMTU) { |
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
87 | newmss = info->mss; | 91 | newmss = info->mss; |
88 | 92 | ||
89 | opt = (u_int8_t *)tcph; | 93 | opt = (u_int8_t *)tcph; |
90 | for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) { | 94 | for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) { |
91 | if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS && | 95 | if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) { |
92 | opt[i+1] == TCPOLEN_MSS) { | ||
93 | u_int16_t oldmss; | 96 | u_int16_t oldmss; |
94 | 97 | ||
95 | oldmss = (opt[i+2] << 8) | opt[i+3]; | 98 | oldmss = (opt[i+2] << 8) | opt[i+3]; |
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
112 | } | 115 | } |
113 | 116 | ||
114 | /* There is data after the header so the option can't be added | 117 | /* There is data after the header so the option can't be added |
115 | without moving it, and doing so may make the SYN packet | 118 | * without moving it, and doing so may make the SYN packet |
116 | itself too large. Accept the packet unmodified instead. */ | 119 | * itself too large. Accept the packet unmodified instead. |
117 | if (tcplen > tcph->doff*4) | 120 | */ |
121 | if (len > tcp_hdrlen) | ||
118 | return 0; | 122 | return 0; |
119 | 123 | ||
120 | /* | 124 | /* |
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
143 | newmss = min(newmss, (u16)1220); | 147 | newmss = min(newmss, (u16)1220); |
144 | 148 | ||
145 | opt = (u_int8_t *)tcph + sizeof(struct tcphdr); | 149 | opt = (u_int8_t *)tcph + sizeof(struct tcphdr); |
146 | memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); | 150 | memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr)); |
147 | 151 | ||
148 | inet_proto_csum_replace2(&tcph->check, skb, | 152 | inet_proto_csum_replace2(&tcph->check, skb, |
149 | htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1); | 153 | htons(len), htons(len + TCPOLEN_MSS), 1); |
150 | opt[0] = TCPOPT_MSS; | 154 | opt[0] = TCPOPT_MSS; |
151 | opt[1] = TCPOLEN_MSS; | 155 | opt[1] = TCPOLEN_MSS; |
152 | opt[2] = (newmss & 0xff00) >> 8; | 156 | opt[2] = (newmss & 0xff00) >> 8; |
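The xt_TCPMSS rework above first checks that the segment holds at least a full TCP header and then bounds the option walk with i <= tcp_hdrlen - TCPOLEN_MSS, so a 4-byte MSS option is never read past the header. A rough userspace sketch of that walk over a raw header buffer (the parsing helper and test data are invented; only the TCP option constants are real):

#include <stdio.h>
#include <stdint.h>

#define TCPOPT_EOL      0
#define TCPOPT_NOP      1
#define TCPOPT_MSS      2
#define TCPOLEN_MSS     4
#define TCP_BASE_HDRLEN 20

/* Length of the option starting at offset i (EOL/NOP are one byte). */
static unsigned int optlen(const uint8_t *opt, unsigned int i)
{
        return (opt[i] <= TCPOPT_NOP || opt[i + 1] == 0) ? 1 : opt[i + 1];
}

/* Return the MSS value, or -1 if no well-formed MSS option fits. */
static int find_mss(const uint8_t *hdr, unsigned int tcp_hdrlen)
{
        unsigned int i;

        if (tcp_hdrlen < TCP_BASE_HDRLEN)
                return -1;
        /* i + TCPOLEN_MSS <= tcp_hdrlen is the same bound as
         * i <= tcp_hdrlen - TCPOLEN_MSS in the fixed kernel loop. */
        for (i = TCP_BASE_HDRLEN; i + TCPOLEN_MSS <= tcp_hdrlen; i += optlen(hdr, i)) {
                if (hdr[i] == TCPOPT_MSS && hdr[i + 1] == TCPOLEN_MSS)
                        return (hdr[i + 2] << 8) | hdr[i + 3];
                if (hdr[i] == TCPOPT_EOL)
                        break;
        }
        return -1;
}

int main(void)
{
        /* 24-byte header: 20 base bytes plus MSS option (kind 2, len 4, 1460). */
        uint8_t hdr[24] = { 0 };

        hdr[20] = TCPOPT_MSS;
        hdr[21] = TCPOLEN_MSS;
        hdr[22] = 1460 >> 8;
        hdr[23] = 1460 & 0xff;

        printf("mss = %d\n", find_mss(hdr, sizeof(hdr)));   /* 1460 */
        return 0;
}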
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c index b68fa191710f..625fa1d636a0 100644 --- a/net/netfilter/xt_TCPOPTSTRIP.c +++ b/net/netfilter/xt_TCPOPTSTRIP.c | |||
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, | |||
38 | struct tcphdr *tcph; | 38 | struct tcphdr *tcph; |
39 | u_int16_t n, o; | 39 | u_int16_t n, o; |
40 | u_int8_t *opt; | 40 | u_int8_t *opt; |
41 | int len; | 41 | int len, tcp_hdrlen; |
42 | 42 | ||
43 | /* This is a fragment, no TCP header is available */ | 43 | /* This is a fragment, no TCP header is available */ |
44 | if (par->fragoff != 0) | 44 | if (par->fragoff != 0) |
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, | |||
52 | return NF_DROP; | 52 | return NF_DROP; |
53 | 53 | ||
54 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); | 54 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); |
55 | if (tcph->doff * 4 > len) | 55 | tcp_hdrlen = tcph->doff * 4; |
56 | |||
57 | if (len < tcp_hdrlen) | ||
56 | return NF_DROP; | 58 | return NF_DROP; |
57 | 59 | ||
58 | opt = (u_int8_t *)tcph; | 60 | opt = (u_int8_t *)tcph; |
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, | |||
61 | * Walk through all TCP options - if we find some option to remove, | 63 | * Walk through all TCP options - if we find some option to remove, |
62 | * set all octets to %TCPOPT_NOP and adjust checksum. | 64 | * set all octets to %TCPOPT_NOP and adjust checksum. |
63 | */ | 65 | */ |
64 | for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) { | 66 | for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) { |
65 | optl = optlen(opt, i); | 67 | optl = optlen(opt, i); |
66 | 68 | ||
67 | if (i + optl > tcp_hdrlen(skb)) | 69 | if (i + optl > tcp_hdrlen) |
68 | break; | 70 | break; |
69 | 71 | ||
70 | if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i])) | 72 | if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i])) |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 22c5f399f1cf..ab101f715447 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb) | |||
535 | { | 535 | { |
536 | struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); | 536 | struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); |
537 | 537 | ||
538 | OVS_CB(skb)->tun_key = NULL; | ||
538 | return do_execute_actions(dp, skb, acts->actions, | 539 | return do_execute_actions(dp, skb, acts->actions, |
539 | acts->actions_len, false); | 540 | acts->actions_len, false); |
540 | } | 541 | } |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f7e3a0d84c40..f2ed7600084e 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
2076 | ovs_notify(reply, info, &ovs_dp_vport_multicast_group); | 2076 | ovs_notify(reply, info, &ovs_dp_vport_multicast_group); |
2077 | return 0; | 2077 | return 0; |
2078 | 2078 | ||
2079 | rtnl_unlock(); | ||
2080 | return 0; | ||
2081 | |||
2082 | exit_free: | 2079 | exit_free: |
2083 | kfree_skb(reply); | 2080 | kfree_skb(reply); |
2084 | exit_unlock: | 2081 | exit_unlock: |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 5c519b121e1b..1aa84dc58777 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets) | |||
240 | struct flex_array *buckets; | 240 | struct flex_array *buckets; |
241 | int i, err; | 241 | int i, err; |
242 | 242 | ||
243 | buckets = flex_array_alloc(sizeof(struct hlist_head *), | 243 | buckets = flex_array_alloc(sizeof(struct hlist_head), |
244 | n_buckets, GFP_KERNEL); | 244 | n_buckets, GFP_KERNEL); |
245 | if (!buckets) | 245 | if (!buckets) |
246 | return NULL; | 246 | return NULL; |
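The openvswitch fix above sizes each bucket slot as struct hlist_head rather than as a pointer to one; for this one-pointer struct the two sizes happen to coincide, so nothing broke, but the element size should describe what the array actually stores. A tiny userspace illustration (the struct is a simplified stand-in):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's hlist_head. */
struct hlist_head {
        void *first;
};

int main(void)
{
        size_t n_buckets = 1024;

        /* Wrong: sizes the array for pointers to heads. */
        printf("sizeof(struct hlist_head *) = %zu\n", sizeof(struct hlist_head *));
        /* Right: sizes the array for the heads themselves. */
        printf("sizeof(struct hlist_head)   = %zu\n", sizeof(struct hlist_head));

        struct hlist_head *buckets = calloc(n_buckets, sizeof(struct hlist_head));
        if (!buckets)
                return 1;
        /* ... use buckets[0 .. n_buckets-1] as list heads ... */
        free(buckets);
        return 0;
}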
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4b66c752eae5..75c8bbf598c8 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -3259,9 +3259,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
3259 | 3259 | ||
3260 | if (po->tp_version == TPACKET_V3) { | 3260 | if (po->tp_version == TPACKET_V3) { |
3261 | lv = sizeof(struct tpacket_stats_v3); | 3261 | lv = sizeof(struct tpacket_stats_v3); |
3262 | st.stats3.tp_packets += st.stats3.tp_drops; | ||
3262 | data = &st.stats3; | 3263 | data = &st.stats3; |
3263 | } else { | 3264 | } else { |
3264 | lv = sizeof(struct tpacket_stats); | 3265 | lv = sizeof(struct tpacket_stats); |
3266 | st.stats1.tp_packets += st.stats1.tp_drops; | ||
3265 | data = &st.stats1; | 3267 | data = &st.stats1; |
3266 | } | 3268 | } |
3267 | 3269 | ||
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 281c1bded1f6..51b968d3febb 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind) | |||
285 | return q; | 285 | return q; |
286 | } | 286 | } |
287 | 287 | ||
288 | /* The linklayer setting was not transferred from older iproute2 | ||
289 |  * versions, and the rate table lookup system has been dropped from | ||
290 |  * the kernel.  To stay backward compatible with older iproute2 tc | ||
291 |  * utils, we detect the linklayer setting by checking whether the | ||
292 |  * rate table was modified. | ||
293 |  * | ||
294 |  * For linklayer ATM table entries, the rate table will be aligned to | ||
295 |  * 48 bytes, thus some table entries will contain the same value.  The | ||
296 |  * mpu (min packet unit) is also encoded into the old rate table, so | ||
297 |  * starting from the mpu we find the low and high table entries for | ||
298 |  * mapping this cell.  If these entries contain the same value, then | ||
299 |  * the rate table has been modified for linklayer ATM. | ||
300 |  * | ||
301 |  * This is done by rounding the mpu to the nearest 48-byte cell/entry, | ||
302 |  * then rounding up to the next cell, computing the table entry one | ||
303 |  * below, and comparing the two. | ||
304 |  */ | ||
305 | static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab) | ||
306 | { | ||
307 | int low = roundup(r->mpu, 48); | ||
308 | int high = roundup(low+1, 48); | ||
309 | int cell_low = low >> r->cell_log; | ||
310 | int cell_high = (high >> r->cell_log) - 1; | ||
311 | |||
312 | /* rtab is too inaccurate at rates > 100Mbit/s */ | ||
313 | if ((r->rate > (100000000/8)) || (rtab[0] == 0)) { | ||
314 | pr_debug("TC linklayer: Giving up ATM detection\n"); | ||
315 | return TC_LINKLAYER_ETHERNET; | ||
316 | } | ||
317 | |||
318 | if ((cell_high > cell_low) && (cell_high < 256) | ||
319 | && (rtab[cell_low] == rtab[cell_high])) { | ||
320 | pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n", | ||
321 | cell_low, cell_high, rtab[cell_high]); | ||
322 | return TC_LINKLAYER_ATM; | ||
323 | } | ||
324 | return TC_LINKLAYER_ETHERNET; | ||
325 | } | ||
326 | |||
288 | static struct qdisc_rate_table *qdisc_rtab_list; | 327 | static struct qdisc_rate_table *qdisc_rtab_list; |
289 | 328 | ||
290 | struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) | 329 | struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) |
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta | |||
308 | rtab->rate = *r; | 347 | rtab->rate = *r; |
309 | rtab->refcnt = 1; | 348 | rtab->refcnt = 1; |
310 | memcpy(rtab->data, nla_data(tab), 1024); | 349 | memcpy(rtab->data, nla_data(tab), 1024); |
350 | if (r->linklayer == TC_LINKLAYER_UNAWARE) | ||
351 | r->linklayer = __detect_linklayer(r, rtab->data); | ||
311 | rtab->next = qdisc_rtab_list; | 352 | rtab->next = qdisc_rtab_list; |
312 | qdisc_rtab_list = rtab; | 353 | qdisc_rtab_list = rtab; |
313 | } | 354 | } |
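The __detect_linklayer() heuristic above relies on ATM-shaped rate tables charging whole 53-byte cells per 48 bytes of payload, so neighbouring entries below a cell boundary carry identical costs. A rough userspace sketch of the same comparison against a synthetically built table (the table semantics here are simplified for illustration and skip the kernel's extra rate/rtab[0] guards):

#include <stdio.h>
#include <stdint.h>

#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

/* Build a toy 256-entry rate table: entry i covers packet sizes up to
 * (i + 1) << cell_log bytes.  ATM-shaped tables charge whole 48-byte
 * cells, so many neighbouring entries end up equal. */
static void build_atm_rtab(uint32_t *rtab, int cell_log, uint32_t cost_per_cell)
{
        for (int i = 0; i < 256; i++) {
                uint32_t bytes = (uint32_t)(i + 1) << cell_log;
                uint32_t cells = (bytes + 47) / 48;
                rtab[i] = cells * cost_per_cell;
        }
}

/* Same comparison as the kernel heuristic: returns 1 for "looks like ATM". */
static int detect_atm(const uint32_t *rtab, unsigned int mpu, int cell_log)
{
        int low = ROUNDUP(mpu, 48);
        int high = ROUNDUP(low + 1, 48);
        int cell_low = low >> cell_log;
        int cell_high = (high >> cell_log) - 1;

        return cell_high > cell_low && cell_high < 256 &&
               rtab[cell_low] == rtab[cell_high];
}

int main(void)
{
        uint32_t rtab[256];

        build_atm_rtab(rtab, 3, 1000);
        /* mpu = 0, cell_log = 3: compares rtab[0] and rtab[5], both one cell. */
        printf("ATM-shaped table detected: %d\n", detect_atm(rtab, 0, 3));
        return 0;
}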
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 4626cef4b76e..48be3d5c0d92 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/rcupdate.h> | 25 | #include <linux/rcupdate.h> |
26 | #include <linux/list.h> | 26 | #include <linux/list.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/if_vlan.h> | ||
28 | #include <net/sch_generic.h> | 29 | #include <net/sch_generic.h> |
29 | #include <net/pkt_sched.h> | 30 | #include <net/pkt_sched.h> |
30 | #include <net/dst.h> | 31 | #include <net/dst.h> |
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q) | |||
207 | 208 | ||
208 | unsigned long dev_trans_start(struct net_device *dev) | 209 | unsigned long dev_trans_start(struct net_device *dev) |
209 | { | 210 | { |
210 | unsigned long val, res = dev->trans_start; | 211 | unsigned long val, res; |
211 | unsigned int i; | 212 | unsigned int i; |
212 | 213 | ||
214 | if (is_vlan_dev(dev)) | ||
215 | dev = vlan_dev_real_dev(dev); | ||
216 | res = dev->trans_start; | ||
213 | for (i = 0; i < dev->num_tx_queues; i++) { | 217 | for (i = 0; i < dev->num_tx_queues; i++) { |
214 | val = netdev_get_tx_queue(dev, i)->trans_start; | 218 | val = netdev_get_tx_queue(dev, i)->trans_start; |
215 | if (val && time_after(val, res)) | 219 | if (val && time_after(val, res)) |
216 | res = val; | 220 | res = val; |
217 | } | 221 | } |
218 | dev->trans_start = res; | 222 | dev->trans_start = res; |
223 | |||
219 | return res; | 224 | return res; |
220 | } | 225 | } |
221 | EXPORT_SYMBOL(dev_trans_start); | 226 | EXPORT_SYMBOL(dev_trans_start); |
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, | |||
904 | memset(r, 0, sizeof(*r)); | 909 | memset(r, 0, sizeof(*r)); |
905 | r->overhead = conf->overhead; | 910 | r->overhead = conf->overhead; |
906 | r->rate_bytes_ps = conf->rate; | 911 | r->rate_bytes_ps = conf->rate; |
912 | r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); | ||
907 | r->mult = 1; | 913 | r->mult = 1; |
908 | /* | 914 | /* |
909 | * The deal here is to replace a divide by a reciprocal one | 915 | * The deal here is to replace a divide by a reciprocal one |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 45e751527dfc..c2178b15ca6e 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1329 | struct htb_sched *q = qdisc_priv(sch); | 1329 | struct htb_sched *q = qdisc_priv(sch); |
1330 | struct htb_class *cl = (struct htb_class *)*arg, *parent; | 1330 | struct htb_class *cl = (struct htb_class *)*arg, *parent; |
1331 | struct nlattr *opt = tca[TCA_OPTIONS]; | 1331 | struct nlattr *opt = tca[TCA_OPTIONS]; |
1332 | struct qdisc_rate_table *rtab = NULL, *ctab = NULL; | ||
1332 | struct nlattr *tb[TCA_HTB_MAX + 1]; | 1333 | struct nlattr *tb[TCA_HTB_MAX + 1]; |
1333 | struct tc_htb_opt *hopt; | 1334 | struct tc_htb_opt *hopt; |
1334 | 1335 | ||
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1350 | if (!hopt->rate.rate || !hopt->ceil.rate) | 1351 | if (!hopt->rate.rate || !hopt->ceil.rate) |
1351 | goto failure; | 1352 | goto failure; |
1352 | 1353 | ||
1354 | /* Keep backward compatibility with rate_table-based iproute2 tc */ | ||
1355 | if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) { | ||
1356 | rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); | ||
1357 | if (rtab) | ||
1358 | qdisc_put_rtab(rtab); | ||
1359 | } | ||
1360 | if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) { | ||
1361 | ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); | ||
1362 | if (ctab) | ||
1363 | qdisc_put_rtab(ctab); | ||
1364 | } | ||
1365 | |||
1353 | if (!cl) { /* new class */ | 1366 | if (!cl) { /* new class */ |
1354 | struct Qdisc *new_q; | 1367 | struct Qdisc *new_q; |
1355 | int prio; | 1368 | int prio; |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index bce5b79662a6..ab67efc64b24 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, | |||
846 | else | 846 | else |
847 | spc_state = SCTP_ADDR_AVAILABLE; | 847 | spc_state = SCTP_ADDR_AVAILABLE; |
848 | /* Don't inform ULP about transition from PF to | 848 | /* Don't inform ULP about transition from PF to |
849 | * active state and set cwnd to 1, see SCTP | 849 | * active state and set cwnd to 1 MTU, see SCTP |
850 | * Quick failover draft section 5.1, point 5 | 850 | * Quick failover draft section 5.1, point 5 |
851 | */ | 851 | */ |
852 | if (transport->state == SCTP_PF) { | 852 | if (transport->state == SCTP_PF) { |
853 | ulp_notify = false; | 853 | ulp_notify = false; |
854 | transport->cwnd = 1; | 854 | transport->cwnd = asoc->pathmtu; |
855 | } | 855 | } |
856 | transport->state = SCTP_ACTIVE; | 856 | transport->state = SCTP_ACTIVE; |
857 | break; | 857 | break; |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index bdbbc3fd7c14..8fdd16046d66 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport) | |||
181 | return; | 181 | return; |
182 | } | 182 | } |
183 | 183 | ||
184 | call_rcu(&transport->rcu, sctp_transport_destroy_rcu); | ||
185 | |||
186 | sctp_packet_free(&transport->packet); | 184 | sctp_packet_free(&transport->packet); |
187 | 185 | ||
188 | if (transport->asoc) | 186 | if (transport->asoc) |
189 | sctp_association_put(transport->asoc); | 187 | sctp_association_put(transport->asoc); |
188 | |||
189 | call_rcu(&transport->rcu, sctp_transport_destroy_rcu); | ||
190 | } | 190 | } |
191 | 191 | ||
192 | /* Start T3_rtx timer if it is not already running and update the heartbeat | 192 | /* Start T3_rtx timer if it is not already running and update the heartbeat |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index cb29ef7ba2f0..609c30c80816 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr) | |||
460 | { | 460 | { |
461 | struct tipc_link *l_ptr; | 461 | struct tipc_link *l_ptr; |
462 | struct tipc_link *temp_l_ptr; | 462 | struct tipc_link *temp_l_ptr; |
463 | struct tipc_link_req *temp_req; | ||
463 | 464 | ||
464 | pr_info("Disabling bearer <%s>\n", b_ptr->name); | 465 | pr_info("Disabling bearer <%s>\n", b_ptr->name); |
465 | spin_lock_bh(&b_ptr->lock); | 466 | spin_lock_bh(&b_ptr->lock); |
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr) | |||
468 | list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { | 469 | list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { |
469 | tipc_link_delete(l_ptr); | 470 | tipc_link_delete(l_ptr); |
470 | } | 471 | } |
471 | if (b_ptr->link_req) | 472 | temp_req = b_ptr->link_req; |
472 | tipc_disc_delete(b_ptr->link_req); | 473 | b_ptr->link_req = NULL; |
473 | spin_unlock_bh(&b_ptr->lock); | 474 | spin_unlock_bh(&b_ptr->lock); |
475 | |||
476 | if (temp_req) | ||
477 | tipc_disc_delete(temp_req); | ||
478 | |||
474 | memset(b_ptr, 0, sizeof(struct tipc_bearer)); | 479 | memset(b_ptr, 0, sizeof(struct tipc_bearer)); |
475 | } | 480 | } |
476 | 481 | ||
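The tipc bearer_disable() change above detaches link_req while holding b_ptr->lock, drops the lock, and only then calls tipc_disc_delete(), so the heavier teardown never runs under the spinlock. The same detach-then-release pattern in a userspace sketch, with a pthread mutex standing in for the spinlock (all names invented for the example):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct discoverer { int id; };

struct bearer {
        pthread_mutex_t lock;
        struct discoverer *link_req;
};

/* May sleep or do heavy work, so it must not run under b->lock. */
static void discoverer_delete(struct discoverer *d)
{
        printf("deleting discoverer %d\n", d->id);
        free(d);
}

static void bearer_disable(struct bearer *b)
{
        struct discoverer *temp_req;

        pthread_mutex_lock(&b->lock);
        /* ... tear down per-bearer state under the lock ... */
        temp_req = b->link_req;     /* detach while holding the lock */
        b->link_req = NULL;
        pthread_mutex_unlock(&b->lock);

        if (temp_req)               /* heavy cleanup after unlocking */
                discoverer_delete(temp_req);
}

int main(void)
{
        struct bearer b = { PTHREAD_MUTEX_INITIALIZER, NULL };

        b.link_req = malloc(sizeof(*b.link_req));
        if (!b.link_req)
                return 1;
        b.link_req->id = 1;
        bearer_disable(&b);
        return 0;
}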
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 593071dabd1c..4d9334683f84 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) | |||
347 | for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { | 347 | for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { |
348 | struct vsock_sock *vsk; | 348 | struct vsock_sock *vsk; |
349 | list_for_each_entry(vsk, &vsock_connected_table[i], | 349 | list_for_each_entry(vsk, &vsock_connected_table[i], |
350 | connected_table); | 350 | connected_table) |
351 | fn(sk_vsock(vsk)); | 351 | fn(sk_vsock(vsk)); |
352 | } | 352 | } |
353 | 353 | ||
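The af_vsock fix above is a one-character change: a stray semicolon after list_for_each_entry() made the loop body empty, so fn() ran only once, after the walk, instead of once per connected socket. A tiny userspace reproduction of the hazard with an ordinary for-style macro:

#include <stdio.h>

/* A for-each style macro, like the kernel's list_for_each_entry(). */
#define for_each_index(i, n)  for ((i) = 0; (i) < (n); (i)++)

int main(void)
{
        int i;

        /* Buggy: the ';' terminates the loop with an empty body, so the
         * call below runs exactly once, with i == 3. */
        for_each_index(i, 3);
                printf("buggy   i=%d\n", i);

        /* Fixed: no ';', the statement is the loop body (i = 0, 1, 2). */
        for_each_index(i, 3)
                printf("fixed   i=%d\n", i);

        return 0;
}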
diff --git a/net/wireless/core.c b/net/wireless/core.c index 4f9f216665e9..a8c29fa4f1b3 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, | |||
765 | cfg80211_leave_mesh(rdev, dev); | 765 | cfg80211_leave_mesh(rdev, dev); |
766 | break; | 766 | break; |
767 | case NL80211_IFTYPE_AP: | 767 | case NL80211_IFTYPE_AP: |
768 | case NL80211_IFTYPE_P2P_GO: | ||
768 | cfg80211_stop_ap(rdev, dev); | 769 | cfg80211_stop_ap(rdev, dev); |
769 | break; | 770 | break; |
770 | default: | 771 | default: |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 25d217d90807..5f6e982cdcf4 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, | |||
441 | goto out_unlock; | 441 | goto out_unlock; |
442 | } | 442 | } |
443 | *rdev = wiphy_to_dev((*wdev)->wiphy); | 443 | *rdev = wiphy_to_dev((*wdev)->wiphy); |
444 | cb->args[0] = (*rdev)->wiphy_idx; | 444 | /* 0 is the first index - add 1 to parse only once */ |
445 | cb->args[0] = (*rdev)->wiphy_idx + 1; | ||
445 | cb->args[1] = (*wdev)->identifier; | 446 | cb->args[1] = (*wdev)->identifier; |
446 | } else { | 447 | } else { |
447 | struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]); | 448 | /* subtract the 1 again here */ |
449 | struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); | ||
448 | struct wireless_dev *tmp; | 450 | struct wireless_dev *tmp; |
449 | 451 | ||
450 | if (!wiphy) { | 452 | if (!wiphy) { |
@@ -2620,8 +2622,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
2620 | 2622 | ||
2621 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, | 2623 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, |
2622 | NL80211_CMD_NEW_KEY); | 2624 | NL80211_CMD_NEW_KEY); |
2623 | if (IS_ERR(hdr)) | 2625 | if (!hdr) |
2624 | return PTR_ERR(hdr); | 2626 | return -ENOBUFS; |
2625 | 2627 | ||
2626 | cookie.msg = msg; | 2628 | cookie.msg = msg; |
2627 | cookie.idx = key_idx; | 2629 | cookie.idx = key_idx; |
@@ -6505,6 +6507,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb, | |||
6505 | NL80211_CMD_TESTMODE); | 6507 | NL80211_CMD_TESTMODE); |
6506 | struct nlattr *tmdata; | 6508 | struct nlattr *tmdata; |
6507 | 6509 | ||
6510 | if (!hdr) | ||
6511 | break; | ||
6512 | |||
6508 | if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { | 6513 | if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { |
6509 | genlmsg_cancel(skb, hdr); | 6514 | genlmsg_cancel(skb, hdr); |
6510 | break; | 6515 | break; |
@@ -6949,9 +6954,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, | |||
6949 | 6954 | ||
6950 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, | 6955 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, |
6951 | NL80211_CMD_REMAIN_ON_CHANNEL); | 6956 | NL80211_CMD_REMAIN_ON_CHANNEL); |
6952 | 6957 | if (!hdr) { | |
6953 | if (IS_ERR(hdr)) { | 6958 | err = -ENOBUFS; |
6954 | err = PTR_ERR(hdr); | ||
6955 | goto free_msg; | 6959 | goto free_msg; |
6956 | } | 6960 | } |
6957 | 6961 | ||
@@ -7249,9 +7253,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) | |||
7249 | 7253 | ||
7250 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, | 7254 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, |
7251 | NL80211_CMD_FRAME); | 7255 | NL80211_CMD_FRAME); |
7252 | 7256 | if (!hdr) { | |
7253 | if (IS_ERR(hdr)) { | 7257 | err = -ENOBUFS; |
7254 | err = PTR_ERR(hdr); | ||
7255 | goto free_msg; | 7258 | goto free_msg; |
7256 | } | 7259 | } |
7257 | } | 7260 | } |
@@ -8130,9 +8133,8 @@ static int nl80211_probe_client(struct sk_buff *skb, | |||
8130 | 8133 | ||
8131 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, | 8134 | hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, |
8132 | NL80211_CMD_PROBE_CLIENT); | 8135 | NL80211_CMD_PROBE_CLIENT); |
8133 | 8136 | if (!hdr) { | |
8134 | if (IS_ERR(hdr)) { | 8137 | err = -ENOBUFS; |
8135 | err = PTR_ERR(hdr); | ||
8136 | goto free_msg; | 8138 | goto free_msg; |
8137 | } | 8139 | } |
8138 | 8140 | ||
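The nl80211_prepare_wdev_dump() hunk near the top of this file's diff stores wiphy_idx + 1 in cb->args[0] and subtracts 1 when reading it back: dump args start out zeroed, so index 0 would otherwise be indistinguishable from "not parsed yet". A small sketch of that store-index-plus-one convention (names invented for the example):

#include <stdio.h>

static long args[2];   /* zero-initialized, like a netlink dump's cb->args[] */

static int prepare_dump(int wiphy_idx)
{
        if (args[0] == 0) {
                /* First pass: parse attributes once and cache the index.
                 * 0 is a valid index, so store idx + 1 as the "set" marker. */
                args[0] = wiphy_idx + 1;
                return 1;       /* parsed this time */
        }
        return 0;               /* already parsed; reuse args[0] - 1 */
}

int main(void)
{
        printf("parsed=%d cached idx=%ld\n", prepare_dump(0), args[0] - 1);
        printf("parsed=%d cached idx=%ld\n", prepare_dump(0), args[0] - 1);
        return 0;
}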
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 81c8a10d743c..20e86a95dc4e 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -976,21 +976,19 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, | |||
976 | struct net_device *dev, u16 reason, bool wextev) | 976 | struct net_device *dev, u16 reason, bool wextev) |
977 | { | 977 | { |
978 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 978 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
979 | int err; | 979 | int err = 0; |
980 | 980 | ||
981 | ASSERT_WDEV_LOCK(wdev); | 981 | ASSERT_WDEV_LOCK(wdev); |
982 | 982 | ||
983 | kfree(wdev->connect_keys); | 983 | kfree(wdev->connect_keys); |
984 | wdev->connect_keys = NULL; | 984 | wdev->connect_keys = NULL; |
985 | 985 | ||
986 | if (wdev->conn) { | 986 | if (wdev->conn) |
987 | err = cfg80211_sme_disconnect(wdev, reason); | 987 | err = cfg80211_sme_disconnect(wdev, reason); |
988 | } else if (!rdev->ops->disconnect) { | 988 | else if (!rdev->ops->disconnect) |
989 | cfg80211_mlme_down(rdev, dev); | 989 | cfg80211_mlme_down(rdev, dev); |
990 | err = 0; | 990 | else if (wdev->current_bss) |
991 | } else { | ||
992 | err = rdev_disconnect(rdev, dev, reason); | 991 | err = rdev_disconnect(rdev, dev, reason); |
993 | } | ||
994 | 992 | ||
995 | return err; | 993 | return err; |
996 | } | 994 | } |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 8e77cbbad871..e3c7ba8d7582 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -522,7 +522,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1, | |||
522 | } | 522 | } |
523 | 523 | ||
524 | #define nid_has_mute(codec, nid, dir) \ | 524 | #define nid_has_mute(codec, nid, dir) \ |
525 | check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE) | 525 | check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) |
526 | #define nid_has_volume(codec, nid, dir) \ | 526 | #define nid_has_volume(codec, nid, dir) \ |
527 | check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS) | 527 | check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS) |
528 | 528 | ||
@@ -624,7 +624,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, | |||
624 | if (enable) | 624 | if (enable) |
625 | val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; | 625 | val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; |
626 | } | 626 | } |
627 | if (caps & AC_AMPCAP_MUTE) { | 627 | if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) { |
628 | if (!enable) | 628 | if (!enable) |
629 | val |= HDA_AMP_MUTE; | 629 | val |= HDA_AMP_MUTE; |
630 | } | 630 | } |
@@ -648,7 +648,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec, | |||
648 | { | 648 | { |
649 | unsigned int mask = 0xff; | 649 | unsigned int mask = 0xff; |
650 | 650 | ||
651 | if (caps & AC_AMPCAP_MUTE) { | 651 | if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) { |
652 | if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL)) | 652 | if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL)) |
653 | mask &= ~0x80; | 653 | mask &= ~0x80; |
654 | } | 654 | } |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 8bd226149868..f303cd898515 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -1031,6 +1031,7 @@ enum { | |||
1031 | ALC880_FIXUP_GPIO2, | 1031 | ALC880_FIXUP_GPIO2, |
1032 | ALC880_FIXUP_MEDION_RIM, | 1032 | ALC880_FIXUP_MEDION_RIM, |
1033 | ALC880_FIXUP_LG, | 1033 | ALC880_FIXUP_LG, |
1034 | ALC880_FIXUP_LG_LW25, | ||
1034 | ALC880_FIXUP_W810, | 1035 | ALC880_FIXUP_W810, |
1035 | ALC880_FIXUP_EAPD_COEF, | 1036 | ALC880_FIXUP_EAPD_COEF, |
1036 | ALC880_FIXUP_TCL_S700, | 1037 | ALC880_FIXUP_TCL_S700, |
@@ -1089,6 +1090,14 @@ static const struct hda_fixup alc880_fixups[] = { | |||
1089 | { } | 1090 | { } |
1090 | } | 1091 | } |
1091 | }, | 1092 | }, |
1093 | [ALC880_FIXUP_LG_LW25] = { | ||
1094 | .type = HDA_FIXUP_PINS, | ||
1095 | .v.pins = (const struct hda_pintbl[]) { | ||
1096 | { 0x1a, 0x0181344f }, /* line-in */ | ||
1097 | { 0x1b, 0x0321403f }, /* headphone */ | ||
1098 | { } | ||
1099 | } | ||
1100 | }, | ||
1092 | [ALC880_FIXUP_W810] = { | 1101 | [ALC880_FIXUP_W810] = { |
1093 | .type = HDA_FIXUP_PINS, | 1102 | .type = HDA_FIXUP_PINS, |
1094 | .v.pins = (const struct hda_pintbl[]) { | 1103 | .v.pins = (const struct hda_pintbl[]) { |
@@ -1341,6 +1350,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = { | |||
1341 | SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG), | 1350 | SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG), |
1342 | SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG), | 1351 | SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG), |
1343 | SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG), | 1352 | SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG), |
1353 | SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25), | ||
1344 | SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700), | 1354 | SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700), |
1345 | 1355 | ||
1346 | /* Below is the copied entries from alc880_quirks.c. | 1356 | /* Below is the copied entries from alc880_quirks.c. |
@@ -4329,6 +4339,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { | |||
4329 | SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), | 4339 | SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), |
4330 | SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), | 4340 | SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), |
4331 | SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), | 4341 | SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), |
4342 | SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC), | ||
4332 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), | 4343 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), |
4333 | SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 4344 | SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
4334 | SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 4345 | SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c index 987f728718c5..be2ba1b6fe4a 100644 --- a/sound/soc/codecs/cs42l52.c +++ b/sound/soc/codecs/cs42l52.c | |||
@@ -195,6 +195,8 @@ static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0); | |||
195 | 195 | ||
196 | static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); | 196 | static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); |
197 | 197 | ||
198 | static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0); | ||
199 | |||
198 | static const unsigned int limiter_tlv[] = { | 200 | static const unsigned int limiter_tlv[] = { |
199 | TLV_DB_RANGE_HEAD(2), | 201 | TLV_DB_RANGE_HEAD(2), |
200 | 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0), | 202 | 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0), |
@@ -451,7 +453,8 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = { | |||
451 | SOC_ENUM("Beep Pitch", beep_pitch_enum), | 453 | SOC_ENUM("Beep Pitch", beep_pitch_enum), |
452 | SOC_ENUM("Beep on Time", beep_ontime_enum), | 454 | SOC_ENUM("Beep on Time", beep_ontime_enum), |
453 | SOC_ENUM("Beep off Time", beep_offtime_enum), | 455 | SOC_ENUM("Beep off Time", beep_offtime_enum), |
454 | SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv), | 456 | SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL, |
457 | 0, 0x07, 0x1f, beep_tlv), | ||
455 | SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1), | 458 | SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1), |
456 | SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum), | 459 | SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum), |
457 | SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum), | 460 | SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum), |
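Note: the cs42l52 hunks replace a plain SOC_SINGLE_TLV with SOC_SINGLE_SX_TLV and give the beep generator its own dB scale. The SX variant is used elsewhere in ASoC for register fields whose raw code is interpreted as a signed/offset value rather than a plain unsigned count, which is why it takes an explicit minimum code (0x07 here) as well as a maximum. A hedged sketch of the same control style, with a made-up register address and scale values (only the macro usage mirrors the patch):

#include <sound/soc.h>
#include <sound/tlv.h>

/* Hypothetical register address and dB range; illustrative only. */
#define EXAMPLE_BEEP_VOL_REG	0x1d

/* DECLARE_TLV_DB_SCALE(name, min, step, mute) works in 0.01 dB units. */
static DECLARE_TLV_DB_SCALE(example_beep_tlv, -5600, 200, 0);

static const struct snd_kcontrol_new example_controls[] = {
	/* SOC_SINGLE_SX_TLV(name, reg, shift, min, max, tlv): "min" and "max"
	 * are raw register codes that the control maps onto the dB scale.
	 */
	SOC_SINGLE_SX_TLV("Beep Volume", EXAMPLE_BEEP_VOL_REG,
			  0, 0x07, 0x1f, example_beep_tlv),
};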
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 6c8a9e7bee25..760e8bfeacaa 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c | |||
@@ -153,6 +153,8 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w, | |||
153 | static int power_vag_event(struct snd_soc_dapm_widget *w, | 153 | static int power_vag_event(struct snd_soc_dapm_widget *w, |
154 | struct snd_kcontrol *kcontrol, int event) | 154 | struct snd_kcontrol *kcontrol, int event) |
155 | { | 155 | { |
156 | const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP; | ||
157 | |||
156 | switch (event) { | 158 | switch (event) { |
157 | case SND_SOC_DAPM_POST_PMU: | 159 | case SND_SOC_DAPM_POST_PMU: |
158 | snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, | 160 | snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, |
@@ -160,9 +162,17 @@ static int power_vag_event(struct snd_soc_dapm_widget *w, | |||
160 | break; | 162 | break; |
161 | 163 | ||
162 | case SND_SOC_DAPM_PRE_PMD: | 164 | case SND_SOC_DAPM_PRE_PMD: |
163 | snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, | 165 | /* |
164 | SGTL5000_VAG_POWERUP, 0); | 166 | * Don't clear VAG_POWERUP, when both DAC and ADC are |
165 | msleep(400); | 167 | * operational to prevent inadvertently starving the |
168 | * other one of them. | ||
169 | */ | ||
170 | if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) & | ||
171 | mask) != mask) { | ||
172 | snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, | ||
173 | SGTL5000_VAG_POWERUP, 0); | ||
174 | msleep(400); | ||
175 | } | ||
166 | break; | 176 | break; |
167 | default: | 177 | default: |
168 | break; | 178 | break; |
@@ -388,7 +398,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = { | |||
388 | SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0), | 398 | SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0), |
389 | SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)", | 399 | SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)", |
390 | SGTL5000_CHIP_ANA_ADC_CTRL, | 400 | SGTL5000_CHIP_ANA_ADC_CTRL, |
391 | 8, 2, 0, capture_6db_attenuate), | 401 | 8, 1, 0, capture_6db_attenuate), |
392 | SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0), | 402 | SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0), |
393 | 403 | ||
394 | SOC_DOUBLE_TLV("Headphone Playback Volume", | 404 | SOC_DOUBLE_TLV("Headphone Playback Volume", |
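Note: the sgtl5000 fix reads CHIP_ANA_POWER, masks the DAC/ADC power-up bits, and skips the VAG power-down (and its 400 ms ramp) whenever both converters are still running, so stopping one stream cannot starve the other. A hedged, generic sketch of that read-mask-check idiom using regmap and placeholder register/bit names:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/regmap.h>

/* Placeholder register/bit layout; the pattern mirrors the sgtl5000 fix. */
#define EX_ANA_POWER	0x30
#define EX_DAC_POWERUP	BIT(14)
#define EX_ADC_POWERUP	BIT(1)
#define EX_VAG_POWERUP	BIT(7)

static void example_vag_power_down(struct regmap *map)
{
	const unsigned int mask = EX_DAC_POWERUP | EX_ADC_POWERUP;
	unsigned int val;

	if (regmap_read(map, EX_ANA_POWER, &val))
		return;

	/* Only drop VAG when at least one converter is already off;
	 * leaving it up avoids starving the one that keeps running.
	 */
	if ((val & mask) != mask) {
		regmap_update_bits(map, EX_ANA_POWER, EX_VAG_POWERUP, 0);
		msleep(400);	/* ramp-down delay, as in the driver */
	}
}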
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index bd16010441cc..4375c9f2b791 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -679,13 +679,14 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w) | |||
679 | return -EINVAL; | 679 | return -EINVAL; |
680 | } | 680 | } |
681 | 681 | ||
682 | path = list_first_entry(&w->sources, struct snd_soc_dapm_path, | 682 | if (list_empty(&w->sources)) { |
683 | list_sink); | ||
684 | if (!path) { | ||
685 | dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name); | 683 | dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name); |
686 | return -EINVAL; | 684 | return -EINVAL; |
687 | } | 685 | } |
688 | 686 | ||
687 | path = list_first_entry(&w->sources, struct snd_soc_dapm_path, | ||
688 | list_sink); | ||
689 | |||
689 | ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path); | 690 | ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path); |
690 | if (ret < 0) | 691 | if (ret < 0) |
691 | return ret; | 692 | return ret; |
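Note: the soc-dapm hunk is an ordering fix. list_first_entry() just computes a container pointer and can never return NULL, so the old NULL check on an empty w->sources list was dead code; the emptiness test has to use list_empty() before the entry is fetched. A standalone hedged sketch of the corrected pattern, with generic types rather than the DAPM structures:

#include <linux/errno.h>
#include <linux/list.h>

struct example_path {
	struct list_head node;
	int id;
};

static int example_first_path_id(struct list_head *sources)
{
	struct example_path *p;

	/* list_first_entry() is only valid on a non-empty list;
	 * it performs pointer arithmetic and cannot yield NULL.
	 */
	if (list_empty(sources))
		return -EINVAL;

	p = list_first_entry(sources, struct example_path, node);
	return p->id;
}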
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index d04146cad61f..47565fd04505 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c | |||
@@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream, | |||
228 | reg = TEGRA30_I2S_CIF_RX_CTRL; | 228 | reg = TEGRA30_I2S_CIF_RX_CTRL; |
229 | } else { | 229 | } else { |
230 | val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX; | 230 | val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX; |
231 | reg = TEGRA30_I2S_CIF_RX_CTRL; | 231 | reg = TEGRA30_I2S_CIF_TX_CTRL; |
232 | } | 232 | } |
233 | 233 | ||
234 | regmap_write(i2s->regmap, reg, val); | 234 | regmap_write(i2s->regmap, reg, val); |
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c index 26722423330d..f3dd7266c391 100644 --- a/sound/usb/6fire/midi.c +++ b/sound/usb/6fire/midi.c | |||
@@ -19,6 +19,10 @@ | |||
19 | #include "chip.h" | 19 | #include "chip.h" |
20 | #include "comm.h" | 20 | #include "comm.h" |
21 | 21 | ||
22 | enum { | ||
23 | MIDI_BUFSIZE = 64 | ||
24 | }; | ||
25 | |||
22 | static void usb6fire_midi_out_handler(struct urb *urb) | 26 | static void usb6fire_midi_out_handler(struct urb *urb) |
23 | { | 27 | { |
24 | struct midi_runtime *rt = urb->context; | 28 | struct midi_runtime *rt = urb->context; |
@@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip) | |||
156 | if (!rt) | 160 | if (!rt) |
157 | return -ENOMEM; | 161 | return -ENOMEM; |
158 | 162 | ||
163 | rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL); | ||
164 | if (!rt->out_buffer) { | ||
165 | kfree(rt); | ||
166 | return -ENOMEM; | ||
167 | } | ||
168 | |||
159 | rt->chip = chip; | 169 | rt->chip = chip; |
160 | rt->in_received = usb6fire_midi_in_received; | 170 | rt->in_received = usb6fire_midi_in_received; |
161 | rt->out_buffer[0] = 0x80; /* 'send midi' command */ | 171 | rt->out_buffer[0] = 0x80; /* 'send midi' command */ |
@@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip) | |||
169 | 179 | ||
170 | ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance); | 180 | ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance); |
171 | if (ret < 0) { | 181 | if (ret < 0) { |
182 | kfree(rt->out_buffer); | ||
172 | kfree(rt); | 183 | kfree(rt); |
173 | snd_printk(KERN_ERR PREFIX "unable to create midi.\n"); | 184 | snd_printk(KERN_ERR PREFIX "unable to create midi.\n"); |
174 | return ret; | 185 | return ret; |
@@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip) | |||
197 | 208 | ||
198 | void usb6fire_midi_destroy(struct sfire_chip *chip) | 209 | void usb6fire_midi_destroy(struct sfire_chip *chip) |
199 | { | 210 | { |
200 | kfree(chip->midi); | 211 | struct midi_runtime *rt = chip->midi; |
212 | |||
213 | kfree(rt->out_buffer); | ||
214 | kfree(rt); | ||
201 | chip->midi = NULL; | 215 | chip->midi = NULL; |
202 | } | 216 | } |
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h index c321006e5430..84851b9f5559 100644 --- a/sound/usb/6fire/midi.h +++ b/sound/usb/6fire/midi.h | |||
@@ -16,10 +16,6 @@ | |||
16 | 16 | ||
17 | #include "common.h" | 17 | #include "common.h" |
18 | 18 | ||
19 | enum { | ||
20 | MIDI_BUFSIZE = 64 | ||
21 | }; | ||
22 | |||
23 | struct midi_runtime { | 19 | struct midi_runtime { |
24 | struct sfire_chip *chip; | 20 | struct sfire_chip *chip; |
25 | struct snd_rawmidi *instance; | 21 | struct snd_rawmidi *instance; |
@@ -32,7 +28,7 @@ struct midi_runtime { | |||
32 | struct snd_rawmidi_substream *out; | 28 | struct snd_rawmidi_substream *out; |
33 | struct urb out_urb; | 29 | struct urb out_urb; |
34 | u8 out_serial; /* serial number of out packet */ | 30 | u8 out_serial; /* serial number of out packet */ |
35 | u8 out_buffer[MIDI_BUFSIZE]; | 31 | u8 *out_buffer; |
36 | int buffer_offset; | 32 | int buffer_offset; |
37 | 33 | ||
38 | void (*in_received)(struct midi_runtime *rt, u8 *data, int length); | 34 | void (*in_received)(struct midi_runtime *rt, u8 *data, int length); |
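Note: the midi.c and midi.h hunks together turn the fixed out_buffer[MIDI_BUFSIZE] array embedded in struct midi_runtime into a separately kzalloc()'d buffer, freed on every error path and in usb6fire_midi_destroy(), presumably so the data handed to the USB core is a self-contained heap allocation rather than a field sharing a cache line with the rest of the runtime struct. A condensed hedged sketch of that lifecycle, with simplified names:

#include <linux/slab.h>
#include <linux/types.h>

#define EX_MIDI_BUFSIZE 64

struct ex_midi_runtime {
	u8 *out_buffer;
	/* ... other runtime state ... */
};

static struct ex_midi_runtime *ex_midi_create(void)
{
	struct ex_midi_runtime *rt = kzalloc(sizeof(*rt), GFP_KERNEL);

	if (!rt)
		return NULL;

	rt->out_buffer = kzalloc(EX_MIDI_BUFSIZE, GFP_KERNEL);
	if (!rt->out_buffer) {
		kfree(rt);	/* unwind in reverse order on failure */
		return NULL;
	}
	return rt;
}

static void ex_midi_destroy(struct ex_midi_runtime *rt)
{
	kfree(rt->out_buffer);
	kfree(rt);
}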
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c index 3d2551cc10f2..b5eb97fdc842 100644 --- a/sound/usb/6fire/pcm.c +++ b/sound/usb/6fire/pcm.c | |||
@@ -582,6 +582,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb, | |||
582 | urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB; | 582 | urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB; |
583 | } | 583 | } |
584 | 584 | ||
585 | static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt) | ||
586 | { | ||
587 | int i; | ||
588 | |||
589 | for (i = 0; i < PCM_N_URBS; i++) { | ||
590 | rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB | ||
591 | * PCM_MAX_PACKET_SIZE, GFP_KERNEL); | ||
592 | if (!rt->out_urbs[i].buffer) | ||
593 | return -ENOMEM; | ||
594 | rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB | ||
595 | * PCM_MAX_PACKET_SIZE, GFP_KERNEL); | ||
596 | if (!rt->in_urbs[i].buffer) | ||
597 | return -ENOMEM; | ||
598 | } | ||
599 | return 0; | ||
600 | } | ||
601 | |||
602 | static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt) | ||
603 | { | ||
604 | int i; | ||
605 | |||
606 | for (i = 0; i < PCM_N_URBS; i++) { | ||
607 | kfree(rt->out_urbs[i].buffer); | ||
608 | kfree(rt->in_urbs[i].buffer); | ||
609 | } | ||
610 | } | ||
611 | |||
585 | int usb6fire_pcm_init(struct sfire_chip *chip) | 612 | int usb6fire_pcm_init(struct sfire_chip *chip) |
586 | { | 613 | { |
587 | int i; | 614 | int i; |
@@ -593,6 +620,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip) | |||
593 | if (!rt) | 620 | if (!rt) |
594 | return -ENOMEM; | 621 | return -ENOMEM; |
595 | 622 | ||
623 | ret = usb6fire_pcm_buffers_init(rt); | ||
624 | if (ret) { | ||
625 | usb6fire_pcm_buffers_destroy(rt); | ||
626 | kfree(rt); | ||
627 | return ret; | ||
628 | } | ||
629 | |||
596 | rt->chip = chip; | 630 | rt->chip = chip; |
597 | rt->stream_state = STREAM_DISABLED; | 631 | rt->stream_state = STREAM_DISABLED; |
598 | rt->rate = ARRAY_SIZE(rates); | 632 | rt->rate = ARRAY_SIZE(rates); |
@@ -614,6 +648,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip) | |||
614 | 648 | ||
615 | ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm); | 649 | ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm); |
616 | if (ret < 0) { | 650 | if (ret < 0) { |
651 | usb6fire_pcm_buffers_destroy(rt); | ||
617 | kfree(rt); | 652 | kfree(rt); |
618 | snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n"); | 653 | snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n"); |
619 | return ret; | 654 | return ret; |
@@ -625,6 +660,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip) | |||
625 | snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops); | 660 | snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops); |
626 | 661 | ||
627 | if (ret) { | 662 | if (ret) { |
663 | usb6fire_pcm_buffers_destroy(rt); | ||
628 | kfree(rt); | 664 | kfree(rt); |
629 | snd_printk(KERN_ERR PREFIX | 665 | snd_printk(KERN_ERR PREFIX |
630 | "error preallocating pcm buffers.\n"); | 666 | "error preallocating pcm buffers.\n"); |
@@ -669,6 +705,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip) | |||
669 | 705 | ||
670 | void usb6fire_pcm_destroy(struct sfire_chip *chip) | 706 | void usb6fire_pcm_destroy(struct sfire_chip *chip) |
671 | { | 707 | { |
672 | kfree(chip->pcm); | 708 | struct pcm_runtime *rt = chip->pcm; |
709 | |||
710 | usb6fire_pcm_buffers_destroy(rt); | ||
711 | kfree(rt); | ||
673 | chip->pcm = NULL; | 712 | chip->pcm = NULL; |
674 | } | 713 | } |
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h index 9b01133ee3fe..f5779d6182c6 100644 --- a/sound/usb/6fire/pcm.h +++ b/sound/usb/6fire/pcm.h | |||
@@ -32,7 +32,7 @@ struct pcm_urb { | |||
32 | struct urb instance; | 32 | struct urb instance; |
33 | struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB]; | 33 | struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB]; |
34 | /* END DO NOT SEPARATE */ | 34 | /* END DO NOT SEPARATE */ |
35 | u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE]; | 35 | u8 *buffer; |
36 | 36 | ||
37 | struct pcm_urb *peer; | 37 | struct pcm_urb *peer; |
38 | }; | 38 | }; |
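Note: the pcm.c/pcm.h hunks apply the same conversion to the per-URB packet buffers, but wrap it in usb6fire_pcm_buffers_init()/_destroy() helpers. Since the runtime struct starts zeroed and kfree(NULL) is a no-op, the destroy helper is safe to call even after a partial allocation failure, which is exactly how the error paths above use it. A hedged sketch of such a helper pair with illustrative names and sizes (here the cleanup is folded into the init helper for brevity; the driver instead calls the destroy helper from its callers):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

#define EX_N_URBS		4
#define EX_PACKETS_PER_URB	8
#define EX_MAX_PACKET_SIZE	1024

struct ex_pcm_urb {
	u8 *buffer;
};

struct ex_pcm_runtime {
	struct ex_pcm_urb in_urbs[EX_N_URBS];
	struct ex_pcm_urb out_urbs[EX_N_URBS];
};

static void ex_pcm_buffers_destroy(struct ex_pcm_runtime *rt)
{
	int i;

	/* kfree(NULL) is a no-op, so partially initialized state is fine. */
	for (i = 0; i < EX_N_URBS; i++) {
		kfree(rt->in_urbs[i].buffer);
		kfree(rt->out_urbs[i].buffer);
	}
}

static int ex_pcm_buffers_init(struct ex_pcm_runtime *rt)
{
	int i;

	for (i = 0; i < EX_N_URBS; i++) {
		rt->in_urbs[i].buffer = kzalloc(EX_PACKETS_PER_URB *
						EX_MAX_PACKET_SIZE, GFP_KERNEL);
		rt->out_urbs[i].buffer = kzalloc(EX_PACKETS_PER_URB *
						 EX_MAX_PACKET_SIZE, GFP_KERNEL);
		if (!rt->in_urbs[i].buffer || !rt->out_urbs[i].buffer) {
			ex_pcm_buffers_destroy(rt);
			return -ENOMEM;
		}
	}
	return 0;
}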
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index d5438083fd6a..95558ef4a7a0 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
@@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, | |||
888 | case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ | 888 | case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ |
889 | case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ | 889 | case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ |
890 | case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ | 890 | case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ |
891 | case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ | ||
891 | case USB_ID(0x046d, 0x0991): | 892 | case USB_ID(0x046d, 0x0991): |
892 | /* Most audio usb devices lie about volume resolution. | 893 | /* Most audio usb devices lie about volume resolution. |
893 | * Most Logitech webcams have res = 384. | 894 | * Most Logitech webcams have res = 384. |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 1bc45e71f1fe..0df9ede99dfd 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -319,19 +319,19 @@ static int create_auto_midi_quirk(struct snd_usb_audio *chip, | |||
319 | if (altsd->bNumEndpoints < 1) | 319 | if (altsd->bNumEndpoints < 1) |
320 | return -ENODEV; | 320 | return -ENODEV; |
321 | epd = get_endpoint(alts, 0); | 321 | epd = get_endpoint(alts, 0); |
322 | if (!usb_endpoint_xfer_bulk(epd) || | 322 | if (!usb_endpoint_xfer_bulk(epd) && |
323 | !usb_endpoint_xfer_int(epd)) | 323 | !usb_endpoint_xfer_int(epd)) |
324 | return -ENODEV; | 324 | return -ENODEV; |
325 | 325 | ||
326 | switch (USB_ID_VENDOR(chip->usb_id)) { | 326 | switch (USB_ID_VENDOR(chip->usb_id)) { |
327 | case 0x0499: /* Yamaha */ | 327 | case 0x0499: /* Yamaha */ |
328 | err = create_yamaha_midi_quirk(chip, iface, driver, alts); | 328 | err = create_yamaha_midi_quirk(chip, iface, driver, alts); |
329 | if (err < 0 && err != -ENODEV) | 329 | if (err != -ENODEV) |
330 | return err; | 330 | return err; |
331 | break; | 331 | break; |
332 | case 0x0582: /* Roland */ | 332 | case 0x0582: /* Roland */ |
333 | err = create_roland_midi_quirk(chip, iface, driver, alts); | 333 | err = create_roland_midi_quirk(chip, iface, driver, alts); |
334 | if (err < 0 && err != -ENODEV) | 334 | if (err != -ENODEV) |
335 | return err; | 335 | return err; |
336 | break; | 336 | break; |
337 | } | 337 | } |
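Note: the quirks.c hunk fixes two related logic errors: the endpoint test should reject the interface only when it is neither bulk nor interrupt (hence || becomes &&), and the Yamaha/Roland helpers should return on success (err == 0) as well as on real errors, falling through only for -ENODEV. A small hedged sketch of the corrected control flow, with a stub standing in for the per-vendor helpers:

#include <linux/errno.h>
#include <linux/usb.h>

/* Stub standing in for the vendor helpers (create_yamaha_midi_quirk etc.). */
static int ex_vendor_midi_quirk(void)
{
	return -ENODEV;
}

static int ex_create_auto_midi_quirk(const struct usb_endpoint_descriptor *epd)
{
	int err;

	/* Accept bulk OR interrupt endpoints; reject only if it is neither. */
	if (!usb_endpoint_xfer_bulk(epd) && !usb_endpoint_xfer_int(epd))
		return -ENODEV;

	/* 0 (success) or a hard error from the vendor helper ends the probe;
	 * only -ENODEV falls through to keep trying other quirks.
	 */
	err = ex_vendor_midi_quirk();
	if (err != -ENODEV)
		return err;

	return -ENODEV;
}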