645 files changed, 7692 insertions, 5202 deletions
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt index f4445e5a2bbb..1e097037349c 100644 --- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt +++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt | |||
@@ -22,6 +22,8 @@ Optional Properties: | |||
22 | - pclkN, clkN: Pairs of parent of input clock and input clock to the | 22 | - pclkN, clkN: Pairs of parent of input clock and input clock to the |
23 | devices in this power domain. Maximum of 4 pairs (N = 0 to 3) | 23 | devices in this power domain. Maximum of 4 pairs (N = 0 to 3) |
24 | are supported currently. | 24 | are supported currently. |
25 | - power-domains: phandle pointing to the parent power domain, for more details | ||
26 | see Documentation/devicetree/bindings/power/power_domain.txt | ||
25 | 27 | ||
26 | Node of a device using power domains must have a power-domains property | 28 | Node of a device using power domains must have a power-domains property |
27 | defined with a phandle to respective power domain. | 29 | defined with a phandle to respective power domain. |
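For orientation, a minimal sketch of the new optional power-domains property in use: two Exynos power-domain nodes where the second is declared a subdomain of the first. The labels and register offsets are illustrative assumptions, not taken from any particular SoC dtsi.

	pd_parent: parent-power-domain@10023C00 {
		compatible = "samsung,exynos4210-pd";
		reg = <0x10023C00 0x20>;
		#power-domain-cells = <0>;
	};

	pd_child: child-power-domain@10023C20 {
		compatible = "samsung,exynos4210-pd";
		reg = <0x10023C20 0x20>;
		#power-domain-cells = <0>;
		power-domains = <&pd_parent>;	/* subdomain of pd_parent */
	};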
diff --git a/Documentation/devicetree/bindings/arm/sti.txt b/Documentation/devicetree/bindings/arm/sti.txt index d70ec358736c..8d27f6b084c7 100644 --- a/Documentation/devicetree/bindings/arm/sti.txt +++ b/Documentation/devicetree/bindings/arm/sti.txt | |||
@@ -13,6 +13,10 @@ Boards with the ST STiH407 SoC shall have the following properties: | |||
13 | Required root node property: | 13 | Required root node property: |
14 | compatible = "st,stih407"; | 14 | compatible = "st,stih407"; |
15 | 15 | ||
16 | Boards with the ST STiH410 SoC shall have the following properties: | ||
17 | Required root node property: | ||
18 | compatible = "st,stih410"; | ||
19 | |||
16 | Boards with the ST STiH418 SoC shall have the following properties: | 20 | Boards with the ST STiH418 SoC shall have the following properties: |
17 | Required root node property: | 21 | Required root node property: |
18 | compatible = "st,stih418"; | 22 | compatible = "st,stih418"; |
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt index cfcc52705ed8..6151999c5dca 100644 --- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt +++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt | |||
@@ -4,7 +4,10 @@ Ethernet nodes are defined to describe on-chip ethernet interfaces in | |||
4 | APM X-Gene SoC. | 4 | APM X-Gene SoC. |
5 | 5 | ||
6 | Required properties for all the ethernet interfaces: | 6 | Required properties for all the ethernet interfaces: |
7 | - compatible: Should be "apm,xgene-enet" | 7 | - compatible: Should state binding information from the following list, |
8 | - "apm,xgene-enet": RGMII based 1G interface | ||
9 | - "apm,xgene1-sgenet": SGMII based 1G interface | ||
10 | - "apm,xgene1-xgenet": XFI based 10G interface | ||
8 | - reg: Address and length of the register set for the device. It contains the | 11 | - reg: Address and length of the register set for the device. It contains the |
9 | information of registers in the same order as described by reg-names | 12 | information of registers in the same order as described by reg-names |
10 | - reg-names: Should contain the register set names | 13 | - reg-names: Should contain the register set names |
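For reference, a hedged sketch of a node selecting one of the compatibles listed above. The addresses, interrupt cells and reg-names below are placeholders chosen for illustration; real values must come from the SoC dtsi, not from this sketch.

	ethernet@17020000 {
		compatible = "apm,xgene1-sgenet";	/* SGMII based 1G interface */
		reg = <0x17020000 0xd100>,
		      <0x17030000 0x400>,
		      <0x10000000 0x200>;
		reg-names = "enet_csr", "ring_csr", "ring_cmd";	/* assumed names */
		interrupts = <0x0 0x40 0x4>;
		phy-connection-type = "sgmii";
	};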
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt index 98c16672ab5f..0f8ed3710c66 100644 --- a/Documentation/devicetree/bindings/power/power_domain.txt +++ b/Documentation/devicetree/bindings/power/power_domain.txt | |||
@@ -19,6 +19,16 @@ Required properties: | |||
19 | providing multiple PM domains (e.g. power controllers), but can be any value | 19 | providing multiple PM domains (e.g. power controllers), but can be any value |
20 | as specified by device tree binding documentation of particular provider. | 20 | as specified by device tree binding documentation of particular provider. |
21 | 21 | ||
22 | Optional properties: | ||
23 | - power-domains : A phandle and PM domain specifier as defined by bindings of | ||
24 | the power controller specified by phandle. | ||
25 | Some power domains might be powered from another power domain (or have | ||
26 | other hardware-specific dependencies). To represent such a dependency, | ||
27 | a standard PM domain consumer binding is used. When provided, all domains | ||
28 | created by the given provider should be subdomains of the domain | ||
29 | specified by this binding. More details about the power domain specifier | ||
30 | are available in the next section. | ||
31 | |||
22 | Example: | 32 | Example: |
23 | 33 | ||
24 | power: power-controller@12340000 { | 34 | power: power-controller@12340000 { |
@@ -30,6 +40,25 @@ Example: | |||
30 | The node above defines a power controller that is a PM domain provider and | 40 | The node above defines a power controller that is a PM domain provider and |
31 | expects one cell as its phandle argument. | 41 | expects one cell as its phandle argument. |
32 | 42 | ||
43 | Example 2: | ||
44 | |||
45 | parent: power-controller@12340000 { | ||
46 | compatible = "foo,power-controller"; | ||
47 | reg = <0x12340000 0x1000>; | ||
48 | #power-domain-cells = <1>; | ||
49 | }; | ||
50 | |||
51 | child: power-controller@12340000 { | ||
52 | compatible = "foo,power-controller"; | ||
53 | reg = <0x12341000 0x1000>; | ||
54 | power-domains = <&parent 0>; | ||
55 | #power-domain-cells = <1>; | ||
56 | }; | ||
57 | |||
58 | The nodes above define two power controllers: 'parent' and 'child'. | ||
59 | Domains created by the 'child' power controller are subdomains of '0' power | ||
60 | domain provided by the 'parent' power controller. | ||
61 | |||
33 | ==PM domain consumers== | 62 | ==PM domain consumers== |
34 | 63 | ||
35 | Required properties: | 64 | Required properties: |
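The hunk stops right where the consumer section begins; as a hedged reminder of what a consumer node looks like, here is an illustrative device drawing power from domain 0 of the 'power' provider in Example 1 (the device name and register values are made up):

	leaky-device@12350000 {
		compatible = "foo,i-leak-current";
		reg = <0x12350000 0x1000>;
		power-domains = <&power 0>;
	};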
diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/8250.txt index 91d5ab0e60fc..91d5ab0e60fc 100644 --- a/Documentation/devicetree/bindings/serial/of-serial.txt +++ b/Documentation/devicetree/bindings/serial/8250.txt | |||
diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt new file mode 100644 index 000000000000..ebcbb62c0a76 --- /dev/null +++ b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt | |||
@@ -0,0 +1,19 @@ | |||
1 | ETRAX FS UART | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : "axis,etraxfs-uart" | ||
5 | - reg: offset and length of the register set for the device. | ||
6 | - interrupts: device interrupt | ||
7 | |||
8 | Optional properties: | ||
9 | - {dtr,dsr,ri,cd}-gpios: specify a GPIO for DTR/DSR/RI/CD | ||
10 | line respectively. | ||
11 | |||
12 | Example: | ||
13 | |||
14 | serial@b0026000 { | ||
15 | compatible = "axis,etraxfs-uart"; | ||
16 | reg = <0xb0026000 0x1000>; | ||
17 | interrupts = <68>; | ||
18 | status = "disabled"; | ||
19 | }; | ||
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt index 56742bc70218..7d44eae7ab0b 100644 --- a/Documentation/devicetree/bindings/submitting-patches.txt +++ b/Documentation/devicetree/bindings/submitting-patches.txt | |||
@@ -12,6 +12,9 @@ I. For patch submitters | |||
12 | 12 | ||
13 | devicetree@vger.kernel.org | 13 | devicetree@vger.kernel.org |
14 | 14 | ||
15 | and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify | ||
16 | all of the DT maintainers. | ||
17 | |||
15 | 3) The Documentation/ portion of the patch should come in the series before | 18 | 3) The Documentation/ portion of the patch should come in the series before |
16 | the code implementing the binding. | 19 | the code implementing the binding. |
17 | 20 | ||
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 389ca1347a77..fae26d014aaf 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt | |||
@@ -20,6 +20,7 @@ amlogic Amlogic, Inc. | |||
20 | ams AMS AG | 20 | ams AMS AG |
21 | amstaos AMS-Taos Inc. | 21 | amstaos AMS-Taos Inc. |
22 | apm Applied Micro Circuits Corporation (APM) | 22 | apm Applied Micro Circuits Corporation (APM) |
23 | arasan Arasan Chip Systems | ||
23 | arm ARM Ltd. | 24 | arm ARM Ltd. |
24 | armadeus ARMadeus Systems SARL | 25 | armadeus ARMadeus Systems SARL |
25 | asahi-kasei Asahi Kasei Corp. | 26 | asahi-kasei Asahi Kasei Corp. |
@@ -27,6 +28,7 @@ atmel Atmel Corporation | |||
27 | auo AU Optronics Corporation | 28 | auo AU Optronics Corporation |
28 | avago Avago Technologies | 29 | avago Avago Technologies |
29 | avic Shanghai AVIC Optoelectronics Co., Ltd. | 30 | avic Shanghai AVIC Optoelectronics Co., Ltd. |
31 | axis Axis Communications AB | ||
30 | bosch Bosch Sensortec GmbH | 32 | bosch Bosch Sensortec GmbH |
31 | brcm Broadcom Corporation | 33 | brcm Broadcom Corporation |
32 | buffalo Buffalo, Inc. | 34 | buffalo Buffalo, Inc. |
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt index f90e294d7631..a4d869744f59 100644 --- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt | |||
@@ -26,6 +26,11 @@ Optional properties: | |||
26 | - atmel,disable : Should be present if you want to disable the watchdog. | 26 | - atmel,disable : Should be present if you want to disable the watchdog. |
27 | - atmel,idle-halt : Should be present if you want to stop the watchdog when | 27 | - atmel,idle-halt : Should be present if you want to stop the watchdog when |
28 | entering idle state. | 28 | entering idle state. |
29 | CAUTION: This property should be used with care: it stops the | ||
30 | watchdog from counting while the CPU is in idle state, so the | ||
31 | watchdog reset time depends on mean CPU usage and the watchdog will | ||
32 | never reset if the CPU stops working while it is in idle state, | ||
33 | which is probably not what you want. | ||
29 | - atmel,dbg-halt : Should be present if you want to stop the watchdog when | 34 | - atmel,dbg-halt : Should be present if you want to stop the watchdog when |
30 | entering debug state. | 35 | entering debug state. |
31 | 36 | ||
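Tying this to the dtsi changes later in this series, a hedged sketch of a watchdog node that keeps atmel,dbg-halt but, per the caution above, omits atmel,idle-halt (the register address is an assumption for illustration):

	watchdog@fffffd40 {
		compatible = "atmel,at91sam9260-wdt";
		reg = <0xfffffd40 0x10>;
		atmel,watchdog-type = "hardware";
		atmel,reset-type = "all";
		atmel,dbg-halt;
		status = "okay";
	};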
diff --git a/MAINTAINERS b/MAINTAINERS index 6239a305dff0..1de6afa8ee51 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1030,6 +1030,16 @@ F: arch/arm/mach-mxs/ | |||
1030 | F: arch/arm/boot/dts/imx* | 1030 | F: arch/arm/boot/dts/imx* |
1031 | F: arch/arm/configs/imx*_defconfig | 1031 | F: arch/arm/configs/imx*_defconfig |
1032 | 1032 | ||
1033 | ARM/FREESCALE VYBRID ARM ARCHITECTURE | ||
1034 | M: Shawn Guo <shawn.guo@linaro.org> | ||
1035 | M: Sascha Hauer <kernel@pengutronix.de> | ||
1036 | R: Stefan Agner <stefan@agner.ch> | ||
1037 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
1038 | S: Maintained | ||
1039 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git | ||
1040 | F: arch/arm/mach-imx/*vf610* | ||
1041 | F: arch/arm/boot/dts/vf* | ||
1042 | |||
1033 | ARM/GLOMATION GESBC9312SX MACHINE SUPPORT | 1043 | ARM/GLOMATION GESBC9312SX MACHINE SUPPORT |
1034 | M: Lennert Buytenhek <kernel@wantstofly.org> | 1044 | M: Lennert Buytenhek <kernel@wantstofly.org> |
1035 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1045 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -1176,7 +1186,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> | |||
1176 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1186 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1177 | S: Maintained | 1187 | S: Maintained |
1178 | F: arch/arm/mach-mvebu/ | 1188 | F: arch/arm/mach-mvebu/ |
1179 | F: drivers/rtc/armada38x-rtc | 1189 | F: drivers/rtc/rtc-armada38x.c |
1180 | 1190 | ||
1181 | ARM/Marvell Berlin SoC support | 1191 | ARM/Marvell Berlin SoC support |
1182 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> | 1192 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> |
@@ -1188,6 +1198,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support | |||
1188 | M: Jason Cooper <jason@lakedaemon.net> | 1198 | M: Jason Cooper <jason@lakedaemon.net> |
1189 | M: Andrew Lunn <andrew@lunn.ch> | 1199 | M: Andrew Lunn <andrew@lunn.ch> |
1190 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> | 1200 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> |
1201 | M: Gregory Clement <gregory.clement@free-electrons.com> | ||
1191 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1202 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1192 | S: Maintained | 1203 | S: Maintained |
1193 | F: arch/arm/mach-dove/ | 1204 | F: arch/arm/mach-dove/ |
@@ -1351,6 +1362,7 @@ F: drivers/i2c/busses/i2c-rk3x.c | |||
1351 | F: drivers/*/*rockchip* | 1362 | F: drivers/*/*rockchip* |
1352 | F: drivers/*/*/*rockchip* | 1363 | F: drivers/*/*/*rockchip* |
1353 | F: sound/soc/rockchip/ | 1364 | F: sound/soc/rockchip/ |
1365 | N: rockchip | ||
1354 | 1366 | ||
1355 | ARM/SAMSUNG EXYNOS ARM ARCHITECTURES | 1367 | ARM/SAMSUNG EXYNOS ARM ARCHITECTURES |
1356 | M: Kukjin Kim <kgene@kernel.org> | 1368 | M: Kukjin Kim <kgene@kernel.org> |
@@ -1664,8 +1676,8 @@ F: drivers/misc/eeprom/at24.c | |||
1664 | F: include/linux/platform_data/at24.h | 1676 | F: include/linux/platform_data/at24.h |
1665 | 1677 | ||
1666 | ATA OVER ETHERNET (AOE) DRIVER | 1678 | ATA OVER ETHERNET (AOE) DRIVER |
1667 | M: "Ed L. Cashin" <ecashin@coraid.com> | 1679 | M: "Ed L. Cashin" <ed.cashin@acm.org> |
1668 | W: http://support.coraid.com/support/linux | 1680 | W: http://www.openaoe.org/ |
1669 | S: Supported | 1681 | S: Supported |
1670 | F: Documentation/aoe/ | 1682 | F: Documentation/aoe/ |
1671 | F: drivers/block/aoe/ | 1683 | F: drivers/block/aoe/ |
@@ -1730,7 +1742,7 @@ S: Maintained | |||
1730 | F: drivers/net/ethernet/atheros/ | 1742 | F: drivers/net/ethernet/atheros/ |
1731 | 1743 | ||
1732 | ATM | 1744 | ATM |
1733 | M: Chas Williams <chas@cmf.nrl.navy.mil> | 1745 | M: Chas Williams <3chas3@gmail.com> |
1734 | L: linux-atm-general@lists.sourceforge.net (moderated for non-subscribers) | 1746 | L: linux-atm-general@lists.sourceforge.net (moderated for non-subscribers) |
1735 | L: netdev@vger.kernel.org | 1747 | L: netdev@vger.kernel.org |
1736 | W: http://linux-atm.sourceforge.net | 1748 | W: http://linux-atm.sourceforge.net |
@@ -2107,7 +2119,6 @@ F: drivers/net/ethernet/broadcom/bnx2x/ | |||
2107 | 2119 | ||
2108 | BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE | 2120 | BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE |
2109 | M: Christian Daudt <bcm@fixthebug.org> | 2121 | M: Christian Daudt <bcm@fixthebug.org> |
2110 | M: Matt Porter <mporter@linaro.org> | ||
2111 | M: Florian Fainelli <f.fainelli@gmail.com> | 2122 | M: Florian Fainelli <f.fainelli@gmail.com> |
2112 | L: bcm-kernel-feedback-list@broadcom.com | 2123 | L: bcm-kernel-feedback-list@broadcom.com |
2113 | T: git git://github.com/broadcom/mach-bcm | 2124 | T: git git://github.com/broadcom/mach-bcm |
@@ -2369,8 +2380,9 @@ F: arch/x86/include/asm/tce.h | |||
2369 | 2380 | ||
2370 | CAN NETWORK LAYER | 2381 | CAN NETWORK LAYER |
2371 | M: Oliver Hartkopp <socketcan@hartkopp.net> | 2382 | M: Oliver Hartkopp <socketcan@hartkopp.net> |
2383 | M: Marc Kleine-Budde <mkl@pengutronix.de> | ||
2372 | L: linux-can@vger.kernel.org | 2384 | L: linux-can@vger.kernel.org |
2373 | W: http://gitorious.org/linux-can | 2385 | W: https://github.com/linux-can |
2374 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git | 2386 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git |
2375 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git | 2387 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git |
2376 | S: Maintained | 2388 | S: Maintained |
@@ -2386,7 +2398,7 @@ CAN NETWORK DRIVERS | |||
2386 | M: Wolfgang Grandegger <wg@grandegger.com> | 2398 | M: Wolfgang Grandegger <wg@grandegger.com> |
2387 | M: Marc Kleine-Budde <mkl@pengutronix.de> | 2399 | M: Marc Kleine-Budde <mkl@pengutronix.de> |
2388 | L: linux-can@vger.kernel.org | 2400 | L: linux-can@vger.kernel.org |
2389 | W: http://gitorious.org/linux-can | 2401 | W: https://github.com/linux-can |
2390 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git | 2402 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git |
2391 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git | 2403 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git |
2392 | S: Maintained | 2404 | S: Maintained |
@@ -3241,6 +3253,13 @@ S: Maintained | |||
3241 | F: Documentation/hwmon/dme1737 | 3253 | F: Documentation/hwmon/dme1737 |
3242 | F: drivers/hwmon/dme1737.c | 3254 | F: drivers/hwmon/dme1737.c |
3243 | 3255 | ||
3256 | DMI/SMBIOS SUPPORT | ||
3257 | M: Jean Delvare <jdelvare@suse.de> | ||
3258 | S: Maintained | ||
3259 | F: drivers/firmware/dmi-id.c | ||
3260 | F: drivers/firmware/dmi_scan.c | ||
3261 | F: include/linux/dmi.h | ||
3262 | |||
3244 | DOCKING STATION DRIVER | 3263 | DOCKING STATION DRIVER |
3245 | M: Shaohua Li <shaohua.li@intel.com> | 3264 | M: Shaohua Li <shaohua.li@intel.com> |
3246 | L: linux-acpi@vger.kernel.org | 3265 | L: linux-acpi@vger.kernel.org |
@@ -10196,6 +10215,13 @@ S: Maintained | |||
10196 | F: Documentation/usb/ohci.txt | 10215 | F: Documentation/usb/ohci.txt |
10197 | F: drivers/usb/host/ohci* | 10216 | F: drivers/usb/host/ohci* |
10198 | 10217 | ||
10218 | USB OTG FSM (Finite State Machine) | ||
10219 | M: Peter Chen <Peter.Chen@freescale.com> | ||
10220 | T: git git://github.com/hzpeterchen/linux-usb.git | ||
10221 | L: linux-usb@vger.kernel.org | ||
10222 | S: Maintained | ||
10223 | F: drivers/usb/common/usb-otg-fsm.c | ||
10224 | |||
10199 | USB OVER IP DRIVER | 10225 | USB OVER IP DRIVER |
10200 | M: Valentina Manea <valentina.manea.m@gmail.com> | 10226 | M: Valentina Manea <valentina.manea.m@gmail.com> |
10201 | M: Shuah Khan <shuah.kh@samsung.com> | 10227 | M: Shuah Khan <shuah.kh@samsung.com> |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 0 | 2 | PATCHLEVEL = 0 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc3 | 4 | EXTRAVERSION = -rc6 |
5 | NAME = Hurr durr I'ma sheep | 5 | NAME = Hurr durr I'ma sheep |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 114234e83caa..edda76fae83f 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c | |||
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs, | |||
67 | sigset_t *set) | 67 | sigset_t *set) |
68 | { | 68 | { |
69 | int err; | 69 | int err; |
70 | err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs, | 70 | err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs, |
71 | sizeof(sf->uc.uc_mcontext.regs.scratch)); | 71 | sizeof(sf->uc.uc_mcontext.regs.scratch)); |
72 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); | 72 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); |
73 | 73 | ||
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) | |||
83 | if (!err) | 83 | if (!err) |
84 | set_current_blocked(&set); | 84 | set_current_blocked(&set); |
85 | 85 | ||
86 | err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs), | 86 | err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch), |
87 | sizeof(sf->uc.uc_mcontext.regs.scratch)); | 87 | sizeof(sf->uc.uc_mcontext.regs.scratch)); |
88 | 88 | ||
89 | return err; | 89 | return err; |
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn) | |||
131 | /* Don't restart from sigreturn */ | 131 | /* Don't restart from sigreturn */ |
132 | syscall_wont_restart(regs); | 132 | syscall_wont_restart(regs); |
133 | 133 | ||
134 | /* | ||
135 | * Ensure that sigreturn always returns to user mode (in case the | ||
136 | * regs saved on user stack got fudged between save and sigreturn). | ||
137 | * Otherwise it is easy to panic the kernel with a custom | ||
138 | * signal handler and/or restorer which clobbers the status32/ret | ||
139 | * to return to a bogus location in kernel mode. | ||
140 | */ | ||
141 | regs->status32 |= STATUS_U_MASK; | ||
142 | |||
134 | return regs->r0; | 143 | return regs->r0; |
135 | 144 | ||
136 | badframe: | 145 | badframe: |
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) | |||
229 | 238 | ||
230 | /* | 239 | /* |
231 | * handler returns using sigreturn stub provided already by userspace | 240 | * handler returns using sigreturn stub provided already by userspace |
241 | * If not, nuke the process right away | ||
232 | */ | 242 | */ |
233 | BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER)); | 243 | if (!(ksig->ka.sa.sa_flags & SA_RESTORER)) |
244 | return 1; | ||
245 | |||
234 | regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; | 246 | regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; |
235 | 247 | ||
236 | /* User Stack for signal handler will be above the frame just carved */ | 248 | /* User Stack for signal handler will be above the frame just carved */ |
@@ -296,12 +308,12 @@ static void | |||
296 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) | 308 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
297 | { | 309 | { |
298 | sigset_t *oldset = sigmask_to_save(); | 310 | sigset_t *oldset = sigmask_to_save(); |
299 | int ret; | 311 | int failed; |
300 | 312 | ||
301 | /* Set up the stack frame */ | 313 | /* Set up the stack frame */ |
302 | ret = setup_rt_frame(ksig, oldset, regs); | 314 | failed = setup_rt_frame(ksig, oldset, regs); |
303 | 315 | ||
304 | signal_setup_done(ret, ksig, 0); | 316 | signal_setup_done(failed, ksig, 0); |
305 | } | 317 | } |
306 | 318 | ||
307 | void do_signal(struct pt_regs *regs) | 319 | void do_signal(struct pt_regs *regs) |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9f1f09a2bc9b..cf4c0c99aa25 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -619,6 +619,7 @@ config ARCH_PXA | |||
619 | select GENERIC_CLOCKEVENTS | 619 | select GENERIC_CLOCKEVENTS |
620 | select GPIO_PXA | 620 | select GPIO_PXA |
621 | select HAVE_IDE | 621 | select HAVE_IDE |
622 | select IRQ_DOMAIN | ||
622 | select MULTI_IRQ_HANDLER | 623 | select MULTI_IRQ_HANDLER |
623 | select PLAT_PXA | 624 | select PLAT_PXA |
624 | select SPARSE_IRQ | 625 | select SPARSE_IRQ |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 7f99cd652203..eb7bb511f853 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -150,6 +150,7 @@ machine-$(CONFIG_ARCH_BERLIN) += berlin | |||
150 | machine-$(CONFIG_ARCH_CLPS711X) += clps711x | 150 | machine-$(CONFIG_ARCH_CLPS711X) += clps711x |
151 | machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx | 151 | machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx |
152 | machine-$(CONFIG_ARCH_DAVINCI) += davinci | 152 | machine-$(CONFIG_ARCH_DAVINCI) += davinci |
153 | machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor | ||
153 | machine-$(CONFIG_ARCH_DOVE) += dove | 154 | machine-$(CONFIG_ARCH_DOVE) += dove |
154 | machine-$(CONFIG_ARCH_EBSA110) += ebsa110 | 155 | machine-$(CONFIG_ARCH_EBSA110) += ebsa110 |
155 | machine-$(CONFIG_ARCH_EFM32) += efm32 | 156 | machine-$(CONFIG_ARCH_EFM32) += efm32 |
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index 2c6248d9a9ef..c3255e0c90aa 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi | |||
@@ -301,3 +301,11 @@ | |||
301 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; | 301 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; |
302 | cd-inverted; | 302 | cd-inverted; |
303 | }; | 303 | }; |
304 | |||
305 | &aes { | ||
306 | status = "okay"; | ||
307 | }; | ||
308 | |||
309 | &sham { | ||
310 | status = "okay"; | ||
311 | }; | ||
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts index 83d40f7655e5..6b8493720424 100644 --- a/arch/arm/boot/dts/am335x-bone.dts +++ b/arch/arm/boot/dts/am335x-bone.dts | |||
@@ -24,11 +24,3 @@ | |||
24 | &mmc1 { | 24 | &mmc1 { |
25 | vmmc-supply = <&ldo3_reg>; | 25 | vmmc-supply = <&ldo3_reg>; |
26 | }; | 26 | }; |
27 | |||
28 | &sham { | ||
29 | status = "okay"; | ||
30 | }; | ||
31 | |||
32 | &aes { | ||
33 | status = "okay"; | ||
34 | }; | ||
diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts index 7266a00aab2e..5c5667a3624d 100644 --- a/arch/arm/boot/dts/am335x-lxm.dts +++ b/arch/arm/boot/dts/am335x-lxm.dts | |||
@@ -328,6 +328,10 @@ | |||
328 | dual_emac_res_vlan = <3>; | 328 | dual_emac_res_vlan = <3>; |
329 | }; | 329 | }; |
330 | 330 | ||
331 | &phy_sel { | ||
332 | rmii-clock-ext; | ||
333 | }; | ||
334 | |||
331 | &mac { | 335 | &mac { |
332 | pinctrl-names = "default", "sleep"; | 336 | pinctrl-names = "default", "sleep"; |
333 | pinctrl-0 = <&cpsw_default>; | 337 | pinctrl-0 = <&cpsw_default>; |
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi index 712edce7d6fb..071b56aa0c7e 100644 --- a/arch/arm/boot/dts/am33xx-clocks.dtsi +++ b/arch/arm/boot/dts/am33xx-clocks.dtsi | |||
@@ -99,7 +99,7 @@ | |||
99 | ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 { | 99 | ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 { |
100 | #clock-cells = <0>; | 100 | #clock-cells = <0>; |
101 | compatible = "ti,gate-clock"; | 101 | compatible = "ti,gate-clock"; |
102 | clocks = <&dpll_per_m2_ck>; | 102 | clocks = <&l4ls_gclk>; |
103 | ti,bit-shift = <0>; | 103 | ti,bit-shift = <0>; |
104 | reg = <0x0664>; | 104 | reg = <0x0664>; |
105 | }; | 105 | }; |
@@ -107,7 +107,7 @@ | |||
107 | ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 { | 107 | ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 { |
108 | #clock-cells = <0>; | 108 | #clock-cells = <0>; |
109 | compatible = "ti,gate-clock"; | 109 | compatible = "ti,gate-clock"; |
110 | clocks = <&dpll_per_m2_ck>; | 110 | clocks = <&l4ls_gclk>; |
111 | ti,bit-shift = <1>; | 111 | ti,bit-shift = <1>; |
112 | reg = <0x0664>; | 112 | reg = <0x0664>; |
113 | }; | 113 | }; |
@@ -115,7 +115,7 @@ | |||
115 | ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 { | 115 | ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 { |
116 | #clock-cells = <0>; | 116 | #clock-cells = <0>; |
117 | compatible = "ti,gate-clock"; | 117 | compatible = "ti,gate-clock"; |
118 | clocks = <&dpll_per_m2_ck>; | 118 | clocks = <&l4ls_gclk>; |
119 | ti,bit-shift = <2>; | 119 | ti,bit-shift = <2>; |
120 | reg = <0x0664>; | 120 | reg = <0x0664>; |
121 | }; | 121 | }; |
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi index c7dc9dab93a4..cfb49686ab6a 100644 --- a/arch/arm/boot/dts/am43xx-clocks.dtsi +++ b/arch/arm/boot/dts/am43xx-clocks.dtsi | |||
@@ -107,7 +107,7 @@ | |||
107 | ehrpwm0_tbclk: ehrpwm0_tbclk { | 107 | ehrpwm0_tbclk: ehrpwm0_tbclk { |
108 | #clock-cells = <0>; | 108 | #clock-cells = <0>; |
109 | compatible = "ti,gate-clock"; | 109 | compatible = "ti,gate-clock"; |
110 | clocks = <&dpll_per_m2_ck>; | 110 | clocks = <&l4ls_gclk>; |
111 | ti,bit-shift = <0>; | 111 | ti,bit-shift = <0>; |
112 | reg = <0x0664>; | 112 | reg = <0x0664>; |
113 | }; | 113 | }; |
@@ -115,7 +115,7 @@ | |||
115 | ehrpwm1_tbclk: ehrpwm1_tbclk { | 115 | ehrpwm1_tbclk: ehrpwm1_tbclk { |
116 | #clock-cells = <0>; | 116 | #clock-cells = <0>; |
117 | compatible = "ti,gate-clock"; | 117 | compatible = "ti,gate-clock"; |
118 | clocks = <&dpll_per_m2_ck>; | 118 | clocks = <&l4ls_gclk>; |
119 | ti,bit-shift = <1>; | 119 | ti,bit-shift = <1>; |
120 | reg = <0x0664>; | 120 | reg = <0x0664>; |
121 | }; | 121 | }; |
@@ -123,7 +123,7 @@ | |||
123 | ehrpwm2_tbclk: ehrpwm2_tbclk { | 123 | ehrpwm2_tbclk: ehrpwm2_tbclk { |
124 | #clock-cells = <0>; | 124 | #clock-cells = <0>; |
125 | compatible = "ti,gate-clock"; | 125 | compatible = "ti,gate-clock"; |
126 | clocks = <&dpll_per_m2_ck>; | 126 | clocks = <&l4ls_gclk>; |
127 | ti,bit-shift = <2>; | 127 | ti,bit-shift = <2>; |
128 | reg = <0x0664>; | 128 | reg = <0x0664>; |
129 | }; | 129 | }; |
@@ -131,7 +131,7 @@ | |||
131 | ehrpwm3_tbclk: ehrpwm3_tbclk { | 131 | ehrpwm3_tbclk: ehrpwm3_tbclk { |
132 | #clock-cells = <0>; | 132 | #clock-cells = <0>; |
133 | compatible = "ti,gate-clock"; | 133 | compatible = "ti,gate-clock"; |
134 | clocks = <&dpll_per_m2_ck>; | 134 | clocks = <&l4ls_gclk>; |
135 | ti,bit-shift = <4>; | 135 | ti,bit-shift = <4>; |
136 | reg = <0x0664>; | 136 | reg = <0x0664>; |
137 | }; | 137 | }; |
@@ -139,7 +139,7 @@ | |||
139 | ehrpwm4_tbclk: ehrpwm4_tbclk { | 139 | ehrpwm4_tbclk: ehrpwm4_tbclk { |
140 | #clock-cells = <0>; | 140 | #clock-cells = <0>; |
141 | compatible = "ti,gate-clock"; | 141 | compatible = "ti,gate-clock"; |
142 | clocks = <&dpll_per_m2_ck>; | 142 | clocks = <&l4ls_gclk>; |
143 | ti,bit-shift = <5>; | 143 | ti,bit-shift = <5>; |
144 | reg = <0x0664>; | 144 | reg = <0x0664>; |
145 | }; | 145 | }; |
@@ -147,7 +147,7 @@ | |||
147 | ehrpwm5_tbclk: ehrpwm5_tbclk { | 147 | ehrpwm5_tbclk: ehrpwm5_tbclk { |
148 | #clock-cells = <0>; | 148 | #clock-cells = <0>; |
149 | compatible = "ti,gate-clock"; | 149 | compatible = "ti,gate-clock"; |
150 | clocks = <&dpll_per_m2_ck>; | 150 | clocks = <&l4ls_gclk>; |
151 | ti,bit-shift = <6>; | 151 | ti,bit-shift = <6>; |
152 | reg = <0x0664>; | 152 | reg = <0x0664>; |
153 | }; | 153 | }; |
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index fff0ee69aab4..e7f0a4ae271c 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi | |||
@@ -494,12 +494,12 @@ | |||
494 | 494 | ||
495 | pinctrl_usart3_rts: usart3_rts-0 { | 495 | pinctrl_usart3_rts: usart3_rts-0 { |
496 | atmel,pins = | 496 | atmel,pins = |
497 | <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC8 periph B */ | 497 | <AT91_PIOC 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; |
498 | }; | 498 | }; |
499 | 499 | ||
500 | pinctrl_usart3_cts: usart3_cts-0 { | 500 | pinctrl_usart3_cts: usart3_cts-0 { |
501 | atmel,pins = | 501 | atmel,pins = |
502 | <AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC10 periph B */ | 502 | <AT91_PIOC 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; |
503 | }; | 503 | }; |
504 | }; | 504 | }; |
505 | 505 | ||
@@ -853,7 +853,7 @@ | |||
853 | }; | 853 | }; |
854 | 854 | ||
855 | usb1: gadget@fffa4000 { | 855 | usb1: gadget@fffa4000 { |
856 | compatible = "atmel,at91rm9200-udc"; | 856 | compatible = "atmel,at91sam9260-udc"; |
857 | reg = <0xfffa4000 0x4000>; | 857 | reg = <0xfffa4000 0x4000>; |
858 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; | 858 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; |
859 | clocks = <&udc_clk>, <&udpck>; | 859 | clocks = <&udc_clk>, <&udpck>; |
@@ -976,7 +976,6 @@ | |||
976 | atmel,watchdog-type = "hardware"; | 976 | atmel,watchdog-type = "hardware"; |
977 | atmel,reset-type = "all"; | 977 | atmel,reset-type = "all"; |
978 | atmel,dbg-halt; | 978 | atmel,dbg-halt; |
979 | atmel,idle-halt; | ||
980 | status = "disabled"; | 979 | status = "disabled"; |
981 | }; | 980 | }; |
982 | 981 | ||
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi index e247b0b5fdab..d55fdf2487ef 100644 --- a/arch/arm/boot/dts/at91sam9261.dtsi +++ b/arch/arm/boot/dts/at91sam9261.dtsi | |||
@@ -124,11 +124,12 @@ | |||
124 | }; | 124 | }; |
125 | 125 | ||
126 | usb1: gadget@fffa4000 { | 126 | usb1: gadget@fffa4000 { |
127 | compatible = "atmel,at91rm9200-udc"; | 127 | compatible = "atmel,at91sam9261-udc"; |
128 | reg = <0xfffa4000 0x4000>; | 128 | reg = <0xfffa4000 0x4000>; |
129 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; | 129 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; |
130 | clocks = <&usb>, <&udc_clk>, <&udpck>; | 130 | clocks = <&udc_clk>, <&udpck>; |
131 | clock-names = "usb_clk", "udc_clk", "udpck"; | 131 | clock-names = "pclk", "hclk"; |
132 | atmel,matrix = <&matrix>; | ||
132 | status = "disabled"; | 133 | status = "disabled"; |
133 | }; | 134 | }; |
134 | 135 | ||
@@ -262,7 +263,7 @@ | |||
262 | }; | 263 | }; |
263 | 264 | ||
264 | matrix: matrix@ffffee00 { | 265 | matrix: matrix@ffffee00 { |
265 | compatible = "atmel,at91sam9260-bus-matrix"; | 266 | compatible = "atmel,at91sam9260-bus-matrix", "syscon"; |
266 | reg = <0xffffee00 0x200>; | 267 | reg = <0xffffee00 0x200>; |
267 | }; | 268 | }; |
268 | 269 | ||
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index 1f67bb4c144e..fce301c4e9d6 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi | |||
@@ -69,7 +69,7 @@ | |||
69 | 69 | ||
70 | sram1: sram@00500000 { | 70 | sram1: sram@00500000 { |
71 | compatible = "mmio-sram"; | 71 | compatible = "mmio-sram"; |
72 | reg = <0x00300000 0x4000>; | 72 | reg = <0x00500000 0x4000>; |
73 | }; | 73 | }; |
74 | 74 | ||
75 | ahb { | 75 | ahb { |
@@ -856,7 +856,7 @@ | |||
856 | }; | 856 | }; |
857 | 857 | ||
858 | usb1: gadget@fff78000 { | 858 | usb1: gadget@fff78000 { |
859 | compatible = "atmel,at91rm9200-udc"; | 859 | compatible = "atmel,at91sam9263-udc"; |
860 | reg = <0xfff78000 0x4000>; | 860 | reg = <0xfff78000 0x4000>; |
861 | interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>; | 861 | interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>; |
862 | clocks = <&udc_clk>, <&udpck>; | 862 | clocks = <&udc_clk>, <&udpck>; |
@@ -905,7 +905,6 @@ | |||
905 | atmel,watchdog-type = "hardware"; | 905 | atmel,watchdog-type = "hardware"; |
906 | atmel,reset-type = "all"; | 906 | atmel,reset-type = "all"; |
907 | atmel,dbg-halt; | 907 | atmel,dbg-halt; |
908 | atmel,idle-halt; | ||
909 | status = "disabled"; | 908 | status = "disabled"; |
910 | }; | 909 | }; |
911 | 910 | ||
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index ee80aa9c0759..488af63d5174 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi | |||
@@ -1116,7 +1116,6 @@ | |||
1116 | atmel,watchdog-type = "hardware"; | 1116 | atmel,watchdog-type = "hardware"; |
1117 | atmel,reset-type = "all"; | 1117 | atmel,reset-type = "all"; |
1118 | atmel,dbg-halt; | 1118 | atmel,dbg-halt; |
1119 | atmel,idle-halt; | ||
1120 | status = "disabled"; | 1119 | status = "disabled"; |
1121 | }; | 1120 | }; |
1122 | 1121 | ||
@@ -1301,7 +1300,7 @@ | |||
1301 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 1300 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
1302 | reg = <0x00800000 0x100000>; | 1301 | reg = <0x00800000 0x100000>; |
1303 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; | 1302 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; |
1304 | clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; | 1303 | clocks = <&utmi>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; |
1305 | clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck"; | 1304 | clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck"; |
1306 | status = "disabled"; | 1305 | status = "disabled"; |
1307 | }; | 1306 | }; |
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi index c2666a7cb5b1..0c53a375ba99 100644 --- a/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/arch/arm/boot/dts/at91sam9n12.dtsi | |||
@@ -894,7 +894,6 @@ | |||
894 | atmel,watchdog-type = "hardware"; | 894 | atmel,watchdog-type = "hardware"; |
895 | atmel,reset-type = "all"; | 895 | atmel,reset-type = "all"; |
896 | atmel,dbg-halt; | 896 | atmel,dbg-halt; |
897 | atmel,idle-halt; | ||
898 | status = "disabled"; | 897 | status = "disabled"; |
899 | }; | 898 | }; |
900 | 899 | ||
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index 818dabdd8c0e..d221179d0f1a 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi | |||
@@ -1066,7 +1066,7 @@ | |||
1066 | reg = <0x00500000 0x80000 | 1066 | reg = <0x00500000 0x80000 |
1067 | 0xf803c000 0x400>; | 1067 | 0xf803c000 0x400>; |
1068 | interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; | 1068 | interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; |
1069 | clocks = <&usb>, <&udphs_clk>; | 1069 | clocks = <&utmi>, <&udphs_clk>; |
1070 | clock-names = "hclk", "pclk"; | 1070 | clock-names = "hclk", "pclk"; |
1071 | status = "disabled"; | 1071 | status = "disabled"; |
1072 | 1072 | ||
@@ -1130,7 +1130,6 @@ | |||
1130 | atmel,watchdog-type = "hardware"; | 1130 | atmel,watchdog-type = "hardware"; |
1131 | atmel,reset-type = "all"; | 1131 | atmel,reset-type = "all"; |
1132 | atmel,dbg-halt; | 1132 | atmel,dbg-halt; |
1133 | atmel,idle-halt; | ||
1134 | status = "disabled"; | 1133 | status = "disabled"; |
1135 | }; | 1134 | }; |
1136 | 1135 | ||
@@ -1186,7 +1185,7 @@ | |||
1186 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 1185 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
1187 | reg = <0x00700000 0x100000>; | 1186 | reg = <0x00700000 0x100000>; |
1188 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; | 1187 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; |
1189 | clocks = <&usb>, <&uhphs_clk>, <&uhpck>; | 1188 | clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; |
1190 | clock-names = "usb_clk", "ehci_clk", "uhpck"; | 1189 | clock-names = "usb_clk", "ehci_clk", "uhpck"; |
1191 | status = "disabled"; | 1190 | status = "disabled"; |
1192 | }; | 1191 | }; |
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts index d3a29c1b8417..afe678f6d2e9 100644 --- a/arch/arm/boot/dts/dm8168-evm.dts +++ b/arch/arm/boot/dts/dm8168-evm.dts | |||
@@ -36,6 +36,20 @@ | |||
36 | >; | 36 | >; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | mmc_pins: pinmux_mmc_pins { | ||
40 | pinctrl-single,pins = < | ||
41 | DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */ | ||
42 | DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */ | ||
43 | DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */ | ||
44 | DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */ | ||
45 | DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */ | ||
46 | DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */ | ||
47 | DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT3 */ | ||
48 | DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */ | ||
49 | DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */ | ||
50 | >; | ||
51 | }; | ||
52 | |||
39 | usb0_pins: pinmux_usb0_pins { | 53 | usb0_pins: pinmux_usb0_pins { |
40 | pinctrl-single,pins = < | 54 | pinctrl-single,pins = < |
41 | DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ | 55 | DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ |
@@ -137,7 +151,12 @@ | |||
137 | }; | 151 | }; |
138 | 152 | ||
139 | &mmc1 { | 153 | &mmc1 { |
154 | pinctrl-names = "default"; | ||
155 | pinctrl-0 = <&mmc_pins>; | ||
140 | vmmc-supply = <&vmmcsd_fixed>; | 156 | vmmc-supply = <&vmmcsd_fixed>; |
157 | bus-width = <4>; | ||
158 | cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; | ||
159 | wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; | ||
141 | }; | 160 | }; |
142 | 161 | ||
143 | /* At least dm8168-evm rev c won't support multipoint, later may */ | 162 | /* At least dm8168-evm rev c won't support multipoint, later may */ |
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index 3c97b5f2addc..f35715bc6992 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi | |||
@@ -150,17 +150,27 @@ | |||
150 | }; | 150 | }; |
151 | 151 | ||
152 | gpio1: gpio@48032000 { | 152 | gpio1: gpio@48032000 { |
153 | compatible = "ti,omap3-gpio"; | 153 | compatible = "ti,omap4-gpio"; |
154 | ti,hwmods = "gpio1"; | 154 | ti,hwmods = "gpio1"; |
155 | ti,gpio-always-on; | ||
155 | reg = <0x48032000 0x1000>; | 156 | reg = <0x48032000 0x1000>; |
156 | interrupts = <97>; | 157 | interrupts = <96>; |
158 | gpio-controller; | ||
159 | #gpio-cells = <2>; | ||
160 | interrupt-controller; | ||
161 | #interrupt-cells = <2>; | ||
157 | }; | 162 | }; |
158 | 163 | ||
159 | gpio2: gpio@4804c000 { | 164 | gpio2: gpio@4804c000 { |
160 | compatible = "ti,omap3-gpio"; | 165 | compatible = "ti,omap4-gpio"; |
161 | ti,hwmods = "gpio2"; | 166 | ti,hwmods = "gpio2"; |
167 | ti,gpio-always-on; | ||
162 | reg = <0x4804c000 0x1000>; | 168 | reg = <0x4804c000 0x1000>; |
163 | interrupts = <99>; | 169 | interrupts = <98>; |
170 | gpio-controller; | ||
171 | #gpio-cells = <2>; | ||
172 | interrupt-controller; | ||
173 | #interrupt-cells = <2>; | ||
164 | }; | 174 | }; |
165 | 175 | ||
166 | gpmc: gpmc@50000000 { | 176 | gpmc: gpmc@50000000 { |
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 3290a96ba586..7563d7ce01bb 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts | |||
@@ -263,17 +263,15 @@ | |||
263 | 263 | ||
264 | dcan1_pins_default: dcan1_pins_default { | 264 | dcan1_pins_default: dcan1_pins_default { |
265 | pinctrl-single,pins = < | 265 | pinctrl-single,pins = < |
266 | 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ | 266 | 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */ |
267 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 267 | 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */ |
268 | 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */ | ||
269 | >; | 268 | >; |
270 | }; | 269 | }; |
271 | 270 | ||
272 | dcan1_pins_sleep: dcan1_pins_sleep { | 271 | dcan1_pins_sleep: dcan1_pins_sleep { |
273 | pinctrl-single,pins = < | 272 | pinctrl-single,pins = < |
274 | 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ | 273 | 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */ |
275 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 274 | 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */ |
276 | 0x418 (MUX_MODE15) /* wakeup0.off */ | ||
277 | >; | 275 | >; |
278 | }; | 276 | }; |
279 | }; | 277 | }; |
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 127608d79033..c4659a979c41 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi | |||
@@ -1111,7 +1111,6 @@ | |||
1111 | "wkupclk", "refclk", | 1111 | "wkupclk", "refclk", |
1112 | "div-clk", "phy-div"; | 1112 | "div-clk", "phy-div"; |
1113 | #phy-cells = <0>; | 1113 | #phy-cells = <0>; |
1114 | ti,hwmods = "pcie1-phy"; | ||
1115 | }; | 1114 | }; |
1116 | 1115 | ||
1117 | pcie2_phy: pciephy@4a095000 { | 1116 | pcie2_phy: pciephy@4a095000 { |
@@ -1130,7 +1129,6 @@ | |||
1130 | "wkupclk", "refclk", | 1129 | "wkupclk", "refclk", |
1131 | "div-clk", "phy-div"; | 1130 | "div-clk", "phy-div"; |
1132 | #phy-cells = <0>; | 1131 | #phy-cells = <0>; |
1133 | ti,hwmods = "pcie2-phy"; | ||
1134 | status = "disabled"; | 1132 | status = "disabled"; |
1135 | }; | 1133 | }; |
1136 | }; | 1134 | }; |
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index e0264d0bf7b9..40ed539ce474 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ b/arch/arm/boot/dts/dra72-evm.dts | |||
@@ -119,17 +119,15 @@ | |||
119 | 119 | ||
120 | dcan1_pins_default: dcan1_pins_default { | 120 | dcan1_pins_default: dcan1_pins_default { |
121 | pinctrl-single,pins = < | 121 | pinctrl-single,pins = < |
122 | 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ | 122 | 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */ |
123 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 123 | 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */ |
124 | 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */ | ||
125 | >; | 124 | >; |
126 | }; | 125 | }; |
127 | 126 | ||
128 | dcan1_pins_sleep: dcan1_pins_sleep { | 127 | dcan1_pins_sleep: dcan1_pins_sleep { |
129 | pinctrl-single,pins = < | 128 | pinctrl-single,pins = < |
130 | 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ | 129 | 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */ |
131 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 130 | 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */ |
132 | 0x418 (MUX_MODE15) /* wakeup0.off */ | ||
133 | >; | 131 | >; |
134 | }; | 132 | }; |
135 | 133 | ||
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index 4bdcbd61ce47..99b09a44e269 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi | |||
@@ -243,10 +243,18 @@ | |||
243 | ti,invert-autoidle-bit; | 243 | ti,invert-autoidle-bit; |
244 | }; | 244 | }; |
245 | 245 | ||
246 | dpll_core_byp_mux: dpll_core_byp_mux { | ||
247 | #clock-cells = <0>; | ||
248 | compatible = "ti,mux-clock"; | ||
249 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
250 | ti,bit-shift = <23>; | ||
251 | reg = <0x012c>; | ||
252 | }; | ||
253 | |||
246 | dpll_core_ck: dpll_core_ck { | 254 | dpll_core_ck: dpll_core_ck { |
247 | #clock-cells = <0>; | 255 | #clock-cells = <0>; |
248 | compatible = "ti,omap4-dpll-core-clock"; | 256 | compatible = "ti,omap4-dpll-core-clock"; |
249 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 257 | clocks = <&sys_clkin1>, <&dpll_core_byp_mux>; |
250 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; | 258 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; |
251 | }; | 259 | }; |
252 | 260 | ||
@@ -309,10 +317,18 @@ | |||
309 | clock-div = <1>; | 317 | clock-div = <1>; |
310 | }; | 318 | }; |
311 | 319 | ||
320 | dpll_dsp_byp_mux: dpll_dsp_byp_mux { | ||
321 | #clock-cells = <0>; | ||
322 | compatible = "ti,mux-clock"; | ||
323 | clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; | ||
324 | ti,bit-shift = <23>; | ||
325 | reg = <0x0240>; | ||
326 | }; | ||
327 | |||
312 | dpll_dsp_ck: dpll_dsp_ck { | 328 | dpll_dsp_ck: dpll_dsp_ck { |
313 | #clock-cells = <0>; | 329 | #clock-cells = <0>; |
314 | compatible = "ti,omap4-dpll-clock"; | 330 | compatible = "ti,omap4-dpll-clock"; |
315 | clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; | 331 | clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>; |
316 | reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>; | 332 | reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>; |
317 | }; | 333 | }; |
318 | 334 | ||
@@ -335,10 +351,18 @@ | |||
335 | clock-div = <1>; | 351 | clock-div = <1>; |
336 | }; | 352 | }; |
337 | 353 | ||
354 | dpll_iva_byp_mux: dpll_iva_byp_mux { | ||
355 | #clock-cells = <0>; | ||
356 | compatible = "ti,mux-clock"; | ||
357 | clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; | ||
358 | ti,bit-shift = <23>; | ||
359 | reg = <0x01ac>; | ||
360 | }; | ||
361 | |||
338 | dpll_iva_ck: dpll_iva_ck { | 362 | dpll_iva_ck: dpll_iva_ck { |
339 | #clock-cells = <0>; | 363 | #clock-cells = <0>; |
340 | compatible = "ti,omap4-dpll-clock"; | 364 | compatible = "ti,omap4-dpll-clock"; |
341 | clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; | 365 | clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>; |
342 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; | 366 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; |
343 | }; | 367 | }; |
344 | 368 | ||
@@ -361,10 +385,18 @@ | |||
361 | clock-div = <1>; | 385 | clock-div = <1>; |
362 | }; | 386 | }; |
363 | 387 | ||
388 | dpll_gpu_byp_mux: dpll_gpu_byp_mux { | ||
389 | #clock-cells = <0>; | ||
390 | compatible = "ti,mux-clock"; | ||
391 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
392 | ti,bit-shift = <23>; | ||
393 | reg = <0x02e4>; | ||
394 | }; | ||
395 | |||
364 | dpll_gpu_ck: dpll_gpu_ck { | 396 | dpll_gpu_ck: dpll_gpu_ck { |
365 | #clock-cells = <0>; | 397 | #clock-cells = <0>; |
366 | compatible = "ti,omap4-dpll-clock"; | 398 | compatible = "ti,omap4-dpll-clock"; |
367 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 399 | clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>; |
368 | reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>; | 400 | reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>; |
369 | }; | 401 | }; |
370 | 402 | ||
@@ -398,10 +430,18 @@ | |||
398 | clock-div = <1>; | 430 | clock-div = <1>; |
399 | }; | 431 | }; |
400 | 432 | ||
433 | dpll_ddr_byp_mux: dpll_ddr_byp_mux { | ||
434 | #clock-cells = <0>; | ||
435 | compatible = "ti,mux-clock"; | ||
436 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
437 | ti,bit-shift = <23>; | ||
438 | reg = <0x021c>; | ||
439 | }; | ||
440 | |||
401 | dpll_ddr_ck: dpll_ddr_ck { | 441 | dpll_ddr_ck: dpll_ddr_ck { |
402 | #clock-cells = <0>; | 442 | #clock-cells = <0>; |
403 | compatible = "ti,omap4-dpll-clock"; | 443 | compatible = "ti,omap4-dpll-clock"; |
404 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 444 | clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>; |
405 | reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>; | 445 | reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>; |
406 | }; | 446 | }; |
407 | 447 | ||
@@ -416,10 +456,18 @@ | |||
416 | ti,invert-autoidle-bit; | 456 | ti,invert-autoidle-bit; |
417 | }; | 457 | }; |
418 | 458 | ||
459 | dpll_gmac_byp_mux: dpll_gmac_byp_mux { | ||
460 | #clock-cells = <0>; | ||
461 | compatible = "ti,mux-clock"; | ||
462 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
463 | ti,bit-shift = <23>; | ||
464 | reg = <0x02b4>; | ||
465 | }; | ||
466 | |||
419 | dpll_gmac_ck: dpll_gmac_ck { | 467 | dpll_gmac_ck: dpll_gmac_ck { |
420 | #clock-cells = <0>; | 468 | #clock-cells = <0>; |
421 | compatible = "ti,omap4-dpll-clock"; | 469 | compatible = "ti,omap4-dpll-clock"; |
422 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 470 | clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>; |
423 | reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>; | 471 | reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>; |
424 | }; | 472 | }; |
425 | 473 | ||
@@ -482,10 +530,18 @@ | |||
482 | clock-div = <1>; | 530 | clock-div = <1>; |
483 | }; | 531 | }; |
484 | 532 | ||
533 | dpll_eve_byp_mux: dpll_eve_byp_mux { | ||
534 | #clock-cells = <0>; | ||
535 | compatible = "ti,mux-clock"; | ||
536 | clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; | ||
537 | ti,bit-shift = <23>; | ||
538 | reg = <0x0290>; | ||
539 | }; | ||
540 | |||
485 | dpll_eve_ck: dpll_eve_ck { | 541 | dpll_eve_ck: dpll_eve_ck { |
486 | #clock-cells = <0>; | 542 | #clock-cells = <0>; |
487 | compatible = "ti,omap4-dpll-clock"; | 543 | compatible = "ti,omap4-dpll-clock"; |
488 | clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; | 544 | clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>; |
489 | reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>; | 545 | reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>; |
490 | }; | 546 | }; |
491 | 547 | ||
@@ -1249,10 +1305,18 @@ | |||
1249 | clock-div = <1>; | 1305 | clock-div = <1>; |
1250 | }; | 1306 | }; |
1251 | 1307 | ||
1308 | dpll_per_byp_mux: dpll_per_byp_mux { | ||
1309 | #clock-cells = <0>; | ||
1310 | compatible = "ti,mux-clock"; | ||
1311 | clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; | ||
1312 | ti,bit-shift = <23>; | ||
1313 | reg = <0x014c>; | ||
1314 | }; | ||
1315 | |||
1252 | dpll_per_ck: dpll_per_ck { | 1316 | dpll_per_ck: dpll_per_ck { |
1253 | #clock-cells = <0>; | 1317 | #clock-cells = <0>; |
1254 | compatible = "ti,omap4-dpll-clock"; | 1318 | compatible = "ti,omap4-dpll-clock"; |
1255 | clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; | 1319 | clocks = <&sys_clkin1>, <&dpll_per_byp_mux>; |
1256 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; | 1320 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; |
1257 | }; | 1321 | }; |
1258 | 1322 | ||
@@ -1275,10 +1339,18 @@ | |||
1275 | clock-div = <1>; | 1339 | clock-div = <1>; |
1276 | }; | 1340 | }; |
1277 | 1341 | ||
1342 | dpll_usb_byp_mux: dpll_usb_byp_mux { | ||
1343 | #clock-cells = <0>; | ||
1344 | compatible = "ti,mux-clock"; | ||
1345 | clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; | ||
1346 | ti,bit-shift = <23>; | ||
1347 | reg = <0x018c>; | ||
1348 | }; | ||
1349 | |||
1278 | dpll_usb_ck: dpll_usb_ck { | 1350 | dpll_usb_ck: dpll_usb_ck { |
1279 | #clock-cells = <0>; | 1351 | #clock-cells = <0>; |
1280 | compatible = "ti,omap4-dpll-j-type-clock"; | 1352 | compatible = "ti,omap4-dpll-j-type-clock"; |
1281 | clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; | 1353 | clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>; |
1282 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; | 1354 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; |
1283 | }; | 1355 | }; |
1284 | 1356 | ||
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index 277b48b0b6f9..ac6b0ae42caf 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include "skeleton.dtsi" | 20 | #include "skeleton.dtsi" |
21 | #include "exynos4-cpu-thermal.dtsi" | ||
21 | #include <dt-bindings/clock/exynos3250.h> | 22 | #include <dt-bindings/clock/exynos3250.h> |
22 | 23 | ||
23 | / { | 24 | / { |
@@ -193,6 +194,7 @@ | |||
193 | interrupts = <0 216 0>; | 194 | interrupts = <0 216 0>; |
194 | clocks = <&cmu CLK_TMU_APBIF>; | 195 | clocks = <&cmu CLK_TMU_APBIF>; |
195 | clock-names = "tmu_apbif"; | 196 | clock-names = "tmu_apbif"; |
197 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
196 | status = "disabled"; | 198 | status = "disabled"; |
197 | }; | 199 | }; |
198 | 200 | ||
diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi new file mode 100644 index 000000000000..735cb2f10817 --- /dev/null +++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Device tree sources for Exynos4 thermal zone | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <dt-bindings/thermal/thermal.h> | ||
13 | |||
14 | / { | ||
15 | thermal-zones { | ||
16 | cpu_thermal: cpu-thermal { | ||
17 | thermal-sensors = <&tmu 0>; | ||
18 | polling-delay-passive = <0>; | ||
19 | polling-delay = <0>; | ||
20 | trips { | ||
21 | cpu_alert0: cpu-alert-0 { | ||
22 | temperature = <70000>; /* millicelsius */ | ||
23 | hysteresis = <10000>; /* millicelsius */ | ||
24 | type = "active"; | ||
25 | }; | ||
26 | cpu_alert1: cpu-alert-1 { | ||
27 | temperature = <95000>; /* millicelsius */ | ||
28 | hysteresis = <10000>; /* millicelsius */ | ||
29 | type = "active"; | ||
30 | }; | ||
31 | cpu_alert2: cpu-alert-2 { | ||
32 | temperature = <110000>; /* millicelsius */ | ||
33 | hysteresis = <10000>; /* millicelsius */ | ||
34 | type = "active"; | ||
35 | }; | ||
36 | cpu_crit0: cpu-crit-0 { | ||
37 | temperature = <120000>; /* millicelsius */ | ||
38 | hysteresis = <0>; /* millicelsius */ | ||
39 | type = "critical"; | ||
40 | }; | ||
41 | }; | ||
42 | cooling-maps { | ||
43 | map0 { | ||
44 | trip = <&cpu_alert0>; | ||
45 | }; | ||
46 | map1 { | ||
47 | trip = <&cpu_alert1>; | ||
48 | }; | ||
49 | }; | ||
50 | }; | ||
51 | }; | ||
52 | }; | ||
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index 76173cacd450..77ea547768f4 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi | |||
@@ -38,6 +38,7 @@ | |||
38 | i2c5 = &i2c_5; | 38 | i2c5 = &i2c_5; |
39 | i2c6 = &i2c_6; | 39 | i2c6 = &i2c_6; |
40 | i2c7 = &i2c_7; | 40 | i2c7 = &i2c_7; |
41 | i2c8 = &i2c_8; | ||
41 | csis0 = &csis_0; | 42 | csis0 = &csis_0; |
42 | csis1 = &csis_1; | 43 | csis1 = &csis_1; |
43 | fimc0 = &fimc_0; | 44 | fimc0 = &fimc_0; |
@@ -104,6 +105,7 @@ | |||
104 | compatible = "samsung,exynos4210-pd"; | 105 | compatible = "samsung,exynos4210-pd"; |
105 | reg = <0x10023C20 0x20>; | 106 | reg = <0x10023C20 0x20>; |
106 | #power-domain-cells = <0>; | 107 | #power-domain-cells = <0>; |
108 | power-domains = <&pd_lcd0>; | ||
107 | }; | 109 | }; |
108 | 110 | ||
109 | pd_cam: cam-power-domain@10023C00 { | 111 | pd_cam: cam-power-domain@10023C00 { |
@@ -554,6 +556,22 @@ | |||
554 | status = "disabled"; | 556 | status = "disabled"; |
555 | }; | 557 | }; |
556 | 558 | ||
559 | i2c_8: i2c@138E0000 { | ||
560 | #address-cells = <1>; | ||
561 | #size-cells = <0>; | ||
562 | compatible = "samsung,s3c2440-hdmiphy-i2c"; | ||
563 | reg = <0x138E0000 0x100>; | ||
564 | interrupts = <0 93 0>; | ||
565 | clocks = <&clock CLK_I2C_HDMI>; | ||
566 | clock-names = "i2c"; | ||
567 | status = "disabled"; | ||
568 | |||
569 | hdmi_i2c_phy: hdmiphy@38 { | ||
570 | compatible = "exynos4210-hdmiphy"; | ||
571 | reg = <0x38>; | ||
572 | }; | ||
573 | }; | ||
574 | |||
557 | spi_0: spi@13920000 { | 575 | spi_0: spi@13920000 { |
558 | compatible = "samsung,exynos4210-spi"; | 576 | compatible = "samsung,exynos4210-spi"; |
559 | reg = <0x13920000 0x100>; | 577 | reg = <0x13920000 0x100>; |
@@ -663,6 +681,33 @@ | |||
663 | status = "disabled"; | 681 | status = "disabled"; |
664 | }; | 682 | }; |
665 | 683 | ||
684 | tmu: tmu@100C0000 { | ||
685 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
686 | }; | ||
687 | |||
688 | hdmi: hdmi@12D00000 { | ||
689 | compatible = "samsung,exynos4210-hdmi"; | ||
690 | reg = <0x12D00000 0x70000>; | ||
691 | interrupts = <0 92 0>; | ||
692 | clock-names = "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy", | ||
693 | "mout_hdmi"; | ||
694 | clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, | ||
695 | <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, | ||
696 | <&clock CLK_MOUT_HDMI>; | ||
697 | phy = <&hdmi_i2c_phy>; | ||
698 | power-domains = <&pd_tv>; | ||
699 | samsung,syscon-phandle = <&pmu_system_controller>; | ||
700 | status = "disabled"; | ||
701 | }; | ||
702 | |||
703 | mixer: mixer@12C10000 { | ||
704 | compatible = "samsung,exynos4210-mixer"; | ||
705 | interrupts = <0 91 0>; | ||
706 | reg = <0x12C10000 0x2100>, <0x12c00000 0x300>; | ||
707 | power-domains = <&pd_tv>; | ||
708 | status = "disabled"; | ||
709 | }; | ||
710 | |||
666 | ppmu_dmc0: ppmu_dmc0@106a0000 { | 711 | ppmu_dmc0: ppmu_dmc0@106a0000 { |
667 | compatible = "samsung,exynos-ppmu"; | 712 | compatible = "samsung,exynos-ppmu"; |
668 | reg = <0x106a0000 0x2000>; | 713 | reg = <0x106a0000 0x2000>; |
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts index 3d6652a4b6cb..32c5fd8f6269 100644 --- a/arch/arm/boot/dts/exynos4210-trats.dts +++ b/arch/arm/boot/dts/exynos4210-trats.dts | |||
@@ -426,6 +426,25 @@ | |||
426 | status = "okay"; | 426 | status = "okay"; |
427 | }; | 427 | }; |
428 | 428 | ||
429 | tmu@100C0000 { | ||
430 | status = "okay"; | ||
431 | }; | ||
432 | |||
433 | thermal-zones { | ||
434 | cpu_thermal: cpu-thermal { | ||
435 | cooling-maps { | ||
436 | map0 { | ||
437 | /* Corresponds to 800MHz at freq_table */ | ||
438 | cooling-device = <&cpu0 2 2>; | ||
439 | }; | ||
440 | map1 { | ||
441 | /* Corresponds to 200MHz at freq_table */ | ||
442 | cooling-device = <&cpu0 4 4>; | ||
443 | }; | ||
444 | }; | ||
445 | }; | ||
446 | }; | ||
447 | |||
429 | camera { | 448 | camera { |
430 | pinctrl-names = "default"; | 449 | pinctrl-names = "default"; |
431 | pinctrl-0 = <>; | 450 | pinctrl-0 = <>; |
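In each cooling-device entry the two cells after the CPU phandle give the minimum and maximum throttling state the thermal governor may apply for that trip, matching the #cooling-cells = <2> added to the cpu nodes elsewhere in this series; the state number indexes the cpufreq table, with higher indexes meaning lower frequencies. Roughly, for exynos4210 (the table order is inferred from the comments above, not spelled out in the patch):

    /* assumed exynos4210 cpufreq table:
     * state 0 -> 1200 MHz, ..., state 2 -> 800 MHz, ..., state 4 -> 200 MHz
     */
    map0 {
    	cooling-device = <&cpu0 2 2>;	/* clamp to exactly the 800 MHz state */
    };
    map1 {
    	cooling-device = <&cpu0 4 4>;	/* clamp to exactly the 200 MHz state */
    };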
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts index b57e6b82ea20..d4f2b11319dd 100644 --- a/arch/arm/boot/dts/exynos4210-universal_c210.dts +++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts | |||
@@ -505,6 +505,63 @@ | |||
505 | assigned-clock-rates = <0>, <160000000>; | 505 | assigned-clock-rates = <0>, <160000000>; |
506 | }; | 506 | }; |
507 | }; | 507 | }; |
508 | |||
509 | hdmi_en: voltage-regulator-hdmi-5v { | ||
510 | compatible = "regulator-fixed"; | ||
511 | regulator-name = "HDMI_5V"; | ||
512 | regulator-min-microvolt = <5000000>; | ||
513 | regulator-max-microvolt = <5000000>; | ||
514 | gpio = <&gpe0 1 0>; | ||
515 | enable-active-high; | ||
516 | }; | ||
517 | |||
518 | hdmi_ddc: i2c-ddc { | ||
519 | compatible = "i2c-gpio"; | ||
520 | gpios = <&gpe4 2 0 &gpe4 3 0>; | ||
521 | i2c-gpio,delay-us = <100>; | ||
522 | #address-cells = <1>; | ||
523 | #size-cells = <0>; | ||
524 | |||
525 | pinctrl-0 = <&i2c_ddc_bus>; | ||
526 | pinctrl-names = "default"; | ||
527 | status = "okay"; | ||
528 | }; | ||
529 | |||
530 | mixer@12C10000 { | ||
531 | status = "okay"; | ||
532 | }; | ||
533 | |||
534 | hdmi@12D00000 { | ||
535 | hpd-gpio = <&gpx3 7 0>; | ||
536 | pinctrl-names = "default"; | ||
537 | pinctrl-0 = <&hdmi_hpd>; | ||
538 | hdmi-en-supply = <&hdmi_en>; | ||
539 | vdd-supply = <&ldo3_reg>; | ||
540 | vdd_osc-supply = <&ldo4_reg>; | ||
541 | vdd_pll-supply = <&ldo3_reg>; | ||
542 | ddc = <&hdmi_ddc>; | ||
543 | status = "okay"; | ||
544 | }; | ||
545 | |||
546 | i2c@138E0000 { | ||
547 | status = "okay"; | ||
548 | }; | ||
549 | }; | ||
550 | |||
551 | &pinctrl_1 { | ||
552 | hdmi_hpd: hdmi-hpd { | ||
553 | samsung,pins = "gpx3-7"; | ||
554 | samsung,pin-pud = <0>; | ||
555 | }; | ||
556 | }; | ||
557 | |||
558 | &pinctrl_0 { | ||
559 | i2c_ddc_bus: i2c-ddc-bus { | ||
560 | samsung,pins = "gpe4-2", "gpe4-3"; | ||
561 | samsung,pin-function = <2>; | ||
562 | samsung,pin-pud = <3>; | ||
563 | samsung,pin-drv = <0>; | ||
564 | }; | ||
508 | }; | 565 | }; |
509 | 566 | ||
510 | &mdma1 { | 567 | &mdma1 { |
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi index 67c832c9dcf1..be89f83f70e7 100644 --- a/arch/arm/boot/dts/exynos4210.dtsi +++ b/arch/arm/boot/dts/exynos4210.dtsi | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include "exynos4.dtsi" | 22 | #include "exynos4.dtsi" |
23 | #include "exynos4210-pinctrl.dtsi" | 23 | #include "exynos4210-pinctrl.dtsi" |
24 | #include "exynos4-cpu-thermal.dtsi" | ||
24 | 25 | ||
25 | / { | 26 | / { |
26 | compatible = "samsung,exynos4210", "samsung,exynos4"; | 27 | compatible = "samsung,exynos4210", "samsung,exynos4"; |
@@ -35,10 +36,13 @@ | |||
35 | #address-cells = <1>; | 36 | #address-cells = <1>; |
36 | #size-cells = <0>; | 37 | #size-cells = <0>; |
37 | 38 | ||
38 | cpu@900 { | 39 | cpu0: cpu@900 { |
39 | device_type = "cpu"; | 40 | device_type = "cpu"; |
40 | compatible = "arm,cortex-a9"; | 41 | compatible = "arm,cortex-a9"; |
41 | reg = <0x900>; | 42 | reg = <0x900>; |
43 | cooling-min-level = <4>; | ||
44 | cooling-max-level = <2>; | ||
45 | #cooling-cells = <2>; /* min followed by max */ | ||
42 | }; | 46 | }; |
43 | 47 | ||
44 | cpu@901 { | 48 | cpu@901 { |
@@ -153,16 +157,38 @@ | |||
153 | reg = <0x03860000 0x1000>; | 157 | reg = <0x03860000 0x1000>; |
154 | }; | 158 | }; |
155 | 159 | ||
156 | tmu@100C0000 { | 160 | tmu: tmu@100C0000 { |
157 | compatible = "samsung,exynos4210-tmu"; | 161 | compatible = "samsung,exynos4210-tmu"; |
158 | interrupt-parent = <&combiner>; | 162 | interrupt-parent = <&combiner>; |
159 | reg = <0x100C0000 0x100>; | 163 | reg = <0x100C0000 0x100>; |
160 | interrupts = <2 4>; | 164 | interrupts = <2 4>; |
161 | clocks = <&clock CLK_TMU_APBIF>; | 165 | clocks = <&clock CLK_TMU_APBIF>; |
162 | clock-names = "tmu_apbif"; | 166 | clock-names = "tmu_apbif"; |
167 | samsung,tmu_gain = <15>; | ||
168 | samsung,tmu_reference_voltage = <7>; | ||
163 | status = "disabled"; | 169 | status = "disabled"; |
164 | }; | 170 | }; |
165 | 171 | ||
172 | thermal-zones { | ||
173 | cpu_thermal: cpu-thermal { | ||
174 | polling-delay-passive = <0>; | ||
175 | polling-delay = <0>; | ||
176 | thermal-sensors = <&tmu 0>; | ||
177 | |||
178 | trips { | ||
179 | cpu_alert0: cpu-alert-0 { | ||
180 | temperature = <85000>; /* millicelsius */ | ||
181 | }; | ||
182 | cpu_alert1: cpu-alert-1 { | ||
183 | temperature = <100000>; /* millicelsius */ | ||
184 | }; | ||
185 | cpu_alert2: cpu-alert-2 { | ||
186 | temperature = <110000>; /* millicelsius */ | ||
187 | }; | ||
188 | }; | ||
189 | }; | ||
190 | }; | ||
191 | |||
166 | g2d@12800000 { | 192 | g2d@12800000 { |
167 | compatible = "samsung,s5pv210-g2d"; | 193 | compatible = "samsung,s5pv210-g2d"; |
168 | reg = <0x12800000 0x1000>; | 194 | reg = <0x12800000 0x1000>; |
@@ -203,6 +229,14 @@ | |||
203 | }; | 229 | }; |
204 | }; | 230 | }; |
205 | 231 | ||
232 | mixer: mixer@12C10000 { | ||
233 | clock-names = "mixer", "hdmi", "sclk_hdmi", "vp", "mout_mixer", | ||
234 | "sclk_mixer"; | ||
235 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, | ||
236 | <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>, | ||
237 | <&clock CLK_MOUT_MIXER>, <&clock CLK_SCLK_MIXER>; | ||
238 | }; | ||
239 | |||
206 | ppmu_lcd1: ppmu_lcd1@12240000 { | 240 | ppmu_lcd1: ppmu_lcd1@12240000 { |
207 | compatible = "samsung,exynos-ppmu"; | 241 | compatible = "samsung,exynos-ppmu"; |
208 | reg = <0x12240000 0x2000>; | 242 | reg = <0x12240000 0x2000>; |
diff --git a/arch/arm/boot/dts/exynos4212.dtsi b/arch/arm/boot/dts/exynos4212.dtsi index dd0a43ec56da..5be03288f1ee 100644 --- a/arch/arm/boot/dts/exynos4212.dtsi +++ b/arch/arm/boot/dts/exynos4212.dtsi | |||
@@ -26,10 +26,13 @@ | |||
26 | #address-cells = <1>; | 26 | #address-cells = <1>; |
27 | #size-cells = <0>; | 27 | #size-cells = <0>; |
28 | 28 | ||
29 | cpu@A00 { | 29 | cpu0: cpu@A00 { |
30 | device_type = "cpu"; | 30 | device_type = "cpu"; |
31 | compatible = "arm,cortex-a9"; | 31 | compatible = "arm,cortex-a9"; |
32 | reg = <0xA00>; | 32 | reg = <0xA00>; |
33 | cooling-min-level = <13>; | ||
34 | cooling-max-level = <7>; | ||
35 | #cooling-cells = <2>; /* min followed by max */ | ||
33 | }; | 36 | }; |
34 | 37 | ||
35 | cpu@A01 { | 38 | cpu@A01 { |
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index de80b5bba204..adb4f6a97a1d 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi | |||
@@ -249,6 +249,20 @@ | |||
249 | regulator-always-on; | 249 | regulator-always-on; |
250 | }; | 250 | }; |
251 | 251 | ||
252 | ldo8_reg: ldo@8 { | ||
253 | regulator-compatible = "LDO8"; | ||
254 | regulator-name = "VDD10_HDMI_1.0V"; | ||
255 | regulator-min-microvolt = <1000000>; | ||
256 | regulator-max-microvolt = <1000000>; | ||
257 | }; | ||
258 | |||
259 | ldo10_reg: ldo@10 { | ||
260 | regulator-compatible = "LDO10"; | ||
261 | regulator-name = "VDDQ_MIPIHSI_1.8V"; | ||
262 | regulator-min-microvolt = <1800000>; | ||
263 | regulator-max-microvolt = <1800000>; | ||
264 | }; | ||
265 | |||
252 | ldo11_reg: LDO11 { | 266 | ldo11_reg: LDO11 { |
253 | regulator-name = "VDD18_ABB1_1.8V"; | 267 | regulator-name = "VDD18_ABB1_1.8V"; |
254 | regulator-min-microvolt = <1800000>; | 268 | regulator-min-microvolt = <1800000>; |
@@ -411,6 +425,51 @@ | |||
411 | ehci: ehci@12580000 { | 425 | ehci: ehci@12580000 { |
412 | status = "okay"; | 426 | status = "okay"; |
413 | }; | 427 | }; |
428 | |||
429 | tmu@100C0000 { | ||
430 | vtmu-supply = <&ldo10_reg>; | ||
431 | status = "okay"; | ||
432 | }; | ||
433 | |||
434 | thermal-zones { | ||
435 | cpu_thermal: cpu-thermal { | ||
436 | cooling-maps { | ||
437 | map0 { | ||
438 | /* Corresponds to 800MHz at freq_table */ | ||
439 | cooling-device = <&cpu0 7 7>; | ||
440 | }; | ||
441 | map1 { | ||
442 | /* Corresponds to 200MHz at freq_table */ | ||
443 | cooling-device = <&cpu0 13 13>; | ||
444 | }; | ||
445 | }; | ||
446 | }; | ||
447 | }; | ||
448 | |||
449 | mixer: mixer@12C10000 { | ||
450 | status = "okay"; | ||
451 | }; | ||
452 | |||
453 | hdmi@12D00000 { | ||
454 | hpd-gpio = <&gpx3 7 0>; | ||
455 | pinctrl-names = "default"; | ||
456 | pinctrl-0 = <&hdmi_hpd>; | ||
457 | vdd-supply = <&ldo8_reg>; | ||
458 | vdd_osc-supply = <&ldo10_reg>; | ||
459 | vdd_pll-supply = <&ldo8_reg>; | ||
460 | ddc = <&hdmi_ddc>; | ||
461 | status = "okay"; | ||
462 | }; | ||
463 | |||
464 | hdmi_ddc: i2c@13880000 { | ||
465 | status = "okay"; | ||
466 | pinctrl-names = "default"; | ||
467 | pinctrl-0 = <&i2c2_bus>; | ||
468 | }; | ||
469 | |||
470 | i2c@138E0000 { | ||
471 | status = "okay"; | ||
472 | }; | ||
414 | }; | 473 | }; |
415 | 474 | ||
416 | &pinctrl_1 { | 475 | &pinctrl_1 { |
@@ -425,4 +484,9 @@ | |||
425 | samsung,pin-pud = <0>; | 484 | samsung,pin-pud = <0>; |
426 | samsung,pin-drv = <0>; | 485 | samsung,pin-drv = <0>; |
427 | }; | 486 | }; |
487 | |||
488 | hdmi_hpd: hdmi-hpd { | ||
489 | samsung,pins = "gpx3-7"; | ||
490 | samsung,pin-pud = <1>; | ||
491 | }; | ||
428 | }; | 492 | }; |
diff --git a/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi new file mode 100644 index 000000000000..e3f7934d19d0 --- /dev/null +++ b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Device tree sources for Exynos4412 TMU sensor configuration | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <dt-bindings/thermal/thermal_exynos.h> | ||
13 | |||
14 | #thermal-sensor-cells = <0>; | ||
15 | samsung,tmu_gain = <8>; | ||
16 | samsung,tmu_reference_voltage = <16>; | ||
17 | samsung,tmu_noise_cancel_mode = <4>; | ||
18 | samsung,tmu_efuse_value = <55>; | ||
19 | samsung,tmu_min_efuse_value = <40>; | ||
20 | samsung,tmu_max_efuse_value = <100>; | ||
21 | samsung,tmu_first_point_trim = <25>; | ||
22 | samsung,tmu_second_point_trim = <85>; | ||
23 | samsung,tmu_default_temp_offset = <50>; | ||
24 | samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>; | ||
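These sources are run through the C preprocessor before dtc, so a #include placed inside a node body splices the fragment's properties straight into that node; that is how the calibration block above is shared by every TMU instance in the exynos5250 and exynos5420 hunks below. The expanded result for one sensor looks roughly like this (register details taken from the exynos5250 hunk, the rest from the fragment):

    tmu: tmu@10060000 {
    	compatible = "samsung,exynos5250-tmu";
    	reg = <0x10060000 0x100>;
    	clocks = <&clock CLK_TMU>;
    	clock-names = "tmu_apbif";
    	/* spliced in from exynos4412-tmu-sensor-conf.dtsi */
    	#thermal-sensor-cells = <0>;
    	samsung,tmu_gain = <8>;
    	samsung,tmu_reference_voltage = <16>;
    	samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
    	/* ... remaining samsung,tmu_* properties ... */
    };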
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index 21f748083586..173ffa479ad3 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts | |||
@@ -927,6 +927,21 @@ | |||
927 | pulldown-ohm = <100000>; /* 100K */ | 927 | pulldown-ohm = <100000>; /* 100K */ |
928 | io-channels = <&adc 2>; /* Battery temperature */ | 928 | io-channels = <&adc 2>; /* Battery temperature */ |
929 | }; | 929 | }; |
930 | |||
931 | thermal-zones { | ||
932 | cpu_thermal: cpu-thermal { | ||
933 | cooling-maps { | ||
934 | map0 { | ||
935 | /* Corresponds to 800MHz at freq_table */ | ||
936 | cooling-device = <&cpu0 7 7>; | ||
937 | }; | ||
938 | map1 { | ||
939 | /* Corresponds to 200MHz at freq_table */ | ||
940 | cooling-device = <&cpu0 13 13>; | ||
941 | }; | ||
942 | }; | ||
943 | }; | ||
944 | }; | ||
930 | }; | 945 | }; |
931 | 946 | ||
932 | &pmu_system_controller { | 947 | &pmu_system_controller { |
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi index 0f6ec93bb1d8..68ad43b391ae 100644 --- a/arch/arm/boot/dts/exynos4412.dtsi +++ b/arch/arm/boot/dts/exynos4412.dtsi | |||
@@ -26,10 +26,13 @@ | |||
26 | #address-cells = <1>; | 26 | #address-cells = <1>; |
27 | #size-cells = <0>; | 27 | #size-cells = <0>; |
28 | 28 | ||
29 | cpu@A00 { | 29 | cpu0: cpu@A00 { |
30 | device_type = "cpu"; | 30 | device_type = "cpu"; |
31 | compatible = "arm,cortex-a9"; | 31 | compatible = "arm,cortex-a9"; |
32 | reg = <0xA00>; | 32 | reg = <0xA00>; |
33 | cooling-min-level = <13>; | ||
34 | cooling-max-level = <7>; | ||
35 | #cooling-cells = <2>; /* min followed by max */ | ||
33 | }; | 36 | }; |
34 | 37 | ||
35 | cpu@A01 { | 38 | cpu@A01 { |
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi index f5e0ae780d6c..6a6abe14fd9b 100644 --- a/arch/arm/boot/dts/exynos4x12.dtsi +++ b/arch/arm/boot/dts/exynos4x12.dtsi | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include "exynos4.dtsi" | 20 | #include "exynos4.dtsi" |
21 | #include "exynos4x12-pinctrl.dtsi" | 21 | #include "exynos4x12-pinctrl.dtsi" |
22 | #include "exynos4-cpu-thermal.dtsi" | ||
22 | 23 | ||
23 | / { | 24 | / { |
24 | aliases { | 25 | aliases { |
@@ -297,4 +298,15 @@ | |||
297 | clock-names = "tmu_apbif"; | 298 | clock-names = "tmu_apbif"; |
298 | status = "disabled"; | 299 | status = "disabled"; |
299 | }; | 300 | }; |
301 | |||
302 | hdmi: hdmi@12D00000 { | ||
303 | compatible = "samsung,exynos4212-hdmi"; | ||
304 | }; | ||
305 | |||
306 | mixer: mixer@12C10000 { | ||
307 | compatible = "samsung,exynos4212-mixer"; | ||
308 | clock-names = "mixer", "hdmi", "sclk_hdmi", "vp"; | ||
309 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, | ||
310 | <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>; | ||
311 | }; | ||
300 | }; | 312 | }; |
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 9bb1b0b738f5..adbde1adad95 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <dt-bindings/clock/exynos5250.h> | 20 | #include <dt-bindings/clock/exynos5250.h> |
21 | #include "exynos5.dtsi" | 21 | #include "exynos5.dtsi" |
22 | #include "exynos5250-pinctrl.dtsi" | 22 | #include "exynos5250-pinctrl.dtsi" |
23 | 23 | #include "exynos4-cpu-thermal.dtsi" | |
24 | #include <dt-bindings/clock/exynos-audss-clk.h> | 24 | #include <dt-bindings/clock/exynos-audss-clk.h> |
25 | 25 | ||
26 | / { | 26 | / { |
@@ -58,11 +58,14 @@ | |||
58 | #address-cells = <1>; | 58 | #address-cells = <1>; |
59 | #size-cells = <0>; | 59 | #size-cells = <0>; |
60 | 60 | ||
61 | cpu@0 { | 61 | cpu0: cpu@0 { |
62 | device_type = "cpu"; | 62 | device_type = "cpu"; |
63 | compatible = "arm,cortex-a15"; | 63 | compatible = "arm,cortex-a15"; |
64 | reg = <0>; | 64 | reg = <0>; |
65 | clock-frequency = <1700000000>; | 65 | clock-frequency = <1700000000>; |
66 | cooling-min-level = <15>; | ||
67 | cooling-max-level = <9>; | ||
68 | #cooling-cells = <2>; /* min followed by max */ | ||
66 | }; | 69 | }; |
67 | cpu@1 { | 70 | cpu@1 { |
68 | device_type = "cpu"; | 71 | device_type = "cpu"; |
@@ -102,6 +105,12 @@ | |||
102 | #power-domain-cells = <0>; | 105 | #power-domain-cells = <0>; |
103 | }; | 106 | }; |
104 | 107 | ||
108 | pd_disp1: disp1-power-domain@100440A0 { | ||
109 | compatible = "samsung,exynos4210-pd"; | ||
110 | reg = <0x100440A0 0x20>; | ||
111 | #power-domain-cells = <0>; | ||
112 | }; | ||
113 | |||
105 | clock: clock-controller@10010000 { | 114 | clock: clock-controller@10010000 { |
106 | compatible = "samsung,exynos5250-clock"; | 115 | compatible = "samsung,exynos5250-clock"; |
107 | reg = <0x10010000 0x30000>; | 116 | reg = <0x10010000 0x30000>; |
@@ -235,12 +244,32 @@ | |||
235 | status = "disabled"; | 244 | status = "disabled"; |
236 | }; | 245 | }; |
237 | 246 | ||
238 | tmu@10060000 { | 247 | tmu: tmu@10060000 { |
239 | compatible = "samsung,exynos5250-tmu"; | 248 | compatible = "samsung,exynos5250-tmu"; |
240 | reg = <0x10060000 0x100>; | 249 | reg = <0x10060000 0x100>; |
241 | interrupts = <0 65 0>; | 250 | interrupts = <0 65 0>; |
242 | clocks = <&clock CLK_TMU>; | 251 | clocks = <&clock CLK_TMU>; |
243 | clock-names = "tmu_apbif"; | 252 | clock-names = "tmu_apbif"; |
253 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
254 | }; | ||
255 | |||
256 | thermal-zones { | ||
257 | cpu_thermal: cpu-thermal { | ||
258 | polling-delay-passive = <0>; | ||
259 | polling-delay = <0>; | ||
260 | thermal-sensors = <&tmu 0>; | ||
261 | |||
262 | cooling-maps { | ||
263 | map0 { | ||
264 | /* Corresponds to 800MHz at freq_table */ | ||
265 | cooling-device = <&cpu0 9 9>; | ||
266 | }; | ||
267 | map1 { | ||
268 | /* Corresponds to 200MHz at freq_table */ | ||
269 | cooling-device = <&cpu0 15 15>; | ||
270 | }; | ||
271 | }; | ||
272 | }; | ||
244 | }; | 273 | }; |
245 | 274 | ||
246 | serial@12C00000 { | 275 | serial@12C00000 { |
@@ -719,6 +748,7 @@ | |||
719 | hdmi: hdmi { | 748 | hdmi: hdmi { |
720 | compatible = "samsung,exynos4212-hdmi"; | 749 | compatible = "samsung,exynos4212-hdmi"; |
721 | reg = <0x14530000 0x70000>; | 750 | reg = <0x14530000 0x70000>; |
751 | power-domains = <&pd_disp1>; | ||
722 | interrupts = <0 95 0>; | 752 | interrupts = <0 95 0>; |
723 | clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, | 753 | clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, |
724 | <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, | 754 | <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, |
@@ -731,9 +761,11 @@ | |||
731 | mixer { | 761 | mixer { |
732 | compatible = "samsung,exynos5250-mixer"; | 762 | compatible = "samsung,exynos5250-mixer"; |
733 | reg = <0x14450000 0x10000>; | 763 | reg = <0x14450000 0x10000>; |
764 | power-domains = <&pd_disp1>; | ||
734 | interrupts = <0 94 0>; | 765 | interrupts = <0 94 0>; |
735 | clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; | 766 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, |
736 | clock-names = "mixer", "sclk_hdmi"; | 767 | <&clock CLK_SCLK_HDMI>; |
768 | clock-names = "mixer", "hdmi", "sclk_hdmi"; | ||
737 | }; | 769 | }; |
738 | 770 | ||
739 | dp_phy: video-phy@10040720 { | 771 | dp_phy: video-phy@10040720 { |
@@ -743,6 +775,7 @@ | |||
743 | }; | 775 | }; |
744 | 776 | ||
745 | dp: dp-controller@145B0000 { | 777 | dp: dp-controller@145B0000 { |
778 | power-domains = <&pd_disp1>; | ||
746 | clocks = <&clock CLK_DP>; | 779 | clocks = <&clock CLK_DP>; |
747 | clock-names = "dp"; | 780 | clock-names = "dp"; |
748 | phys = <&dp_phy>; | 781 | phys = <&dp_phy>; |
@@ -750,6 +783,7 @@ | |||
750 | }; | 783 | }; |
751 | 784 | ||
752 | fimd: fimd@14400000 { | 785 | fimd: fimd@14400000 { |
786 | power-domains = <&pd_disp1>; | ||
753 | clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>; | 787 | clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>; |
754 | clock-names = "sclk_fimd", "fimd"; | 788 | clock-names = "sclk_fimd", "fimd"; |
755 | }; | 789 | }; |
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi new file mode 100644 index 000000000000..5d31fc140823 --- /dev/null +++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Device tree sources for default Exynos5420 thermal zone definition | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | polling-delay-passive = <0>; | ||
13 | polling-delay = <0>; | ||
14 | trips { | ||
15 | cpu-alert-0 { | ||
16 | temperature = <85000>; /* millicelsius */ | ||
17 | hysteresis = <10000>; /* millicelsius */ | ||
18 | type = "active"; | ||
19 | }; | ||
20 | cpu-alert-1 { | ||
21 | temperature = <103000>; /* millicelsius */ | ||
22 | hysteresis = <10000>; /* millicelsius */ | ||
23 | type = "active"; | ||
24 | }; | ||
25 | cpu-alert-2 { | ||
26 | temperature = <110000>; /* millicelsius */ | ||
27 | hysteresis = <10000>; /* millicelsius */ | ||
28 | type = "active"; | ||
29 | }; | ||
30 | cpu-crit-0 { | ||
31 | temperature = <120000>; /* millicelsius */ | ||
32 | hysteresis = <0>; /* millicelsius */ | ||
33 | type = "critical"; | ||
34 | }; | ||
35 | }; | ||
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index 9dc2e9773b30..c0e98cf3514f 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi | |||
@@ -740,8 +740,9 @@ | |||
740 | compatible = "samsung,exynos5420-mixer"; | 740 | compatible = "samsung,exynos5420-mixer"; |
741 | reg = <0x14450000 0x10000>; | 741 | reg = <0x14450000 0x10000>; |
742 | interrupts = <0 94 0>; | 742 | interrupts = <0 94 0>; |
743 | clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; | 743 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, |
744 | clock-names = "mixer", "sclk_hdmi"; | 744 | <&clock CLK_SCLK_HDMI>; |
745 | clock-names = "mixer", "hdmi", "sclk_hdmi"; | ||
745 | power-domains = <&disp_pd>; | 746 | power-domains = <&disp_pd>; |
746 | }; | 747 | }; |
747 | 748 | ||
@@ -782,6 +783,7 @@ | |||
782 | interrupts = <0 65 0>; | 783 | interrupts = <0 65 0>; |
783 | clocks = <&clock CLK_TMU>; | 784 | clocks = <&clock CLK_TMU>; |
784 | clock-names = "tmu_apbif"; | 785 | clock-names = "tmu_apbif"; |
786 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
785 | }; | 787 | }; |
786 | 788 | ||
787 | tmu_cpu1: tmu@10064000 { | 789 | tmu_cpu1: tmu@10064000 { |
@@ -790,6 +792,7 @@ | |||
790 | interrupts = <0 183 0>; | 792 | interrupts = <0 183 0>; |
791 | clocks = <&clock CLK_TMU>; | 793 | clocks = <&clock CLK_TMU>; |
792 | clock-names = "tmu_apbif"; | 794 | clock-names = "tmu_apbif"; |
795 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
793 | }; | 796 | }; |
794 | 797 | ||
795 | tmu_cpu2: tmu@10068000 { | 798 | tmu_cpu2: tmu@10068000 { |
@@ -798,6 +801,7 @@ | |||
798 | interrupts = <0 184 0>; | 801 | interrupts = <0 184 0>; |
799 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU>; | 802 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU>; |
800 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 803 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
804 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
801 | }; | 805 | }; |
802 | 806 | ||
803 | tmu_cpu3: tmu@1006c000 { | 807 | tmu_cpu3: tmu@1006c000 { |
@@ -806,6 +810,7 @@ | |||
806 | interrupts = <0 185 0>; | 810 | interrupts = <0 185 0>; |
807 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>; | 811 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>; |
808 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 812 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
813 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
809 | }; | 814 | }; |
810 | 815 | ||
811 | tmu_gpu: tmu@100a0000 { | 816 | tmu_gpu: tmu@100a0000 { |
@@ -814,6 +819,30 @@ | |||
814 | interrupts = <0 215 0>; | 819 | interrupts = <0 215 0>; |
815 | clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>; | 820 | clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>; |
816 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 821 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
822 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
823 | }; | ||
824 | |||
825 | thermal-zones { | ||
826 | cpu0_thermal: cpu0-thermal { | ||
827 | thermal-sensors = <&tmu_cpu0>; | ||
828 | #include "exynos5420-trip-points.dtsi" | ||
829 | }; | ||
830 | cpu1_thermal: cpu1-thermal { | ||
831 | thermal-sensors = <&tmu_cpu1>; | ||
832 | #include "exynos5420-trip-points.dtsi" | ||
833 | }; | ||
834 | cpu2_thermal: cpu2-thermal { | ||
835 | thermal-sensors = <&tmu_cpu2>; | ||
836 | #include "exynos5420-trip-points.dtsi" | ||
837 | }; | ||
838 | cpu3_thermal: cpu3-thermal { | ||
839 | thermal-sensors = <&tmu_cpu3>; | ||
840 | #include "exynos5420-trip-points.dtsi" | ||
841 | }; | ||
842 | gpu_thermal: gpu-thermal { | ||
843 | thermal-sensors = <&tmu_gpu>; | ||
844 | #include "exynos5420-trip-points.dtsi" | ||
845 | }; | ||
817 | }; | 846 | }; |
818 | 847 | ||
819 | watchdog: watchdog@101D0000 { | 848 | watchdog: watchdog@101D0000 { |
diff --git a/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi new file mode 100644 index 000000000000..7b2fba0ae92b --- /dev/null +++ b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Device tree sources for Exynos5440 TMU sensor configuration | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <dt-bindings/thermal/thermal_exynos.h> | ||
13 | |||
14 | #thermal-sensor-cells = <0>; | ||
15 | samsung,tmu_gain = <5>; | ||
16 | samsung,tmu_reference_voltage = <16>; | ||
17 | samsung,tmu_noise_cancel_mode = <4>; | ||
18 | samsung,tmu_efuse_value = <0x5d2d>; | ||
19 | samsung,tmu_min_efuse_value = <16>; | ||
20 | samsung,tmu_max_efuse_value = <76>; | ||
21 | samsung,tmu_first_point_trim = <25>; | ||
22 | samsung,tmu_second_point_trim = <70>; | ||
23 | samsung,tmu_default_temp_offset = <25>; | ||
24 | samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>; | ||
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi new file mode 100644 index 000000000000..48adfa8f4300 --- /dev/null +++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Device tree sources for default Exynos5440 thermal zone definition | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | polling-delay-passive = <0>; | ||
13 | polling-delay = <0>; | ||
14 | trips { | ||
15 | cpu-alert-0 { | ||
16 | temperature = <100000>; /* millicelsius */ | ||
17 | hysteresis = <0>; /* millicelsius */ | ||
18 | type = "active"; | ||
19 | }; | ||
20 | cpu-crit-0 { | ||
21 | temperature = <105000>; /* millicelsius */ | ||
22 | hysteresis = <0>; /* millicelsius */ | ||
23 | type = "critical"; | ||
24 | }; | ||
25 | }; | ||
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi index 8f3373cd7b87..59d9416b3b03 100644 --- a/arch/arm/boot/dts/exynos5440.dtsi +++ b/arch/arm/boot/dts/exynos5440.dtsi | |||
@@ -219,6 +219,7 @@ | |||
219 | interrupts = <0 58 0>; | 219 | interrupts = <0 58 0>; |
220 | clocks = <&clock CLK_B_125>; | 220 | clocks = <&clock CLK_B_125>; |
221 | clock-names = "tmu_apbif"; | 221 | clock-names = "tmu_apbif"; |
222 | #include "exynos5440-tmu-sensor-conf.dtsi" | ||
222 | }; | 223 | }; |
223 | 224 | ||
224 | tmuctrl_1: tmuctrl@16011C { | 225 | tmuctrl_1: tmuctrl@16011C { |
@@ -227,6 +228,7 @@ | |||
227 | interrupts = <0 58 0>; | 228 | interrupts = <0 58 0>; |
228 | clocks = <&clock CLK_B_125>; | 229 | clocks = <&clock CLK_B_125>; |
229 | clock-names = "tmu_apbif"; | 230 | clock-names = "tmu_apbif"; |
231 | #include "exynos5440-tmu-sensor-conf.dtsi" | ||
230 | }; | 232 | }; |
231 | 233 | ||
232 | tmuctrl_2: tmuctrl@160120 { | 234 | tmuctrl_2: tmuctrl@160120 { |
@@ -235,6 +237,22 @@ | |||
235 | interrupts = <0 58 0>; | 237 | interrupts = <0 58 0>; |
236 | clocks = <&clock CLK_B_125>; | 238 | clocks = <&clock CLK_B_125>; |
237 | clock-names = "tmu_apbif"; | 239 | clock-names = "tmu_apbif"; |
240 | #include "exynos5440-tmu-sensor-conf.dtsi" | ||
241 | }; | ||
242 | |||
243 | thermal-zones { | ||
244 | cpu0_thermal: cpu0-thermal { | ||
245 | thermal-sensors = <&tmuctrl_0>; | ||
246 | #include "exynos5440-trip-points.dtsi" | ||
247 | }; | ||
248 | cpu1_thermal: cpu1-thermal { | ||
249 | thermal-sensors = <&tmuctrl_1>; | ||
250 | #include "exynos5440-trip-points.dtsi" | ||
251 | }; | ||
252 | cpu2_thermal: cpu2-thermal { | ||
253 | thermal-sensors = <&tmuctrl_2>; | ||
254 | #include "exynos5440-trip-points.dtsi" | ||
255 | }; | ||
238 | }; | 256 | }; |
239 | 257 | ||
240 | sata@210000 { | 258 | sata@210000 { |
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index f1cd2147421d..a626e6dd8022 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi | |||
@@ -35,6 +35,7 @@ | |||
35 | regulator-max-microvolt = <5000000>; | 35 | regulator-max-microvolt = <5000000>; |
36 | gpio = <&gpio3 22 0>; | 36 | gpio = <&gpio3 22 0>; |
37 | enable-active-high; | 37 | enable-active-high; |
38 | vin-supply = <&swbst_reg>; | ||
38 | }; | 39 | }; |
39 | 40 | ||
40 | reg_usb_h1_vbus: regulator@1 { | 41 | reg_usb_h1_vbus: regulator@1 { |
@@ -45,6 +46,7 @@ | |||
45 | regulator-max-microvolt = <5000000>; | 46 | regulator-max-microvolt = <5000000>; |
46 | gpio = <&gpio1 29 0>; | 47 | gpio = <&gpio1 29 0>; |
47 | enable-active-high; | 48 | enable-active-high; |
49 | vin-supply = <&swbst_reg>; | ||
48 | }; | 50 | }; |
49 | 51 | ||
50 | reg_audio: regulator@2 { | 52 | reg_audio: regulator@2 { |
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index fda4932faefd..945887d3fdb3 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts | |||
@@ -52,6 +52,7 @@ | |||
52 | regulator-max-microvolt = <5000000>; | 52 | regulator-max-microvolt = <5000000>; |
53 | gpio = <&gpio4 0 0>; | 53 | gpio = <&gpio4 0 0>; |
54 | enable-active-high; | 54 | enable-active-high; |
55 | vin-supply = <&swbst_reg>; | ||
55 | }; | 56 | }; |
56 | 57 | ||
57 | reg_usb_otg2_vbus: regulator@1 { | 58 | reg_usb_otg2_vbus: regulator@1 { |
@@ -62,6 +63,7 @@ | |||
62 | regulator-max-microvolt = <5000000>; | 63 | regulator-max-microvolt = <5000000>; |
63 | gpio = <&gpio4 2 0>; | 64 | gpio = <&gpio4 2 0>; |
64 | enable-active-high; | 65 | enable-active-high; |
66 | vin-supply = <&swbst_reg>; | ||
65 | }; | 67 | }; |
66 | 68 | ||
67 | reg_aud3v: regulator@2 { | 69 | reg_aud3v: regulator@2 { |
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index f4f78c40b564..3fdc84fddb70 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi | |||
@@ -92,6 +92,8 @@ | |||
92 | ti,hwmods = "aes"; | 92 | ti,hwmods = "aes"; |
93 | reg = <0x480c5000 0x50>; | 93 | reg = <0x480c5000 0x50>; |
94 | interrupts = <0>; | 94 | interrupts = <0>; |
95 | dmas = <&sdma 65 &sdma 66>; | ||
96 | dma-names = "tx", "rx"; | ||
95 | }; | 97 | }; |
96 | 98 | ||
97 | prm: prm@48306000 { | 99 | prm: prm@48306000 { |
@@ -550,6 +552,8 @@ | |||
550 | ti,hwmods = "sham"; | 552 | ti,hwmods = "sham"; |
551 | reg = <0x480c3000 0x64>; | 553 | reg = <0x480c3000 0x64>; |
552 | interrupts = <49>; | 554 | interrupts = <49>; |
555 | dmas = <&sdma 69>; | ||
556 | dma-names = "rx"; | ||
553 | }; | 557 | }; |
554 | 558 | ||
555 | smartreflex_core: smartreflex@480cb000 { | 559 | smartreflex_core: smartreflex@480cb000 { |
diff --git a/arch/arm/boot/dts/omap5-core-thermal.dtsi b/arch/arm/boot/dts/omap5-core-thermal.dtsi index 19212ac6eef0..de8a3d456cf7 100644 --- a/arch/arm/boot/dts/omap5-core-thermal.dtsi +++ b/arch/arm/boot/dts/omap5-core-thermal.dtsi | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | core_thermal: core_thermal { | 14 | core_thermal: core_thermal { |
15 | polling-delay-passive = <250>; /* milliseconds */ | 15 | polling-delay-passive = <250>; /* milliseconds */ |
16 | polling-delay = <1000>; /* milliseconds */ | 16 | polling-delay = <500>; /* milliseconds */ |
17 | 17 | ||
18 | /* sensor ID */ | 18 | /* sensor ID */ |
19 | thermal-sensors = <&bandgap 2>; | 19 | thermal-sensors = <&bandgap 2>; |
diff --git a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi index 1b87aca88b77..bc3090f2e84b 100644 --- a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi +++ b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | gpu_thermal: gpu_thermal { | 14 | gpu_thermal: gpu_thermal { |
15 | polling-delay-passive = <250>; /* milliseconds */ | 15 | polling-delay-passive = <250>; /* milliseconds */ |
16 | polling-delay = <1000>; /* milliseconds */ | 16 | polling-delay = <500>; /* milliseconds */ |
17 | 17 | ||
18 | /* sensor ID */ | 18 | /* sensor ID */ |
19 | thermal-sensors = <&bandgap 1>; | 19 | thermal-sensors = <&bandgap 1>; |
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index ddff674bd05e..4a485b63a141 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi | |||
@@ -1079,4 +1079,8 @@ | |||
1079 | }; | 1079 | }; |
1080 | }; | 1080 | }; |
1081 | 1081 | ||
1082 | &cpu_thermal { | ||
1083 | polling-delay = <500>; /* milliseconds */ | ||
1084 | }; | ||
1085 | |||
1082 | /include/ "omap54xx-clocks.dtsi" | 1086 | /include/ "omap54xx-clocks.dtsi" |
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi index 58c27466f012..83b425fb3ac2 100644 --- a/arch/arm/boot/dts/omap54xx-clocks.dtsi +++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi | |||
@@ -167,10 +167,18 @@ | |||
167 | ti,index-starts-at-one; | 167 | ti,index-starts-at-one; |
168 | }; | 168 | }; |
169 | 169 | ||
170 | dpll_core_byp_mux: dpll_core_byp_mux { | ||
171 | #clock-cells = <0>; | ||
172 | compatible = "ti,mux-clock"; | ||
173 | clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; | ||
174 | ti,bit-shift = <23>; | ||
175 | reg = <0x012c>; | ||
176 | }; | ||
177 | |||
170 | dpll_core_ck: dpll_core_ck { | 178 | dpll_core_ck: dpll_core_ck { |
171 | #clock-cells = <0>; | 179 | #clock-cells = <0>; |
172 | compatible = "ti,omap4-dpll-core-clock"; | 180 | compatible = "ti,omap4-dpll-core-clock"; |
173 | clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; | 181 | clocks = <&sys_clkin>, <&dpll_core_byp_mux>; |
174 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; | 182 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; |
175 | }; | 183 | }; |
176 | 184 | ||
@@ -294,10 +302,18 @@ | |||
294 | clock-div = <1>; | 302 | clock-div = <1>; |
295 | }; | 303 | }; |
296 | 304 | ||
305 | dpll_iva_byp_mux: dpll_iva_byp_mux { | ||
306 | #clock-cells = <0>; | ||
307 | compatible = "ti,mux-clock"; | ||
308 | clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; | ||
309 | ti,bit-shift = <23>; | ||
310 | reg = <0x01ac>; | ||
311 | }; | ||
312 | |||
297 | dpll_iva_ck: dpll_iva_ck { | 313 | dpll_iva_ck: dpll_iva_ck { |
298 | #clock-cells = <0>; | 314 | #clock-cells = <0>; |
299 | compatible = "ti,omap4-dpll-clock"; | 315 | compatible = "ti,omap4-dpll-clock"; |
300 | clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; | 316 | clocks = <&sys_clkin>, <&dpll_iva_byp_mux>; |
301 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; | 317 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; |
302 | }; | 318 | }; |
303 | 319 | ||
@@ -599,10 +615,19 @@ | |||
599 | }; | 615 | }; |
600 | }; | 616 | }; |
601 | &cm_core_clocks { | 617 | &cm_core_clocks { |
618 | |||
619 | dpll_per_byp_mux: dpll_per_byp_mux { | ||
620 | #clock-cells = <0>; | ||
621 | compatible = "ti,mux-clock"; | ||
622 | clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; | ||
623 | ti,bit-shift = <23>; | ||
624 | reg = <0x014c>; | ||
625 | }; | ||
626 | |||
602 | dpll_per_ck: dpll_per_ck { | 627 | dpll_per_ck: dpll_per_ck { |
603 | #clock-cells = <0>; | 628 | #clock-cells = <0>; |
604 | compatible = "ti,omap4-dpll-clock"; | 629 | compatible = "ti,omap4-dpll-clock"; |
605 | clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; | 630 | clocks = <&sys_clkin>, <&dpll_per_byp_mux>; |
606 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; | 631 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; |
607 | }; | 632 | }; |
608 | 633 | ||
@@ -714,10 +739,18 @@ | |||
714 | ti,index-starts-at-one; | 739 | ti,index-starts-at-one; |
715 | }; | 740 | }; |
716 | 741 | ||
742 | dpll_usb_byp_mux: dpll_usb_byp_mux { | ||
743 | #clock-cells = <0>; | ||
744 | compatible = "ti,mux-clock"; | ||
745 | clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; | ||
746 | ti,bit-shift = <23>; | ||
747 | reg = <0x018c>; | ||
748 | }; | ||
749 | |||
717 | dpll_usb_ck: dpll_usb_ck { | 750 | dpll_usb_ck: dpll_usb_ck { |
718 | #clock-cells = <0>; | 751 | #clock-cells = <0>; |
719 | compatible = "ti,omap4-dpll-j-type-clock"; | 752 | compatible = "ti,omap4-dpll-j-type-clock"; |
720 | clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; | 753 | clocks = <&sys_clkin>, <&dpll_usb_byp_mux>; |
721 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; | 754 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; |
722 | }; | 755 | }; |
723 | 756 | ||
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index d771f687a13b..eccc78d3220b 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi | |||
@@ -411,6 +411,7 @@ | |||
411 | "mac_clk_rx", "mac_clk_tx", | 411 | "mac_clk_rx", "mac_clk_tx", |
412 | "clk_mac_ref", "clk_mac_refout", | 412 | "clk_mac_ref", "clk_mac_refout", |
413 | "aclk_mac", "pclk_mac"; | 413 | "aclk_mac", "pclk_mac"; |
414 | status = "disabled"; | ||
414 | }; | 415 | }; |
415 | 416 | ||
416 | usb_host0_ehci: usb@ff500000 { | 417 | usb_host0_ehci: usb@ff500000 { |
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 261311bdf65b..367af53c1b84 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi | |||
@@ -1248,7 +1248,6 @@ | |||
1248 | atmel,watchdog-type = "hardware"; | 1248 | atmel,watchdog-type = "hardware"; |
1249 | atmel,reset-type = "all"; | 1249 | atmel,reset-type = "all"; |
1250 | atmel,dbg-halt; | 1250 | atmel,dbg-halt; |
1251 | atmel,idle-halt; | ||
1252 | status = "disabled"; | 1251 | status = "disabled"; |
1253 | }; | 1252 | }; |
1254 | 1253 | ||
@@ -1416,7 +1415,7 @@ | |||
1416 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 1415 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
1417 | reg = <0x00700000 0x100000>; | 1416 | reg = <0x00700000 0x100000>; |
1418 | interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; | 1417 | interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; |
1419 | clocks = <&usb>, <&uhphs_clk>, <&uhpck>; | 1418 | clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; |
1420 | clock-names = "usb_clk", "ehci_clk", "uhpck"; | 1419 | clock-names = "usb_clk", "ehci_clk", "uhpck"; |
1421 | status = "disabled"; | 1420 | status = "disabled"; |
1422 | }; | 1421 | }; |
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index d986b41b9654..4303874889c6 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi | |||
@@ -66,6 +66,7 @@ | |||
66 | gpio4 = &pioE; | 66 | gpio4 = &pioE; |
67 | tcb0 = &tcb0; | 67 | tcb0 = &tcb0; |
68 | tcb1 = &tcb1; | 68 | tcb1 = &tcb1; |
69 | i2c0 = &i2c0; | ||
69 | i2c2 = &i2c2; | 70 | i2c2 = &i2c2; |
70 | }; | 71 | }; |
71 | cpus { | 72 | cpus { |
@@ -259,7 +260,7 @@ | |||
259 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 260 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
260 | reg = <0x00600000 0x100000>; | 261 | reg = <0x00600000 0x100000>; |
261 | interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>; | 262 | interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>; |
262 | clocks = <&usb>, <&uhphs_clk>, <&uhpck>; | 263 | clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; |
263 | clock-names = "usb_clk", "ehci_clk", "uhpck"; | 264 | clock-names = "usb_clk", "ehci_clk", "uhpck"; |
264 | status = "disabled"; | 265 | status = "disabled"; |
265 | }; | 266 | }; |
@@ -461,8 +462,8 @@ | |||
461 | 462 | ||
462 | lcdck: lcdck { | 463 | lcdck: lcdck { |
463 | #clock-cells = <0>; | 464 | #clock-cells = <0>; |
464 | reg = <4>; | 465 | reg = <3>; |
465 | clocks = <&smd>; | 466 | clocks = <&mck>; |
466 | }; | 467 | }; |
467 | 468 | ||
468 | smdck: smdck { | 469 | smdck: smdck { |
@@ -770,7 +771,7 @@ | |||
770 | reg = <50>; | 771 | reg = <50>; |
771 | }; | 772 | }; |
772 | 773 | ||
773 | lcd_clk: lcd_clk { | 774 | lcdc_clk: lcdc_clk { |
774 | #clock-cells = <0>; | 775 | #clock-cells = <0>; |
775 | reg = <51>; | 776 | reg = <51>; |
776 | }; | 777 | }; |
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 252c3d1bda50..d9176e606173 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi | |||
@@ -660,7 +660,7 @@ | |||
660 | #address-cells = <1>; | 660 | #address-cells = <1>; |
661 | #size-cells = <0>; | 661 | #size-cells = <0>; |
662 | reg = <0xfff01000 0x1000>; | 662 | reg = <0xfff01000 0x1000>; |
663 | interrupts = <0 156 4>; | 663 | interrupts = <0 155 4>; |
664 | num-cs = <4>; | 664 | num-cs = <4>; |
665 | clocks = <&spi_m_clk>; | 665 | clocks = <&spi_m_clk>; |
666 | status = "disabled"; | 666 | status = "disabled"; |
@@ -713,6 +713,9 @@ | |||
713 | reg-shift = <2>; | 713 | reg-shift = <2>; |
714 | reg-io-width = <4>; | 714 | reg-io-width = <4>; |
715 | clocks = <&l4_sp_clk>; | 715 | clocks = <&l4_sp_clk>; |
716 | dmas = <&pdma 28>, | ||
717 | <&pdma 29>; | ||
718 | dma-names = "tx", "rx"; | ||
716 | }; | 719 | }; |
717 | 720 | ||
718 | uart1: serial1@ffc03000 { | 721 | uart1: serial1@ffc03000 { |
@@ -722,6 +725,9 @@ | |||
722 | reg-shift = <2>; | 725 | reg-shift = <2>; |
723 | reg-io-width = <4>; | 726 | reg-io-width = <4>; |
724 | clocks = <&l4_sp_clk>; | 727 | clocks = <&l4_sp_clk>; |
728 | dmas = <&pdma 30>, | ||
729 | <&pdma 31>; | ||
730 | dma-names = "tx", "rx"; | ||
725 | }; | 731 | }; |
726 | 732 | ||
727 | rst: rstmgr@ffd05000 { | 733 | rst: rstmgr@ffd05000 { |
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts index ab7891c43231..75742f8f96f3 100644 --- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts +++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts | |||
@@ -56,6 +56,22 @@ | |||
56 | model = "Olimex A10-OLinuXino-LIME"; | 56 | model = "Olimex A10-OLinuXino-LIME"; |
57 | compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; | 57 | compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; |
58 | 58 | ||
59 | cpus { | ||
60 | cpu0: cpu@0 { | ||
61 | /* | ||
62 | * The A10-Lime is known to be unstable | ||
63 | * when running at 1008 MHz | ||
64 | */ | ||
65 | operating-points = < | ||
66 | /* kHz uV */ | ||
67 | 912000 1350000 | ||
68 | 864000 1300000 | ||
69 | 624000 1250000 | ||
70 | >; | ||
71 | cooling-max-level = <2>; | ||
72 | }; | ||
73 | }; | ||
74 | |||
59 | soc@01c00000 { | 75 | soc@01c00000 { |
60 | emac: ethernet@01c0b000 { | 76 | emac: ethernet@01c0b000 { |
61 | pinctrl-names = "default"; | 77 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 5c2925831f20..eebb7853e00b 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi | |||
@@ -75,7 +75,6 @@ | |||
75 | clock-latency = <244144>; /* 8 32k periods */ | 75 | clock-latency = <244144>; /* 8 32k periods */ |
76 | operating-points = < | 76 | operating-points = < |
77 | /* kHz uV */ | 77 | /* kHz uV */ |
78 | 1056000 1500000 | ||
79 | 1008000 1400000 | 78 | 1008000 1400000 |
80 | 912000 1350000 | 79 | 912000 1350000 |
81 | 864000 1300000 | 80 | 864000 1300000 |
@@ -83,7 +82,7 @@ | |||
83 | >; | 82 | >; |
84 | #cooling-cells = <2>; | 83 | #cooling-cells = <2>; |
85 | cooling-min-level = <0>; | 84 | cooling-min-level = <0>; |
86 | cooling-max-level = <4>; | 85 | cooling-max-level = <3>; |
87 | }; | 86 | }; |
88 | }; | 87 | }; |
89 | 88 | ||
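The removed row was the highest operating point, and since cpufreq cooling states are counted down the operating-points list, cooling-max-level has to drop by one along with it; the same trim is repeated for sun5i and sun7i below. The resulting A10 cpu node, roughly (only the cooling-related properties are shown, and the last row is inferred from the Lime override above):

    cpu0: cpu@0 {
    	operating-points = <
    		/* kHz    uV */
    		1008000 1400000		/* cooling state 0 */
    		912000  1350000		/* cooling state 1 */
    		864000  1300000		/* cooling state 2 */
    		624000  1250000		/* cooling state 3 */
    	>;
    	#cooling-cells = <2>;
    	cooling-min-level = <0>;
    	cooling-max-level = <3>;	/* last valid state once 1056 MHz is gone */
    };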
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index f8818f1edbbe..883cb4873688 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi | |||
@@ -47,7 +47,6 @@ | |||
47 | clock-latency = <244144>; /* 8 32k periods */ | 47 | clock-latency = <244144>; /* 8 32k periods */ |
48 | operating-points = < | 48 | operating-points = < |
49 | /* kHz uV */ | 49 | /* kHz uV */ |
50 | 1104000 1500000 | ||
51 | 1008000 1400000 | 50 | 1008000 1400000 |
52 | 912000 1350000 | 51 | 912000 1350000 |
53 | 864000 1300000 | 52 | 864000 1300000 |
@@ -57,7 +56,7 @@ | |||
57 | >; | 56 | >; |
58 | #cooling-cells = <2>; | 57 | #cooling-cells = <2>; |
59 | cooling-min-level = <0>; | 58 | cooling-min-level = <0>; |
60 | cooling-max-level = <6>; | 59 | cooling-max-level = <5>; |
61 | }; | 60 | }; |
62 | }; | 61 | }; |
63 | 62 | ||
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 3a8530b79f1c..fdd181792b4b 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi | |||
@@ -105,7 +105,6 @@ | |||
105 | clock-latency = <244144>; /* 8 32k periods */ | 105 | clock-latency = <244144>; /* 8 32k periods */ |
106 | operating-points = < | 106 | operating-points = < |
107 | /* kHz uV */ | 107 | /* kHz uV */ |
108 | 1008000 1450000 | ||
109 | 960000 1400000 | 108 | 960000 1400000 |
110 | 912000 1400000 | 109 | 912000 1400000 |
111 | 864000 1300000 | 110 | 864000 1300000 |
@@ -116,7 +115,7 @@ | |||
116 | >; | 115 | >; |
117 | #cooling-cells = <2>; | 116 | #cooling-cells = <2>; |
118 | cooling-min-level = <0>; | 117 | cooling-min-level = <0>; |
119 | cooling-max-level = <7>; | 118 | cooling-max-level = <6>; |
120 | }; | 119 | }; |
121 | 120 | ||
122 | cpu@1 { | 121 | cpu@1 { |
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index f2670f638e97..811e72bbe642 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig | |||
@@ -70,6 +70,7 @@ CONFIG_SCSI=y | |||
70 | CONFIG_BLK_DEV_SD=y | 70 | CONFIG_BLK_DEV_SD=y |
71 | # CONFIG_SCSI_LOWLEVEL is not set | 71 | # CONFIG_SCSI_LOWLEVEL is not set |
72 | CONFIG_NETDEVICES=y | 72 | CONFIG_NETDEVICES=y |
73 | CONFIG_ARM_AT91_ETHER=y | ||
73 | CONFIG_MACB=y | 74 | CONFIG_MACB=y |
74 | # CONFIG_NET_VENDOR_BROADCOM is not set | 75 | # CONFIG_NET_VENDOR_BROADCOM is not set |
75 | CONFIG_DM9000=y | 76 | CONFIG_DM9000=y |
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index b7e6b6fba5e0..06075b6d2463 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig | |||
@@ -99,7 +99,7 @@ CONFIG_PCI_RCAR_GEN2=y | |||
99 | CONFIG_PCI_RCAR_GEN2_PCIE=y | 99 | CONFIG_PCI_RCAR_GEN2_PCIE=y |
100 | CONFIG_PCIEPORTBUS=y | 100 | CONFIG_PCIEPORTBUS=y |
101 | CONFIG_SMP=y | 101 | CONFIG_SMP=y |
102 | CONFIG_NR_CPUS=8 | 102 | CONFIG_NR_CPUS=16 |
103 | CONFIG_HIGHPTE=y | 103 | CONFIG_HIGHPTE=y |
104 | CONFIG_CMA=y | 104 | CONFIG_CMA=y |
105 | CONFIG_ARM_APPENDED_DTB=y | 105 | CONFIG_ARM_APPENDED_DTB=y |
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index a097cffa1231..8e108599e1af 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig | |||
@@ -377,6 +377,7 @@ CONFIG_PWM_TWL=m | |||
377 | CONFIG_PWM_TWL_LED=m | 377 | CONFIG_PWM_TWL_LED=m |
378 | CONFIG_OMAP_USB2=m | 378 | CONFIG_OMAP_USB2=m |
379 | CONFIG_TI_PIPE3=y | 379 | CONFIG_TI_PIPE3=y |
380 | CONFIG_TWL4030_USB=m | ||
380 | CONFIG_EXT2_FS=y | 381 | CONFIG_EXT2_FS=y |
381 | CONFIG_EXT3_FS=y | 382 | CONFIG_EXT3_FS=y |
382 | # CONFIG_EXT3_FS_XATTR is not set | 383 | # CONFIG_EXT3_FS_XATTR is not set |
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index 41d856effe6c..510c747c65b4 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig | |||
@@ -3,8 +3,6 @@ | |||
3 | CONFIG_SYSVIPC=y | 3 | CONFIG_SYSVIPC=y |
4 | CONFIG_IRQ_DOMAIN_DEBUG=y | 4 | CONFIG_IRQ_DOMAIN_DEBUG=y |
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_SYSFS_DEPRECATED=y | ||
7 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
8 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
9 | CONFIG_EMBEDDED=y | 7 | CONFIG_EMBEDDED=y |
10 | CONFIG_SLAB=y | 8 | CONFIG_SLAB=y |
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 38840a812924..8f6a5702b696 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig | |||
@@ -4,6 +4,7 @@ CONFIG_BLK_DEV_INITRD=y | |||
4 | CONFIG_PERF_EVENTS=y | 4 | CONFIG_PERF_EVENTS=y |
5 | CONFIG_ARCH_SUNXI=y | 5 | CONFIG_ARCH_SUNXI=y |
6 | CONFIG_SMP=y | 6 | CONFIG_SMP=y |
7 | CONFIG_NR_CPUS=8 | ||
7 | CONFIG_AEABI=y | 8 | CONFIG_AEABI=y |
8 | CONFIG_HIGHMEM=y | 9 | CONFIG_HIGHMEM=y |
9 | CONFIG_HIGHPTE=y | 10 | CONFIG_HIGHPTE=y |
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig index f489fdaa19b8..37fe607a4ede 100644 --- a/arch/arm/configs/vexpress_defconfig +++ b/arch/arm/configs/vexpress_defconfig | |||
@@ -118,8 +118,8 @@ CONFIG_HID_ZEROPLUS=y | |||
118 | CONFIG_USB=y | 118 | CONFIG_USB=y |
119 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y | 119 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y |
120 | CONFIG_USB_MON=y | 120 | CONFIG_USB_MON=y |
121 | CONFIG_USB_ISP1760_HCD=y | ||
122 | CONFIG_USB_STORAGE=y | 121 | CONFIG_USB_STORAGE=y |
122 | CONFIG_USB_ISP1760=y | ||
123 | CONFIG_MMC=y | 123 | CONFIG_MMC=y |
124 | CONFIG_MMC_ARMMMCI=y | 124 | CONFIG_MMC_ARMMMCI=y |
125 | CONFIG_NEW_LEDS=y | 125 | CONFIG_NEW_LEDS=y |
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped index 71e5fc7cfb18..1d1800f71c5b 100644 --- a/arch/arm/crypto/aesbs-core.S_shipped +++ b/arch/arm/crypto/aesbs-core.S_shipped | |||
@@ -58,14 +58,18 @@ | |||
58 | # define VFP_ABI_FRAME 0 | 58 | # define VFP_ABI_FRAME 0 |
59 | # define BSAES_ASM_EXTENDED_KEY | 59 | # define BSAES_ASM_EXTENDED_KEY |
60 | # define XTS_CHAIN_TWEAK | 60 | # define XTS_CHAIN_TWEAK |
61 | # define __ARM_ARCH__ 7 | 61 | # define __ARM_ARCH__ __LINUX_ARM_ARCH__ |
62 | # define __ARM_MAX_ARCH__ 7 | ||
62 | #endif | 63 | #endif |
63 | 64 | ||
64 | #ifdef __thumb__ | 65 | #ifdef __thumb__ |
65 | # define adrl adr | 66 | # define adrl adr |
66 | #endif | 67 | #endif |
67 | 68 | ||
68 | #if __ARM_ARCH__>=7 | 69 | #if __ARM_MAX_ARCH__>=7 |
70 | .arch armv7-a | ||
71 | .fpu neon | ||
72 | |||
69 | .text | 73 | .text |
70 | .syntax unified @ ARMv7-capable assembler is expected to handle this | 74 | .syntax unified @ ARMv7-capable assembler is expected to handle this |
71 | #ifdef __thumb2__ | 75 | #ifdef __thumb2__ |
@@ -74,8 +78,6 @@ | |||
74 | .code 32 | 78 | .code 32 |
75 | #endif | 79 | #endif |
76 | 80 | ||
77 | .fpu neon | ||
78 | |||
79 | .type _bsaes_decrypt8,%function | 81 | .type _bsaes_decrypt8,%function |
80 | .align 4 | 82 | .align 4 |
81 | _bsaes_decrypt8: | 83 | _bsaes_decrypt8: |
@@ -2095,9 +2097,11 @@ bsaes_xts_decrypt: | |||
2095 | vld1.8 {q8}, [r0] @ initial tweak | 2097 | vld1.8 {q8}, [r0] @ initial tweak |
2096 | adr r2, .Lxts_magic | 2098 | adr r2, .Lxts_magic |
2097 | 2099 | ||
2100 | #ifndef XTS_CHAIN_TWEAK | ||
2098 | tst r9, #0xf @ if not multiple of 16 | 2101 | tst r9, #0xf @ if not multiple of 16 |
2099 | it ne @ Thumb2 thing, sanity check in ARM | 2102 | it ne @ Thumb2 thing, sanity check in ARM |
2100 | subne r9, #0x10 @ subtract another 16 bytes | 2103 | subne r9, #0x10 @ subtract another 16 bytes |
2104 | #endif | ||
2101 | subs r9, #0x80 | 2105 | subs r9, #0x80 |
2102 | 2106 | ||
2103 | blo .Lxts_dec_short | 2107 | blo .Lxts_dec_short |
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl index be068db960ee..a4d3856e7d24 100644 --- a/arch/arm/crypto/bsaes-armv7.pl +++ b/arch/arm/crypto/bsaes-armv7.pl | |||
@@ -701,14 +701,18 @@ $code.=<<___; | |||
701 | # define VFP_ABI_FRAME 0 | 701 | # define VFP_ABI_FRAME 0 |
702 | # define BSAES_ASM_EXTENDED_KEY | 702 | # define BSAES_ASM_EXTENDED_KEY |
703 | # define XTS_CHAIN_TWEAK | 703 | # define XTS_CHAIN_TWEAK |
704 | # define __ARM_ARCH__ 7 | 704 | # define __ARM_ARCH__ __LINUX_ARM_ARCH__ |
705 | # define __ARM_MAX_ARCH__ 7 | ||
705 | #endif | 706 | #endif |
706 | 707 | ||
707 | #ifdef __thumb__ | 708 | #ifdef __thumb__ |
708 | # define adrl adr | 709 | # define adrl adr |
709 | #endif | 710 | #endif |
710 | 711 | ||
711 | #if __ARM_ARCH__>=7 | 712 | #if __ARM_MAX_ARCH__>=7 |
713 | .arch armv7-a | ||
714 | .fpu neon | ||
715 | |||
712 | .text | 716 | .text |
713 | .syntax unified @ ARMv7-capable assembler is expected to handle this | 717 | .syntax unified @ ARMv7-capable assembler is expected to handle this |
714 | #ifdef __thumb2__ | 718 | #ifdef __thumb2__ |
@@ -717,8 +721,6 @@ $code.=<<___; | |||
717 | .code 32 | 721 | .code 32 |
718 | #endif | 722 | #endif |
719 | 723 | ||
720 | .fpu neon | ||
721 | |||
722 | .type _bsaes_decrypt8,%function | 724 | .type _bsaes_decrypt8,%function |
723 | .align 4 | 725 | .align 4 |
724 | _bsaes_decrypt8: | 726 | _bsaes_decrypt8: |
@@ -2076,9 +2078,11 @@ bsaes_xts_decrypt: | |||
2076 | vld1.8 {@XMM[8]}, [r0] @ initial tweak | 2078 | vld1.8 {@XMM[8]}, [r0] @ initial tweak |
2077 | adr $magic, .Lxts_magic | 2079 | adr $magic, .Lxts_magic |
2078 | 2080 | ||
2081 | #ifndef XTS_CHAIN_TWEAK | ||
2079 | tst $len, #0xf @ if not multiple of 16 | 2082 | tst $len, #0xf @ if not multiple of 16 |
2080 | it ne @ Thumb2 thing, sanity check in ARM | 2083 | it ne @ Thumb2 thing, sanity check in ARM |
2081 | subne $len, #0x10 @ subtract another 16 bytes | 2084 | subne $len, #0x10 @ subtract another 16 bytes |
2085 | #endif | ||
2082 | subs $len, #0x80 | 2086 | subs $len, #0x80 |
2083 | 2087 | ||
2084 | blo .Lxts_dec_short | 2088 | blo .Lxts_dec_short |
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index bf0fe99e8ca9..4cf48c3aca13 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h | |||
@@ -149,29 +149,28 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) | |||
149 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ | 149 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ |
150 | }) | 150 | }) |
151 | 151 | ||
152 | #define kvm_pgd_index(addr) pgd_index(addr) | ||
153 | |||
152 | static inline bool kvm_page_empty(void *ptr) | 154 | static inline bool kvm_page_empty(void *ptr) |
153 | { | 155 | { |
154 | struct page *ptr_page = virt_to_page(ptr); | 156 | struct page *ptr_page = virt_to_page(ptr); |
155 | return page_count(ptr_page) == 1; | 157 | return page_count(ptr_page) == 1; |
156 | } | 158 | } |
157 | 159 | ||
158 | |||
159 | #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) | 160 | #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) |
160 | #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) | 161 | #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) |
161 | #define kvm_pud_table_empty(kvm, pudp) (0) | 162 | #define kvm_pud_table_empty(kvm, pudp) (0) |
162 | 163 | ||
163 | #define KVM_PREALLOC_LEVEL 0 | 164 | #define KVM_PREALLOC_LEVEL 0 |
164 | 165 | ||
165 | static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) | 166 | static inline void *kvm_get_hwpgd(struct kvm *kvm) |
166 | { | 167 | { |
167 | return 0; | 168 | return kvm->arch.pgd; |
168 | } | 169 | } |
169 | 170 | ||
170 | static inline void kvm_free_hwpgd(struct kvm *kvm) { } | 171 | static inline unsigned int kvm_get_hwpgd_size(void) |
171 | |||
172 | static inline void *kvm_get_hwpgd(struct kvm *kvm) | ||
173 | { | 172 | { |
174 | return kvm->arch.pgd; | 173 | return PTRS_PER_S2_PGD * sizeof(pgd_t); |
175 | } | 174 | } |
176 | 175 | ||
177 | struct kvm; | 176 | struct kvm; |
diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S index 80a6501b4d50..c3c45e628e33 100644 --- a/arch/arm/include/debug/at91.S +++ b/arch/arm/include/debug/at91.S | |||
@@ -18,8 +18,11 @@ | |||
18 | #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */ | 18 | #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */ |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | /* Keep in sync with mach-at91/include/mach/hardware.h */ | 21 | #ifdef CONFIG_MMU |
22 | #define AT91_IO_P2V(x) ((x) - 0x01000000) | 22 | #define AT91_IO_P2V(x) ((x) - 0x01000000) |
23 | #else | ||
24 | #define AT91_IO_P2V(x) (x) | ||
25 | #endif | ||
23 | 26 | ||
24 | #define AT91_DBGU_SR (0x14) /* Status Register */ | 27 | #define AT91_DBGU_SR (0x14) /* Status Register */ |
25 | #define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */ | 28 | #define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */ |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index e55408e96559..1d60bebea4b8 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -246,12 +246,9 @@ static int __get_cpu_architecture(void) | |||
246 | if (cpu_arch) | 246 | if (cpu_arch) |
247 | cpu_arch += CPU_ARCH_ARMv3; | 247 | cpu_arch += CPU_ARCH_ARMv3; |
248 | } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { | 248 | } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { |
249 | unsigned int mmfr0; | ||
250 | |||
251 | /* Revised CPUID format. Read the Memory Model Feature | 249 | /* Revised CPUID format. Read the Memory Model Feature |
252 | * Register 0 and check for VMSAv7 or PMSAv7 */ | 250 | * Register 0 and check for VMSAv7 or PMSAv7 */ |
253 | asm("mrc p15, 0, %0, c0, c1, 4" | 251 | unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0); |
254 | : "=r" (mmfr0)); | ||
255 | if ((mmfr0 & 0x0000000f) >= 0x00000003 || | 252 | if ((mmfr0 & 0x0000000f) >= 0x00000003 || |
256 | (mmfr0 & 0x000000f0) >= 0x00000030) | 253 | (mmfr0 & 0x000000f0) >= 0x00000030) |
257 | cpu_arch = CPU_ARCH_ARMv7; | 254 | cpu_arch = CPU_ARCH_ARMv7; |
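
The setup.c hunk above swaps the open-coded MRC for the existing read_cpuid_ext() accessor. As a rough sketch of the same check (the field masks are the ones visible in the hunk; the helper name is hypothetical), the VMSAv7/PMSAv7 test amounts to:

	#include <linux/types.h>
	#include <asm/cputype.h>

	/* Hypothetical helper: true when ID_MMFR0 reports VMSAv7 or PMSAv7. */
	static bool cpu_reports_v7_memory_model(void)
	{
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);

		return (mmfr0 & 0x0000000f) >= 0x00000003 ||	/* VMSA field */
		       (mmfr0 & 0x000000f0) >= 0x00000030;	/* PMSA field */
	}
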
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 3e6859bc3e11..5656d79c5a44 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, | |||
290 | phys_addr_t addr = start, end = start + size; | 290 | phys_addr_t addr = start, end = start + size; |
291 | phys_addr_t next; | 291 | phys_addr_t next; |
292 | 292 | ||
293 | pgd = pgdp + pgd_index(addr); | 293 | pgd = pgdp + kvm_pgd_index(addr); |
294 | do { | 294 | do { |
295 | next = kvm_pgd_addr_end(addr, end); | 295 | next = kvm_pgd_addr_end(addr, end); |
296 | if (!pgd_none(*pgd)) | 296 | if (!pgd_none(*pgd)) |
@@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm, | |||
355 | phys_addr_t next; | 355 | phys_addr_t next; |
356 | pgd_t *pgd; | 356 | pgd_t *pgd; |
357 | 357 | ||
358 | pgd = kvm->arch.pgd + pgd_index(addr); | 358 | pgd = kvm->arch.pgd + kvm_pgd_index(addr); |
359 | do { | 359 | do { |
360 | next = kvm_pgd_addr_end(addr, end); | 360 | next = kvm_pgd_addr_end(addr, end); |
361 | stage2_flush_puds(kvm, pgd, addr, next); | 361 | stage2_flush_puds(kvm, pgd, addr, next); |
@@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) | |||
632 | __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); | 632 | __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); |
633 | } | 633 | } |
634 | 634 | ||
635 | /* Free the HW pgd, one page at a time */ | ||
636 | static void kvm_free_hwpgd(void *hwpgd) | ||
637 | { | ||
638 | free_pages_exact(hwpgd, kvm_get_hwpgd_size()); | ||
639 | } | ||
640 | |||
641 | /* Allocate the HW PGD, making sure that each page gets its own refcount */ | ||
642 | static void *kvm_alloc_hwpgd(void) | ||
643 | { | ||
644 | unsigned int size = kvm_get_hwpgd_size(); | ||
645 | |||
646 | return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); | ||
647 | } | ||
648 | |||
635 | /** | 649 | /** |
636 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. | 650 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. |
637 | * @kvm: The KVM struct pointer for the VM. | 651 | * @kvm: The KVM struct pointer for the VM. |
@@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) | |||
645 | */ | 659 | */ |
646 | int kvm_alloc_stage2_pgd(struct kvm *kvm) | 660 | int kvm_alloc_stage2_pgd(struct kvm *kvm) |
647 | { | 661 | { |
648 | int ret; | ||
649 | pgd_t *pgd; | 662 | pgd_t *pgd; |
663 | void *hwpgd; | ||
650 | 664 | ||
651 | if (kvm->arch.pgd != NULL) { | 665 | if (kvm->arch.pgd != NULL) { |
652 | kvm_err("kvm_arch already initialized?\n"); | 666 | kvm_err("kvm_arch already initialized?\n"); |
653 | return -EINVAL; | 667 | return -EINVAL; |
654 | } | 668 | } |
655 | 669 | ||
670 | hwpgd = kvm_alloc_hwpgd(); | ||
671 | if (!hwpgd) | ||
672 | return -ENOMEM; | ||
673 | |||
674 | /* When the kernel uses more levels of page tables than the | ||
675 | * guest, we allocate a fake PGD and pre-populate it to point | ||
676 | * to the next-level page table, which will be the real | ||
677 | * initial page table pointed to by the VTTBR. | ||
678 | * | ||
679 | * When KVM_PREALLOC_LEVEL==2, we allocate a single page for | ||
680 | * the PMD and the kernel will use folded pud. | ||
681 | * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD | ||
682 | * pages. | ||
683 | */ | ||
656 | if (KVM_PREALLOC_LEVEL > 0) { | 684 | if (KVM_PREALLOC_LEVEL > 0) { |
685 | int i; | ||
686 | |||
657 | /* | 687 | /* |
658 | * Allocate fake pgd for the page table manipulation macros to | 688 | * Allocate fake pgd for the page table manipulation macros to |
659 | * work. This is not used by the hardware and we have no | 689 | * work. This is not used by the hardware and we have no |
@@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) | |||
661 | */ | 691 | */ |
662 | pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), | 692 | pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), |
663 | GFP_KERNEL | __GFP_ZERO); | 693 | GFP_KERNEL | __GFP_ZERO); |
694 | |||
695 | if (!pgd) { | ||
696 | kvm_free_hwpgd(hwpgd); | ||
697 | return -ENOMEM; | ||
698 | } | ||
699 | |||
700 | /* Plug the HW PGD into the fake one. */ | ||
701 | for (i = 0; i < PTRS_PER_S2_PGD; i++) { | ||
702 | if (KVM_PREALLOC_LEVEL == 1) | ||
703 | pgd_populate(NULL, pgd + i, | ||
704 | (pud_t *)hwpgd + i * PTRS_PER_PUD); | ||
705 | else if (KVM_PREALLOC_LEVEL == 2) | ||
706 | pud_populate(NULL, pud_offset(pgd, 0) + i, | ||
707 | (pmd_t *)hwpgd + i * PTRS_PER_PMD); | ||
708 | } | ||
664 | } else { | 709 | } else { |
665 | /* | 710 | /* |
666 | * Allocate actual first-level Stage-2 page table used by the | 711 | * Allocate actual first-level Stage-2 page table used by the |
667 | * hardware for Stage-2 page table walks. | 712 | * hardware for Stage-2 page table walks. |
668 | */ | 713 | */ |
669 | pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); | 714 | pgd = (pgd_t *)hwpgd; |
670 | } | 715 | } |
671 | 716 | ||
672 | if (!pgd) | ||
673 | return -ENOMEM; | ||
674 | |||
675 | ret = kvm_prealloc_hwpgd(kvm, pgd); | ||
676 | if (ret) | ||
677 | goto out_err; | ||
678 | |||
679 | kvm_clean_pgd(pgd); | 717 | kvm_clean_pgd(pgd); |
680 | kvm->arch.pgd = pgd; | 718 | kvm->arch.pgd = pgd; |
681 | return 0; | 719 | return 0; |
682 | out_err: | ||
683 | if (KVM_PREALLOC_LEVEL > 0) | ||
684 | kfree(pgd); | ||
685 | else | ||
686 | free_pages((unsigned long)pgd, S2_PGD_ORDER); | ||
687 | return ret; | ||
688 | } | 720 | } |
689 | 721 | ||
690 | /** | 722 | /** |
@@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm) | |||
785 | return; | 817 | return; |
786 | 818 | ||
787 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); | 819 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); |
788 | kvm_free_hwpgd(kvm); | 820 | kvm_free_hwpgd(kvm_get_hwpgd(kvm)); |
789 | if (KVM_PREALLOC_LEVEL > 0) | 821 | if (KVM_PREALLOC_LEVEL > 0) |
790 | kfree(kvm->arch.pgd); | 822 | kfree(kvm->arch.pgd); |
791 | else | 823 | |
792 | free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); | ||
793 | kvm->arch.pgd = NULL; | 824 | kvm->arch.pgd = NULL; |
794 | } | 825 | } |
795 | 826 | ||
@@ -799,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache | |||
799 | pgd_t *pgd; | 830 | pgd_t *pgd; |
800 | pud_t *pud; | 831 | pud_t *pud; |
801 | 832 | ||
802 | pgd = kvm->arch.pgd + pgd_index(addr); | 833 | pgd = kvm->arch.pgd + kvm_pgd_index(addr); |
803 | if (WARN_ON(pgd_none(*pgd))) { | 834 | if (WARN_ON(pgd_none(*pgd))) { |
804 | if (!cache) | 835 | if (!cache) |
805 | return NULL; | 836 | return NULL; |
@@ -1089,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) | |||
1089 | pgd_t *pgd; | 1120 | pgd_t *pgd; |
1090 | phys_addr_t next; | 1121 | phys_addr_t next; |
1091 | 1122 | ||
1092 | pgd = kvm->arch.pgd + pgd_index(addr); | 1123 | pgd = kvm->arch.pgd + kvm_pgd_index(addr); |
1093 | do { | 1124 | do { |
1094 | /* | 1125 | /* |
1095 | * Release kvm_mmu_lock periodically if the memory region is | 1126 | * Release kvm_mmu_lock periodically if the memory region is |
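
One detail worth noting in the mmu.c change above: the hardware PGD is now allocated with alloc_pages_exact() instead of a single higher-order __get_free_pages() call, so every page backing the table keeps its own reference count, which is what the kvm_page_empty() test (page_count() == 1) relies on. A minimal sketch of that allocation pattern, illustrative only and not the exact kernel code:

	#include <linux/types.h>
	#include <linux/gfp.h>

	/* alloc_pages_exact() returns 'size' bytes of physically contiguous,
	 * zeroed memory in which each page is individually refcounted, unlike
	 * a single higher-order page allocation. */
	static void *stage2_table_alloc(size_t size)
	{
		return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	}

	static void stage2_table_free(void *table, size_t size)
	{
		free_pages_exact(table, size);
	}
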
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 5e34fb143309..aa4116e9452f 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c | |||
@@ -270,37 +270,35 @@ static void __init at91_pm_sram_init(void) | |||
270 | phys_addr_t sram_pbase; | 270 | phys_addr_t sram_pbase; |
271 | unsigned long sram_base; | 271 | unsigned long sram_base; |
272 | struct device_node *node; | 272 | struct device_node *node; |
273 | struct platform_device *pdev; | 273 | struct platform_device *pdev = NULL; |
274 | 274 | ||
275 | node = of_find_compatible_node(NULL, NULL, "mmio-sram"); | 275 | for_each_compatible_node(node, NULL, "mmio-sram") { |
276 | if (!node) { | 276 | pdev = of_find_device_by_node(node); |
277 | pr_warn("%s: failed to find sram node!\n", __func__); | 277 | if (pdev) { |
278 | return; | 278 | of_node_put(node); |
279 | break; | ||
280 | } | ||
279 | } | 281 | } |
280 | 282 | ||
281 | pdev = of_find_device_by_node(node); | ||
282 | if (!pdev) { | 283 | if (!pdev) { |
283 | pr_warn("%s: failed to find sram device!\n", __func__); | 284 | pr_warn("%s: failed to find sram device!\n", __func__); |
284 | goto put_node; | 285 | return; |
285 | } | 286 | } |
286 | 287 | ||
287 | sram_pool = dev_get_gen_pool(&pdev->dev); | 288 | sram_pool = dev_get_gen_pool(&pdev->dev); |
288 | if (!sram_pool) { | 289 | if (!sram_pool) { |
289 | pr_warn("%s: sram pool unavailable!\n", __func__); | 290 | pr_warn("%s: sram pool unavailable!\n", __func__); |
290 | goto put_node; | 291 | return; |
291 | } | 292 | } |
292 | 293 | ||
293 | sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz); | 294 | sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz); |
294 | if (!sram_base) { | 295 | if (!sram_base) { |
295 | pr_warn("%s: unable to alloc ocram!\n", __func__); | 296 | pr_warn("%s: unable to alloc ocram!\n", __func__); |
296 | goto put_node; | 297 | return; |
297 | } | 298 | } |
298 | 299 | ||
299 | sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); | 300 | sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); |
300 | slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false); | 301 | slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false); |
301 | |||
302 | put_node: | ||
303 | of_node_put(node); | ||
304 | } | 302 | } |
305 | #endif | 303 | #endif |
306 | 304 | ||
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h index d2c89963af2d..86c0aa819d25 100644 --- a/arch/arm/mach-at91/pm.h +++ b/arch/arm/mach-at91/pm.h | |||
@@ -44,7 +44,7 @@ static inline void at91rm9200_standby(void) | |||
44 | " mcr p15, 0, %0, c7, c0, 4\n\t" | 44 | " mcr p15, 0, %0, c7, c0, 4\n\t" |
45 | " str %5, [%1, %2]" | 45 | " str %5, [%1, %2]" |
46 | : | 46 | : |
47 | : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR), | 47 | : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR), |
48 | "r" (1), "r" (AT91RM9200_SDRAMC_SRR), | 48 | "r" (1), "r" (AT91RM9200_SDRAMC_SRR), |
49 | "r" (lpr)); | 49 | "r" (lpr)); |
50 | } | 50 | } |
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S index 556151e85ec4..931f0e302c03 100644 --- a/arch/arm/mach-at91/pm_slowclock.S +++ b/arch/arm/mach-at91/pm_slowclock.S | |||
@@ -25,11 +25,6 @@ | |||
25 | */ | 25 | */ |
26 | #undef SLOWDOWN_MASTER_CLOCK | 26 | #undef SLOWDOWN_MASTER_CLOCK |
27 | 27 | ||
28 | #define MCKRDY_TIMEOUT 1000 | ||
29 | #define MOSCRDY_TIMEOUT 1000 | ||
30 | #define PLLALOCK_TIMEOUT 1000 | ||
31 | #define PLLBLOCK_TIMEOUT 1000 | ||
32 | |||
33 | pmc .req r0 | 28 | pmc .req r0 |
34 | sdramc .req r1 | 29 | sdramc .req r1 |
35 | ramc1 .req r2 | 30 | ramc1 .req r2 |
@@ -41,60 +36,42 @@ tmp2 .req r5 | |||
41 | * Wait until master clock is ready (after switching master clock source) | 36 | * Wait until master clock is ready (after switching master clock source) |
42 | */ | 37 | */ |
43 | .macro wait_mckrdy | 38 | .macro wait_mckrdy |
44 | mov tmp2, #MCKRDY_TIMEOUT | 39 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
45 | 1: sub tmp2, tmp2, #1 | ||
46 | cmp tmp2, #0 | ||
47 | beq 2f | ||
48 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
49 | tst tmp1, #AT91_PMC_MCKRDY | 40 | tst tmp1, #AT91_PMC_MCKRDY |
50 | beq 1b | 41 | beq 1b |
51 | 2: | ||
52 | .endm | 42 | .endm |
53 | 43 | ||
54 | /* | 44 | /* |
55 | * Wait until master oscillator has stabilized. | 45 | * Wait until master oscillator has stabilized. |
56 | */ | 46 | */ |
57 | .macro wait_moscrdy | 47 | .macro wait_moscrdy |
58 | mov tmp2, #MOSCRDY_TIMEOUT | 48 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
59 | 1: sub tmp2, tmp2, #1 | ||
60 | cmp tmp2, #0 | ||
61 | beq 2f | ||
62 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
63 | tst tmp1, #AT91_PMC_MOSCS | 49 | tst tmp1, #AT91_PMC_MOSCS |
64 | beq 1b | 50 | beq 1b |
65 | 2: | ||
66 | .endm | 51 | .endm |
67 | 52 | ||
68 | /* | 53 | /* |
69 | * Wait until PLLA has locked. | 54 | * Wait until PLLA has locked. |
70 | */ | 55 | */ |
71 | .macro wait_pllalock | 56 | .macro wait_pllalock |
72 | mov tmp2, #PLLALOCK_TIMEOUT | 57 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
73 | 1: sub tmp2, tmp2, #1 | ||
74 | cmp tmp2, #0 | ||
75 | beq 2f | ||
76 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
77 | tst tmp1, #AT91_PMC_LOCKA | 58 | tst tmp1, #AT91_PMC_LOCKA |
78 | beq 1b | 59 | beq 1b |
79 | 2: | ||
80 | .endm | 60 | .endm |
81 | 61 | ||
82 | /* | 62 | /* |
83 | * Wait until PLLB has locked. | 63 | * Wait until PLLB has locked. |
84 | */ | 64 | */ |
85 | .macro wait_pllblock | 65 | .macro wait_pllblock |
86 | mov tmp2, #PLLBLOCK_TIMEOUT | 66 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
87 | 1: sub tmp2, tmp2, #1 | ||
88 | cmp tmp2, #0 | ||
89 | beq 2f | ||
90 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
91 | tst tmp1, #AT91_PMC_LOCKB | 67 | tst tmp1, #AT91_PMC_LOCKB |
92 | beq 1b | 68 | beq 1b |
93 | 2: | ||
94 | .endm | 69 | .endm |
95 | 70 | ||
96 | .text | 71 | .text |
97 | 72 | ||
73 | .arm | ||
74 | |||
98 | /* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc, | 75 | /* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc, |
99 | * void __iomem *ramc1, int memctrl) | 76 | * void __iomem *ramc1, int memctrl) |
100 | */ | 77 | */ |
@@ -134,6 +111,16 @@ ddr_sr_enable: | |||
134 | cmp memctrl, #AT91_MEMCTRL_DDRSDR | 111 | cmp memctrl, #AT91_MEMCTRL_DDRSDR |
135 | bne sdr_sr_enable | 112 | bne sdr_sr_enable |
136 | 113 | ||
114 | /* LPDDR1 --> force DDR2 mode during self-refresh */ | ||
115 | ldr tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
116 | str tmp1, .saved_sam9_mdr | ||
117 | bic tmp1, tmp1, #~AT91_DDRSDRC_MD | ||
118 | cmp tmp1, #AT91_DDRSDRC_MD_LOW_POWER_DDR | ||
119 | ldreq tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
120 | biceq tmp1, tmp1, #AT91_DDRSDRC_MD | ||
121 | orreq tmp1, tmp1, #AT91_DDRSDRC_MD_DDR2 | ||
122 | streq tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
123 | |||
137 | /* prepare for DDRAM self-refresh mode */ | 124 | /* prepare for DDRAM self-refresh mode */ |
138 | ldr tmp1, [sdramc, #AT91_DDRSDRC_LPR] | 125 | ldr tmp1, [sdramc, #AT91_DDRSDRC_LPR] |
139 | str tmp1, .saved_sam9_lpr | 126 | str tmp1, .saved_sam9_lpr |
@@ -142,14 +129,26 @@ ddr_sr_enable: | |||
142 | 129 | ||
143 | /* figure out if we use the second ram controller */ | 130 | /* figure out if we use the second ram controller */ |
144 | cmp ramc1, #0 | 131 | cmp ramc1, #0 |
145 | ldrne tmp2, [ramc1, #AT91_DDRSDRC_LPR] | 132 | beq ddr_no_2nd_ctrl |
146 | strne tmp2, .saved_sam9_lpr1 | 133 | |
147 | bicne tmp2, #AT91_DDRSDRC_LPCB | 134 | ldr tmp2, [ramc1, #AT91_DDRSDRC_MDR] |
148 | orrne tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH | 135 | str tmp2, .saved_sam9_mdr1 |
136 | bic tmp2, tmp2, #~AT91_DDRSDRC_MD | ||
137 | cmp tmp2, #AT91_DDRSDRC_MD_LOW_POWER_DDR | ||
138 | ldreq tmp2, [ramc1, #AT91_DDRSDRC_MDR] | ||
139 | biceq tmp2, tmp2, #AT91_DDRSDRC_MD | ||
140 | orreq tmp2, tmp2, #AT91_DDRSDRC_MD_DDR2 | ||
141 | streq tmp2, [ramc1, #AT91_DDRSDRC_MDR] | ||
142 | |||
143 | ldr tmp2, [ramc1, #AT91_DDRSDRC_LPR] | ||
144 | str tmp2, .saved_sam9_lpr1 | ||
145 | bic tmp2, #AT91_DDRSDRC_LPCB | ||
146 | orr tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH | ||
149 | 147 | ||
150 | /* Enable DDRAM self-refresh mode */ | 148 | /* Enable DDRAM self-refresh mode */ |
149 | str tmp2, [ramc1, #AT91_DDRSDRC_LPR] | ||
150 | ddr_no_2nd_ctrl: | ||
151 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] | 151 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] |
152 | strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] | ||
153 | 152 | ||
154 | b sdr_sr_done | 153 | b sdr_sr_done |
155 | 154 | ||
@@ -208,6 +207,7 @@ sdr_sr_done: | |||
208 | /* Turn off the main oscillator */ | 207 | /* Turn off the main oscillator */ |
209 | ldr tmp1, [pmc, #AT91_CKGR_MOR] | 208 | ldr tmp1, [pmc, #AT91_CKGR_MOR] |
210 | bic tmp1, tmp1, #AT91_PMC_MOSCEN | 209 | bic tmp1, tmp1, #AT91_PMC_MOSCEN |
210 | orr tmp1, tmp1, #AT91_PMC_KEY | ||
211 | str tmp1, [pmc, #AT91_CKGR_MOR] | 211 | str tmp1, [pmc, #AT91_CKGR_MOR] |
212 | 212 | ||
213 | /* Wait for interrupt */ | 213 | /* Wait for interrupt */ |
@@ -216,6 +216,7 @@ sdr_sr_done: | |||
216 | /* Turn on the main oscillator */ | 216 | /* Turn on the main oscillator */ |
217 | ldr tmp1, [pmc, #AT91_CKGR_MOR] | 217 | ldr tmp1, [pmc, #AT91_CKGR_MOR] |
218 | orr tmp1, tmp1, #AT91_PMC_MOSCEN | 218 | orr tmp1, tmp1, #AT91_PMC_MOSCEN |
219 | orr tmp1, tmp1, #AT91_PMC_KEY | ||
219 | str tmp1, [pmc, #AT91_CKGR_MOR] | 220 | str tmp1, [pmc, #AT91_CKGR_MOR] |
220 | 221 | ||
221 | wait_moscrdy | 222 | wait_moscrdy |
@@ -280,12 +281,17 @@ sdr_sr_done: | |||
280 | */ | 281 | */ |
281 | cmp memctrl, #AT91_MEMCTRL_DDRSDR | 282 | cmp memctrl, #AT91_MEMCTRL_DDRSDR |
282 | bne sdr_en_restore | 283 | bne sdr_en_restore |
284 | /* Restore MDR in case of LPDDR1 */ | ||
285 | ldr tmp1, .saved_sam9_mdr | ||
286 | str tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
283 | /* Restore LPR on AT91 with DDRAM */ | 287 | /* Restore LPR on AT91 with DDRAM */ |
284 | ldr tmp1, .saved_sam9_lpr | 288 | ldr tmp1, .saved_sam9_lpr |
285 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] | 289 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] |
286 | 290 | ||
287 | /* if we use the second ram controller */ | 291 | /* if we use the second ram controller */ |
288 | cmp ramc1, #0 | 292 | cmp ramc1, #0 |
293 | ldrne tmp2, .saved_sam9_mdr1 | ||
294 | strne tmp2, [ramc1, #AT91_DDRSDRC_MDR] | ||
289 | ldrne tmp2, .saved_sam9_lpr1 | 295 | ldrne tmp2, .saved_sam9_lpr1 |
290 | strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] | 296 | strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] |
291 | 297 | ||
@@ -319,5 +325,11 @@ ram_restored: | |||
319 | .saved_sam9_lpr1: | 325 | .saved_sam9_lpr1: |
320 | .word 0 | 326 | .word 0 |
321 | 327 | ||
328 | .saved_sam9_mdr: | ||
329 | .word 0 | ||
330 | |||
331 | .saved_sam9_mdr1: | ||
332 | .word 0 | ||
333 | |||
322 | ENTRY(at91_slow_clock_sz) | 334 | ENTRY(at91_slow_clock_sz) |
323 | .word .-at91_slow_clock | 335 | .word .-at91_slow_clock |
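
The pm_slowclock.S rework above drops the arbitrary loop counters from the wait macros, so each macro now simply polls the PMC status register until the relevant ready/lock bit is set, and the oscillator on/off writes gain the AT91_PMC_KEY password bits. A hedged C rendering of what wait_mckrdy now does (register and bit names as used in the hunk; the header path is an assumption):

	#include <linux/io.h>
	#include <linux/clk/at91_pmc.h>	/* assumed location of the AT91_PMC_* defines */
	#include <asm/processor.h>

	/* Poll MCKRDY with no bail-out: the old bounded loop could give up
	 * before the master clock had actually settled. */
	static void wait_mckrdy_c(void __iomem *pmc)
	{
		while (!(readl(pmc + AT91_PMC_SR) & AT91_PMC_MCKRDY))
			cpu_relax();
	}
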
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 3f32c47a6d74..d2e9f12d12f1 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c | |||
@@ -126,8 +126,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious) | |||
126 | */ | 126 | */ |
127 | void exynos_cpu_power_down(int cpu) | 127 | void exynos_cpu_power_down(int cpu) |
128 | { | 128 | { |
129 | if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") || | 129 | if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) { |
130 | of_machine_is_compatible("samsung,exynos5800"))) { | ||
131 | /* | 130 | /* |
132 | * Bypass power down for CPU0 during suspend. Check for | 131 | * Bypass power down for CPU0 during suspend. Check for |
133 | * the SYS_PWR_REG value to decide if we are suspending | 132 | * the SYS_PWR_REG value to decide if we are suspending |
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 20f267121b3e..37266a826437 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c | |||
@@ -161,6 +161,34 @@ no_clk: | |||
161 | of_genpd_add_provider_simple(np, &pd->pd); | 161 | of_genpd_add_provider_simple(np, &pd->pd); |
162 | } | 162 | } |
163 | 163 | ||
164 | /* Assign the child power domains to their parents */ | ||
165 | for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") { | ||
166 | struct generic_pm_domain *child_domain, *parent_domain; | ||
167 | struct of_phandle_args args; | ||
168 | |||
169 | args.np = np; | ||
170 | args.args_count = 0; | ||
171 | child_domain = of_genpd_get_from_provider(&args); | ||
172 | if (!child_domain) | ||
173 | continue; | ||
174 | |||
175 | if (of_parse_phandle_with_args(np, "power-domains", | ||
176 | "#power-domain-cells", 0, &args) != 0) | ||
177 | continue; | ||
178 | |||
179 | parent_domain = of_genpd_get_from_provider(&args); | ||
180 | if (!parent_domain) | ||
181 | continue; | ||
182 | |||
183 | if (pm_genpd_add_subdomain(parent_domain, child_domain)) | ||
184 | pr_warn("%s failed to add subdomain: %s\n", | ||
185 | parent_domain->name, child_domain->name); | ||
186 | else | ||
187 | pr_info("%s has as child subdomain: %s.\n", | ||
188 | parent_domain->name, child_domain->name); | ||
189 | of_node_put(np); | ||
190 | } | ||
191 | |||
164 | return 0; | 192 | return 0; |
165 | } | 193 | } |
166 | arch_initcall(exynos4_pm_init_power_domain); | 194 | arch_initcall(exynos4_pm_init_power_domain); |
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 52e2b1a2fddb..318d127df147 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c | |||
@@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3; | |||
87 | static u32 exynos_irqwake_intmask = 0xffffffff; | 87 | static u32 exynos_irqwake_intmask = 0xffffffff; |
88 | 88 | ||
89 | static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { | 89 | static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { |
90 | { 73, BIT(1) }, /* RTC alarm */ | 90 | { 105, BIT(1) }, /* RTC alarm */ |
91 | { 74, BIT(2) }, /* RTC tick */ | 91 | { 106, BIT(2) }, /* RTC tick */ |
92 | { /* sentinel */ }, | 92 | { /* sentinel */ }, |
93 | }; | 93 | }; |
94 | 94 | ||
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 4ad6e473cf83..9de3412af406 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c | |||
@@ -211,8 +211,9 @@ static void __init imx6q_1588_init(void) | |||
211 | * set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad | 211 | * set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad |
212 | * (external OSC), and we need to clear the bit. | 212 | * (external OSC), and we need to clear the bit. |
213 | */ | 213 | */ |
214 | clksel = ptp_clk == enet_ref ? IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : | 214 | clksel = clk_is_match(ptp_clk, enet_ref) ? |
215 | IMX6Q_GPR1_ENET_CLK_SEL_PAD; | 215 | IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : |
216 | IMX6Q_GPR1_ENET_CLK_SEL_PAD; | ||
216 | gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); | 217 | gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); |
217 | if (!IS_ERR(gpr)) | 218 | if (!IS_ERR(gpr)) |
218 | regmap_update_bits(gpr, IOMUXC_GPR1, | 219 | regmap_update_bits(gpr, IOMUXC_GPR1, |
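
The imx6q change above follows from the per-user clk API: clk_get() can hand back distinct struct clk cookies for the same underlying hardware clock, so a raw pointer comparison may report "different" for identical clocks, whereas clk_is_match() compares the clocks themselves. A small hedged usage sketch (helper name hypothetical):

	#include <linux/types.h>
	#include <linux/clk.h>

	/* True when the PTP clock and the ENET reference clock are the same
	 * hardware clock, even if the two handles differ. */
	static bool ptp_clk_from_anatop(struct clk *ptp_clk, struct clk *enet_ref)
	{
		return clk_is_match(ptp_clk, enet_ref);
	}
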
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 2a2f4d56e4c8..25f1beea453e 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c | |||
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void) | |||
720 | return kasprintf(GFP_KERNEL, "OMAP4"); | 720 | return kasprintf(GFP_KERNEL, "OMAP4"); |
721 | else if (soc_is_omap54xx()) | 721 | else if (soc_is_omap54xx()) |
722 | return kasprintf(GFP_KERNEL, "OMAP5"); | 722 | return kasprintf(GFP_KERNEL, "OMAP5"); |
723 | else if (soc_is_am33xx() || soc_is_am335x()) | ||
724 | return kasprintf(GFP_KERNEL, "AM33xx"); | ||
723 | else if (soc_is_am43xx()) | 725 | else if (soc_is_am43xx()) |
724 | return kasprintf(GFP_KERNEL, "AM43xx"); | 726 | return kasprintf(GFP_KERNEL, "AM43xx"); |
725 | else if (soc_is_dra7xx()) | 727 | else if (soc_is_dra7xx()) |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 92afb723dcfc..355b08936871 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -1692,16 +1692,15 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name) | |||
1692 | if (ret == -EBUSY) | 1692 | if (ret == -EBUSY) |
1693 | pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name); | 1693 | pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name); |
1694 | 1694 | ||
1695 | if (!ret) { | 1695 | if (oh->clkdm) { |
1696 | /* | 1696 | /* |
1697 | * Set the clockdomain to HW_AUTO, assuming that the | 1697 | * Set the clockdomain to HW_AUTO, assuming that the |
1698 | * previous state was HW_AUTO. | 1698 | * previous state was HW_AUTO. |
1699 | */ | 1699 | */ |
1700 | if (oh->clkdm && hwsup) | 1700 | if (hwsup) |
1701 | clkdm_allow_idle(oh->clkdm); | 1701 | clkdm_allow_idle(oh->clkdm); |
1702 | } else { | 1702 | |
1703 | if (oh->clkdm) | 1703 | clkdm_hwmod_disable(oh->clkdm, oh); |
1704 | clkdm_hwmod_disable(oh->clkdm, oh); | ||
1705 | } | 1704 | } |
1706 | 1705 | ||
1707 | return ret; | 1706 | return ret; |
@@ -2698,6 +2697,7 @@ static int __init _register(struct omap_hwmod *oh) | |||
2698 | INIT_LIST_HEAD(&oh->master_ports); | 2697 | INIT_LIST_HEAD(&oh->master_ports); |
2699 | INIT_LIST_HEAD(&oh->slave_ports); | 2698 | INIT_LIST_HEAD(&oh->slave_ports); |
2700 | spin_lock_init(&oh->_lock); | 2699 | spin_lock_init(&oh->_lock); |
2700 | lockdep_set_class(&oh->_lock, &oh->hwmod_key); | ||
2701 | 2701 | ||
2702 | oh->_state = _HWMOD_STATE_REGISTERED; | 2702 | oh->_state = _HWMOD_STATE_REGISTERED; |
2703 | 2703 | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index 9d4bec6ee742..9611c91d9b82 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h | |||
@@ -674,6 +674,7 @@ struct omap_hwmod { | |||
674 | u32 _sysc_cache; | 674 | u32 _sysc_cache; |
675 | void __iomem *_mpu_rt_va; | 675 | void __iomem *_mpu_rt_va; |
676 | spinlock_t _lock; | 676 | spinlock_t _lock; |
677 | struct lock_class_key hwmod_key; /* unique lock class */ | ||
677 | struct list_head node; | 678 | struct list_head node; |
678 | struct omap_hwmod_ocp_if *_mpu_port; | 679 | struct omap_hwmod_ocp_if *_mpu_port; |
679 | unsigned int (*xlate_irq)(unsigned int); | 680 | unsigned int (*xlate_irq)(unsigned int); |
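
The two omap_hwmod hunks above give every hwmod's spinlock its own lockdep class: without a per-instance key, lockdep folds all oh->_lock instances into one class and can flag false-positive deadlocks when two different hwmods are legitimately locked in a nested fashion. A generic sketch of the pattern, with hypothetical names:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct my_module {
		spinlock_t lock;
		struct lock_class_key lock_key;	/* must live as long as the lock */
	};

	static void my_module_register(struct my_module *m)
	{
		spin_lock_init(&m->lock);
		lockdep_set_class(&m->lock, &m->lock_key);
	}
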
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index e8692e7675b8..16fe7a1b7a35 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c | |||
@@ -1466,55 +1466,18 @@ static struct omap_hwmod dra7xx_ocp2scp3_hwmod = { | |||
1466 | * | 1466 | * |
1467 | */ | 1467 | */ |
1468 | 1468 | ||
1469 | static struct omap_hwmod_class dra7xx_pcie_hwmod_class = { | 1469 | static struct omap_hwmod_class dra7xx_pciess_hwmod_class = { |
1470 | .name = "pcie", | 1470 | .name = "pcie", |
1471 | }; | 1471 | }; |
1472 | 1472 | ||
1473 | /* pcie1 */ | 1473 | /* pcie1 */ |
1474 | static struct omap_hwmod dra7xx_pcie1_hwmod = { | 1474 | static struct omap_hwmod dra7xx_pciess1_hwmod = { |
1475 | .name = "pcie1", | 1475 | .name = "pcie1", |
1476 | .class = &dra7xx_pcie_hwmod_class, | 1476 | .class = &dra7xx_pciess_hwmod_class, |
1477 | .clkdm_name = "pcie_clkdm", | 1477 | .clkdm_name = "pcie_clkdm", |
1478 | .main_clk = "l4_root_clk_div", | 1478 | .main_clk = "l4_root_clk_div", |
1479 | .prcm = { | 1479 | .prcm = { |
1480 | .omap4 = { | 1480 | .omap4 = { |
1481 | .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET, | ||
1482 | .modulemode = MODULEMODE_SWCTRL, | ||
1483 | }, | ||
1484 | }, | ||
1485 | }; | ||
1486 | |||
1487 | /* pcie2 */ | ||
1488 | static struct omap_hwmod dra7xx_pcie2_hwmod = { | ||
1489 | .name = "pcie2", | ||
1490 | .class = &dra7xx_pcie_hwmod_class, | ||
1491 | .clkdm_name = "pcie_clkdm", | ||
1492 | .main_clk = "l4_root_clk_div", | ||
1493 | .prcm = { | ||
1494 | .omap4 = { | ||
1495 | .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET, | ||
1496 | .modulemode = MODULEMODE_SWCTRL, | ||
1497 | }, | ||
1498 | }, | ||
1499 | }; | ||
1500 | |||
1501 | /* | ||
1502 | * 'PCIE PHY' class | ||
1503 | * | ||
1504 | */ | ||
1505 | |||
1506 | static struct omap_hwmod_class dra7xx_pcie_phy_hwmod_class = { | ||
1507 | .name = "pcie-phy", | ||
1508 | }; | ||
1509 | |||
1510 | /* pcie1 phy */ | ||
1511 | static struct omap_hwmod dra7xx_pcie1_phy_hwmod = { | ||
1512 | .name = "pcie1-phy", | ||
1513 | .class = &dra7xx_pcie_phy_hwmod_class, | ||
1514 | .clkdm_name = "l3init_clkdm", | ||
1515 | .main_clk = "l4_root_clk_div", | ||
1516 | .prcm = { | ||
1517 | .omap4 = { | ||
1518 | .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET, | 1481 | .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET, |
1519 | .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET, | 1482 | .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET, |
1520 | .modulemode = MODULEMODE_SWCTRL, | 1483 | .modulemode = MODULEMODE_SWCTRL, |
@@ -1522,11 +1485,11 @@ static struct omap_hwmod dra7xx_pcie1_phy_hwmod = { | |||
1522 | }, | 1485 | }, |
1523 | }; | 1486 | }; |
1524 | 1487 | ||
1525 | /* pcie2 phy */ | 1488 | /* pcie2 */ |
1526 | static struct omap_hwmod dra7xx_pcie2_phy_hwmod = { | 1489 | static struct omap_hwmod dra7xx_pciess2_hwmod = { |
1527 | .name = "pcie2-phy", | 1490 | .name = "pcie2", |
1528 | .class = &dra7xx_pcie_phy_hwmod_class, | 1491 | .class = &dra7xx_pciess_hwmod_class, |
1529 | .clkdm_name = "l3init_clkdm", | 1492 | .clkdm_name = "pcie_clkdm", |
1530 | .main_clk = "l4_root_clk_div", | 1493 | .main_clk = "l4_root_clk_div", |
1531 | .prcm = { | 1494 | .prcm = { |
1532 | .omap4 = { | 1495 | .omap4 = { |
@@ -2877,50 +2840,34 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp3 = { | |||
2877 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2840 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2878 | }; | 2841 | }; |
2879 | 2842 | ||
2880 | /* l3_main_1 -> pcie1 */ | 2843 | /* l3_main_1 -> pciess1 */ |
2881 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie1 = { | 2844 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = { |
2882 | .master = &dra7xx_l3_main_1_hwmod, | 2845 | .master = &dra7xx_l3_main_1_hwmod, |
2883 | .slave = &dra7xx_pcie1_hwmod, | 2846 | .slave = &dra7xx_pciess1_hwmod, |
2884 | .clk = "l3_iclk_div", | 2847 | .clk = "l3_iclk_div", |
2885 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2848 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2886 | }; | 2849 | }; |
2887 | 2850 | ||
2888 | /* l4_cfg -> pcie1 */ | 2851 | /* l4_cfg -> pciess1 */ |
2889 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1 = { | 2852 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = { |
2890 | .master = &dra7xx_l4_cfg_hwmod, | 2853 | .master = &dra7xx_l4_cfg_hwmod, |
2891 | .slave = &dra7xx_pcie1_hwmod, | 2854 | .slave = &dra7xx_pciess1_hwmod, |
2892 | .clk = "l4_root_clk_div", | 2855 | .clk = "l4_root_clk_div", |
2893 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2856 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2894 | }; | 2857 | }; |
2895 | 2858 | ||
2896 | /* l3_main_1 -> pcie2 */ | 2859 | /* l3_main_1 -> pciess2 */ |
2897 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie2 = { | 2860 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = { |
2898 | .master = &dra7xx_l3_main_1_hwmod, | 2861 | .master = &dra7xx_l3_main_1_hwmod, |
2899 | .slave = &dra7xx_pcie2_hwmod, | 2862 | .slave = &dra7xx_pciess2_hwmod, |
2900 | .clk = "l3_iclk_div", | 2863 | .clk = "l3_iclk_div", |
2901 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2864 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2902 | }; | 2865 | }; |
2903 | 2866 | ||
2904 | /* l4_cfg -> pcie2 */ | 2867 | /* l4_cfg -> pciess2 */ |
2905 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2 = { | 2868 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = { |
2906 | .master = &dra7xx_l4_cfg_hwmod, | ||
2907 | .slave = &dra7xx_pcie2_hwmod, | ||
2908 | .clk = "l4_root_clk_div", | ||
2909 | .user = OCP_USER_MPU | OCP_USER_SDMA, | ||
2910 | }; | ||
2911 | |||
2912 | /* l4_cfg -> pcie1 phy */ | ||
2913 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1_phy = { | ||
2914 | .master = &dra7xx_l4_cfg_hwmod, | ||
2915 | .slave = &dra7xx_pcie1_phy_hwmod, | ||
2916 | .clk = "l4_root_clk_div", | ||
2917 | .user = OCP_USER_MPU | OCP_USER_SDMA, | ||
2918 | }; | ||
2919 | |||
2920 | /* l4_cfg -> pcie2 phy */ | ||
2921 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2_phy = { | ||
2922 | .master = &dra7xx_l4_cfg_hwmod, | 2869 | .master = &dra7xx_l4_cfg_hwmod, |
2923 | .slave = &dra7xx_pcie2_phy_hwmod, | 2870 | .slave = &dra7xx_pciess2_hwmod, |
2924 | .clk = "l4_root_clk_div", | 2871 | .clk = "l4_root_clk_div", |
2925 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2872 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2926 | }; | 2873 | }; |
@@ -3327,12 +3274,10 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { | |||
3327 | &dra7xx_l4_cfg__mpu, | 3274 | &dra7xx_l4_cfg__mpu, |
3328 | &dra7xx_l4_cfg__ocp2scp1, | 3275 | &dra7xx_l4_cfg__ocp2scp1, |
3329 | &dra7xx_l4_cfg__ocp2scp3, | 3276 | &dra7xx_l4_cfg__ocp2scp3, |
3330 | &dra7xx_l3_main_1__pcie1, | 3277 | &dra7xx_l3_main_1__pciess1, |
3331 | &dra7xx_l4_cfg__pcie1, | 3278 | &dra7xx_l4_cfg__pciess1, |
3332 | &dra7xx_l3_main_1__pcie2, | 3279 | &dra7xx_l3_main_1__pciess2, |
3333 | &dra7xx_l4_cfg__pcie2, | 3280 | &dra7xx_l4_cfg__pciess2, |
3334 | &dra7xx_l4_cfg__pcie1_phy, | ||
3335 | &dra7xx_l4_cfg__pcie2_phy, | ||
3336 | &dra7xx_l3_main_1__qspi, | 3281 | &dra7xx_l3_main_1__qspi, |
3337 | &dra7xx_l4_per3__rtcss, | 3282 | &dra7xx_l4_per3__rtcss, |
3338 | &dra7xx_l4_cfg__sata, | 3283 | &dra7xx_l4_cfg__sata, |
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 190fa43e7479..e642b079e9f3 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c | |||
@@ -173,6 +173,7 @@ static void __init omap3_igep0030_rev_g_legacy_init(void) | |||
173 | 173 | ||
174 | static void __init omap3_evm_legacy_init(void) | 174 | static void __init omap3_evm_legacy_init(void) |
175 | { | 175 | { |
176 | hsmmc2_internal_input_clk(); | ||
176 | legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149); | 177 | legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149); |
177 | } | 178 | } |
178 | 179 | ||
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index a08a617a6c11..d6d6bc39e05c 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c | |||
@@ -252,10 +252,10 @@ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) | |||
252 | { | 252 | { |
253 | saved_mask[0] = | 253 | saved_mask[0] = |
254 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, | 254 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, |
255 | OMAP4_PRM_IRQSTATUS_MPU_OFFSET); | 255 | OMAP4_PRM_IRQENABLE_MPU_OFFSET); |
256 | saved_mask[1] = | 256 | saved_mask[1] = |
257 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, | 257 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, |
258 | OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); | 258 | OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); |
259 | 259 | ||
260 | omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, | 260 | omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, |
261 | OMAP4_PRM_IRQENABLE_MPU_OFFSET); | 261 | OMAP4_PRM_IRQENABLE_MPU_OFFSET); |
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c index 7d8eab857a93..f6d02e4cbcda 100644 --- a/arch/arm/mach-pxa/idp.c +++ b/arch/arm/mach-pxa/idp.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/platform_data/video-pxafb.h> | 36 | #include <linux/platform_data/video-pxafb.h> |
37 | #include <mach/bitfield.h> | 37 | #include <mach/bitfield.h> |
38 | #include <linux/platform_data/mmc-pxamci.h> | 38 | #include <linux/platform_data/mmc-pxamci.h> |
39 | #include <linux/smc91x.h> | ||
39 | 40 | ||
40 | #include "generic.h" | 41 | #include "generic.h" |
41 | #include "devices.h" | 42 | #include "devices.h" |
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 0eecd83c624e..89a7c06570d3 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | #include <linux/bitops.h> | ||
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
@@ -40,7 +41,6 @@ | |||
40 | #define ICHP_VAL_IRQ (1 << 31) | 41 | #define ICHP_VAL_IRQ (1 << 31) |
41 | #define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) | 42 | #define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) |
42 | #define IPR_VALID (1 << 31) | 43 | #define IPR_VALID (1 << 31) |
43 | #define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f) | ||
44 | 44 | ||
45 | #define MAX_INTERNAL_IRQS 128 | 45 | #define MAX_INTERNAL_IRQS 128 |
46 | 46 | ||
@@ -51,6 +51,7 @@ | |||
51 | static void __iomem *pxa_irq_base; | 51 | static void __iomem *pxa_irq_base; |
52 | static int pxa_internal_irq_nr; | 52 | static int pxa_internal_irq_nr; |
53 | static bool cpu_has_ipr; | 53 | static bool cpu_has_ipr; |
54 | static struct irq_domain *pxa_irq_domain; | ||
54 | 55 | ||
55 | static inline void __iomem *irq_base(int i) | 56 | static inline void __iomem *irq_base(int i) |
56 | { | 57 | { |
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i) | |||
66 | void pxa_mask_irq(struct irq_data *d) | 67 | void pxa_mask_irq(struct irq_data *d) |
67 | { | 68 | { |
68 | void __iomem *base = irq_data_get_irq_chip_data(d); | 69 | void __iomem *base = irq_data_get_irq_chip_data(d); |
70 | irq_hw_number_t irq = irqd_to_hwirq(d); | ||
69 | uint32_t icmr = __raw_readl(base + ICMR); | 71 | uint32_t icmr = __raw_readl(base + ICMR); |
70 | 72 | ||
71 | icmr &= ~(1 << IRQ_BIT(d->irq)); | 73 | icmr &= ~BIT(irq & 0x1f); |
72 | __raw_writel(icmr, base + ICMR); | 74 | __raw_writel(icmr, base + ICMR); |
73 | } | 75 | } |
74 | 76 | ||
75 | void pxa_unmask_irq(struct irq_data *d) | 77 | void pxa_unmask_irq(struct irq_data *d) |
76 | { | 78 | { |
77 | void __iomem *base = irq_data_get_irq_chip_data(d); | 79 | void __iomem *base = irq_data_get_irq_chip_data(d); |
80 | irq_hw_number_t irq = irqd_to_hwirq(d); | ||
78 | uint32_t icmr = __raw_readl(base + ICMR); | 81 | uint32_t icmr = __raw_readl(base + ICMR); |
79 | 82 | ||
80 | icmr |= 1 << IRQ_BIT(d->irq); | 83 | icmr |= BIT(irq & 0x1f); |
81 | __raw_writel(icmr, base + ICMR); | 84 | __raw_writel(icmr, base + ICMR); |
82 | } | 85 | } |
83 | 86 | ||
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs) | |||
118 | } while (1); | 121 | } while (1); |
119 | } | 122 | } |
120 | 123 | ||
121 | void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) | 124 | static int pxa_irq_map(struct irq_domain *h, unsigned int virq, |
125 | irq_hw_number_t hw) | ||
122 | { | 126 | { |
123 | int irq, i, n; | 127 | void __iomem *base = irq_base(hw / 32); |
124 | 128 | ||
125 | BUG_ON(irq_nr > MAX_INTERNAL_IRQS); | 129 | /* initialize interrupt priority */ |
130 | if (cpu_has_ipr) | ||
131 | __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw)); | ||
132 | |||
133 | irq_set_chip_and_handler(virq, &pxa_internal_irq_chip, | ||
134 | handle_level_irq); | ||
135 | irq_set_chip_data(virq, base); | ||
136 | set_irq_flags(virq, IRQF_VALID); | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static struct irq_domain_ops pxa_irq_ops = { | ||
142 | .map = pxa_irq_map, | ||
143 | .xlate = irq_domain_xlate_onecell, | ||
144 | }; | ||
145 | |||
146 | static __init void | ||
147 | pxa_init_irq_common(struct device_node *node, int irq_nr, | ||
148 | int (*fn)(struct irq_data *, unsigned int)) | ||
149 | { | ||
150 | int n; | ||
126 | 151 | ||
127 | pxa_internal_irq_nr = irq_nr; | 152 | pxa_internal_irq_nr = irq_nr; |
128 | cpu_has_ipr = !cpu_is_pxa25x(); | 153 | pxa_irq_domain = irq_domain_add_legacy(node, irq_nr, |
129 | pxa_irq_base = io_p2v(0x40d00000); | 154 | PXA_IRQ(0), 0, |
155 | &pxa_irq_ops, NULL); | ||
156 | if (!pxa_irq_domain) | ||
157 | panic("Unable to add PXA IRQ domain\n"); | ||
158 | irq_set_default_host(pxa_irq_domain); | ||
130 | 159 | ||
131 | for (n = 0; n < irq_nr; n += 32) { | 160 | for (n = 0; n < irq_nr; n += 32) { |
132 | void __iomem *base = irq_base(n >> 5); | 161 | void __iomem *base = irq_base(n >> 5); |
133 | 162 | ||
134 | __raw_writel(0, base + ICMR); /* disable all IRQs */ | 163 | __raw_writel(0, base + ICMR); /* disable all IRQs */ |
135 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ | 164 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ |
136 | for (i = n; (i < (n + 32)) && (i < irq_nr); i++) { | ||
137 | /* initialize interrupt priority */ | ||
138 | if (cpu_has_ipr) | ||
139 | __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i)); | ||
140 | |||
141 | irq = PXA_IRQ(i); | ||
142 | irq_set_chip_and_handler(irq, &pxa_internal_irq_chip, | ||
143 | handle_level_irq); | ||
144 | irq_set_chip_data(irq, base); | ||
145 | set_irq_flags(irq, IRQF_VALID); | ||
146 | } | ||
147 | } | 165 | } |
148 | |||
149 | /* only unmasked interrupts kick us out of idle */ | 166 | /* only unmasked interrupts kick us out of idle */ |
150 | __raw_writel(1, irq_base(0) + ICCR); | 167 | __raw_writel(1, irq_base(0) + ICCR); |
151 | 168 | ||
152 | pxa_internal_irq_chip.irq_set_wake = fn; | 169 | pxa_internal_irq_chip.irq_set_wake = fn; |
153 | } | 170 | } |
154 | 171 | ||
172 | void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) | ||
173 | { | ||
174 | BUG_ON(irq_nr > MAX_INTERNAL_IRQS); | ||
175 | |||
176 | pxa_irq_base = io_p2v(0x40d00000); | ||
177 | cpu_has_ipr = !cpu_is_pxa25x(); | ||
178 | pxa_init_irq_common(NULL, irq_nr, fn); | ||
179 | } | ||
180 | |||
155 | #ifdef CONFIG_PM | 181 | #ifdef CONFIG_PM |
156 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; | 182 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; |
157 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; | 183 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; |
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = { | |||
203 | }; | 229 | }; |
204 | 230 | ||
205 | #ifdef CONFIG_OF | 231 | #ifdef CONFIG_OF |
206 | static struct irq_domain *pxa_irq_domain; | ||
207 | |||
208 | static int pxa_irq_map(struct irq_domain *h, unsigned int virq, | ||
209 | irq_hw_number_t hw) | ||
210 | { | ||
211 | void __iomem *base = irq_base(hw / 32); | ||
212 | |||
213 | /* initialize interrupt priority */ | ||
214 | if (cpu_has_ipr) | ||
215 | __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw)); | ||
216 | |||
217 | irq_set_chip_and_handler(hw, &pxa_internal_irq_chip, | ||
218 | handle_level_irq); | ||
219 | irq_set_chip_data(hw, base); | ||
220 | set_irq_flags(hw, IRQF_VALID); | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | |||
225 | static struct irq_domain_ops pxa_irq_ops = { | ||
226 | .map = pxa_irq_map, | ||
227 | .xlate = irq_domain_xlate_onecell, | ||
228 | }; | ||
229 | |||
230 | static const struct of_device_id intc_ids[] __initconst = { | 232 | static const struct of_device_id intc_ids[] __initconst = { |
231 | { .compatible = "marvell,pxa-intc", }, | 233 | { .compatible = "marvell,pxa-intc", }, |
232 | {} | 234 | {} |
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)) | |||
236 | { | 238 | { |
237 | struct device_node *node; | 239 | struct device_node *node; |
238 | struct resource res; | 240 | struct resource res; |
239 | int n, ret; | 241 | int ret; |
240 | 242 | ||
241 | node = of_find_matching_node(NULL, intc_ids); | 243 | node = of_find_matching_node(NULL, intc_ids); |
242 | if (!node) { | 244 | if (!node) { |
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)) | |||
267 | return; | 269 | return; |
268 | } | 270 | } |
269 | 271 | ||
270 | pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0, | 272 | pxa_init_irq_common(node, pxa_internal_irq_nr, fn); |
271 | &pxa_irq_ops, NULL); | ||
272 | if (!pxa_irq_domain) | ||
273 | panic("Unable to add PXA IRQ domain\n"); | ||
274 | |||
275 | irq_set_default_host(pxa_irq_domain); | ||
276 | |||
277 | for (n = 0; n < pxa_internal_irq_nr; n += 32) { | ||
278 | void __iomem *base = irq_base(n >> 5); | ||
279 | |||
280 | __raw_writel(0, base + ICMR); /* disable all IRQs */ | ||
281 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ | ||
282 | } | ||
283 | |||
284 | /* only unmasked interrupts kick us out of idle */ | ||
285 | __raw_writel(1, irq_base(0) + ICCR); | ||
286 | |||
287 | pxa_internal_irq_chip.irq_set_wake = fn; | ||
288 | } | 273 | } |
289 | #endif /* CONFIG_OF */ | 274 | #endif /* CONFIG_OF */ |
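
A side effect of routing the legacy PXA interrupts through an irq_domain, as the hunks above do, is that the Linux virq number no longer has to equal the hardware interrupt number, so register bit positions are derived from irqd_to_hwirq() rather than from d->irq. A minimal hedged sketch of that part:

	#include <linux/bitops.h>
	#include <linux/irq.h>

	/* Bit position of an interrupt inside its 32-bit mask register, taken
	 * from the hardware number, not the Linux virq. */
	static u32 icmr_bit(struct irq_data *d)
	{
		return BIT(irqd_to_hwirq(d) & 0x1f);
	}
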
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index 28da319d389f..eaee2c20b189 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c | |||
@@ -195,7 +195,7 @@ static struct resource smc91x_resources[] = { | |||
195 | }; | 195 | }; |
196 | 196 | ||
197 | struct smc91x_platdata smc91x_platdata = { | 197 | struct smc91x_platdata smc91x_platdata = { |
198 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT; | 198 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, |
199 | }; | 199 | }; |
200 | 200 | ||
201 | static struct platform_device smc91x_device = { | 201 | static struct platform_device smc91x_device = { |
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 205f9bf3821e..ac2ae5c71ab4 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c | |||
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = { | |||
412 | }; | 412 | }; |
413 | 413 | ||
414 | static struct platform_device can_regulator_device = { | 414 | static struct platform_device can_regulator_device = { |
415 | .name = "reg-fixed-volage", | 415 | .name = "reg-fixed-voltage", |
416 | .id = 0, | 416 | .id = 0, |
417 | .dev = { | 417 | .dev = { |
418 | .platform_data = &can_regulator_pdata, | 418 | .platform_data = &can_regulator_pdata, |
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 7b0cd3172354..af868d258e66 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c | |||
@@ -268,8 +268,8 @@ static int neponset_probe(struct platform_device *dev) | |||
268 | .id = 0, | 268 | .id = 0, |
269 | .res = smc91x_resources, | 269 | .res = smc91x_resources, |
270 | .num_res = ARRAY_SIZE(smc91x_resources), | 270 | .num_res = ARRAY_SIZE(smc91x_resources), |
271 | .data = &smc91c_platdata, | 271 | .data = &smc91x_platdata, |
272 | .size_data = sizeof(smc91c_platdata), | 272 | .size_data = sizeof(smc91x_platdata), |
273 | }; | 273 | }; |
274 | int ret, irq; | 274 | int ret, irq; |
275 | 275 | ||
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 696fd0fe4806..1525d7b5f1b7 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c | |||
@@ -54,7 +54,7 @@ static struct platform_device smc91x_device = { | |||
54 | .num_resources = ARRAY_SIZE(smc91x_resources), | 54 | .num_resources = ARRAY_SIZE(smc91x_resources), |
55 | .resource = smc91x_resources, | 55 | .resource = smc91x_resources, |
56 | .dev = { | 56 | .dev = { |
57 | .platform_data = &smc91c_platdata, | 57 | .platform_data = &smc91x_platdata, |
58 | }, | 58 | }, |
59 | }; | 59 | }; |
60 | 60 | ||
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h index 483cb467bf65..a0f3b1cd497c 100644 --- a/arch/arm/mach-socfpga/core.h +++ b/arch/arm/mach-socfpga/core.h | |||
@@ -45,6 +45,6 @@ extern char secondary_trampoline, secondary_trampoline_end; | |||
45 | 45 | ||
46 | extern unsigned long socfpga_cpu1start_addr; | 46 | extern unsigned long socfpga_cpu1start_addr; |
47 | 47 | ||
48 | #define SOCFPGA_SCU_VIRT_BASE 0xfffec000 | 48 | #define SOCFPGA_SCU_VIRT_BASE 0xfee00000 |
49 | 49 | ||
50 | #endif | 50 | #endif |
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 383d61e138af..f5e597c207b9 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/hardware/cache-l2x0.h> | 23 | #include <asm/hardware/cache-l2x0.h> |
24 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
25 | #include <asm/mach/map.h> | 25 | #include <asm/mach/map.h> |
26 | #include <asm/cacheflush.h> | ||
26 | 27 | ||
27 | #include "core.h" | 28 | #include "core.h" |
28 | 29 | ||
@@ -73,6 +74,10 @@ void __init socfpga_sysmgr_init(void) | |||
73 | (u32 *) &socfpga_cpu1start_addr)) | 74 | (u32 *) &socfpga_cpu1start_addr)) |
74 | pr_err("SMP: Need cpu1-start-addr in device tree.\n"); | 75 | pr_err("SMP: Need cpu1-start-addr in device tree.\n"); |
75 | 76 | ||
77 | /* Ensure that socfpga_cpu1start_addr is visible to other CPUs */ | ||
78 | smp_wmb(); | ||
79 | sync_cache_w(&socfpga_cpu1start_addr); | ||
80 | |||
76 | sys_manager_base_addr = of_iomap(np, 0); | 81 | sys_manager_base_addr = of_iomap(np, 0); |
77 | 82 | ||
78 | np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); | 83 | np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); |
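
The socfpga change above makes socfpga_cpu1start_addr safe to consume from the secondary core, which may still have its caches and MMU off: after the write barrier, the variable's cache line is also cleaned to the point of coherency. A generic hedged sketch of that publish pattern:

	#include <asm/barrier.h>
	#include <asm/cacheflush.h>

	/* Publish a value for a CPU that may read it with caches disabled. */
	static void publish_for_cache_off_reader(unsigned long *addr, unsigned long val)
	{
		*addr = val;
		smp_wmb();		/* order the store against later signalling */
		sync_cache_w(addr);	/* clean the line out to main memory */
	}
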
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c index b067390cef4e..b373acade338 100644 --- a/arch/arm/mach-sti/board-dt.c +++ b/arch/arm/mach-sti/board-dt.c | |||
@@ -18,6 +18,7 @@ static const char *stih41x_dt_match[] __initdata = { | |||
18 | "st,stih415", | 18 | "st,stih415", |
19 | "st,stih416", | 19 | "st,stih416", |
20 | "st,stih407", | 20 | "st,stih407", |
21 | "st,stih410", | ||
21 | "st,stih418", | 22 | "st,stih418", |
22 | NULL | 23 | NULL |
23 | }; | 24 | }; |
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index a77604fbaf25..81502b90dd91 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig | |||
@@ -1,10 +1,12 @@ | |||
1 | menuconfig ARCH_SUNXI | 1 | menuconfig ARCH_SUNXI |
2 | bool "Allwinner SoCs" if ARCH_MULTI_V7 | 2 | bool "Allwinner SoCs" if ARCH_MULTI_V7 |
3 | select ARCH_REQUIRE_GPIOLIB | 3 | select ARCH_REQUIRE_GPIOLIB |
4 | select ARCH_HAS_RESET_CONTROLLER | ||
4 | select CLKSRC_MMIO | 5 | select CLKSRC_MMIO |
5 | select GENERIC_IRQ_CHIP | 6 | select GENERIC_IRQ_CHIP |
6 | select PINCTRL | 7 | select PINCTRL |
7 | select SUN4I_TIMER | 8 | select SUN4I_TIMER |
9 | select RESET_CONTROLLER | ||
8 | 10 | ||
9 | if ARCH_SUNXI | 11 | if ARCH_SUNXI |
10 | 12 | ||
@@ -20,10 +22,8 @@ config MACH_SUN5I | |||
20 | config MACH_SUN6I | 22 | config MACH_SUN6I |
21 | bool "Allwinner A31 (sun6i) SoCs support" | 23 | bool "Allwinner A31 (sun6i) SoCs support" |
22 | default ARCH_SUNXI | 24 | default ARCH_SUNXI |
23 | select ARCH_HAS_RESET_CONTROLLER | ||
24 | select ARM_GIC | 25 | select ARM_GIC |
25 | select MFD_SUN6I_PRCM | 26 | select MFD_SUN6I_PRCM |
26 | select RESET_CONTROLLER | ||
27 | select SUN5I_HSTIMER | 27 | select SUN5I_HSTIMER |
28 | 28 | ||
29 | config MACH_SUN7I | 29 | config MACH_SUN7I |
@@ -37,16 +37,12 @@ config MACH_SUN7I | |||
37 | config MACH_SUN8I | 37 | config MACH_SUN8I |
38 | bool "Allwinner A23 (sun8i) SoCs support" | 38 | bool "Allwinner A23 (sun8i) SoCs support" |
39 | default ARCH_SUNXI | 39 | default ARCH_SUNXI |
40 | select ARCH_HAS_RESET_CONTROLLER | ||
41 | select ARM_GIC | 40 | select ARM_GIC |
42 | select MFD_SUN6I_PRCM | 41 | select MFD_SUN6I_PRCM |
43 | select RESET_CONTROLLER | ||
44 | 42 | ||
45 | config MACH_SUN9I | 43 | config MACH_SUN9I |
46 | bool "Allwinner (sun9i) SoCs support" | 44 | bool "Allwinner (sun9i) SoCs support" |
47 | default ARCH_SUNXI | 45 | default ARCH_SUNXI |
48 | select ARCH_HAS_RESET_CONTROLLER | ||
49 | select ARM_GIC | 46 | select ARM_GIC |
50 | select RESET_CONTROLLER | ||
51 | 47 | ||
52 | endif | 48 | endif |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c6c7696b8db9..8f15f70622a6 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -1131,23 +1131,22 @@ static void __init l2c310_of_parse(const struct device_node *np, | |||
1131 | } | 1131 | } |
1132 | 1132 | ||
1133 | ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); | 1133 | ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); |
1134 | if (ret) | 1134 | if (!ret) { |
1135 | return; | 1135 | switch (assoc) { |
1136 | 1136 | case 16: | |
1137 | switch (assoc) { | 1137 | *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; |
1138 | case 16: | 1138 | *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; |
1139 | *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; | 1139 | *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; |
1140 | *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; | 1140 | break; |
1141 | *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; | 1141 | case 8: |
1142 | break; | 1142 | *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; |
1143 | case 8: | 1143 | *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; |
1144 | *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; | 1144 | break; |
1145 | *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; | 1145 | default: |
1146 | break; | 1146 | pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", |
1147 | default: | 1147 | assoc); |
1148 | pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", | 1148 | break; |
1149 | assoc); | 1149 | } |
1150 | break; | ||
1151 | } | 1150 | } |
1152 | 1151 | ||
1153 | prefetch = l2x0_saved_regs.prefetch_ctrl; | 1152 | prefetch = l2x0_saved_regs.prefetch_ctrl; |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 170a116d1b29..c27447653903 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -171,7 +171,7 @@ static int __dma_supported(struct device *dev, u64 mask, bool warn) | |||
171 | */ | 171 | */ |
172 | if (sizeof(mask) != sizeof(dma_addr_t) && | 172 | if (sizeof(mask) != sizeof(dma_addr_t) && |
173 | mask > (dma_addr_t)~0 && | 173 | mask > (dma_addr_t)~0 && |
174 | dma_to_pfn(dev, ~0) < max_pfn) { | 174 | dma_to_pfn(dev, ~0) < max_pfn - 1) { |
175 | if (warn) { | 175 | if (warn) { |
176 | dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", | 176 | dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", |
177 | mask); | 177 | mask); |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index a982dc3190df..6333d9c17875 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -552,6 +552,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
552 | 552 | ||
553 | pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", | 553 | pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", |
554 | inf->name, fsr, addr); | 554 | inf->name, fsr, addr); |
555 | show_pte(current->mm, addr); | ||
555 | 556 | ||
556 | info.si_signo = inf->sig; | 557 | info.si_signo = inf->sig; |
557 | info.si_errno = 0; | 558 | info.si_errno = 0; |
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c index 004e35cdcfff..cf30daff8932 100644 --- a/arch/arm/mm/pageattr.c +++ b/arch/arm/mm/pageattr.c | |||
@@ -49,7 +49,10 @@ static int change_memory_common(unsigned long addr, int numpages, | |||
49 | WARN_ON_ONCE(1); | 49 | WARN_ON_ONCE(1); |
50 | } | 50 | } |
51 | 51 | ||
52 | if (!is_module_address(start) || !is_module_address(end - 1)) | 52 | if (start < MODULES_VADDR || start >= MODULES_END) |
53 | return -EINVAL; | ||
54 | |||
55 | if (end < MODULES_VADDR || start >= MODULES_END) | ||
53 | return -EINVAL; | 56 | return -EINVAL; |
54 | 57 | ||
55 | data.set_mask = set_mask; | 58 | data.set_mask = set_mask; |
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index db10169a08de..8ca94d379bc3 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c | |||
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
799 | struct device *dev = &pdev->dev; | 799 | struct device *dev = &pdev->dev; |
800 | const struct of_device_id *match; | 800 | const struct of_device_id *match; |
801 | const struct dmtimer_platform_data *pdata; | 801 | const struct dmtimer_platform_data *pdata; |
802 | int ret; | ||
802 | 803 | ||
803 | match = of_match_device(of_match_ptr(omap_timer_match), dev); | 804 | match = of_match_device(of_match_ptr(omap_timer_match), dev); |
804 | pdata = match ? match->data : dev->platform_data; | 805 | pdata = match ? match->data : dev->platform_data; |
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
860 | } | 861 | } |
861 | 862 | ||
862 | if (!timer->reserved) { | 863 | if (!timer->reserved) { |
863 | pm_runtime_get_sync(dev); | 864 | ret = pm_runtime_get_sync(dev); |
865 | if (ret < 0) { | ||
866 | dev_err(dev, "%s: pm_runtime_get_sync failed!\n", | ||
867 | __func__); | ||
868 | goto err_get_sync; | ||
869 | } | ||
864 | __omap_dm_timer_init_regs(timer); | 870 | __omap_dm_timer_init_regs(timer); |
865 | pm_runtime_put(dev); | 871 | pm_runtime_put(dev); |
866 | } | 872 | } |
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
873 | dev_dbg(dev, "Device Probed.\n"); | 879 | dev_dbg(dev, "Device Probed.\n"); |
874 | 880 | ||
875 | return 0; | 881 | return 0; |
882 | |||
883 | err_get_sync: | ||
884 | pm_runtime_put_noidle(dev); | ||
885 | pm_runtime_disable(dev); | ||
886 | return ret; | ||
876 | } | 887 | } |
877 | 888 | ||
878 | /** | 889 | /** |
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev) | |||
899 | } | 910 | } |
900 | spin_unlock_irqrestore(&dm_timer_lock, flags); | 911 | spin_unlock_irqrestore(&dm_timer_lock, flags); |
901 | 912 | ||
913 | pm_runtime_disable(&pdev->dev); | ||
914 | |||
902 | return ret; | 915 | return ret; |
903 | } | 916 | } |
904 | 917 | ||
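A failed pm_runtime_get_sync() still leaves the device usage count raised, which is why the new error path pairs it with pm_runtime_put_noidle() and then disables runtime PM before probe bails out. The same idiom in isolation (a sketch, not the driver's exact code):

        #include <linux/pm_runtime.h>

        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                /* drop the usage count taken by the failed get */
                pm_runtime_put_noidle(dev);
                pm_runtime_disable(dev);
                return ret;
        }
        /* ... access device registers ... */
        pm_runtime_put(dev);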
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index f1ad9c2ab2e9..a857794432d6 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi | |||
@@ -622,7 +622,7 @@ | |||
622 | }; | 622 | }; |
623 | 623 | ||
624 | sgenet0: ethernet@1f210000 { | 624 | sgenet0: ethernet@1f210000 { |
625 | compatible = "apm,xgene-enet"; | 625 | compatible = "apm,xgene1-sgenet"; |
626 | status = "disabled"; | 626 | status = "disabled"; |
627 | reg = <0x0 0x1f210000 0x0 0xd100>, | 627 | reg = <0x0 0x1f210000 0x0 0xd100>, |
628 | <0x0 0x1f200000 0x0 0Xc300>, | 628 | <0x0 0x1f200000 0x0 0Xc300>, |
@@ -636,7 +636,7 @@ | |||
636 | }; | 636 | }; |
637 | 637 | ||
638 | xgenet: ethernet@1f610000 { | 638 | xgenet: ethernet@1f610000 { |
639 | compatible = "apm,xgene-enet"; | 639 | compatible = "apm,xgene1-xgenet"; |
640 | status = "disabled"; | 640 | status = "disabled"; |
641 | reg = <0x0 0x1f610000 0x0 0xd100>, | 641 | reg = <0x0 0x1f610000 0x0 0xd100>, |
642 | <0x0 0x1f600000 0x0 0Xc300>, | 642 | <0x0 0x1f600000 0x0 0Xc300>, |
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi index ea2b5666a16f..c9b89efe0f56 100644 --- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi +++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi | |||
@@ -8,7 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* SoC fixed clocks */ | 10 | /* SoC fixed clocks */ |
11 | soc_uartclk: refclk72738khz { | 11 | soc_uartclk: refclk7273800hz { |
12 | compatible = "fixed-clock"; | 12 | compatible = "fixed-clock"; |
13 | #clock-cells = <0>; | 13 | #clock-cells = <0>; |
14 | clock-frequency = <7273800>; | 14 | clock-frequency = <7273800>; |
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index cb9593079f29..d8c25b7b18fb 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h | |||
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, | |||
246 | __ret; \ | 246 | __ret; \ |
247 | }) | 247 | }) |
248 | 248 | ||
249 | #define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 249 | #define _protect_cmpxchg_local(pcp, o, n) \ |
250 | #define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 250 | ({ \ |
251 | #define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 251 | typeof(*raw_cpu_ptr(&(pcp))) __ret; \ |
252 | #define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) | 252 | preempt_disable(); \ |
253 | 253 | __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ | |
254 | #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ | 254 | preempt_enable(); \ |
255 | cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \ | 255 | __ret; \ |
256 | o1, o2, n1, n2) | 256 | }) |
257 | |||
258 | #define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
259 | #define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
260 | #define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
261 | #define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | ||
262 | |||
263 | #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ | ||
264 | ({ \ | ||
265 | int __ret; \ | ||
266 | preempt_disable(); \ | ||
267 | __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \ | ||
268 | raw_cpu_ptr(&(ptr2)), \ | ||
269 | o1, o2, n1, n2); \ | ||
270 | preempt_enable(); \ | ||
271 | __ret; \ | ||
272 | }) | ||
257 | 273 | ||
258 | #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) | 274 | #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) |
259 | #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) | 275 | #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) |
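The _protect_cmpxchg_local() wrapper closes a preemption race: raw_cpu_ptr() resolves this CPU's copy first, and without preempt_disable() the task could migrate before cmpxchg_local() executes, so the supposedly local compare-and-swap would land on another CPU's variable that may be updated there without atomics. A minimal usage sketch of the this_cpu_cmpxchg() interface these macros back (the variable and function names are made up for illustration):

        #include <linux/percpu.h>

        static DEFINE_PER_CPU(unsigned long, hiwater);  /* illustrative per-CPU variable */

        static void note_depth(unsigned long depth)
        {
                unsigned long old;

                do {
                        old = this_cpu_read(hiwater);
                        if (depth <= old)
                                return;         /* current high-water mark already larger */
                } while (this_cpu_cmpxchg(hiwater, old, depth) != old);
        }

this_cpu_cmpxchg() returns the previous value, so the loop retries only when another update slipped in between the read and the compare-and-swap.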
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 94674eb7e7bb..54bb4ba97441 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h | |||
@@ -129,6 +129,9 @@ | |||
129 | * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are | 129 | * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are |
130 | * not known to exist and will break with this configuration. | 130 | * not known to exist and will break with this configuration. |
131 | * | 131 | * |
132 | * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time | ||
133 | * (see hyp-init.S). | ||
134 | * | ||
132 | * Note that when using 4K pages, we concatenate two first level page tables | 135 | * Note that when using 4K pages, we concatenate two first level page tables |
133 | * together. | 136 | * together. |
134 | * | 137 | * |
@@ -138,7 +141,6 @@ | |||
138 | #ifdef CONFIG_ARM64_64K_PAGES | 141 | #ifdef CONFIG_ARM64_64K_PAGES |
139 | /* | 142 | /* |
140 | * Stage2 translation configuration: | 143 | * Stage2 translation configuration: |
141 | * 40bits output (PS = 2) | ||
142 | * 40bits input (T0SZ = 24) | 144 | * 40bits input (T0SZ = 24) |
143 | * 64kB pages (TG0 = 1) | 145 | * 64kB pages (TG0 = 1) |
144 | * 2 level page tables (SL = 1) | 146 | * 2 level page tables (SL = 1) |
@@ -150,7 +152,6 @@ | |||
150 | #else | 152 | #else |
151 | /* | 153 | /* |
152 | * Stage2 translation configuration: | 154 | * Stage2 translation configuration: |
153 | * 40bits output (PS = 2) | ||
154 | * 40bits input (T0SZ = 24) | 155 | * 40bits input (T0SZ = 24) |
155 | * 4kB pages (TG0 = 0) | 156 | * 4kB pages (TG0 = 0) |
156 | * 3 level page tables (SL = 1) | 157 | * 3 level page tables (SL = 1) |
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6458b5373142..bbfb600fa822 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
@@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) | |||
158 | #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) | 158 | #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) |
159 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) | 159 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) |
160 | 160 | ||
161 | #define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) | ||
162 | |||
161 | /* | 163 | /* |
162 | * If we are concatenating first level stage-2 page tables, we would have less | 164 | * If we are concatenating first level stage-2 page tables, we would have less |
163 | * than or equal to 16 pointers in the fake PGD, because that's what the | 165 | * than or equal to 16 pointers in the fake PGD, because that's what the |
@@ -171,43 +173,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) | |||
171 | #define KVM_PREALLOC_LEVEL (0) | 173 | #define KVM_PREALLOC_LEVEL (0) |
172 | #endif | 174 | #endif |
173 | 175 | ||
174 | /** | ||
175 | * kvm_prealloc_hwpgd - allocate inital table for VTTBR | ||
176 | * @kvm: The KVM struct pointer for the VM. | ||
177 | * @pgd: The kernel pseudo pgd | ||
178 | * | ||
179 | * When the kernel uses more levels of page tables than the guest, we allocate | ||
180 | * a fake PGD and pre-populate it to point to the next-level page table, which | ||
181 | * will be the real initial page table pointed to by the VTTBR. | ||
182 | * | ||
183 | * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and | ||
184 | * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we | ||
185 | * allocate 2 consecutive PUD pages. | ||
186 | */ | ||
187 | static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) | ||
188 | { | ||
189 | unsigned int i; | ||
190 | unsigned long hwpgd; | ||
191 | |||
192 | if (KVM_PREALLOC_LEVEL == 0) | ||
193 | return 0; | ||
194 | |||
195 | hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT); | ||
196 | if (!hwpgd) | ||
197 | return -ENOMEM; | ||
198 | |||
199 | for (i = 0; i < PTRS_PER_S2_PGD; i++) { | ||
200 | if (KVM_PREALLOC_LEVEL == 1) | ||
201 | pgd_populate(NULL, pgd + i, | ||
202 | (pud_t *)hwpgd + i * PTRS_PER_PUD); | ||
203 | else if (KVM_PREALLOC_LEVEL == 2) | ||
204 | pud_populate(NULL, pud_offset(pgd, 0) + i, | ||
205 | (pmd_t *)hwpgd + i * PTRS_PER_PMD); | ||
206 | } | ||
207 | |||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static inline void *kvm_get_hwpgd(struct kvm *kvm) | 176 | static inline void *kvm_get_hwpgd(struct kvm *kvm) |
212 | { | 177 | { |
213 | pgd_t *pgd = kvm->arch.pgd; | 178 | pgd_t *pgd = kvm->arch.pgd; |
@@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm) | |||
224 | return pmd_offset(pud, 0); | 189 | return pmd_offset(pud, 0); |
225 | } | 190 | } |
226 | 191 | ||
227 | static inline void kvm_free_hwpgd(struct kvm *kvm) | 192 | static inline unsigned int kvm_get_hwpgd_size(void) |
228 | { | 193 | { |
229 | if (KVM_PREALLOC_LEVEL > 0) { | 194 | if (KVM_PREALLOC_LEVEL > 0) |
230 | unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); | 195 | return PTRS_PER_S2_PGD * PAGE_SIZE; |
231 | free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); | 196 | return PTRS_PER_S2_PGD * sizeof(pgd_t); |
232 | } | ||
233 | } | 197 | } |
234 | 198 | ||
235 | static inline bool kvm_page_empty(void *ptr) | 199 | static inline bool kvm_page_empty(void *ptr) |
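With kvm_prealloc_hwpgd() and kvm_free_hwpgd() removed, this header only reports the required size and the shared KVM/ARM code performs the allocation. A minimal sketch of that caller side, assuming alloc_pages_exact() since the size is no longer guaranteed to be a power-of-two number of pages (the helper names here are illustrative):

        #include <linux/gfp.h>

        static void *alloc_hwpgd(void)
        {
                return alloc_pages_exact(kvm_get_hwpgd_size(),
                                         GFP_KERNEL | __GFP_ZERO);
        }

        static void free_hwpgd(void *hwpgd)
        {
                free_pages_exact(hwpgd, kvm_get_hwpgd_size());
        }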
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index a9eee33dfa62..101a42bde728 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h | |||
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
151 | { | 151 | { |
152 | unsigned int cpu = smp_processor_id(); | 152 | unsigned int cpu = smp_processor_id(); |
153 | 153 | ||
154 | /* | ||
155 | * init_mm.pgd does not contain any user mappings and it is always | ||
156 | * active for kernel addresses in TTBR1. Just set the reserved TTBR0. | ||
157 | */ | ||
158 | if (next == &init_mm) { | ||
159 | cpu_set_reserved_ttbr0(); | ||
160 | return; | ||
161 | } | ||
162 | |||
154 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) | 163 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) |
155 | check_and_switch_context(next, tsk); | 164 | check_and_switch_context(next, tsk); |
156 | } | 165 | } |
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 09da25bc596f..4fde8c1df97f 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h | |||
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, | |||
204 | return ret; | 204 | return ret; |
205 | } | 205 | } |
206 | 206 | ||
207 | #define _percpu_read(pcp) \ | ||
208 | ({ \ | ||
209 | typeof(pcp) __retval; \ | ||
210 | preempt_disable(); \ | ||
211 | __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \ | ||
212 | sizeof(pcp)); \ | ||
213 | preempt_enable(); \ | ||
214 | __retval; \ | ||
215 | }) | ||
216 | |||
217 | #define _percpu_write(pcp, val) \ | ||
218 | do { \ | ||
219 | preempt_disable(); \ | ||
220 | __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \ | ||
221 | sizeof(pcp)); \ | ||
222 | preempt_enable(); \ | ||
223 | } while(0) \ | ||
224 | |||
225 | #define _pcp_protect(operation, pcp, val) \ | ||
226 | ({ \ | ||
227 | typeof(pcp) __retval; \ | ||
228 | preempt_disable(); \ | ||
229 | __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \ | ||
230 | (val), sizeof(pcp)); \ | ||
231 | preempt_enable(); \ | ||
232 | __retval; \ | ||
233 | }) | ||
234 | |||
207 | #define _percpu_add(pcp, val) \ | 235 | #define _percpu_add(pcp, val) \ |
208 | __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | 236 | _pcp_protect(__percpu_add, pcp, val) |
209 | 237 | ||
210 | #define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val)) | 238 | #define _percpu_add_return(pcp, val) _percpu_add(pcp, val) |
211 | 239 | ||
212 | #define _percpu_and(pcp, val) \ | 240 | #define _percpu_and(pcp, val) \ |
213 | __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | 241 | _pcp_protect(__percpu_and, pcp, val) |
214 | 242 | ||
215 | #define _percpu_or(pcp, val) \ | 243 | #define _percpu_or(pcp, val) \ |
216 | __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) | 244 | _pcp_protect(__percpu_or, pcp, val) |
217 | |||
218 | #define _percpu_read(pcp) (typeof(pcp)) \ | ||
219 | (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp))) | ||
220 | |||
221 | #define _percpu_write(pcp, val) \ | ||
222 | __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)) | ||
223 | 245 | ||
224 | #define _percpu_xchg(pcp, val) (typeof(pcp)) \ | 246 | #define _percpu_xchg(pcp, val) (typeof(pcp)) \ |
225 | (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))) | 247 | _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)) |
226 | 248 | ||
227 | #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) | 249 | #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) |
228 | #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) | 250 | #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) |
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 9a8fd84f8fb2..941c375616e2 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h | |||
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); | |||
39 | 39 | ||
40 | #include <asm/memory.h> | 40 | #include <asm/memory.h> |
41 | 41 | ||
42 | #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) | 42 | #define cpu_switch_mm(pgd,mm) \ |
43 | do { \ | ||
44 | BUG_ON(pgd == swapper_pg_dir); \ | ||
45 | cpu_do_switch_mm(virt_to_phys(pgd),mm); \ | ||
46 | } while (0) | ||
43 | 47 | ||
44 | #define cpu_get_pgd() \ | 48 | #define cpu_get_pgd() \ |
45 | ({ \ | 49 | ({ \ |
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index c028fe37456f..53d9c354219f 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb) | |||
48 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | 48 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, |
49 | unsigned long addr) | 49 | unsigned long addr) |
50 | { | 50 | { |
51 | __flush_tlb_pgtable(tlb->mm, addr); | ||
51 | pgtable_page_dtor(pte); | 52 | pgtable_page_dtor(pte); |
52 | tlb_remove_entry(tlb, pte); | 53 | tlb_remove_entry(tlb, pte); |
53 | } | 54 | } |
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
56 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | 57 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, |
57 | unsigned long addr) | 58 | unsigned long addr) |
58 | { | 59 | { |
60 | __flush_tlb_pgtable(tlb->mm, addr); | ||
59 | tlb_remove_entry(tlb, virt_to_page(pmdp)); | 61 | tlb_remove_entry(tlb, virt_to_page(pmdp)); |
60 | } | 62 | } |
61 | #endif | 63 | #endif |
@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | |||
64 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, | 66 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, |
65 | unsigned long addr) | 67 | unsigned long addr) |
66 | { | 68 | { |
69 | __flush_tlb_pgtable(tlb->mm, addr); | ||
67 | tlb_remove_entry(tlb, virt_to_page(pudp)); | 70 | tlb_remove_entry(tlb, virt_to_page(pudp)); |
68 | } | 71 | } |
69 | #endif | 72 | #endif |
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 4abe9b945f77..c3bb05b98616 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h | |||
@@ -144,6 +144,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end | |||
144 | } | 144 | } |
145 | 145 | ||
146 | /* | 146 | /* |
147 | * Used to invalidate the TLB (walk caches) corresponding to intermediate page | ||
148 | * table levels (pgd/pud/pmd). | ||
149 | */ | ||
150 | static inline void __flush_tlb_pgtable(struct mm_struct *mm, | ||
151 | unsigned long uaddr) | ||
152 | { | ||
153 | unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48); | ||
154 | |||
155 | dsb(ishst); | ||
156 | asm("tlbi vae1is, %0" : : "r" (addr)); | ||
157 | dsb(ish); | ||
158 | } | ||
159 | /* | ||
147 | * On AArch64, the cache coherency is handled via the set_pte_at() function. | 160 | * On AArch64, the cache coherency is handled via the set_pte_at() function. |
148 | */ | 161 | */ |
149 | static inline void update_mmu_cache(struct vm_area_struct *vma, | 162 | static inline void update_mmu_cache(struct vm_area_struct *vma, |
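For reference, the value passed to TLBI VAE1IS packs the page number and the ASID into a single register, which is exactly what the shift-and-or above builds (operand layout per the ARMv8 ARM, 4 KB granule assumed):

        Xt[43:0]  = VA[55:12]   ->  uaddr >> 12
        Xt[63:48] = ASID        ->  (unsigned long)ASID(mm) << 48

The leading dsb(ishst) makes the page-table update visible before the invalidate is issued, and the trailing dsb(ish) waits for the invalidation to complete across the inner-shareable domain, so a freed table page cannot still be referenced by a cached walk.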
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index b42c7b480e1e..ab21e0d58278 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c | |||
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init); | |||
337 | 337 | ||
338 | static void efi_set_pgd(struct mm_struct *mm) | 338 | static void efi_set_pgd(struct mm_struct *mm) |
339 | { | 339 | { |
340 | cpu_switch_mm(mm->pgd, mm); | 340 | if (mm == &init_mm) |
341 | cpu_set_reserved_ttbr0(); | ||
342 | else | ||
343 | cpu_switch_mm(mm->pgd, mm); | ||
344 | |||
341 | flush_tlb_all(); | 345 | flush_tlb_all(); |
342 | if (icache_is_aivivt()) | 346 | if (icache_is_aivivt()) |
343 | __flush_icache_all(); | 347 | __flush_icache_all(); |
@@ -354,3 +358,12 @@ void efi_virtmap_unload(void) | |||
354 | efi_set_pgd(current->active_mm); | 358 | efi_set_pgd(current->active_mm); |
355 | preempt_enable(); | 359 | preempt_enable(); |
356 | } | 360 | } |
361 | |||
362 | /* | ||
363 | * UpdateCapsule() depends on the system being shutdown via | ||
364 | * ResetSystem(). | ||
365 | */ | ||
366 | bool efi_poweroff_required(void) | ||
367 | { | ||
368 | return efi_enabled(EFI_RUNTIME_SERVICES); | ||
369 | } | ||
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 8ce88e08c030..07f930540f4a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -585,8 +585,8 @@ ENDPROC(set_cpu_boot_mode_flag) | |||
585 | * zeroing of .bss would clobber it. | 585 | * zeroing of .bss would clobber it. |
586 | */ | 586 | */ |
587 | .pushsection .data..cacheline_aligned | 587 | .pushsection .data..cacheline_aligned |
588 | ENTRY(__boot_cpu_mode) | ||
589 | .align L1_CACHE_SHIFT | 588 | .align L1_CACHE_SHIFT |
589 | ENTRY(__boot_cpu_mode) | ||
590 | .long BOOT_CPU_MODE_EL2 | 590 | .long BOOT_CPU_MODE_EL2 |
591 | .long 0 | 591 | .long 0 |
592 | .popsection | 592 | .popsection |
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index fde9923af859..c6b1f3b96f45 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <stdarg.h> | 21 | #include <stdarg.h> |
22 | 22 | ||
23 | #include <linux/compat.h> | 23 | #include <linux/compat.h> |
24 | #include <linux/efi.h> | ||
24 | #include <linux/export.h> | 25 | #include <linux/export.h> |
25 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
26 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
@@ -150,6 +151,13 @@ void machine_restart(char *cmd) | |||
150 | local_irq_disable(); | 151 | local_irq_disable(); |
151 | smp_send_stop(); | 152 | smp_send_stop(); |
152 | 153 | ||
154 | /* | ||
155 | * UpdateCapsule() depends on the system being reset via | ||
156 | * ResetSystem(). | ||
157 | */ | ||
158 | if (efi_enabled(EFI_RUNTIME_SERVICES)) | ||
159 | efi_reboot(reboot_mode, NULL); | ||
160 | |||
153 | /* Now call the architecture specific reboot code. */ | 161 | /* Now call the architecture specific reboot code. */ |
154 | if (arm_pm_restart) | 162 | if (arm_pm_restart) |
155 | arm_pm_restart(reboot_mode, cmd); | 163 | arm_pm_restart(reboot_mode, cmd); |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 58e0c2bdde04..ef7d112f5ce0 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p) | |||
51 | } | 51 | } |
52 | early_param("coherent_pool", early_coherent_pool); | 52 | early_param("coherent_pool", early_coherent_pool); |
53 | 53 | ||
54 | static void *__alloc_from_pool(size_t size, struct page **ret_page) | 54 | static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) |
55 | { | 55 | { |
56 | unsigned long val; | 56 | unsigned long val; |
57 | void *ptr = NULL; | 57 | void *ptr = NULL; |
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) | |||
67 | 67 | ||
68 | *ret_page = phys_to_page(phys); | 68 | *ret_page = phys_to_page(phys); |
69 | ptr = (void *)val; | 69 | ptr = (void *)val; |
70 | if (flags & __GFP_ZERO) | ||
71 | memset(ptr, 0, size); | ||
70 | } | 72 | } |
71 | 73 | ||
72 | return ptr; | 74 | return ptr; |
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, | |||
101 | flags |= GFP_DMA; | 103 | flags |= GFP_DMA; |
102 | if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { | 104 | if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { |
103 | struct page *page; | 105 | struct page *page; |
106 | void *addr; | ||
104 | 107 | ||
105 | size = PAGE_ALIGN(size); | 108 | size = PAGE_ALIGN(size); |
106 | page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, | 109 | page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, |
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, | |||
109 | return NULL; | 112 | return NULL; |
110 | 113 | ||
111 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); | 114 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); |
112 | return page_address(page); | 115 | addr = page_address(page); |
116 | if (flags & __GFP_ZERO) | ||
117 | memset(addr, 0, size); | ||
118 | return addr; | ||
113 | } else { | 119 | } else { |
114 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); | 120 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); |
115 | } | 121 | } |
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size, | |||
146 | 152 | ||
147 | if (!coherent && !(flags & __GFP_WAIT)) { | 153 | if (!coherent && !(flags & __GFP_WAIT)) { |
148 | struct page *page = NULL; | 154 | struct page *page = NULL; |
149 | void *addr = __alloc_from_pool(size, &page); | 155 | void *addr = __alloc_from_pool(size, &page, flags); |
150 | 156 | ||
151 | if (addr) | 157 | if (addr) |
152 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); | 158 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); |
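Both paths touched above hand out memory that bypasses the buddy allocator's zeroing, so __GFP_ZERO is now honoured explicitly for the atomic pool and the CMA case. A hypothetical caller that depends on this behaviour (driver structure and field names are made up):

        #include <linux/dma-mapping.h>

        ring->descs = dma_alloc_coherent(dev, ring_bytes, &ring->dma_handle,
                                         GFP_KERNEL | __GFP_ZERO);
        if (!ring->descs)
                return -ENOMEM;
        /* descriptors can be handed to the device without a memset() */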
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h index 78d4483ba40c..ec4db6df5e0d 100644 --- a/arch/c6x/include/asm/pgtable.h +++ b/arch/c6x/include/asm/pgtable.h | |||
@@ -67,6 +67,11 @@ extern unsigned long empty_zero_page; | |||
67 | */ | 67 | */ |
68 | #define pgtable_cache_init() do { } while (0) | 68 | #define pgtable_cache_init() do { } while (0) |
69 | 69 | ||
70 | /* | ||
71 | * c6x is !MMU, so define the simplest implementation | ||

72 | */ | ||
73 | #define pgprot_writecombine pgprot_noncached | ||
74 | |||
70 | #include <asm-generic/pgtable.h> | 75 | #include <asm-generic/pgtable.h> |
71 | 76 | ||
72 | #endif /* _ASM_C6X_PGTABLE_H */ | 77 | #endif /* _ASM_C6X_PGTABLE_H */ |
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h index 9359e5048442..d5779b0ec573 100644 --- a/arch/metag/include/asm/io.h +++ b/arch/metag/include/asm/io.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_METAG_IO_H | 2 | #define _ASM_METAG_IO_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/pgtable-bits.h> | ||
5 | 6 | ||
6 | #define IO_SPACE_LIMIT 0 | 7 | #define IO_SPACE_LIMIT 0 |
7 | 8 | ||
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h new file mode 100644 index 000000000000..25ba6729f496 --- /dev/null +++ b/arch/metag/include/asm/pgtable-bits.h | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * Meta page table definitions. | ||
3 | */ | ||
4 | |||
5 | #ifndef _METAG_PGTABLE_BITS_H | ||
6 | #define _METAG_PGTABLE_BITS_H | ||
7 | |||
8 | #include <asm/metag_mem.h> | ||
9 | |||
10 | /* | ||
11 | * Definitions for MMU descriptors | ||
12 | * | ||
13 | * These are the hardware bits in the MMCU pte entries. | ||
14 | * Derived from the Meta toolkit headers. | ||
15 | */ | ||
16 | #define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT | ||
17 | #define _PAGE_WRITE MMCU_ENTRY_WR_BIT | ||
18 | #define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT | ||
19 | /* Write combine bit - this can cause writes to occur out of order */ | ||
20 | #define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT | ||
21 | /* Sys coherent bit - this bit is never used by Linux */ | ||
22 | #define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT | ||
23 | #define _PAGE_ALWAYS_ZERO_1 0x020 | ||
24 | #define _PAGE_CACHE_CTRL0 0x040 | ||
25 | #define _PAGE_CACHE_CTRL1 0x080 | ||
26 | #define _PAGE_ALWAYS_ZERO_2 0x100 | ||
27 | #define _PAGE_ALWAYS_ZERO_3 0x200 | ||
28 | #define _PAGE_ALWAYS_ZERO_4 0x400 | ||
29 | #define _PAGE_ALWAYS_ZERO_5 0x800 | ||
30 | |||
31 | /* These are software bits that we stuff into the gaps in the hardware | ||
32 | * pte entries that are not used. Note, these DO get stored in the actual | ||
33 | * hardware, but the hardware just does not use them. | ||
34 | */ | ||
35 | #define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1 | ||
36 | #define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2 | ||
37 | |||
38 | /* Pages owned, and protected by, the kernel. */ | ||
39 | #define _PAGE_KERNEL _PAGE_PRIV | ||
40 | |||
41 | /* No cacheing of this page */ | ||
42 | #define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S) | ||
43 | /* burst cacheing - good for data streaming */ | ||
44 | #define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S) | ||
45 | /* One cache way per thread */ | ||
46 | #define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S) | ||
47 | /* Full on cacheing */ | ||
48 | #define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S) | ||
49 | |||
50 | #define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE) | ||
51 | |||
52 | /* which bits are used for cache control ... */ | ||
53 | #define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \ | ||
54 | _PAGE_WR_COMBINE) | ||
55 | |||
56 | /* This is a mask of the bits that pte_modify is allowed to change. */ | ||
57 | #define _PAGE_CHG_MASK (PAGE_MASK) | ||
58 | |||
59 | #define _PAGE_SZ_SHIFT 1 | ||
60 | #define _PAGE_SZ_4K (0x0) | ||
61 | #define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT) | ||
62 | #define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT) | ||
63 | #define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT) | ||
64 | #define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT) | ||
65 | #define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT) | ||
66 | #define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT) | ||
67 | #define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT) | ||
68 | #define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT) | ||
69 | #define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT) | ||
70 | #define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT) | ||
71 | #define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT) | ||
72 | |||
73 | #if defined(CONFIG_PAGE_SIZE_4K) | ||
74 | #define _PAGE_SZ (_PAGE_SZ_4K) | ||
75 | #elif defined(CONFIG_PAGE_SIZE_8K) | ||
76 | #define _PAGE_SZ (_PAGE_SZ_8K) | ||
77 | #elif defined(CONFIG_PAGE_SIZE_16K) | ||
78 | #define _PAGE_SZ (_PAGE_SZ_16K) | ||
79 | #endif | ||
80 | #define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT) | ||
81 | |||
82 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_8K) | ||
83 | # define _PAGE_SZHUGE (_PAGE_SZ_8K) | ||
84 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K) | ||
85 | # define _PAGE_SZHUGE (_PAGE_SZ_16K) | ||
86 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K) | ||
87 | # define _PAGE_SZHUGE (_PAGE_SZ_32K) | ||
88 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
89 | # define _PAGE_SZHUGE (_PAGE_SZ_64K) | ||
90 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K) | ||
91 | # define _PAGE_SZHUGE (_PAGE_SZ_128K) | ||
92 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) | ||
93 | # define _PAGE_SZHUGE (_PAGE_SZ_256K) | ||
94 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | ||
95 | # define _PAGE_SZHUGE (_PAGE_SZ_512K) | ||
96 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M) | ||
97 | # define _PAGE_SZHUGE (_PAGE_SZ_1M) | ||
98 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M) | ||
99 | # define _PAGE_SZHUGE (_PAGE_SZ_2M) | ||
100 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M) | ||
101 | # define _PAGE_SZHUGE (_PAGE_SZ_4M) | ||
102 | #endif | ||
103 | |||
104 | #endif /* _METAG_PGTABLE_BITS_H */ | ||
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h index d0604c0a8702..ffa3a3a2ecad 100644 --- a/arch/metag/include/asm/pgtable.h +++ b/arch/metag/include/asm/pgtable.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #ifndef _METAG_PGTABLE_H | 5 | #ifndef _METAG_PGTABLE_H |
6 | #define _METAG_PGTABLE_H | 6 | #define _METAG_PGTABLE_H |
7 | 7 | ||
8 | #include <asm/pgtable-bits.h> | ||
8 | #include <asm-generic/pgtable-nopmd.h> | 9 | #include <asm-generic/pgtable-nopmd.h> |
9 | 10 | ||
10 | /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ | 11 | /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ |
@@ -21,100 +22,6 @@ | |||
21 | #endif | 22 | #endif |
22 | 23 | ||
23 | /* | 24 | /* |
24 | * Definitions for MMU descriptors | ||
25 | * | ||
26 | * These are the hardware bits in the MMCU pte entries. | ||
27 | * Derived from the Meta toolkit headers. | ||
28 | */ | ||
29 | #define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT | ||
30 | #define _PAGE_WRITE MMCU_ENTRY_WR_BIT | ||
31 | #define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT | ||
32 | /* Write combine bit - this can cause writes to occur out of order */ | ||
33 | #define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT | ||
34 | /* Sys coherent bit - this bit is never used by Linux */ | ||
35 | #define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT | ||
36 | #define _PAGE_ALWAYS_ZERO_1 0x020 | ||
37 | #define _PAGE_CACHE_CTRL0 0x040 | ||
38 | #define _PAGE_CACHE_CTRL1 0x080 | ||
39 | #define _PAGE_ALWAYS_ZERO_2 0x100 | ||
40 | #define _PAGE_ALWAYS_ZERO_3 0x200 | ||
41 | #define _PAGE_ALWAYS_ZERO_4 0x400 | ||
42 | #define _PAGE_ALWAYS_ZERO_5 0x800 | ||
43 | |||
44 | /* These are software bits that we stuff into the gaps in the hardware | ||
45 | * pte entries that are not used. Note, these DO get stored in the actual | ||
46 | * hardware, but the hardware just does not use them. | ||
47 | */ | ||
48 | #define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1 | ||
49 | #define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2 | ||
50 | |||
51 | /* Pages owned, and protected by, the kernel. */ | ||
52 | #define _PAGE_KERNEL _PAGE_PRIV | ||
53 | |||
54 | /* No cacheing of this page */ | ||
55 | #define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S) | ||
56 | /* burst cacheing - good for data streaming */ | ||
57 | #define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S) | ||
58 | /* One cache way per thread */ | ||
59 | #define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S) | ||
60 | /* Full on cacheing */ | ||
61 | #define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S) | ||
62 | |||
63 | #define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE) | ||
64 | |||
65 | /* which bits are used for cache control ... */ | ||
66 | #define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \ | ||
67 | _PAGE_WR_COMBINE) | ||
68 | |||
69 | /* This is a mask of the bits that pte_modify is allowed to change. */ | ||
70 | #define _PAGE_CHG_MASK (PAGE_MASK) | ||
71 | |||
72 | #define _PAGE_SZ_SHIFT 1 | ||
73 | #define _PAGE_SZ_4K (0x0) | ||
74 | #define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT) | ||
75 | #define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT) | ||
76 | #define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT) | ||
77 | #define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT) | ||
78 | #define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT) | ||
79 | #define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT) | ||
80 | #define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT) | ||
81 | #define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT) | ||
82 | #define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT) | ||
83 | #define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT) | ||
84 | #define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT) | ||
85 | |||
86 | #if defined(CONFIG_PAGE_SIZE_4K) | ||
87 | #define _PAGE_SZ (_PAGE_SZ_4K) | ||
88 | #elif defined(CONFIG_PAGE_SIZE_8K) | ||
89 | #define _PAGE_SZ (_PAGE_SZ_8K) | ||
90 | #elif defined(CONFIG_PAGE_SIZE_16K) | ||
91 | #define _PAGE_SZ (_PAGE_SZ_16K) | ||
92 | #endif | ||
93 | #define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT) | ||
94 | |||
95 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_8K) | ||
96 | # define _PAGE_SZHUGE (_PAGE_SZ_8K) | ||
97 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K) | ||
98 | # define _PAGE_SZHUGE (_PAGE_SZ_16K) | ||
99 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K) | ||
100 | # define _PAGE_SZHUGE (_PAGE_SZ_32K) | ||
101 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
102 | # define _PAGE_SZHUGE (_PAGE_SZ_64K) | ||
103 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K) | ||
104 | # define _PAGE_SZHUGE (_PAGE_SZ_128K) | ||
105 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) | ||
106 | # define _PAGE_SZHUGE (_PAGE_SZ_256K) | ||
107 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | ||
108 | # define _PAGE_SZHUGE (_PAGE_SZ_512K) | ||
109 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M) | ||
110 | # define _PAGE_SZHUGE (_PAGE_SZ_1M) | ||
111 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M) | ||
112 | # define _PAGE_SZHUGE (_PAGE_SZ_2M) | ||
113 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M) | ||
114 | # define _PAGE_SZHUGE (_PAGE_SZ_4M) | ||
115 | #endif | ||
116 | |||
117 | /* | ||
118 | * The Linux memory management assumes a three-level page table setup. On | 25 | * The Linux memory management assumes a three-level page table setup. On |
119 | * Meta, we use that, but "fold" the mid level into the top-level page | 26 | * Meta, we use that, but "fold" the mid level into the top-level page |
120 | * table. | 27 | * table. |
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index 0536bc021cc6..ef548510b951 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S | |||
@@ -348,8 +348,9 @@ C_ENTRY(_user_exception): | |||
348 | * The LP register should point to the location where the called function | 348 | * The LP register should point to the location where the called function |
349 | * should return. [note that MAKE_SYS_CALL uses label 1] */ | 349 | * should return. [note that MAKE_SYS_CALL uses label 1] */ |
350 | /* See if the system call number is valid */ | 350 | /* See if the system call number is valid */ |
351 | blti r12, 5f | ||
351 | addi r11, r12, -__NR_syscalls; | 352 | addi r11, r12, -__NR_syscalls; |
352 | bgei r11,5f; | 353 | bgei r11, 5f; |
353 | /* Figure out which function to use for this system call. */ | 354 | /* Figure out which function to use for this system call. */ |
354 | /* Note Microblaze barrel shift is optional, so don't rely on it */ | 355 | /* Note Microblaze barrel shift is optional, so don't rely on it */ |
355 | add r12, r12, r12; /* convert num -> ptr */ | 356 | add r12, r12, r12; /* convert num -> ptr */ |
@@ -375,7 +376,7 @@ C_ENTRY(_user_exception): | |||
375 | 376 | ||
376 | /* The syscall number is invalid, return an error. */ | 377 | /* The syscall number is invalid, return an error. */ |
377 | 5: | 378 | 5: |
378 | rtsd r15, 8; /* looks like a normal subroutine return */ | 379 | braid ret_from_trap |
379 | addi r3, r0, -ENOSYS; | 380 | addi r3, r0, -ENOSYS; |
380 | 381 | ||
381 | /* Entry point used to return from a syscall/trap */ | 382 | /* Entry point used to return from a syscall/trap */ |
@@ -411,7 +412,7 @@ C_ENTRY(ret_from_trap): | |||
411 | bri 1b | 412 | bri 1b |
412 | 413 | ||
413 | /* Maybe handle a signal */ | 414 | /* Maybe handle a signal */ |
414 | 5: | 415 | 5: |
415 | andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; | 416 | andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
416 | beqi r11, 4f; /* Signals to handle, handle them */ | 417 | beqi r11, 4f; /* Signals to handle, handle them */ |
417 | 418 | ||
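The added blti catches negative syscall numbers, which previously indexed in front of the syscall table, and the invalid-number path now returns through ret_from_trap so -ENOSYS actually reaches user space. A rough C rendering of the checks done in assembly (illustrative only):

        /* new: blti r12, 5f */
        if (nr < 0)
                return -ENOSYS;
        /* existing: addi r11, r12, -__NR_syscalls; bgei r11, 5f */
        if (nr >= __NR_syscalls)
                return -ENOSYS;
        /* otherwise dispatch through the syscall table */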
diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h index 20fb1cf2dab6..642462144872 100644 --- a/arch/nios2/include/asm/ptrace.h +++ b/arch/nios2/include/asm/ptrace.h | |||
@@ -15,7 +15,54 @@ | |||
15 | 15 | ||
16 | #include <uapi/asm/ptrace.h> | 16 | #include <uapi/asm/ptrace.h> |
17 | 17 | ||
18 | /* This struct defines the way the registers are stored on the | ||
19 | stack during a system call. */ | ||
20 | |||
18 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
22 | struct pt_regs { | ||
23 | unsigned long r8; /* r8-r15 Caller-saved GP registers */ | ||
24 | unsigned long r9; | ||
25 | unsigned long r10; | ||
26 | unsigned long r11; | ||
27 | unsigned long r12; | ||
28 | unsigned long r13; | ||
29 | unsigned long r14; | ||
30 | unsigned long r15; | ||
31 | unsigned long r1; /* Assembler temporary */ | ||
32 | unsigned long r2; /* Retval LS 32bits */ | ||
33 | unsigned long r3; /* Retval MS 32bits */ | ||
34 | unsigned long r4; /* r4-r7 Register arguments */ | ||
35 | unsigned long r5; | ||
36 | unsigned long r6; | ||
37 | unsigned long r7; | ||
38 | unsigned long orig_r2; /* Copy of r2 ?? */ | ||
39 | unsigned long ra; /* Return address */ | ||
40 | unsigned long fp; /* Frame pointer */ | ||
41 | unsigned long sp; /* Stack pointer */ | ||
42 | unsigned long gp; /* Global pointer */ | ||
43 | unsigned long estatus; | ||
44 | unsigned long ea; /* Exception return address (pc) */ | ||
45 | unsigned long orig_r7; | ||
46 | }; | ||
47 | |||
48 | /* | ||
49 | * This is the extended stack used by signal handlers and the context | ||
50 | * switcher: it's pushed after the normal "struct pt_regs". | ||
51 | */ | ||
52 | struct switch_stack { | ||
53 | unsigned long r16; /* r16-r23 Callee-saved GP registers */ | ||
54 | unsigned long r17; | ||
55 | unsigned long r18; | ||
56 | unsigned long r19; | ||
57 | unsigned long r20; | ||
58 | unsigned long r21; | ||
59 | unsigned long r22; | ||
60 | unsigned long r23; | ||
61 | unsigned long fp; | ||
62 | unsigned long gp; | ||
63 | unsigned long ra; | ||
64 | }; | ||
65 | |||
19 | #define user_mode(regs) (((regs)->estatus & ESTATUS_EU)) | 66 | #define user_mode(regs) (((regs)->estatus & ESTATUS_EU)) |
20 | 67 | ||
21 | #define instruction_pointer(regs) ((regs)->ra) | 68 | #define instruction_pointer(regs) ((regs)->ra) |
diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h deleted file mode 100644 index 2c87614b0f6e..000000000000 --- a/arch/nios2/include/asm/ucontext.h +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch> | ||
3 | * Copyright (C) 2004 Microtronix Datacom Ltd | ||
4 | * | ||
5 | * This file is subject to the terms and conditions of the GNU General Public | ||
6 | * License. See the file "COPYING" in the main directory of this archive | ||
7 | * for more details. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_NIOS2_UCONTEXT_H | ||
11 | #define _ASM_NIOS2_UCONTEXT_H | ||
12 | |||
13 | typedef int greg_t; | ||
14 | #define NGREG 32 | ||
15 | typedef greg_t gregset_t[NGREG]; | ||
16 | |||
17 | struct mcontext { | ||
18 | int version; | ||
19 | gregset_t gregs; | ||
20 | }; | ||
21 | |||
22 | #define MCONTEXT_VERSION 2 | ||
23 | |||
24 | struct ucontext { | ||
25 | unsigned long uc_flags; | ||
26 | struct ucontext *uc_link; | ||
27 | stack_t uc_stack; | ||
28 | struct mcontext uc_mcontext; | ||
29 | sigset_t uc_sigmask; /* mask last for extensibility */ | ||
30 | }; | ||
31 | |||
32 | #endif | ||
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild index 4f07ca3f8d10..e0bb972a50d7 100644 --- a/arch/nios2/include/uapi/asm/Kbuild +++ b/arch/nios2/include/uapi/asm/Kbuild | |||
@@ -1,4 +1,5 @@ | |||
1 | include include/uapi/asm-generic/Kbuild.asm | 1 | include include/uapi/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | header-y += elf.h | 3 | header-y += elf.h |
4 | header-y += ucontext.h | 4 | |
5 | generic-y += ucontext.h | ||
diff --git a/arch/nios2/include/uapi/asm/elf.h b/arch/nios2/include/uapi/asm/elf.h index a5b91ae5cf56..6f06d3b2949e 100644 --- a/arch/nios2/include/uapi/asm/elf.h +++ b/arch/nios2/include/uapi/asm/elf.h | |||
@@ -50,9 +50,7 @@ | |||
50 | 50 | ||
51 | typedef unsigned long elf_greg_t; | 51 | typedef unsigned long elf_greg_t; |
52 | 52 | ||
53 | #define ELF_NGREG \ | 53 | #define ELF_NGREG 49 |
54 | ((sizeof(struct pt_regs) + sizeof(struct switch_stack)) / \ | ||
55 | sizeof(elf_greg_t)) | ||
56 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | 54 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; |
57 | 55 | ||
58 | typedef unsigned long elf_fpregset_t; | 56 | typedef unsigned long elf_fpregset_t; |
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h index e83a7c9d1c36..71a330597adf 100644 --- a/arch/nios2/include/uapi/asm/ptrace.h +++ b/arch/nios2/include/uapi/asm/ptrace.h | |||
@@ -67,53 +67,9 @@ | |||
67 | 67 | ||
68 | #define NUM_PTRACE_REG (PTR_TLBMISC + 1) | 68 | #define NUM_PTRACE_REG (PTR_TLBMISC + 1) |
69 | 69 | ||
70 | /* this struct defines the way the registers are stored on the | 70 | /* User structures for general purpose registers. */ |
71 | stack during a system call. | 71 | struct user_pt_regs { |
72 | 72 | __u32 regs[49]; | |
73 | There is a fake_regs in setup.c that has to match pt_regs.*/ | ||
74 | |||
75 | struct pt_regs { | ||
76 | unsigned long r8; /* r8-r15 Caller-saved GP registers */ | ||
77 | unsigned long r9; | ||
78 | unsigned long r10; | ||
79 | unsigned long r11; | ||
80 | unsigned long r12; | ||
81 | unsigned long r13; | ||
82 | unsigned long r14; | ||
83 | unsigned long r15; | ||
84 | unsigned long r1; /* Assembler temporary */ | ||
85 | unsigned long r2; /* Retval LS 32bits */ | ||
86 | unsigned long r3; /* Retval MS 32bits */ | ||
87 | unsigned long r4; /* r4-r7 Register arguments */ | ||
88 | unsigned long r5; | ||
89 | unsigned long r6; | ||
90 | unsigned long r7; | ||
91 | unsigned long orig_r2; /* Copy of r2 ?? */ | ||
92 | unsigned long ra; /* Return address */ | ||
93 | unsigned long fp; /* Frame pointer */ | ||
94 | unsigned long sp; /* Stack pointer */ | ||
95 | unsigned long gp; /* Global pointer */ | ||
96 | unsigned long estatus; | ||
97 | unsigned long ea; /* Exception return address (pc) */ | ||
98 | unsigned long orig_r7; | ||
99 | }; | ||
100 | |||
101 | /* | ||
102 | * This is the extended stack used by signal handlers and the context | ||
103 | * switcher: it's pushed after the normal "struct pt_regs". | ||
104 | */ | ||
105 | struct switch_stack { | ||
106 | unsigned long r16; /* r16-r23 Callee-saved GP registers */ | ||
107 | unsigned long r17; | ||
108 | unsigned long r18; | ||
109 | unsigned long r19; | ||
110 | unsigned long r20; | ||
111 | unsigned long r21; | ||
112 | unsigned long r22; | ||
113 | unsigned long r23; | ||
114 | unsigned long fp; | ||
115 | unsigned long gp; | ||
116 | unsigned long ra; | ||
117 | }; | 73 | }; |
118 | 74 | ||
119 | #endif /* __ASSEMBLY__ */ | 75 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/nios2/include/uapi/asm/sigcontext.h b/arch/nios2/include/uapi/asm/sigcontext.h index 7b8bb41867d4..b67944a50927 100644 --- a/arch/nios2/include/uapi/asm/sigcontext.h +++ b/arch/nios2/include/uapi/asm/sigcontext.h | |||
@@ -15,14 +15,16 @@ | |||
15 | * details. | 15 | * details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #ifndef _ASM_NIOS2_SIGCONTEXT_H | 18 | #ifndef _UAPI__ASM_SIGCONTEXT_H |
19 | #define _ASM_NIOS2_SIGCONTEXT_H | 19 | #define _UAPI__ASM_SIGCONTEXT_H |
20 | 20 | ||
21 | #include <asm/ptrace.h> | 21 | #include <linux/types.h> |
22 | |||
23 | #define MCONTEXT_VERSION 2 | ||
22 | 24 | ||
23 | struct sigcontext { | 25 | struct sigcontext { |
24 | struct pt_regs regs; | 26 | int version; |
25 | unsigned long sc_mask; /* old sigmask */ | 27 | unsigned long gregs[32]; |
26 | }; | 28 | }; |
27 | 29 | ||
28 | #endif | 30 | #endif |
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index 2d0ea25be171..dda41e4fe707 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c | |||
@@ -39,7 +39,7 @@ static inline int rt_restore_ucontext(struct pt_regs *regs, | |||
39 | struct ucontext *uc, int *pr2) | 39 | struct ucontext *uc, int *pr2) |
40 | { | 40 | { |
41 | int temp; | 41 | int temp; |
42 | greg_t *gregs = uc->uc_mcontext.gregs; | 42 | unsigned long *gregs = uc->uc_mcontext.gregs; |
43 | int err; | 43 | int err; |
44 | 44 | ||
45 | /* Always make any pending restarted system calls return -EINTR */ | 45 | /* Always make any pending restarted system calls return -EINTR */ |
@@ -127,7 +127,7 @@ badframe: | |||
127 | static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) | 127 | static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) |
128 | { | 128 | { |
129 | struct switch_stack *sw = (struct switch_stack *)regs - 1; | 129 | struct switch_stack *sw = (struct switch_stack *)regs - 1; |
130 | greg_t *gregs = uc->uc_mcontext.gregs; | 130 | unsigned long *gregs = uc->uc_mcontext.gregs; |
131 | int err = 0; | 131 | int err = 0; |
132 | 132 | ||
133 | err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); | 133 | err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); |
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 0d231adfe576..0c9b6afe69e9 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c | |||
@@ -126,7 +126,6 @@ good_area: | |||
126 | break; | 126 | break; |
127 | } | 127 | } |
128 | 128 | ||
129 | survive: | ||
130 | /* | 129 | /* |
131 | * If for any reason at all we couldn't handle the fault, | 130 | * If for any reason at all we couldn't handle the fault, |
132 | * make sure we exit gracefully rather than endlessly redo | 131 | * make sure we exit gracefully rather than endlessly redo |
@@ -220,11 +219,6 @@ no_context: | |||
220 | */ | 219 | */ |
221 | out_of_memory: | 220 | out_of_memory: |
222 | up_read(&mm->mmap_sem); | 221 | up_read(&mm->mmap_sem); |
223 | if (is_global_init(tsk)) { | ||
224 | yield(); | ||
225 | down_read(&mm->mmap_sem); | ||
226 | goto survive; | ||
227 | } | ||
228 | if (!user_mode(regs)) | 222 | if (!user_mode(regs)) |
229 | goto no_context; | 223 | goto no_context; |
230 | pagefault_out_of_memory(); | 224 | pagefault_out_of_memory(); |
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index f213f5b4c423..d17437238a2c 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h | |||
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
26 | 26 | ||
27 | if (likely(pgd != NULL)) { | 27 | if (likely(pgd != NULL)) { |
28 | memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); | 28 | memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); |
29 | #ifdef CONFIG_64BIT | 29 | #if PT_NLEVELS == 3 |
30 | actual_pgd += PTRS_PER_PGD; | 30 | actual_pgd += PTRS_PER_PGD; |
31 | /* Populate first pmd with allocated memory. We mark it | 31 | /* Populate first pmd with allocated memory. We mark it |
32 | * with PxD_FLAG_ATTACHED as a signal to the system that this | 32 | * with PxD_FLAG_ATTACHED as a signal to the system that this |
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
45 | 45 | ||
46 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | 46 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
47 | { | 47 | { |
48 | #ifdef CONFIG_64BIT | 48 | #if PT_NLEVELS == 3 |
49 | pgd -= PTRS_PER_PGD; | 49 | pgd -= PTRS_PER_PGD; |
50 | #endif | 50 | #endif |
51 | free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); | 51 | free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); |
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | |||
72 | 72 | ||
73 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 73 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
74 | { | 74 | { |
75 | #ifdef CONFIG_64BIT | ||
76 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | 75 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) |
77 | /* This is the permanent pmd attached to the pgd; | 76 | /* |
78 | * cannot free it */ | 77 | * This is the permanent pmd attached to the pgd; |
78 | * cannot free it. | ||
79 | * Increment the counter to compensate for the decrement | ||
80 | * done by generic mm code. | ||
81 | */ | ||
82 | mm_inc_nr_pmds(mm); | ||
79 | return; | 83 | return; |
80 | #endif | ||
81 | free_pages((unsigned long)pmd, PMD_ORDER); | 84 | free_pages((unsigned long)pmd, PMD_ORDER); |
82 | } | 85 | } |
83 | 86 | ||
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | |||
99 | static inline void | 102 | static inline void |
100 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) | 103 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) |
101 | { | 104 | { |
102 | #ifdef CONFIG_64BIT | 105 | #if PT_NLEVELS == 3 |
103 | /* preserve the gateway marker if this is the beginning of | 106 | /* preserve the gateway marker if this is the beginning of |
104 | * the permanent pmd */ | 107 | * the permanent pmd */ |
105 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | 108 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) |
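The mm_inc_nr_pmds() call keeps the mm->nr_pmds accounting balanced: the permanent PMD attached to the PGD is allocated in pgd_alloc() and never goes through __pmd_alloc(), yet the generic teardown path still decrements the counter for it when it calls pmd_free(). Roughly (a sketch of the sequence, not literal code):

        free_pmd_range()                     /* generic mm code              */
            pmd_free_tlb()  ->  pmd_free()   /* parisc keeps the attached    */
                                mm_inc_nr_pmds(mm);   /* PMD, re-increments  */
            mm_dec_nr_pmds(mm);              /* ... so this decrement is
                                                compensated                  */

Without the increment, the counter would end up off by one for the attached PMD and trip the mm accounting check when the address space is destroyed.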
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 5a8997d63899..8eefb12d1d33 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -55,8 +55,8 @@ | |||
55 | #define ENTRY_COMP(_name_) .word sys_##_name_ | 55 | #define ENTRY_COMP(_name_) .word sys_##_name_ |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | ENTRY_SAME(restart_syscall) /* 0 */ | 58 | 90: ENTRY_SAME(restart_syscall) /* 0 */ |
59 | ENTRY_SAME(exit) | 59 | 91: ENTRY_SAME(exit) |
60 | ENTRY_SAME(fork_wrapper) | 60 | ENTRY_SAME(fork_wrapper) |
61 | ENTRY_SAME(read) | 61 | ENTRY_SAME(read) |
62 | ENTRY_SAME(write) | 62 | ENTRY_SAME(write) |
@@ -439,7 +439,10 @@ | |||
439 | ENTRY_SAME(bpf) | 439 | ENTRY_SAME(bpf) |
440 | ENTRY_COMP(execveat) | 440 | ENTRY_COMP(execveat) |
441 | 441 | ||
442 | /* Nothing yet */ | 442 | |
443 | .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) | ||
444 | .error "size of syscall table does not fit value of __NR_Linux_syscalls" | ||
445 | .endif | ||
443 | 446 | ||
444 | #undef ENTRY_SAME | 447 | #undef ENTRY_SAME |
445 | #undef ENTRY_DIFF | 448 | #undef ENTRY_DIFF |
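The .ifne block is a build-time size check: (. - 90b) is the byte length of the table measured from label 90, and (91b - 90b) is the size of one entry, so the expression is zero exactly when the table holds __NR_Linux_syscalls entries. A rough C equivalent, for illustration only since the real table is assembled and the check therefore has to live in the assembler:

        #include <linux/bug.h>
        #include <linux/kernel.h>

        /* placed in any function that is always compiled */
        BUILD_BUG_ON(ARRAY_SIZE(sys_call_table) != __NR_Linux_syscalls);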
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 03cd858a401c..4cbe23af400a 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
@@ -153,6 +153,7 @@ | |||
153 | #define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff | 153 | #define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff |
154 | #define PPC_INST_MFTMR 0x7c0002dc | 154 | #define PPC_INST_MFTMR 0x7c0002dc |
155 | #define PPC_INST_MSGSND 0x7c00019c | 155 | #define PPC_INST_MSGSND 0x7c00019c |
156 | #define PPC_INST_MSGCLR 0x7c0001dc | ||
156 | #define PPC_INST_MSGSNDP 0x7c00011c | 157 | #define PPC_INST_MSGSNDP 0x7c00011c |
157 | #define PPC_INST_MTTMR 0x7c0003dc | 158 | #define PPC_INST_MTTMR 0x7c0003dc |
158 | #define PPC_INST_NOP 0x60000000 | 159 | #define PPC_INST_NOP 0x60000000 |
@@ -309,6 +310,8 @@ | |||
309 | ___PPC_RB(b) | __PPC_EH(eh)) | 310 | ___PPC_RB(b) | __PPC_EH(eh)) |
310 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ | 311 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ |
311 | ___PPC_RB(b)) | 312 | ___PPC_RB(b)) |
313 | #define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \ | ||
314 | ___PPC_RB(b)) | ||
312 | #define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ | 315 | #define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ |
313 | ___PPC_RB(b)) | 316 | ___PPC_RB(b)) |
314 | #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ | 317 | #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 1c874fb533bb..af56b5c6c81a 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -608,13 +608,16 @@ | |||
608 | #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ | 608 | #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ |
609 | #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ | 609 | #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ |
610 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ | 610 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ |
611 | #define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */ | ||
611 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ | 612 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ |
612 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ | 613 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ |
613 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ | 614 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ |
614 | #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ | 615 | #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ |
615 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ | 616 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ |
617 | #define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */ | ||
616 | #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ | 618 | #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ |
617 | #define SRR1_WAKERESET 0x00100000 /* System reset */ | 619 | #define SRR1_WAKERESET 0x00100000 /* System reset */ |
620 | #define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */ | ||
618 | #define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ | 621 | #define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ |
619 | #define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, | 622 | #define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, |
620 | * may not be recoverable */ | 623 | * may not be recoverable */ |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index f337666768a7..f83046878336 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
437 | .machine_check_early = __machine_check_early_realmode_p8, | 437 | .machine_check_early = __machine_check_early_realmode_p8, |
438 | .platform = "power8", | 438 | .platform = "power8", |
439 | }, | 439 | }, |
440 | { /* Power8NVL */ | ||
441 | .pvr_mask = 0xffff0000, | ||
442 | .pvr_value = 0x004c0000, | ||
443 | .cpu_name = "POWER8NVL (raw)", | ||
444 | .cpu_features = CPU_FTRS_POWER8, | ||
445 | .cpu_user_features = COMMON_USER_POWER8, | ||
446 | .cpu_user_features2 = COMMON_USER2_POWER8, | ||
447 | .mmu_features = MMU_FTRS_POWER8, | ||
448 | .icache_bsize = 128, | ||
449 | .dcache_bsize = 128, | ||
450 | .num_pmcs = 6, | ||
451 | .pmc_type = PPC_PMC_IBM, | ||
452 | .oprofile_cpu_type = "ppc64/power8", | ||
453 | .oprofile_type = PPC_OPROFILE_INVALID, | ||
454 | .cpu_setup = __setup_cpu_power8, | ||
455 | .cpu_restore = __restore_cpu_power8, | ||
456 | .flush_tlb = __flush_tlb_power8, | ||
457 | .machine_check_early = __machine_check_early_realmode_p8, | ||
458 | .platform = "power8", | ||
459 | }, | ||
440 | { /* Power8 DD1: Does not support doorbell IPIs */ | 460 | { /* Power8 DD1: Does not support doorbell IPIs */ |
441 | .pvr_mask = 0xffffff00, | 461 | .pvr_mask = 0xffffff00, |
442 | .pvr_value = 0x004d0100, | 462 | .pvr_value = 0x004d0100, |
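
The cputable.c hunk above adds a POWER8NVL entry that is selected, like the existing entries, by masking the processor version register and comparing the result against a fixed value. A small standalone sketch of that table-matching scheme; the two entries are mirrored from the diff above, the lookup loop itself is illustrative.

#include <stdint.h>
#include <stdio.h>

struct cpu_entry {
	uint32_t pvr_mask;
	uint32_t pvr_value;
	const char *cpu_name;
};

/* Entries mirrored from the table above; lookup is first match wins,
 * so entries with more specific masks are listed before broader ones. */
static const struct cpu_entry cpu_table[] = {
	{ 0xffffff00, 0x004d0100, "POWER8 DD1 (raw)" },
	{ 0xffff0000, 0x004c0000, "POWER8NVL (raw)"  },
};

static const char *identify_cpu(uint32_t pvr)
{
	for (unsigned int i = 0; i < sizeof(cpu_table) / sizeof(cpu_table[0]); i++)
		if ((pvr & cpu_table[i].pvr_mask) == cpu_table[i].pvr_value)
			return cpu_table[i].cpu_name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", identify_cpu(0x004c0010)); /* matches the POWER8NVL entry */
	return 0;
}
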
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index f4217819cc31..2128f3a96c32 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <asm/dbell.h> | 18 | #include <asm/dbell.h> |
19 | #include <asm/irq_regs.h> | 19 | #include <asm/irq_regs.h> |
20 | #include <asm/kvm_ppc.h> | ||
20 | 21 | ||
21 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
22 | void doorbell_setup_this_cpu(void) | 23 | void doorbell_setup_this_cpu(void) |
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs) | |||
41 | 42 | ||
42 | may_hard_irq_enable(); | 43 | may_hard_irq_enable(); |
43 | 44 | ||
45 | kvmppc_set_host_ipi(smp_processor_id(), 0); | ||
44 | __this_cpu_inc(irq_stat.doorbell_irqs); | 46 | __this_cpu_inc(irq_stat.doorbell_irqs); |
45 | 47 | ||
46 | smp_ipi_demux(); | 48 | smp_ipi_demux(); |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index c2df8150bd7a..9519e6bdc6d7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -1408,7 +1408,7 @@ machine_check_handle_early: | |||
1408 | bne 9f /* continue in V mode if we are. */ | 1408 | bne 9f /* continue in V mode if we are. */ |
1409 | 1409 | ||
1410 | 5: | 1410 | 5: |
1411 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 1411 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER |
1412 | /* | 1412 | /* |
1413 | * We are coming from kernel context. Check if we are coming from | 1413 | * We are coming from kernel context. Check if we are coming from |
1414 | * guest. if yes, then we can continue. We will fall through | 1414 | * guest. if yes, then we can continue. We will fall through |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index de4018a1bc4b..de747563d29d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) | |||
636 | spin_lock(&vcpu->arch.vpa_update_lock); | 636 | spin_lock(&vcpu->arch.vpa_update_lock); |
637 | lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; | 637 | lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; |
638 | if (lppaca) | 638 | if (lppaca) |
639 | yield_count = lppaca->yield_count; | 639 | yield_count = be32_to_cpu(lppaca->yield_count); |
640 | spin_unlock(&vcpu->arch.vpa_update_lock); | 640 | spin_unlock(&vcpu->arch.vpa_update_lock); |
641 | return yield_count; | 641 | return yield_count; |
642 | } | 642 | } |
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, | |||
942 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | 942 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, |
943 | bool preserve_top32) | 943 | bool preserve_top32) |
944 | { | 944 | { |
945 | struct kvm *kvm = vcpu->kvm; | ||
945 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 946 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
946 | u64 mask; | 947 | u64 mask; |
947 | 948 | ||
949 | mutex_lock(&kvm->lock); | ||
948 | spin_lock(&vc->lock); | 950 | spin_lock(&vc->lock); |
949 | /* | 951 | /* |
950 | * If ILE (interrupt little-endian) has changed, update the | 952 | * If ILE (interrupt little-endian) has changed, update the |
951 | * MSR_LE bit in the intr_msr for each vcpu in this vcore. | 953 | * MSR_LE bit in the intr_msr for each vcpu in this vcore. |
952 | */ | 954 | */ |
953 | if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { | 955 | if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { |
954 | struct kvm *kvm = vcpu->kvm; | ||
955 | struct kvm_vcpu *vcpu; | 956 | struct kvm_vcpu *vcpu; |
956 | int i; | 957 | int i; |
957 | 958 | ||
958 | mutex_lock(&kvm->lock); | ||
959 | kvm_for_each_vcpu(i, vcpu, kvm) { | 959 | kvm_for_each_vcpu(i, vcpu, kvm) { |
960 | if (vcpu->arch.vcore != vc) | 960 | if (vcpu->arch.vcore != vc) |
961 | continue; | 961 | continue; |
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | |||
964 | else | 964 | else |
965 | vcpu->arch.intr_msr &= ~MSR_LE; | 965 | vcpu->arch.intr_msr &= ~MSR_LE; |
966 | } | 966 | } |
967 | mutex_unlock(&kvm->lock); | ||
968 | } | 967 | } |
969 | 968 | ||
970 | /* | 969 | /* |
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | |||
981 | mask &= 0xFFFFFFFF; | 980 | mask &= 0xFFFFFFFF; |
982 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); | 981 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); |
983 | spin_unlock(&vc->lock); | 982 | spin_unlock(&vc->lock); |
983 | mutex_unlock(&kvm->lock); | ||
984 | } | 984 | } |
985 | 985 | ||
986 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | 986 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
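
The kvmppc_set_lpcr() change above fixes a lock-ordering problem: kvm->lock (a mutex, which can sleep) is now taken before vc->lock (a spinlock) and released after it, instead of being acquired while the spinlock is already held. A compact userspace sketch of the same discipline, using pthreads in place of the kernel primitives (names and the LPCR update are illustrative only).

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER; /* plays the role of kvm->lock */
static pthread_spinlock_t inner_lock;                          /* plays the role of vc->lock  */
static unsigned long shared_lpcr;

/* Always acquire outer before inner and release in reverse order,
 * so no path ever blocks on the mutex while holding the spinlock. */
static void update_lpcr(unsigned long new_lpcr, unsigned long mask)
{
	pthread_mutex_lock(&outer_lock);
	pthread_spin_lock(&inner_lock);
	shared_lpcr = (shared_lpcr & ~mask) | (new_lpcr & mask);
	pthread_spin_unlock(&inner_lock);
	pthread_mutex_unlock(&outer_lock);
}

int main(void)
{
	pthread_spin_init(&inner_lock, PTHREAD_PROCESS_PRIVATE);
	update_lpcr(0x1UL, 0xfUL);
	printf("lpcr = %#lx\n", shared_lpcr);
	pthread_spin_destroy(&inner_lock);
	return 0;
}
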
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bb94e6f20c81..6cbf1630cb70 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | |||
1005 | /* Save HEIR (HV emulation assist reg) in emul_inst | 1005 | /* Save HEIR (HV emulation assist reg) in emul_inst |
1006 | if this is an HEI (HV emulation interrupt, e40) */ | 1006 | if this is an HEI (HV emulation interrupt, e40) */ |
1007 | li r3,KVM_INST_FETCH_FAILED | 1007 | li r3,KVM_INST_FETCH_FAILED |
1008 | stw r3,VCPU_LAST_INST(r9) | ||
1008 | cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST | 1009 | cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST |
1009 | bne 11f | 1010 | bne 11f |
1010 | mfspr r3,SPRN_HEIR | 1011 | mfspr r3,SPRN_HEIR |
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index fc34025ef822..38a45088f633 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <asm/runlatch.h> | 33 | #include <asm/runlatch.h> |
34 | #include <asm/code-patching.h> | 34 | #include <asm/code-patching.h> |
35 | #include <asm/dbell.h> | 35 | #include <asm/dbell.h> |
36 | #include <asm/kvm_ppc.h> | ||
37 | #include <asm/ppc-opcode.h> | ||
36 | 38 | ||
37 | #include "powernv.h" | 39 | #include "powernv.h" |
38 | 40 | ||
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void) | |||
149 | static void pnv_smp_cpu_kill_self(void) | 151 | static void pnv_smp_cpu_kill_self(void) |
150 | { | 152 | { |
151 | unsigned int cpu; | 153 | unsigned int cpu; |
152 | unsigned long srr1; | 154 | unsigned long srr1, wmask; |
153 | u32 idle_states; | 155 | u32 idle_states; |
154 | 156 | ||
155 | /* Standard hot unplug procedure */ | 157 | /* Standard hot unplug procedure */ |
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void) | |||
161 | generic_set_cpu_dead(cpu); | 163 | generic_set_cpu_dead(cpu); |
162 | smp_wmb(); | 164 | smp_wmb(); |
163 | 165 | ||
166 | wmask = SRR1_WAKEMASK; | ||
167 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
168 | wmask = SRR1_WAKEMASK_P8; | ||
169 | |||
164 | idle_states = pnv_get_supported_cpuidle_states(); | 170 | idle_states = pnv_get_supported_cpuidle_states(); |
165 | /* We don't want to take decrementer interrupts while we are offline, | 171 | /* We don't want to take decrementer interrupts while we are offline, |
166 | * so clear LPCR:PECE1. We keep PECE2 enabled. | 172 | * so clear LPCR:PECE1. We keep PECE2 enabled. |
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void) | |||
191 | * having finished executing in a KVM guest, then srr1 | 197 | * having finished executing in a KVM guest, then srr1 |
192 | * contains 0. | 198 | * contains 0. |
193 | */ | 199 | */ |
194 | if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) { | 200 | if ((srr1 & wmask) == SRR1_WAKEEE) { |
195 | icp_native_flush_interrupt(); | 201 | icp_native_flush_interrupt(); |
196 | local_paca->irq_happened &= PACA_IRQ_HARD_DIS; | 202 | local_paca->irq_happened &= PACA_IRQ_HARD_DIS; |
197 | smp_mb(); | 203 | smp_mb(); |
204 | } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { | ||
205 | unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); | ||
206 | asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); | ||
207 | kvmppc_set_host_ipi(cpu, 0); | ||
198 | } | 208 | } |
199 | 209 | ||
200 | if (cpu_core_split_required()) | 210 | if (cpu_core_split_required()) |
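
The powernv/smp.c hunk above selects the wakeup-reason mask at run time (SRR1_WAKEMASK_P8 on ISA 2.07 parts, SRR1_WAKEMASK otherwise) and then compares the masked SRR1 value against reason codes such as SRR1_WAKEEE and SRR1_WAKEHDBELL. A standalone sketch of that decode, reusing the constants added in the reg.h hunk above; the feature flag is a stand-in for cpu_has_feature(CPU_FTR_ARCH_207S).

#include <stdio.h>

#define SRR1_WAKEMASK     0x00380000UL /* reason for wakeup */
#define SRR1_WAKEMASK_P8  0x003c0000UL /* reason for wakeup on POWER8 */
#define SRR1_WAKEEE       0x00200000UL /* external interrupt */
#define SRR1_WAKEHDBELL   0x000c0000UL /* hypervisor doorbell on P8 */

/* Decode the wake reason, choosing the wider P8 mask on ISA 2.07 CPUs. */
static const char *wake_reason(unsigned long srr1, int has_arch_207s)
{
	unsigned long wmask = has_arch_207s ? SRR1_WAKEMASK_P8 : SRR1_WAKEMASK;

	if ((srr1 & wmask) == SRR1_WAKEEE)
		return "external interrupt";
	if ((srr1 & wmask) == SRR1_WAKEHDBELL)
		return "hypervisor doorbell";
	return "other/none";
}

int main(void)
{
	printf("%s\n", wake_reason(0x000c0000UL, 1)); /* hypervisor doorbell */
	printf("%s\n", wake_reason(0x00200000UL, 0)); /* external interrupt  */
	return 0;
}
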
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 90cf3dcbd9f2..8f35d525cede 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c | |||
@@ -25,10 +25,10 @@ | |||
25 | static struct kobject *mobility_kobj; | 25 | static struct kobject *mobility_kobj; |
26 | 26 | ||
27 | struct update_props_workarea { | 27 | struct update_props_workarea { |
28 | u32 phandle; | 28 | __be32 phandle; |
29 | u32 state; | 29 | __be32 state; |
30 | u64 reserved; | 30 | __be64 reserved; |
31 | u32 nprops; | 31 | __be32 nprops; |
32 | } __packed; | 32 | } __packed; |
33 | 33 | ||
34 | #define NODE_ACTION_MASK 0xff000000 | 34 | #define NODE_ACTION_MASK 0xff000000 |
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope) | |||
54 | return rc; | 54 | return rc; |
55 | } | 55 | } |
56 | 56 | ||
57 | static int delete_dt_node(u32 phandle) | 57 | static int delete_dt_node(__be32 phandle) |
58 | { | 58 | { |
59 | struct device_node *dn; | 59 | struct device_node *dn; |
60 | 60 | ||
61 | dn = of_find_node_by_phandle(phandle); | 61 | dn = of_find_node_by_phandle(be32_to_cpu(phandle)); |
62 | if (!dn) | 62 | if (!dn) |
63 | return -ENOENT; | 63 | return -ENOENT; |
64 | 64 | ||
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop, | |||
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int update_dt_node(u32 phandle, s32 scope) | 130 | static int update_dt_node(__be32 phandle, s32 scope) |
131 | { | 131 | { |
132 | struct update_props_workarea *upwa; | 132 | struct update_props_workarea *upwa; |
133 | struct device_node *dn; | 133 | struct device_node *dn; |
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
136 | char *prop_data; | 136 | char *prop_data; |
137 | char *rtas_buf; | 137 | char *rtas_buf; |
138 | int update_properties_token; | 138 | int update_properties_token; |
139 | u32 nprops; | ||
139 | u32 vd; | 140 | u32 vd; |
140 | 141 | ||
141 | update_properties_token = rtas_token("ibm,update-properties"); | 142 | update_properties_token = rtas_token("ibm,update-properties"); |
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
146 | if (!rtas_buf) | 147 | if (!rtas_buf) |
147 | return -ENOMEM; | 148 | return -ENOMEM; |
148 | 149 | ||
149 | dn = of_find_node_by_phandle(phandle); | 150 | dn = of_find_node_by_phandle(be32_to_cpu(phandle)); |
150 | if (!dn) { | 151 | if (!dn) { |
151 | kfree(rtas_buf); | 152 | kfree(rtas_buf); |
152 | return -ENOENT; | 153 | return -ENOENT; |
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
162 | break; | 163 | break; |
163 | 164 | ||
164 | prop_data = rtas_buf + sizeof(*upwa); | 165 | prop_data = rtas_buf + sizeof(*upwa); |
166 | nprops = be32_to_cpu(upwa->nprops); | ||
165 | 167 | ||
166 | /* On the first call to ibm,update-properties for a node the | 168 | /* On the first call to ibm,update-properties for a node the |
167 | * the first property value descriptor contains an empty | 169 | * the first property value descriptor contains an empty |
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
170 | */ | 172 | */ |
171 | if (*prop_data == 0) { | 173 | if (*prop_data == 0) { |
172 | prop_data++; | 174 | prop_data++; |
173 | vd = *(u32 *)prop_data; | 175 | vd = be32_to_cpu(*(__be32 *)prop_data); |
174 | prop_data += vd + sizeof(vd); | 176 | prop_data += vd + sizeof(vd); |
175 | upwa->nprops--; | 177 | nprops--; |
176 | } | 178 | } |
177 | 179 | ||
178 | for (i = 0; i < upwa->nprops; i++) { | 180 | for (i = 0; i < nprops; i++) { |
179 | char *prop_name; | 181 | char *prop_name; |
180 | 182 | ||
181 | prop_name = prop_data; | 183 | prop_name = prop_data; |
182 | prop_data += strlen(prop_name) + 1; | 184 | prop_data += strlen(prop_name) + 1; |
183 | vd = *(u32 *)prop_data; | 185 | vd = be32_to_cpu(*(__be32 *)prop_data); |
184 | prop_data += sizeof(vd); | 186 | prop_data += sizeof(vd); |
185 | 187 | ||
186 | switch (vd) { | 188 | switch (vd) { |
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope) | |||
212 | return 0; | 214 | return 0; |
213 | } | 215 | } |
214 | 216 | ||
215 | static int add_dt_node(u32 parent_phandle, u32 drc_index) | 217 | static int add_dt_node(__be32 parent_phandle, __be32 drc_index) |
216 | { | 218 | { |
217 | struct device_node *dn; | 219 | struct device_node *dn; |
218 | struct device_node *parent_dn; | 220 | struct device_node *parent_dn; |
219 | int rc; | 221 | int rc; |
220 | 222 | ||
221 | parent_dn = of_find_node_by_phandle(parent_phandle); | 223 | parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle)); |
222 | if (!parent_dn) | 224 | if (!parent_dn) |
223 | return -ENOENT; | 225 | return -ENOENT; |
224 | 226 | ||
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index) | |||
237 | int pseries_devicetree_update(s32 scope) | 239 | int pseries_devicetree_update(s32 scope) |
238 | { | 240 | { |
239 | char *rtas_buf; | 241 | char *rtas_buf; |
240 | u32 *data; | 242 | __be32 *data; |
241 | int update_nodes_token; | 243 | int update_nodes_token; |
242 | int rc; | 244 | int rc; |
243 | 245 | ||
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope) | |||
254 | if (rc && rc != 1) | 256 | if (rc && rc != 1) |
255 | break; | 257 | break; |
256 | 258 | ||
257 | data = (u32 *)rtas_buf + 4; | 259 | data = (__be32 *)rtas_buf + 4; |
258 | while (*data & NODE_ACTION_MASK) { | 260 | while (be32_to_cpu(*data) & NODE_ACTION_MASK) { |
259 | int i; | 261 | int i; |
260 | u32 action = *data & NODE_ACTION_MASK; | 262 | u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK; |
261 | int node_count = *data & NODE_COUNT_MASK; | 263 | u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK; |
262 | 264 | ||
263 | data++; | 265 | data++; |
264 | 266 | ||
265 | for (i = 0; i < node_count; i++) { | 267 | for (i = 0; i < node_count; i++) { |
266 | u32 phandle = *data++; | 268 | __be32 phandle = *data++; |
267 | u32 drc_index; | 269 | __be32 drc_index; |
268 | 270 | ||
269 | switch (action) { | 271 | switch (action) { |
270 | case DELETE_DT_NODE: | 272 | case DELETE_DT_NODE: |
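
The mobility.c changes above annotate the RTAS work area and node buffer as big-endian (__be32/__be64) and convert each field with be32_to_cpu() before use, instead of assuming host byte order. A minimal userspace sketch of the same parsing pattern, using be32toh() from <endian.h> in place of the kernel helper; the field layout follows struct update_props_workarea above, the sample bytes are made up.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct update_props_workarea: all fields are big-endian on the wire. */
struct workarea {
	uint32_t phandle;
	uint32_t state;
	uint64_t reserved;
	uint32_t nprops;
} __attribute__((packed));

int main(void)
{
	/* Fake RTAS reply: phandle 0x1234, state 1, nprops 2, big-endian bytes. */
	unsigned char buf[sizeof(struct workarea)] = { 0 };
	struct workarea wa;

	buf[2] = 0x12; buf[3] = 0x34;  /* phandle */
	buf[7] = 0x01;                 /* state   */
	buf[19] = 0x02;                /* nprops  */

	memcpy(&wa, buf, sizeof(wa));
	printf("phandle=%#x state=%u nprops=%u\n",
	       be32toh(wa.phandle), be32toh(wa.state), be32toh(wa.nprops));
	return 0;
}
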
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index c9df40b5c0ac..c9c875d9ed31 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h | |||
@@ -211,7 +211,7 @@ do { \ | |||
211 | 211 | ||
212 | extern unsigned long mmap_rnd_mask; | 212 | extern unsigned long mmap_rnd_mask; |
213 | 213 | ||
214 | #define STACK_RND_MASK (mmap_rnd_mask) | 214 | #define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask) |
215 | 215 | ||
216 | #define ARCH_DLINFO \ | 216 | #define ARCH_DLINFO \ |
217 | do { \ | 217 | do { \ |
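
The elf.h change above caps stack randomization for 31-bit compat tasks by returning a fixed mask of 0x7ff instead of mmap_rnd_mask. Assuming 4 KiB pages and the usual use of the mask as a page count shifted by PAGE_SHIFT, this bounds the randomized region to roughly 8 MiB, as the tiny sketch below works out; both assumptions are stated here, not taken from the hunk itself.

#include <stdio.h>

#define PAGE_SHIFT 12          /* assume 4 KiB pages */
#define COMPAT_STACK_RND_MASK 0x7ffUL

int main(void)
{
	/* The mask counts pages; shifting by PAGE_SHIFT gives the byte span. */
	unsigned long span = (COMPAT_STACK_RND_MASK + 1) << PAGE_SHIFT;

	printf("compat stack randomization span: %lu MiB\n", span >> 20);
	return 0;
}
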
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index d84559e31f32..f407bbf5ee94 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -515,15 +515,15 @@ struct s390_io_adapter { | |||
515 | #define S390_ARCH_FAC_MASK_SIZE_U64 \ | 515 | #define S390_ARCH_FAC_MASK_SIZE_U64 \ |
516 | (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) | 516 | (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) |
517 | 517 | ||
518 | struct s390_model_fac { | 518 | struct kvm_s390_fac { |
519 | /* facilities used in SIE context */ | 519 | /* facility list requested by guest */ |
520 | __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64]; | 520 | __u64 list[S390_ARCH_FAC_LIST_SIZE_U64]; |
521 | /* subset enabled by kvm */ | 521 | /* facility mask supported by kvm & hosting machine */ |
522 | __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64]; | 522 | __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64]; |
523 | }; | 523 | }; |
524 | 524 | ||
525 | struct kvm_s390_cpu_model { | 525 | struct kvm_s390_cpu_model { |
526 | struct s390_model_fac *fac; | 526 | struct kvm_s390_fac *fac; |
527 | struct cpuid cpu_id; | 527 | struct cpuid cpu_id; |
528 | unsigned short ibc; | 528 | unsigned short ibc; |
529 | }; | 529 | }; |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index f49b71954654..8fb3802f8fad 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
62 | { | 62 | { |
63 | int cpu = smp_processor_id(); | 63 | int cpu = smp_processor_id(); |
64 | 64 | ||
65 | S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); | ||
65 | if (prev == next) | 66 | if (prev == next) |
66 | return; | 67 | return; |
67 | if (MACHINE_HAS_TLB_LC) | 68 | if (MACHINE_HAS_TLB_LC) |
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
73 | atomic_dec(&prev->context.attach_count); | 74 | atomic_dec(&prev->context.attach_count); |
74 | if (MACHINE_HAS_TLB_LC) | 75 | if (MACHINE_HAS_TLB_LC) |
75 | cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); | 76 | cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); |
76 | S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); | ||
77 | } | 77 | } |
78 | 78 | ||
79 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch | 79 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 7b2ac6e44166..53eacbd4f09b 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end | |||
37 | #endif | 37 | #endif |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline void clear_page(void *page) | 40 | #define clear_page(page) memset((page), 0, PAGE_SIZE) |
41 | { | ||
42 | register unsigned long reg1 asm ("1") = 0; | ||
43 | register void *reg2 asm ("2") = page; | ||
44 | register unsigned long reg3 asm ("3") = 4096; | ||
45 | asm volatile( | ||
46 | " mvcl 2,0" | ||
47 | : "+d" (reg2), "+d" (reg3) : "d" (reg1) | ||
48 | : "memory", "cc"); | ||
49 | } | ||
50 | 41 | ||
51 | /* | 42 | /* |
52 | * copy_page uses the mvcl instruction with 0xb0 padding byte in order to | 43 | * copy_page uses the mvcl instruction with 0xb0 padding byte in order to |
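
The page.h hunk above replaces the hand-written MVCL-based clear_page() with a plain memset() of PAGE_SIZE, leaving the work to the compiler and the optimized library memset. A sketch of the new form in isolation, with PAGE_SIZE assumed to be 4 KiB for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* New-style clear_page(): just zero the whole page with memset. */
static void clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}

int main(void)
{
	unsigned char *page = malloc(PAGE_SIZE);

	if (!page)
		return 1;
	memset(page, 0xa5, PAGE_SIZE);
	clear_page(page);
	printf("first byte after clear: %u\n", page[0]);
	free(page);
	return 0;
}
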
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 82c19899574f..6c79f1b44fe7 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -57,6 +57,44 @@ | |||
57 | 57 | ||
58 | unsigned long ftrace_plt; | 58 | unsigned long ftrace_plt; |
59 | 59 | ||
60 | static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn) | ||
61 | { | ||
62 | #ifdef CC_USING_HOTPATCH | ||
63 | /* brcl 0,0 */ | ||
64 | insn->opc = 0xc004; | ||
65 | insn->disp = 0; | ||
66 | #else | ||
67 | /* stg r14,8(r15) */ | ||
68 | insn->opc = 0xe3e0; | ||
69 | insn->disp = 0xf0080024; | ||
70 | #endif | ||
71 | } | ||
72 | |||
73 | static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn) | ||
74 | { | ||
75 | #ifdef CONFIG_KPROBES | ||
76 | if (insn->opc == BREAKPOINT_INSTRUCTION) | ||
77 | return 1; | ||
78 | #endif | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn) | ||
83 | { | ||
84 | #ifdef CONFIG_KPROBES | ||
85 | insn->opc = BREAKPOINT_INSTRUCTION; | ||
86 | insn->disp = KPROBE_ON_FTRACE_NOP; | ||
87 | #endif | ||
88 | } | ||
89 | |||
90 | static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn) | ||
91 | { | ||
92 | #ifdef CONFIG_KPROBES | ||
93 | insn->opc = BREAKPOINT_INSTRUCTION; | ||
94 | insn->disp = KPROBE_ON_FTRACE_CALL; | ||
95 | #endif | ||
96 | } | ||
97 | |||
60 | int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, | 98 | int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, |
61 | unsigned long addr) | 99 | unsigned long addr) |
62 | { | 100 | { |
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | |||
72 | return -EFAULT; | 110 | return -EFAULT; |
73 | if (addr == MCOUNT_ADDR) { | 111 | if (addr == MCOUNT_ADDR) { |
74 | /* Initial code replacement */ | 112 | /* Initial code replacement */ |
75 | #ifdef CC_USING_HOTPATCH | 113 | ftrace_generate_orig_insn(&orig); |
76 | /* We expect to see brcl 0,0 */ | ||
77 | ftrace_generate_nop_insn(&orig); | ||
78 | #else | ||
79 | /* We expect to see stg r14,8(r15) */ | ||
80 | orig.opc = 0xe3e0; | ||
81 | orig.disp = 0xf0080024; | ||
82 | #endif | ||
83 | ftrace_generate_nop_insn(&new); | 114 | ftrace_generate_nop_insn(&new); |
84 | } else if (old.opc == BREAKPOINT_INSTRUCTION) { | 115 | } else if (is_kprobe_on_ftrace(&old)) { |
85 | /* | 116 | /* |
86 | * If we find a breakpoint instruction, a kprobe has been | 117 | * If we find a breakpoint instruction, a kprobe has been |
87 | * placed at the beginning of the function. We write the | 118 | * placed at the beginning of the function. We write the |
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, | |||
89 | * bytes of the original instruction so that the kprobes | 120 | * bytes of the original instruction so that the kprobes |
90 | * handler can execute a nop, if it reaches this breakpoint. | 121 | * handler can execute a nop, if it reaches this breakpoint. |
91 | */ | 122 | */ |
92 | new.opc = orig.opc = BREAKPOINT_INSTRUCTION; | 123 | ftrace_generate_kprobe_call_insn(&orig); |
93 | orig.disp = KPROBE_ON_FTRACE_CALL; | 124 | ftrace_generate_kprobe_nop_insn(&new); |
94 | new.disp = KPROBE_ON_FTRACE_NOP; | ||
95 | } else { | 125 | } else { |
96 | /* Replace ftrace call with a nop. */ | 126 | /* Replace ftrace call with a nop. */ |
97 | ftrace_generate_call_insn(&orig, rec->ip); | 127 | ftrace_generate_call_insn(&orig, rec->ip); |
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
111 | 141 | ||
112 | if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) | 142 | if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) |
113 | return -EFAULT; | 143 | return -EFAULT; |
114 | if (old.opc == BREAKPOINT_INSTRUCTION) { | 144 | if (is_kprobe_on_ftrace(&old)) { |
115 | /* | 145 | /* |
116 | * If we find a breakpoint instruction, a kprobe has been | 146 | * If we find a breakpoint instruction, a kprobe has been |
117 | * placed at the beginning of the function. We write the | 147 | * placed at the beginning of the function. We write the |
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
119 | * bytes of the original instruction so that the kprobes | 149 | * bytes of the original instruction so that the kprobes |
120 | * handler can execute a brasl if it reaches this breakpoint. | 150 | * handler can execute a brasl if it reaches this breakpoint. |
121 | */ | 151 | */ |
122 | new.opc = orig.opc = BREAKPOINT_INSTRUCTION; | 152 | ftrace_generate_kprobe_nop_insn(&orig); |
123 | orig.disp = KPROBE_ON_FTRACE_NOP; | 153 | ftrace_generate_kprobe_call_insn(&new); |
124 | new.disp = KPROBE_ON_FTRACE_CALL; | ||
125 | } else { | 154 | } else { |
126 | /* Replace nop with an ftrace call. */ | 155 | /* Replace nop with an ftrace call. */ |
127 | ftrace_generate_nop_insn(&orig); | 156 | ftrace_generate_nop_insn(&orig); |
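
The ftrace.c hunk above factors the raw opcode/displacement pairs into small helpers (ftrace_generate_orig_insn(), ftrace_generate_kprobe_nop_insn(), ftrace_generate_kprobe_call_insn()), so the make_nop/make_call paths just compare and swap whole ftrace_insn values. A userspace sketch of that pattern, with placeholder opcode and displacement values standing in for the real s390 encodings.

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct ftrace_insn: a 2-byte opcode plus a 4-byte displacement. */
struct insn {
	uint16_t opc;
	uint32_t disp;
};

#define BREAKPOINT_OPC   0x0002u     /* placeholder, not the real s390 value */
#define KPROBE_NOP_DISP  0x00000000u /* placeholder */
#define KPROBE_CALL_DISP 0x00000001u /* placeholder */

static void generate_kprobe_nop_insn(struct insn *insn)
{
	insn->opc  = BREAKPOINT_OPC;
	insn->disp = KPROBE_NOP_DISP;
}

static void generate_kprobe_call_insn(struct insn *insn)
{
	insn->opc  = BREAKPOINT_OPC;
	insn->disp = KPROBE_CALL_DISP;
}

static int is_kprobe_insn(const struct insn *insn)
{
	return insn->opc == BREAKPOINT_OPC;
}

int main(void)
{
	struct insn old, new;

	generate_kprobe_call_insn(&old);    /* what we expect to find */
	generate_kprobe_nop_insn(&new);     /* what we want to install */
	printf("kprobe site: %d, swap disp %u -> %u\n",
	       is_kprobe_insn(&old), (unsigned)old.disp, (unsigned)new.disp);
	return 0;
}
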
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index cb2d51e779df..830066f936c8 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c | |||
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn) | |||
36 | insn->offset = (entry->target - entry->code) >> 1; | 36 | insn->offset = (entry->target - entry->code) >> 1; |
37 | } | 37 | } |
38 | 38 | ||
39 | static void jump_label_bug(struct jump_entry *entry, struct insn *insn) | 39 | static void jump_label_bug(struct jump_entry *entry, struct insn *expected, |
40 | struct insn *new) | ||
40 | { | 41 | { |
41 | unsigned char *ipc = (unsigned char *)entry->code; | 42 | unsigned char *ipc = (unsigned char *)entry->code; |
42 | unsigned char *ipe = (unsigned char *)insn; | 43 | unsigned char *ipe = (unsigned char *)expected; |
44 | unsigned char *ipn = (unsigned char *)new; | ||
43 | 45 | ||
44 | pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); | 46 | pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); |
45 | pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", | 47 | pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", |
46 | ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); | 48 | ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); |
47 | pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", | 49 | pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", |
48 | ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); | 50 | ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); |
51 | pr_emerg("New: %02x %02x %02x %02x %02x %02x\n", | ||
52 | ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]); | ||
49 | panic("Corrupted kernel text"); | 53 | panic("Corrupted kernel text"); |
50 | } | 54 | } |
51 | 55 | ||
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry, | |||
69 | } | 73 | } |
70 | if (init) { | 74 | if (init) { |
71 | if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) | 75 | if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) |
72 | jump_label_bug(entry, &old); | 76 | jump_label_bug(entry, &orignop, &new); |
73 | } else { | 77 | } else { |
74 | if (memcmp((void *)entry->code, &old, sizeof(old))) | 78 | if (memcmp((void *)entry->code, &old, sizeof(old))) |
75 | jump_label_bug(entry, &old); | 79 | jump_label_bug(entry, &old, &new); |
76 | } | 80 | } |
77 | probe_kernel_write((void *)entry->code, &new, sizeof(new)); | 81 | probe_kernel_write((void *)entry->code, &new, sizeof(new)); |
78 | } | 82 | } |
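
The jump_label.c change above makes jump_label_bug() print three 6-byte sequences -- what was found at the patch site, what was expected, and what was about to be written -- before panicking. A standalone sketch of that diagnostic using plain printf in place of pr_emerg/panic; the byte values are illustrative.

#include <stdio.h>
#include <string.h>

#define INSN_LEN 6

static void dump(const char *tag, const unsigned char *p)
{
	printf("%-9s %02x %02x %02x %02x %02x %02x\n",
	       tag, p[0], p[1], p[2], p[3], p[4], p[5]);
}

/* Report a code mismatch: found vs. expected vs. what we wanted to write. */
static void report_mismatch(const unsigned char *code,
			    const unsigned char *expected,
			    const unsigned char *new_insn)
{
	if (memcmp(code, expected, INSN_LEN) == 0)
		return;              /* site looks as expected, nothing to report */
	dump("Found:", code);
	dump("Expected:", expected);
	dump("New:", new_insn);
}

int main(void)
{
	unsigned char code[INSN_LEN]     = { 0xc0, 0x04, 0, 0, 0, 0 };
	unsigned char expected[INSN_LEN] = { 0xc0, 0xf4, 0, 0, 0, 1 };
	unsigned char new_insn[INSN_LEN] = { 0x47, 0x00, 0, 0, 0, 0 };

	report_mismatch(code, expected, new_insn);
	return 0;
}
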
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 36154a2f1814..2ca95862e336 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
436 | const Elf_Shdr *sechdrs, | 436 | const Elf_Shdr *sechdrs, |
437 | struct module *me) | 437 | struct module *me) |
438 | { | 438 | { |
439 | jump_label_apply_nops(me); | ||
439 | vfree(me->arch.syminfo); | 440 | vfree(me->arch.syminfo); |
440 | me->arch.syminfo = NULL; | 441 | me->arch.syminfo = NULL; |
441 | return 0; | 442 | return 0; |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index c3f8d157cb0d..e6a1578fc000 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); | |||
1415 | 1415 | ||
1416 | static struct attribute *cpumsf_pmu_events_attr[] = { | 1416 | static struct attribute *cpumsf_pmu_events_attr[] = { |
1417 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), | 1417 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), |
1418 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), | 1418 | NULL, |
1419 | NULL, | 1419 | NULL, |
1420 | }; | 1420 | }; |
1421 | 1421 | ||
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void) | |||
1606 | return -EINVAL; | 1606 | return -EINVAL; |
1607 | } | 1607 | } |
1608 | 1608 | ||
1609 | if (si.ad) | 1609 | if (si.ad) { |
1610 | sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); | 1610 | sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); |
1611 | cpumsf_pmu_events_attr[1] = | ||
1612 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG); | ||
1613 | } | ||
1611 | 1614 | ||
1612 | sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); | 1615 | sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); |
1613 | if (!sfdbg) | 1616 | if (!sfdbg) |
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 26108232fcaa..dc488e13b7e3 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c | |||
@@ -18,7 +18,7 @@ | |||
18 | 18 | ||
19 | static DEFINE_PER_CPU(struct cpuid, cpu_id); | 19 | static DEFINE_PER_CPU(struct cpuid, cpu_id); |
20 | 20 | ||
21 | void cpu_relax(void) | 21 | void notrace cpu_relax(void) |
22 | { | 22 | { |
23 | if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) | 23 | if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) |
24 | asm volatile("diag 0,0,0x44"); | 24 | asm volatile("diag 0,0,0x44"); |
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 6b09fdffbd2f..ca6294645dd3 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -177,6 +177,17 @@ restart_entry: | |||
177 | lhi %r1,1 | 177 | lhi %r1,1 |
178 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE | 178 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE |
179 | sam64 | 179 | sam64 |
180 | #ifdef CONFIG_SMP | ||
181 | larl %r1,smp_cpu_mt_shift | ||
182 | icm %r1,15,0(%r1) | ||
183 | jz smt_done | ||
184 | llgfr %r1,%r1 | ||
185 | smt_loop: | ||
186 | sigp %r1,%r0,SIGP_SET_MULTI_THREADING | ||
187 | brc 8,smt_done /* accepted */ | ||
188 | brc 2,smt_loop /* busy, try again */ | ||
189 | smt_done: | ||
190 | #endif | ||
180 | larl %r1,.Lnew_pgm_check_psw | 191 | larl %r1,.Lnew_pgm_check_psw |
181 | lpswe 0(%r1) | 192 | lpswe 0(%r1) |
182 | pgm_check_entry: | 193 | pgm_check_entry: |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0c3623927563..19e17bd7aec0 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -165,7 +165,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
165 | case KVM_CAP_ONE_REG: | 165 | case KVM_CAP_ONE_REG: |
166 | case KVM_CAP_ENABLE_CAP: | 166 | case KVM_CAP_ENABLE_CAP: |
167 | case KVM_CAP_S390_CSS_SUPPORT: | 167 | case KVM_CAP_S390_CSS_SUPPORT: |
168 | case KVM_CAP_IRQFD: | ||
169 | case KVM_CAP_IOEVENTFD: | 168 | case KVM_CAP_IOEVENTFD: |
170 | case KVM_CAP_DEVICE_CTRL: | 169 | case KVM_CAP_DEVICE_CTRL: |
171 | case KVM_CAP_ENABLE_CAP_VM: | 170 | case KVM_CAP_ENABLE_CAP_VM: |
@@ -522,7 +521,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) | |||
522 | memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, | 521 | memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, |
523 | sizeof(struct cpuid)); | 522 | sizeof(struct cpuid)); |
524 | kvm->arch.model.ibc = proc->ibc; | 523 | kvm->arch.model.ibc = proc->ibc; |
525 | memcpy(kvm->arch.model.fac->kvm, proc->fac_list, | 524 | memcpy(kvm->arch.model.fac->list, proc->fac_list, |
526 | S390_ARCH_FAC_LIST_SIZE_BYTE); | 525 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
527 | } else | 526 | } else |
528 | ret = -EFAULT; | 527 | ret = -EFAULT; |
@@ -556,7 +555,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) | |||
556 | } | 555 | } |
557 | memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); | 556 | memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); |
558 | proc->ibc = kvm->arch.model.ibc; | 557 | proc->ibc = kvm->arch.model.ibc; |
559 | memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE); | 558 | memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); |
560 | if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) | 559 | if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) |
561 | ret = -EFAULT; | 560 | ret = -EFAULT; |
562 | kfree(proc); | 561 | kfree(proc); |
@@ -576,10 +575,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) | |||
576 | } | 575 | } |
577 | get_cpu_id((struct cpuid *) &mach->cpuid); | 576 | get_cpu_id((struct cpuid *) &mach->cpuid); |
578 | mach->ibc = sclp_get_ibc(); | 577 | mach->ibc = sclp_get_ibc(); |
579 | memcpy(&mach->fac_mask, kvm_s390_fac_list_mask, | 578 | memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, |
580 | kvm_s390_fac_list_mask_size() * sizeof(u64)); | 579 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
581 | memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, | 580 | memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, |
582 | S390_ARCH_FAC_LIST_SIZE_U64); | 581 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
583 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) | 582 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) |
584 | ret = -EFAULT; | 583 | ret = -EFAULT; |
585 | kfree(mach); | 584 | kfree(mach); |
@@ -778,15 +777,18 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
778 | static int kvm_s390_query_ap_config(u8 *config) | 777 | static int kvm_s390_query_ap_config(u8 *config) |
779 | { | 778 | { |
780 | u32 fcn_code = 0x04000000UL; | 779 | u32 fcn_code = 0x04000000UL; |
781 | u32 cc; | 780 | u32 cc = 0; |
782 | 781 | ||
782 | memset(config, 0, 128); | ||
783 | asm volatile( | 783 | asm volatile( |
784 | "lgr 0,%1\n" | 784 | "lgr 0,%1\n" |
785 | "lgr 2,%2\n" | 785 | "lgr 2,%2\n" |
786 | ".long 0xb2af0000\n" /* PQAP(QCI) */ | 786 | ".long 0xb2af0000\n" /* PQAP(QCI) */ |
787 | "ipm %0\n" | 787 | "0: ipm %0\n" |
788 | "srl %0,28\n" | 788 | "srl %0,28\n" |
789 | : "=r" (cc) | 789 | "1:\n" |
790 | EX_TABLE(0b, 1b) | ||
791 | : "+r" (cc) | ||
790 | : "r" (fcn_code), "r" (config) | 792 | : "r" (fcn_code), "r" (config) |
791 | : "cc", "0", "2", "memory" | 793 | : "cc", "0", "2", "memory" |
792 | ); | 794 | ); |
@@ -839,9 +841,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm) | |||
839 | 841 | ||
840 | kvm_s390_set_crycb_format(kvm); | 842 | kvm_s390_set_crycb_format(kvm); |
841 | 843 | ||
842 | /* Disable AES/DEA protected key functions by default */ | 844 | /* Enable AES/DEA protected key functions by default */ |
843 | kvm->arch.crypto.aes_kw = 0; | 845 | kvm->arch.crypto.aes_kw = 1; |
844 | kvm->arch.crypto.dea_kw = 0; | 846 | kvm->arch.crypto.dea_kw = 1; |
847 | get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, | ||
848 | sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); | ||
849 | get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, | ||
850 | sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); | ||
845 | 851 | ||
846 | return 0; | 852 | return 0; |
847 | } | 853 | } |
@@ -886,40 +892,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
886 | /* | 892 | /* |
887 | * The architectural maximum amount of facilities is 16 kbit. To store | 893 | * The architectural maximum amount of facilities is 16 kbit. To store |
888 | * this amount, 2 kbyte of memory is required. Thus we need a full | 894 | * this amount, 2 kbyte of memory is required. Thus we need a full |
889 | * page to hold the active copy (arch.model.fac->sie) and the current | 895 | * page to hold the guest facility list (arch.model.fac->list) and the |
890 | * facilities set (arch.model.fac->kvm). Its address size has to be | 896 | * facility mask (arch.model.fac->mask). Its address size has to be |
891 | * 31 bits and word aligned. | 897 | * 31 bits and word aligned. |
892 | */ | 898 | */ |
893 | kvm->arch.model.fac = | 899 | kvm->arch.model.fac = |
894 | (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 900 | (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
895 | if (!kvm->arch.model.fac) | 901 | if (!kvm->arch.model.fac) |
896 | goto out_nofac; | 902 | goto out_nofac; |
897 | 903 | ||
898 | memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, | 904 | /* Populate the facility mask initially. */ |
899 | S390_ARCH_FAC_LIST_SIZE_U64); | 905 | memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, |
900 | 906 | S390_ARCH_FAC_LIST_SIZE_BYTE); | |
901 | /* | ||
902 | * If this KVM host runs *not* in a LPAR, relax the facility bits | ||
903 | * of the kvm facility mask by all missing facilities. This will allow | ||
904 | * to determine the right CPU model by means of the remaining facilities. | ||
905 | * Live guest migration must prohibit the migration of KVMs running in | ||
906 | * a LPAR to non LPAR hosts. | ||
907 | */ | ||
908 | if (!MACHINE_IS_LPAR) | ||
909 | for (i = 0; i < kvm_s390_fac_list_mask_size(); i++) | ||
910 | kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i]; | ||
911 | |||
912 | /* | ||
913 | * Apply the kvm facility mask to limit the kvm supported/tolerated | ||
914 | * facility list. | ||
915 | */ | ||
916 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { | 907 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { |
917 | if (i < kvm_s390_fac_list_mask_size()) | 908 | if (i < kvm_s390_fac_list_mask_size()) |
918 | kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; | 909 | kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; |
919 | else | 910 | else |
920 | kvm->arch.model.fac->kvm[i] = 0UL; | 911 | kvm->arch.model.fac->mask[i] = 0UL; |
921 | } | 912 | } |
922 | 913 | ||
914 | /* Populate the facility list initially. */ | ||
915 | memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, | ||
916 | S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
917 | |||
923 | kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); | 918 | kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); |
924 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; | 919 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; |
925 | 920 | ||
@@ -1165,8 +1160,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1165 | 1160 | ||
1166 | mutex_lock(&vcpu->kvm->lock); | 1161 | mutex_lock(&vcpu->kvm->lock); |
1167 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; | 1162 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; |
1168 | memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm, | ||
1169 | S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
1170 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; | 1163 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; |
1171 | mutex_unlock(&vcpu->kvm->lock); | 1164 | mutex_unlock(&vcpu->kvm->lock); |
1172 | 1165 | ||
@@ -1212,7 +1205,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1212 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; | 1205 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; |
1213 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); | 1206 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); |
1214 | } | 1207 | } |
1215 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie; | 1208 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list; |
1216 | 1209 | ||
1217 | spin_lock_init(&vcpu->arch.local_int.lock); | 1210 | spin_lock_init(&vcpu->arch.local_int.lock); |
1218 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; | 1211 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 985c2114d7ef..c34109aa552d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) | |||
128 | /* test availability of facility in a kvm intance */ | 128 | /* test availability of facility in a kvm intance */ |
129 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) | 129 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) |
130 | { | 130 | { |
131 | return __test_facility(nr, kvm->arch.model.fac->kvm); | 131 | return __test_facility(nr, kvm->arch.model.fac->mask) && |
132 | __test_facility(nr, kvm->arch.model.fac->list); | ||
132 | } | 133 | } |
133 | 134 | ||
134 | /* are cpu states controlled by user space */ | 135 | /* are cpu states controlled by user space */ |
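
The kvm-s390 changes above split the per-VM facility data into a mask (what KVM and the hosting machine support) and a list (what is actually reported to the guest), and test_kvm_facility() now requires a facility bit to be set in both. A minimal sketch of that double test, assuming the STFLE-style MSB-first bit numbering used by the facility lists; array sizes and sample bits are illustrative.

#include <stdint.h>
#include <stdio.h>

#define FAC_LIST_SIZE_U64 4  /* enough words for this example */

struct fac {
	uint64_t list[FAC_LIST_SIZE_U64]; /* facility list offered to the guest   */
	uint64_t mask[FAC_LIST_SIZE_U64]; /* facility mask supported by host + KVM */
};

/* Facility bits are numbered from the most significant bit of word 0. */
static int test_facility_bit(const uint64_t *words, unsigned int nr)
{
	return (words[nr / 64] >> (63 - (nr % 64))) & 1;
}

static int test_kvm_facility(const struct fac *fac, unsigned int nr)
{
	return test_facility_bit(fac->mask, nr) && test_facility_bit(fac->list, nr);
}

int main(void)
{
	struct fac fac = { { 0 }, { 0 } };

	fac.mask[0] = 1ULL << 63;   /* facility 0 supported by the host */
	fac.list[0] = 1ULL << 63;   /* ... and offered to the guest     */
	printf("facility 0: %d, facility 1: %d\n",
	       test_kvm_facility(&fac, 0), test_kvm_facility(&fac, 1));
	return 0;
}
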
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index bdd9b5b17e03..351116939ea2 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) | |||
348 | * We need to shift the lower 32 facility bits (bit 0-31) from a u64 | 348 | * We need to shift the lower 32 facility bits (bit 0-31) from a u64 |
349 | * into a u32 memory representation. They will remain bits 0-31. | 349 | * into a u32 memory representation. They will remain bits 0-31. |
350 | */ | 350 | */ |
351 | fac = *vcpu->kvm->arch.model.fac->sie >> 32; | 351 | fac = *vcpu->kvm->arch.model.fac->list >> 32; |
352 | rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), | 352 | rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), |
353 | &fac, sizeof(fac)); | 353 | &fac, sizeof(fac)); |
354 | if (rc) | 354 | if (rc) |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 753a56731951..f0b85443e060 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, | |||
287 | addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); | 287 | addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); |
288 | return (void __iomem *) addr + offset; | 288 | return (void __iomem *) addr + offset; |
289 | } | 289 | } |
290 | EXPORT_SYMBOL_GPL(pci_iomap_range); | 290 | EXPORT_SYMBOL(pci_iomap_range); |
291 | 291 | ||
292 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 292 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
293 | { | 293 | { |
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) | |||
309 | } | 309 | } |
310 | spin_unlock(&zpci_iomap_lock); | 310 | spin_unlock(&zpci_iomap_lock); |
311 | } | 311 | } |
312 | EXPORT_SYMBOL_GPL(pci_iounmap); | 312 | EXPORT_SYMBOL(pci_iounmap); |
313 | 313 | ||
314 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, | 314 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, |
315 | int size, u32 *val) | 315 | int size, u32 *val) |
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) | |||
483 | airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); | 483 | airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); |
484 | } | 484 | } |
485 | 485 | ||
486 | static void zpci_map_resources(struct zpci_dev *zdev) | 486 | static void zpci_map_resources(struct pci_dev *pdev) |
487 | { | 487 | { |
488 | struct pci_dev *pdev = zdev->pdev; | ||
489 | resource_size_t len; | 488 | resource_size_t len; |
490 | int i; | 489 | int i; |
491 | 490 | ||
@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev) | |||
499 | } | 498 | } |
500 | } | 499 | } |
501 | 500 | ||
502 | static void zpci_unmap_resources(struct zpci_dev *zdev) | 501 | static void zpci_unmap_resources(struct pci_dev *pdev) |
503 | { | 502 | { |
504 | struct pci_dev *pdev = zdev->pdev; | ||
505 | resource_size_t len; | 503 | resource_size_t len; |
506 | int i; | 504 | int i; |
507 | 505 | ||
@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev) | |||
651 | 649 | ||
652 | zdev->pdev = pdev; | 650 | zdev->pdev = pdev; |
653 | pdev->dev.groups = zpci_attr_groups; | 651 | pdev->dev.groups = zpci_attr_groups; |
654 | zpci_map_resources(zdev); | 652 | zpci_map_resources(pdev); |
655 | 653 | ||
656 | for (i = 0; i < PCI_BAR_COUNT; i++) { | 654 | for (i = 0; i < PCI_BAR_COUNT; i++) { |
657 | res = &pdev->resource[i]; | 655 | res = &pdev->resource[i]; |
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev) | |||
663 | return 0; | 661 | return 0; |
664 | } | 662 | } |
665 | 663 | ||
664 | void pcibios_release_device(struct pci_dev *pdev) | ||
665 | { | ||
666 | zpci_unmap_resources(pdev); | ||
667 | } | ||
668 | |||
666 | int pcibios_enable_device(struct pci_dev *pdev, int mask) | 669 | int pcibios_enable_device(struct pci_dev *pdev, int mask) |
667 | { | 670 | { |
668 | struct zpci_dev *zdev = get_zdev(pdev); | 671 | struct zpci_dev *zdev = get_zdev(pdev); |
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) | |||
670 | zdev->pdev = pdev; | 673 | zdev->pdev = pdev; |
671 | zpci_debug_init_device(zdev); | 674 | zpci_debug_init_device(zdev); |
672 | zpci_fmb_enable_device(zdev); | 675 | zpci_fmb_enable_device(zdev); |
673 | zpci_map_resources(zdev); | ||
674 | 676 | ||
675 | return pci_enable_resources(pdev, mask); | 677 | return pci_enable_resources(pdev, mask); |
676 | } | 678 | } |
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev) | |||
679 | { | 681 | { |
680 | struct zpci_dev *zdev = get_zdev(pdev); | 682 | struct zpci_dev *zdev = get_zdev(pdev); |
681 | 683 | ||
682 | zpci_unmap_resources(zdev); | ||
683 | zpci_fmb_disable_device(zdev); | 684 | zpci_fmb_disable_device(zdev); |
684 | zpci_debug_exit_device(zdev); | 685 | zpci_debug_exit_device(zdev); |
685 | zdev->pdev = NULL; | 686 | zdev->pdev = NULL; |
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev) | |||
688 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 689 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
689 | static int zpci_restore(struct device *dev) | 690 | static int zpci_restore(struct device *dev) |
690 | { | 691 | { |
691 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 692 | struct pci_dev *pdev = to_pci_dev(dev); |
693 | struct zpci_dev *zdev = get_zdev(pdev); | ||
692 | int ret = 0; | 694 | int ret = 0; |
693 | 695 | ||
694 | if (zdev->state != ZPCI_FN_STATE_ONLINE) | 696 | if (zdev->state != ZPCI_FN_STATE_ONLINE) |
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev) | |||
698 | if (ret) | 700 | if (ret) |
699 | goto out; | 701 | goto out; |
700 | 702 | ||
701 | zpci_map_resources(zdev); | 703 | zpci_map_resources(pdev); |
702 | zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, | 704 | zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, |
703 | zdev->start_dma + zdev->iommu_size - 1, | 705 | zdev->start_dma + zdev->iommu_size - 1, |
704 | (u64) zdev->dma_table); | 706 | (u64) zdev->dma_table); |
@@ -709,12 +711,14 @@ out: | |||
709 | 711 | ||
710 | static int zpci_freeze(struct device *dev) | 712 | static int zpci_freeze(struct device *dev) |
711 | { | 713 | { |
712 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 714 | struct pci_dev *pdev = to_pci_dev(dev); |
715 | struct zpci_dev *zdev = get_zdev(pdev); | ||
713 | 716 | ||
714 | if (zdev->state != ZPCI_FN_STATE_ONLINE) | 717 | if (zdev->state != ZPCI_FN_STATE_ONLINE) |
715 | return 0; | 718 | return 0; |
716 | 719 | ||
717 | zpci_unregister_ioat(zdev, 0); | 720 | zpci_unregister_ioat(zdev, 0); |
721 | zpci_unmap_resources(pdev); | ||
718 | return clp_disable_fh(zdev); | 722 | return clp_disable_fh(zdev); |
719 | } | 723 | } |
720 | 724 | ||
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 8aa271b3d1ad..b1bb2b72302c 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c | |||
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, | |||
64 | if (copy_from_user(buf, user_buffer, length)) | 64 | if (copy_from_user(buf, user_buffer, length)) |
65 | goto out; | 65 | goto out; |
66 | 66 | ||
67 | memcpy_toio(io_addr, buf, length); | 67 | ret = zpci_memcpy_toio(io_addr, buf, length); |
68 | ret = 0; | ||
69 | out: | 68 | out: |
70 | if (buf != local_buf) | 69 | if (buf != local_buf) |
71 | kfree(buf); | 70 | kfree(buf); |
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, | |||
98 | goto out; | 97 | goto out; |
99 | io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); | 98 | io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); |
100 | 99 | ||
101 | ret = -EFAULT; | 100 | if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) { |
102 | if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) | 101 | ret = -EFAULT; |
103 | goto out; | 102 | goto out; |
104 | 103 | } | |
105 | memcpy_fromio(buf, io_addr, length); | 104 | ret = zpci_memcpy_fromio(buf, io_addr, length); |
106 | 105 | if (ret) | |
107 | if (copy_to_user(user_buffer, buf, length)) | ||
108 | goto out; | 106 | goto out; |
107 | if (copy_to_user(user_buffer, buf, length)) | ||
108 | ret = -EFAULT; | ||
109 | 109 | ||
110 | ret = 0; | ||
111 | out: | 110 | out: |
112 | if (buf != local_buf) | 111 | if (buf != local_buf) |
113 | kfree(buf); | 112 | kfree(buf); |
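
The pci_mmio.c hunk above makes the read path propagate the return value of zpci_memcpy_fromio() instead of assuming success, and only maps a failing copy_to_user() to -EFAULT afterwards. A compact sketch of that error-propagation shape; the device-read and user-copy functions below are stand-ins, not the kernel helpers.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for zpci_memcpy_fromio(): returns 0 on success, -EIO on error. */
static int device_read(void *dst, const void *src, size_t len, int fail)
{
	if (fail)
		return -EIO;
	memcpy(dst, src, len);
	return 0;
}

/* Stand-in for copy_to_user(): returns the number of bytes NOT copied. */
static size_t user_copy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static int mmio_read(void *user_buf, const void *io_addr, size_t len, int fail)
{
	char buf[64];
	int ret;

	ret = device_read(buf, io_addr, len, fail);  /* propagate device errors */
	if (ret)
		return ret;
	if (user_copy(user_buf, buf, len))
		ret = -EFAULT;                       /* user copy failed */
	return ret;
}

int main(void)
{
	char src[8] = "abcdefg", dst[8];

	printf("ok path: %d, error path: %d\n",
	       mmio_read(dst, src, sizeof(src), 0),
	       mmio_read(dst, src, sizeof(src), 1));
	return 0;
}
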
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 96ac69c5eba0..efb00ec75805 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -86,6 +86,9 @@ config ARCH_DEFCONFIG | |||
86 | default "arch/sparc/configs/sparc32_defconfig" if SPARC32 | 86 | default "arch/sparc/configs/sparc32_defconfig" if SPARC32 |
87 | default "arch/sparc/configs/sparc64_defconfig" if SPARC64 | 87 | default "arch/sparc/configs/sparc64_defconfig" if SPARC64 |
88 | 88 | ||
89 | config ARCH_PROC_KCORE_TEXT | ||
90 | def_bool y | ||
91 | |||
89 | config IOMMU_HELPER | 92 | config IOMMU_HELPER |
90 | bool | 93 | bool |
91 | default y if SPARC64 | 94 | default y if SPARC64 |
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 4f6725ff4c33..f5b6537306f0 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h | |||
@@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, | |||
2957 | unsigned long reg_val); | 2957 | unsigned long reg_val); |
2958 | #endif | 2958 | #endif |
2959 | 2959 | ||
2960 | |||
2961 | #define HV_FAST_M7_GET_PERFREG 0x43 | ||
2962 | #define HV_FAST_M7_SET_PERFREG 0x44 | ||
2963 | |||
2964 | #ifndef __ASSEMBLY__ | ||
2965 | unsigned long sun4v_m7_get_perfreg(unsigned long reg_num, | ||
2966 | unsigned long *reg_val); | ||
2967 | unsigned long sun4v_m7_set_perfreg(unsigned long reg_num, | ||
2968 | unsigned long reg_val); | ||
2969 | #endif | ||
2970 | |||
2960 | /* Function numbers for HV_CORE_TRAP. */ | 2971 | /* Function numbers for HV_CORE_TRAP. */ |
2961 | #define HV_CORE_SET_VER 0x00 | 2972 | #define HV_CORE_SET_VER 0x00 |
2962 | #define HV_CORE_PUTCHAR 0x01 | 2973 | #define HV_CORE_PUTCHAR 0x01 |
@@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, | |||
2981 | #define HV_GRP_SDIO 0x0108 | 2992 | #define HV_GRP_SDIO 0x0108 |
2982 | #define HV_GRP_SDIO_ERR 0x0109 | 2993 | #define HV_GRP_SDIO_ERR 0x0109 |
2983 | #define HV_GRP_REBOOT_DATA 0x0110 | 2994 | #define HV_GRP_REBOOT_DATA 0x0110 |
2995 | #define HV_GRP_M7_PERF 0x0114 | ||
2984 | #define HV_GRP_NIAG_PERF 0x0200 | 2996 | #define HV_GRP_NIAG_PERF 0x0200 |
2985 | #define HV_GRP_FIRE_PERF 0x0201 | 2997 | #define HV_GRP_FIRE_PERF 0x0201 |
2986 | #define HV_GRP_N2_CPU 0x0202 | 2998 | #define HV_GRP_N2_CPU 0x0202 |
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index 9b672be70dda..50d4840d9aeb 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h | |||
@@ -407,16 +407,16 @@ static inline void iounmap(volatile void __iomem *addr) | |||
407 | { | 407 | { |
408 | } | 408 | } |
409 | 409 | ||
410 | #define ioread8(X) readb(X) | 410 | #define ioread8 readb |
411 | #define ioread16(X) readw(X) | 411 | #define ioread16 readw |
412 | #define ioread16be(X) __raw_readw(X) | 412 | #define ioread16be __raw_readw |
413 | #define ioread32(X) readl(X) | 413 | #define ioread32 readl |
414 | #define ioread32be(X) __raw_readl(X) | 414 | #define ioread32be __raw_readl |
415 | #define iowrite8(val,X) writeb(val,X) | 415 | #define iowrite8 writeb |
416 | #define iowrite16(val,X) writew(val,X) | 416 | #define iowrite16 writew |
417 | #define iowrite16be(val,X) __raw_writew(val,X) | 417 | #define iowrite16be __raw_writew |
418 | #define iowrite32(val,X) writel(val,X) | 418 | #define iowrite32 writel |
419 | #define iowrite32be(val,X) __raw_writel(val,X) | 419 | #define iowrite32be __raw_writel |
420 | 420 | ||
421 | /* Create a virtual mapping cookie for an IO port range */ | 421 | /* Create a virtual mapping cookie for an IO port range */ |
422 | void __iomem *ioport_map(unsigned long port, unsigned int nr); | 422 | void __iomem *ioport_map(unsigned long port, unsigned int nr); |
diff --git a/arch/sparc/include/asm/starfire.h b/arch/sparc/include/asm/starfire.h index c100dc27a0a9..176fa0ad19f1 100644 --- a/arch/sparc/include/asm/starfire.h +++ b/arch/sparc/include/asm/starfire.h | |||
@@ -12,7 +12,6 @@ | |||
12 | extern int this_is_starfire; | 12 | extern int this_is_starfire; |
13 | 13 | ||
14 | void check_if_starfire(void); | 14 | void check_if_starfire(void); |
15 | int starfire_hard_smp_processor_id(void); | ||
16 | void starfire_hookup(int); | 15 | void starfire_hookup(int); |
17 | unsigned int starfire_translate(unsigned long imap, unsigned int upaid); | 16 | unsigned int starfire_translate(unsigned long imap, unsigned int upaid); |
18 | 17 | ||
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index 88d322b67fac..07cc49e541f4 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h | |||
@@ -98,11 +98,7 @@ void sun4v_do_mna(struct pt_regs *regs, | |||
98 | void do_privop(struct pt_regs *regs); | 98 | void do_privop(struct pt_regs *regs); |
99 | void do_privact(struct pt_regs *regs); | 99 | void do_privact(struct pt_regs *regs); |
100 | void do_cee(struct pt_regs *regs); | 100 | void do_cee(struct pt_regs *regs); |
101 | void do_cee_tl1(struct pt_regs *regs); | ||
102 | void do_dae_tl1(struct pt_regs *regs); | ||
103 | void do_iae_tl1(struct pt_regs *regs); | ||
104 | void do_div0_tl1(struct pt_regs *regs); | 101 | void do_div0_tl1(struct pt_regs *regs); |
105 | void do_fpdis_tl1(struct pt_regs *regs); | ||
106 | void do_fpieee_tl1(struct pt_regs *regs); | 102 | void do_fpieee_tl1(struct pt_regs *regs); |
107 | void do_fpother_tl1(struct pt_regs *regs); | 103 | void do_fpother_tl1(struct pt_regs *regs); |
108 | void do_ill_tl1(struct pt_regs *regs); | 104 | void do_ill_tl1(struct pt_regs *regs); |
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 5c55145bfbf0..662500fa555f 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c | |||
@@ -48,6 +48,7 @@ static struct api_info api_table[] = { | |||
48 | { .group = HV_GRP_VT_CPU, }, | 48 | { .group = HV_GRP_VT_CPU, }, |
49 | { .group = HV_GRP_T5_CPU, }, | 49 | { .group = HV_GRP_T5_CPU, }, |
50 | { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, | 50 | { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, |
51 | { .group = HV_GRP_M7_PERF, }, | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | static DEFINE_SPINLOCK(hvapi_lock); | 54 | static DEFINE_SPINLOCK(hvapi_lock); |
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index caedf8320416..afbaba52d2f1 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S | |||
@@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg) | |||
837 | retl | 837 | retl |
838 | nop | 838 | nop |
839 | ENDPROC(sun4v_t5_set_perfreg) | 839 | ENDPROC(sun4v_t5_set_perfreg) |
840 | |||
841 | ENTRY(sun4v_m7_get_perfreg) | ||
842 | mov %o1, %o4 | ||
843 | mov HV_FAST_M7_GET_PERFREG, %o5 | ||
844 | ta HV_FAST_TRAP | ||
845 | stx %o1, [%o4] | ||
846 | retl | ||
847 | nop | ||
848 | ENDPROC(sun4v_m7_get_perfreg) | ||
849 | |||
850 | ENTRY(sun4v_m7_set_perfreg) | ||
851 | mov HV_FAST_M7_SET_PERFREG, %o5 | ||
852 | ta HV_FAST_TRAP | ||
853 | retl | ||
854 | nop | ||
855 | ENDPROC(sun4v_m7_set_perfreg) | ||
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 7e967c8018c8..eb978c77c76a 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c | |||
@@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = { | |||
217 | .pcr_nmi_disable = PCR_N4_PICNPT, | 217 | .pcr_nmi_disable = PCR_N4_PICNPT, |
218 | }; | 218 | }; |
219 | 219 | ||
220 | static u64 m7_pcr_read(unsigned long reg_num) | ||
221 | { | ||
222 | unsigned long val; | ||
223 | |||
224 | (void) sun4v_m7_get_perfreg(reg_num, &val); | ||
225 | |||
226 | return val; | ||
227 | } | ||
228 | |||
229 | static void m7_pcr_write(unsigned long reg_num, u64 val) | ||
230 | { | ||
231 | (void) sun4v_m7_set_perfreg(reg_num, val); | ||
232 | } | ||
233 | |||
234 | static const struct pcr_ops m7_pcr_ops = { | ||
235 | .read_pcr = m7_pcr_read, | ||
236 | .write_pcr = m7_pcr_write, | ||
237 | .read_pic = n4_pic_read, | ||
238 | .write_pic = n4_pic_write, | ||
239 | .nmi_picl_value = n4_picl_value, | ||
240 | .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | | ||
241 | PCR_N4_UTRACE | PCR_N4_TOE | | ||
242 | (26 << PCR_N4_SL_SHIFT)), | ||
243 | .pcr_nmi_disable = PCR_N4_PICNPT, | ||
244 | }; | ||
220 | 245 | ||
221 | static unsigned long perf_hsvc_group; | 246 | static unsigned long perf_hsvc_group; |
222 | static unsigned long perf_hsvc_major; | 247 | static unsigned long perf_hsvc_major; |
@@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void) | |||
248 | perf_hsvc_group = HV_GRP_T5_CPU; | 273 | perf_hsvc_group = HV_GRP_T5_CPU; |
249 | break; | 274 | break; |
250 | 275 | ||
276 | case SUN4V_CHIP_SPARC_M7: | ||
277 | perf_hsvc_group = HV_GRP_M7_PERF; | ||
278 | break; | ||
279 | |||
251 | default: | 280 | default: |
252 | return -ENODEV; | 281 | return -ENODEV; |
253 | } | 282 | } |
@@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void) | |||
293 | pcr_ops = &n5_pcr_ops; | 322 | pcr_ops = &n5_pcr_ops; |
294 | break; | 323 | break; |
295 | 324 | ||
325 | case SUN4V_CHIP_SPARC_M7: | ||
326 | pcr_ops = &m7_pcr_ops; | ||
327 | break; | ||
328 | |||
296 | default: | 329 | default: |
297 | ret = -ENODEV; | 330 | ret = -ENODEV; |
298 | break; | 331 | break; |
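The pcr.c hunk above follows the existing pcr_ops pattern: per-chip function pointers route performance-control-register accesses, and the M7 entries simply wrap the new sun4v_m7_get_perfreg()/sun4v_m7_set_perfreg() hypervisor calls registered under HV_GRP_M7_PERF. A minimal userspace C sketch of that ops-table dispatch is shown below; the backend functions and the "chip" string are placeholders standing in for the real hypervisor traps and cpu-type probe.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for the per-chip register accessors. */
static uint64_t t5_read(unsigned long reg)  { printf("T5 read  %lu\n", reg); return 0; }
static void     t5_write(unsigned long reg, uint64_t v) { printf("T5 write %lu=%llu\n", reg, (unsigned long long)v); }
static uint64_t m7_read(unsigned long reg)  { printf("M7 read  %lu\n", reg); return 0; }
static void     m7_write(unsigned long reg, uint64_t v) { printf("M7 write %lu=%llu\n", reg, (unsigned long long)v); }

struct pcr_ops_sketch {
	uint64_t (*read_pcr)(unsigned long reg_num);
	void     (*write_pcr)(unsigned long reg_num, uint64_t val);
};

static const struct pcr_ops_sketch t5_ops = { t5_read, t5_write };
static const struct pcr_ops_sketch m7_ops = { m7_read, m7_write };

/* Pick the backend once, the way setup_sun4v_pcr_ops() switches on the chip type. */
static const struct pcr_ops_sketch *select_ops(const char *chip)
{
	if (strcmp(chip, "sparc-m7") == 0)
		return &m7_ops;
	return &t5_ops;
}

int main(void)
{
	const struct pcr_ops_sketch *ops = select_ops("sparc-m7");

	ops->write_pcr(0, 0x1234);
	(void)ops->read_pcr(0);
	return 0;
}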
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 46a5e4508752..86eebfa3b158 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
@@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = { | |||
792 | .num_pic_regs = 4, | 792 | .num_pic_regs = 4, |
793 | }; | 793 | }; |
794 | 794 | ||
795 | static void sparc_m7_write_pmc(int idx, u64 val) | ||
796 | { | ||
797 | u64 pcr; | ||
798 | |||
799 | pcr = pcr_ops->read_pcr(idx); | ||
800 | /* ensure ov and ntc are reset */ | ||
801 | pcr &= ~(PCR_N4_OV | PCR_N4_NTC); | ||
802 | |||
803 | pcr_ops->write_pic(idx, val & 0xffffffff); | ||
804 | |||
805 | pcr_ops->write_pcr(idx, pcr); | ||
806 | } | ||
807 | |||
808 | static const struct sparc_pmu sparc_m7_pmu = { | ||
809 | .event_map = niagara4_event_map, | ||
810 | .cache_map = &niagara4_cache_map, | ||
811 | .max_events = ARRAY_SIZE(niagara4_perfmon_event_map), | ||
812 | .read_pmc = sparc_vt_read_pmc, | ||
813 | .write_pmc = sparc_m7_write_pmc, | ||
814 | .upper_shift = 5, | ||
815 | .lower_shift = 5, | ||
816 | .event_mask = 0x7ff, | ||
817 | .user_bit = PCR_N4_UTRACE, | ||
818 | .priv_bit = PCR_N4_STRACE, | ||
819 | |||
820 | /* We explicitly don't support hypervisor tracing. */ | ||
821 | .hv_bit = 0, | ||
822 | |||
823 | .irq_bit = PCR_N4_TOE, | ||
824 | .upper_nop = 0, | ||
825 | .lower_nop = 0, | ||
826 | .flags = 0, | ||
827 | .max_hw_events = 4, | ||
828 | .num_pcrs = 4, | ||
829 | .num_pic_regs = 4, | ||
830 | }; | ||
795 | static const struct sparc_pmu *sparc_pmu __read_mostly; | 831 | static const struct sparc_pmu *sparc_pmu __read_mostly; |
796 | 832 | ||
797 | static u64 event_encoding(u64 event_id, int idx) | 833 | static u64 event_encoding(u64 event_id, int idx) |
@@ -960,6 +996,8 @@ out: | |||
960 | cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; | 996 | cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; |
961 | } | 997 | } |
962 | 998 | ||
999 | static void sparc_pmu_start(struct perf_event *event, int flags); | ||
1000 | |||
963 | /* On this PMU each PIC has its own PCR control register. */ | 1001 | /* On this PMU each PIC has its own PCR control register. */ |
964 | static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) | 1002 | static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) |
965 | { | 1003 | { |
@@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) | |||
972 | struct perf_event *cp = cpuc->event[i]; | 1010 | struct perf_event *cp = cpuc->event[i]; |
973 | struct hw_perf_event *hwc = &cp->hw; | 1011 | struct hw_perf_event *hwc = &cp->hw; |
974 | int idx = hwc->idx; | 1012 | int idx = hwc->idx; |
975 | u64 enc; | ||
976 | 1013 | ||
977 | if (cpuc->current_idx[i] != PIC_NO_INDEX) | 1014 | if (cpuc->current_idx[i] != PIC_NO_INDEX) |
978 | continue; | 1015 | continue; |
979 | 1016 | ||
980 | sparc_perf_event_set_period(cp, hwc, idx); | ||
981 | cpuc->current_idx[i] = idx; | 1017 | cpuc->current_idx[i] = idx; |
982 | 1018 | ||
983 | enc = perf_event_get_enc(cpuc->events[i]); | 1019 | sparc_pmu_start(cp, PERF_EF_RELOAD); |
984 | cpuc->pcr[idx] &= ~mask_for_index(idx); | ||
985 | if (hwc->state & PERF_HES_STOPPED) | ||
986 | cpuc->pcr[idx] |= nop_for_index(idx); | ||
987 | else | ||
988 | cpuc->pcr[idx] |= event_encoding(enc, idx); | ||
989 | } | 1020 | } |
990 | out: | 1021 | out: |
991 | for (i = 0; i < cpuc->n_events; i++) { | 1022 | for (i = 0; i < cpuc->n_events; i++) { |
@@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) | |||
1101 | int i; | 1132 | int i; |
1102 | 1133 | ||
1103 | local_irq_save(flags); | 1134 | local_irq_save(flags); |
1104 | perf_pmu_disable(event->pmu); | ||
1105 | 1135 | ||
1106 | for (i = 0; i < cpuc->n_events; i++) { | 1136 | for (i = 0; i < cpuc->n_events; i++) { |
1107 | if (event == cpuc->event[i]) { | 1137 | if (event == cpuc->event[i]) { |
@@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags) | |||
1127 | } | 1157 | } |
1128 | } | 1158 | } |
1129 | 1159 | ||
1130 | perf_pmu_enable(event->pmu); | ||
1131 | local_irq_restore(flags); | 1160 | local_irq_restore(flags); |
1132 | } | 1161 | } |
1133 | 1162 | ||
@@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags) | |||
1361 | unsigned long flags; | 1390 | unsigned long flags; |
1362 | 1391 | ||
1363 | local_irq_save(flags); | 1392 | local_irq_save(flags); |
1364 | perf_pmu_disable(event->pmu); | ||
1365 | 1393 | ||
1366 | n0 = cpuc->n_events; | 1394 | n0 = cpuc->n_events; |
1367 | if (n0 >= sparc_pmu->max_hw_events) | 1395 | if (n0 >= sparc_pmu->max_hw_events) |
@@ -1394,7 +1422,6 @@ nocheck: | |||
1394 | 1422 | ||
1395 | ret = 0; | 1423 | ret = 0; |
1396 | out: | 1424 | out: |
1397 | perf_pmu_enable(event->pmu); | ||
1398 | local_irq_restore(flags); | 1425 | local_irq_restore(flags); |
1399 | return ret; | 1426 | return ret; |
1400 | } | 1427 | } |
@@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void) | |||
1667 | sparc_pmu = &niagara4_pmu; | 1694 | sparc_pmu = &niagara4_pmu; |
1668 | return true; | 1695 | return true; |
1669 | } | 1696 | } |
1697 | if (!strcmp(sparc_pmu_type, "sparc-m7")) { | ||
1698 | sparc_pmu = &sparc_m7_pmu; | ||
1699 | return true; | ||
1700 | } | ||
1670 | return false; | 1701 | return false; |
1671 | } | 1702 | } |
1672 | 1703 | ||
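sparc_m7_write_pmc() above re-arms a counter by reading the control register, clearing the PCR_N4_OV and PCR_N4_NTC status bits, loading the new count into the PIC, and only then restoring the control register. Below is a hedged sketch of that read-modify-write ordering; the bit values and the shadow arrays are invented, only the ordering mirrors the hunk.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical control-register bits; the real masks are PCR_N4_OV and PCR_N4_NTC. */
#define CTL_OVERFLOW   (1u << 8)
#define CTL_NOT_TAKEN  (1u << 9)

static uint64_t pcr_shadow[4];   /* stand-in for pcr_ops->read_pcr/write_pcr */
static uint64_t pic_shadow[4];   /* stand-in for pcr_ops->write_pic */

static void write_pmc_sketch(int idx, uint64_t val)
{
	uint64_t ctl = pcr_shadow[idx];

	/* Drop the sticky status bits before re-arming the counter. */
	ctl &= ~(uint64_t)(CTL_OVERFLOW | CTL_NOT_TAKEN);

	/* Load the new count first, then restore the control register,
	 * so the counter never runs with stale status bits set. */
	pic_shadow[idx] = val & 0xffffffffu;
	pcr_shadow[idx] = ctl;
}

int main(void)
{
	pcr_shadow[0] = CTL_OVERFLOW | 0x7;
	write_pmc_sketch(0, 0x123456789abcdefULL);
	printf("pcr=%#llx pic=%#llx\n",
	       (unsigned long long)pcr_shadow[0],
	       (unsigned long long)pic_shadow[0]);
	return 0;
}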
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 0be7bf978cb1..46a59643bb1c 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self) | |||
287 | printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", | 287 | printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", |
288 | gp->tpc, gp->o7, gp->i7, gp->rpc); | 288 | gp->tpc, gp->o7, gp->i7, gp->rpc); |
289 | } | 289 | } |
290 | |||
291 | touch_nmi_watchdog(); | ||
290 | } | 292 | } |
291 | 293 | ||
292 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); | 294 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); |
@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void) | |||
362 | (cpu == this_cpu ? '*' : ' '), cpu, | 364 | (cpu == this_cpu ? '*' : ' '), cpu, |
363 | pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], | 365 | pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], |
364 | pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); | 366 | pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); |
367 | |||
368 | touch_nmi_watchdog(); | ||
365 | } | 369 | } |
366 | 370 | ||
367 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); | 371 | memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index da6f1a7fc4db..61139d9924ca 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -1406,11 +1406,32 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) | |||
1406 | scheduler_ipi(); | 1406 | scheduler_ipi(); |
1407 | } | 1407 | } |
1408 | 1408 | ||
1409 | /* This is a nop because we capture all other cpus | 1409 | static void stop_this_cpu(void *dummy) |
1410 | * anyways when making the PROM active. | 1410 | { |
1411 | */ | 1411 | prom_stopself(); |
1412 | } | ||
1413 | |||
1412 | void smp_send_stop(void) | 1414 | void smp_send_stop(void) |
1413 | { | 1415 | { |
1416 | int cpu; | ||
1417 | |||
1418 | if (tlb_type == hypervisor) { | ||
1419 | for_each_online_cpu(cpu) { | ||
1420 | if (cpu == smp_processor_id()) | ||
1421 | continue; | ||
1422 | #ifdef CONFIG_SUN_LDOMS | ||
1423 | if (ldom_domaining_enabled) { | ||
1424 | unsigned long hv_err; | ||
1425 | hv_err = sun4v_cpu_stop(cpu); | ||
1426 | if (hv_err) | ||
1427 | printk(KERN_ERR "sun4v_cpu_stop() " | ||
1428 | "failed err=%lu\n", hv_err); | ||
1429 | } else | ||
1430 | #endif | ||
1431 | prom_stopcpu_cpuid(cpu); | ||
1432 | } | ||
1433 | } else | ||
1434 | smp_call_function(stop_this_cpu, NULL, 0); | ||
1414 | } | 1435 | } |
1415 | 1436 | ||
1416 | /** | 1437 | /** |
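The reworked smp_send_stop() walks every other online CPU and stops it directly, using sun4v_cpu_stop() when logical domaining is enabled and the PROM call otherwise, while non-hypervisor systems keep a cross-call that runs stop_this_cpu(). A rough userspace sketch of that selection logic follows; all three stop functions are placeholders.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_SKETCH 4

/* Placeholders for the three stop mechanisms used by the real code. */
static int  hv_cpu_stop(int cpu)      { printf("hv stop cpu %d\n", cpu);   return 0; }
static void prom_stop_cpu(int cpu)    { printf("prom stop cpu %d\n", cpu); }
static void ipi_stop_others(int self) { printf("broadcast stop IPI (from cpu %d)\n", self); }

static void send_stop_sketch(int self, bool on_hypervisor, bool domaining)
{
	if (!on_hypervisor) {
		/* Non-sun4v: one broadcast, like smp_call_function(stop_this_cpu, ...). */
		ipi_stop_others(self);
		return;
	}
	for (int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++) {
		if (cpu == self)
			continue;
		if (domaining) {
			if (hv_cpu_stop(cpu))          /* sun4v_cpu_stop() path */
				fprintf(stderr, "stop of cpu %d failed\n", cpu);
		} else {
			prom_stop_cpu(cpu);            /* prom_stopcpu_cpuid() path */
		}
	}
}

int main(void)
{
	send_stop_sketch(0, true, true);
	return 0;
}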
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c index 82281a566bb8..167fdfd9c837 100644 --- a/arch/sparc/kernel/starfire.c +++ b/arch/sparc/kernel/starfire.c | |||
@@ -28,11 +28,6 @@ void check_if_starfire(void) | |||
28 | this_is_starfire = 1; | 28 | this_is_starfire = 1; |
29 | } | 29 | } |
30 | 30 | ||
31 | int starfire_hard_smp_processor_id(void) | ||
32 | { | ||
33 | return upa_readl(0x1fff40000d0UL); | ||
34 | } | ||
35 | |||
36 | /* | 31 | /* |
37 | * Each Starfire board has 32 registers which perform translation | 32 | * Each Starfire board has 32 registers which perform translation |
38 | * and delivery of traditional interrupt packets into the extended | 33 | * and delivery of traditional interrupt packets into the extended |
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index c85403d0496c..30e7ddb27a3a 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c | |||
@@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second | |||
333 | long err; | 333 | long err; |
334 | 334 | ||
335 | /* No need for backward compatibility. We can start fresh... */ | 335 | /* No need for backward compatibility. We can start fresh... */ |
336 | if (call <= SEMCTL) { | 336 | if (call <= SEMTIMEDOP) { |
337 | switch (call) { | 337 | switch (call) { |
338 | case SEMOP: | 338 | case SEMOP: |
339 | err = sys_semtimedop(first, ptr, | 339 | err = sys_semtimedop(first, ptr, |
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index a27651e866e7..0e699745d643 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c | |||
@@ -2427,6 +2427,8 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) | |||
2427 | } | 2427 | } |
2428 | user_instruction_dump ((unsigned int __user *) regs->tpc); | 2428 | user_instruction_dump ((unsigned int __user *) regs->tpc); |
2429 | } | 2429 | } |
2430 | if (panic_on_oops) | ||
2431 | panic("Fatal exception"); | ||
2430 | if (regs->tstate & TSTATE_PRIV) | 2432 | if (regs->tstate & TSTATE_PRIV) |
2431 | do_exit(SIGKILL); | 2433 | do_exit(SIGKILL); |
2432 | do_exit(SIGSEGV); | 2434 | do_exit(SIGSEGV); |
@@ -2564,27 +2566,6 @@ void do_cee(struct pt_regs *regs) | |||
2564 | die_if_kernel("TL0: Cache Error Exception", regs); | 2566 | die_if_kernel("TL0: Cache Error Exception", regs); |
2565 | } | 2567 | } |
2566 | 2568 | ||
2567 | void do_cee_tl1(struct pt_regs *regs) | ||
2568 | { | ||
2569 | exception_enter(); | ||
2570 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
2571 | die_if_kernel("TL1: Cache Error Exception", regs); | ||
2572 | } | ||
2573 | |||
2574 | void do_dae_tl1(struct pt_regs *regs) | ||
2575 | { | ||
2576 | exception_enter(); | ||
2577 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
2578 | die_if_kernel("TL1: Data Access Exception", regs); | ||
2579 | } | ||
2580 | |||
2581 | void do_iae_tl1(struct pt_regs *regs) | ||
2582 | { | ||
2583 | exception_enter(); | ||
2584 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
2585 | die_if_kernel("TL1: Instruction Access Exception", regs); | ||
2586 | } | ||
2587 | |||
2588 | void do_div0_tl1(struct pt_regs *regs) | 2569 | void do_div0_tl1(struct pt_regs *regs) |
2589 | { | 2570 | { |
2590 | exception_enter(); | 2571 | exception_enter(); |
@@ -2592,13 +2573,6 @@ void do_div0_tl1(struct pt_regs *regs) | |||
2592 | die_if_kernel("TL1: DIV0 Exception", regs); | 2573 | die_if_kernel("TL1: DIV0 Exception", regs); |
2593 | } | 2574 | } |
2594 | 2575 | ||
2595 | void do_fpdis_tl1(struct pt_regs *regs) | ||
2596 | { | ||
2597 | exception_enter(); | ||
2598 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
2599 | die_if_kernel("TL1: FPU Disabled", regs); | ||
2600 | } | ||
2601 | |||
2602 | void do_fpieee_tl1(struct pt_regs *regs) | 2576 | void do_fpieee_tl1(struct pt_regs *regs) |
2603 | { | 2577 | { |
2604 | exception_enter(); | 2578 | exception_enter(); |
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S index b7f6334e159f..857ad4f8905f 100644 --- a/arch/sparc/lib/memmove.S +++ b/arch/sparc/lib/memmove.S | |||
@@ -8,9 +8,11 @@ | |||
8 | 8 | ||
9 | .text | 9 | .text |
10 | ENTRY(memmove) /* o0=dst o1=src o2=len */ | 10 | ENTRY(memmove) /* o0=dst o1=src o2=len */ |
11 | mov %o0, %g1 | 11 | brz,pn %o2, 99f |
12 | mov %o0, %g1 | ||
13 | |||
12 | cmp %o0, %o1 | 14 | cmp %o0, %o1 |
13 | bleu,pt %xcc, memcpy | 15 | bleu,pt %xcc, 2f |
14 | add %o1, %o2, %g7 | 16 | add %o1, %o2, %g7 |
15 | cmp %g7, %o0 | 17 | cmp %g7, %o0 |
16 | bleu,pt %xcc, memcpy | 18 | bleu,pt %xcc, memcpy |
@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */ | |||
24 | stb %g7, [%o0] | 26 | stb %g7, [%o0] |
25 | bne,pt %icc, 1b | 27 | bne,pt %icc, 1b |
26 | sub %o0, 1, %o0 | 28 | sub %o0, 1, %o0 |
27 | 29 | 99: | |
28 | retl | 30 | retl |
29 | mov %g1, %o0 | 31 | mov %g1, %o0 |
32 | |||
33 | /* We can't just call memcpy for these memmove cases. On some | ||
34 | * chips the memcpy uses cache initializing stores and when dst | ||
35 | * and src are close enough, those can clobber the source data | ||
36 | * before we've loaded it in. | ||
37 | */ | ||
38 | 2: or %o0, %o1, %g7 | ||
39 | or %o2, %g7, %g7 | ||
40 | andcc %g7, 0x7, %g0 | ||
41 | bne,pn %xcc, 4f | ||
42 | nop | ||
43 | |||
44 | 3: ldx [%o1], %g7 | ||
45 | add %o1, 8, %o1 | ||
46 | subcc %o2, 8, %o2 | ||
47 | add %o0, 8, %o0 | ||
48 | bne,pt %icc, 3b | ||
49 | stx %g7, [%o0 - 0x8] | ||
50 | ba,a,pt %xcc, 99b | ||
51 | |||
52 | 4: ldub [%o1], %g7 | ||
53 | add %o1, 1, %o1 | ||
54 | subcc %o2, 1, %o2 | ||
55 | add %o0, 1, %o0 | ||
56 | bne,pt %icc, 4b | ||
57 | stb %g7, [%o0 - 0x1] | ||
58 | ba,a,pt %xcc, 99b | ||
30 | ENDPROC(memmove) | 59 | ENDPROC(memmove) |
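The memmove() change above only falls through to memcpy when the regions cannot overlap destructively: when dst <= src it copies forward itself (8 bytes at a time if everything is aligned, otherwise byte by byte), because memcpy's cache-initializing stores could clobber source bytes that have not been loaded yet; when dst sits above an overlapping source it keeps the existing backward byte copy. A simple C sketch of the same decision, using a plain forward loop rather than the optimized memcpy, is below.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Sketch of the overlap handling: a plain forward copy when dst <= src,
 * memcpy only when the ranges cannot overlap, and a backward byte copy
 * when dst lies inside the source range.  Illustration only, not the
 * optimized assembly. */
static void *memmove_sketch(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (d <= s) {
		if ((((uintptr_t)d | (uintptr_t)s | len) & 7) == 0) {
			for (size_t i = 0; i < len; i += 8) {
				uint64_t w;
				memcpy(&w, s + i, 8);   /* load the whole word first (ldx) */
				memcpy(d + i, &w, 8);   /* then store it (stx) */
			}
		} else {
			for (size_t i = 0; i < len; i++)
				d[i] = s[i];
		}
	} else if (s + len <= d) {
		memcpy(d, s, len);              /* disjoint ranges: memcpy is safe */
	} else {
		for (size_t i = len; i-- > 0; ) /* overlapping, dst above src */
			d[i] = s[i];
	}
	return dst;
}

int main(void)
{
	char buf[16] = "abcdefgh";
	memmove_sketch(buf + 2, buf, 8);        /* overlapping move */
	printf("%.10s\n", buf);
	return 0;
}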
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3ea267c53320..4ca0d6ba5ec8 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -2820,7 +2820,7 @@ static int __init report_memory(void) | |||
2820 | 2820 | ||
2821 | return 0; | 2821 | return 0; |
2822 | } | 2822 | } |
2823 | device_initcall(report_memory); | 2823 | arch_initcall(report_memory); |
2824 | 2824 | ||
2825 | #ifdef CONFIG_SMP | 2825 | #ifdef CONFIG_SMP |
2826 | #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range | 2826 | #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range |
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index 7083c16cccba..bb1376381985 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c | |||
@@ -14,13 +14,6 @@ | |||
14 | static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" | 14 | static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" |
15 | LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; | 15 | LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; |
16 | 16 | ||
17 | struct kaslr_setup_data { | ||
18 | __u64 next; | ||
19 | __u32 type; | ||
20 | __u32 len; | ||
21 | __u8 data[1]; | ||
22 | } kaslr_setup_data; | ||
23 | |||
24 | #define I8254_PORT_CONTROL 0x43 | 17 | #define I8254_PORT_CONTROL 0x43 |
25 | #define I8254_PORT_COUNTER0 0x40 | 18 | #define I8254_PORT_COUNTER0 0x40 |
26 | #define I8254_CMD_READBACK 0xC0 | 19 | #define I8254_CMD_READBACK 0xC0 |
@@ -302,29 +295,7 @@ static unsigned long find_random_addr(unsigned long minimum, | |||
302 | return slots_fetch_random(); | 295 | return slots_fetch_random(); |
303 | } | 296 | } |
304 | 297 | ||
305 | static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled) | 298 | unsigned char *choose_kernel_location(unsigned char *input, |
306 | { | ||
307 | struct setup_data *data; | ||
308 | |||
309 | kaslr_setup_data.type = SETUP_KASLR; | ||
310 | kaslr_setup_data.len = 1; | ||
311 | kaslr_setup_data.next = 0; | ||
312 | kaslr_setup_data.data[0] = enabled; | ||
313 | |||
314 | data = (struct setup_data *)(unsigned long)params->hdr.setup_data; | ||
315 | |||
316 | while (data && data->next) | ||
317 | data = (struct setup_data *)(unsigned long)data->next; | ||
318 | |||
319 | if (data) | ||
320 | data->next = (unsigned long)&kaslr_setup_data; | ||
321 | else | ||
322 | params->hdr.setup_data = (unsigned long)&kaslr_setup_data; | ||
323 | |||
324 | } | ||
325 | |||
326 | unsigned char *choose_kernel_location(struct boot_params *params, | ||
327 | unsigned char *input, | ||
328 | unsigned long input_size, | 299 | unsigned long input_size, |
329 | unsigned char *output, | 300 | unsigned char *output, |
330 | unsigned long output_size) | 301 | unsigned long output_size) |
@@ -335,17 +306,14 @@ unsigned char *choose_kernel_location(struct boot_params *params, | |||
335 | #ifdef CONFIG_HIBERNATION | 306 | #ifdef CONFIG_HIBERNATION |
336 | if (!cmdline_find_option_bool("kaslr")) { | 307 | if (!cmdline_find_option_bool("kaslr")) { |
337 | debug_putstr("KASLR disabled by default...\n"); | 308 | debug_putstr("KASLR disabled by default...\n"); |
338 | add_kaslr_setup_data(params, 0); | ||
339 | goto out; | 309 | goto out; |
340 | } | 310 | } |
341 | #else | 311 | #else |
342 | if (cmdline_find_option_bool("nokaslr")) { | 312 | if (cmdline_find_option_bool("nokaslr")) { |
343 | debug_putstr("KASLR disabled by cmdline...\n"); | 313 | debug_putstr("KASLR disabled by cmdline...\n"); |
344 | add_kaslr_setup_data(params, 0); | ||
345 | goto out; | 314 | goto out; |
346 | } | 315 | } |
347 | #endif | 316 | #endif |
348 | add_kaslr_setup_data(params, 1); | ||
349 | 317 | ||
350 | /* Record the various known unsafe memory ranges. */ | 318 | /* Record the various known unsafe memory ranges. */ |
351 | mem_avoid_init((unsigned long)input, input_size, | 319 | mem_avoid_init((unsigned long)input, input_size, |
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 5903089c818f..a950864a64da 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c | |||
@@ -401,8 +401,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, | |||
401 | * the entire decompressed kernel plus relocation table, or the | 401 | * the entire decompressed kernel plus relocation table, or the |
402 | * entire decompressed kernel plus .bss and .brk sections. | 402 | * entire decompressed kernel plus .bss and .brk sections. |
403 | */ | 403 | */ |
404 | output = choose_kernel_location(real_mode, input_data, input_len, | 404 | output = choose_kernel_location(input_data, input_len, output, |
405 | output, | ||
406 | output_len > run_size ? output_len | 405 | output_len > run_size ? output_len |
407 | : run_size); | 406 | : run_size); |
408 | 407 | ||
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index ee3576b2666b..04477d68403f 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h | |||
@@ -57,8 +57,7 @@ int cmdline_find_option_bool(const char *option); | |||
57 | 57 | ||
58 | #if CONFIG_RANDOMIZE_BASE | 58 | #if CONFIG_RANDOMIZE_BASE |
59 | /* aslr.c */ | 59 | /* aslr.c */ |
60 | unsigned char *choose_kernel_location(struct boot_params *params, | 60 | unsigned char *choose_kernel_location(unsigned char *input, |
61 | unsigned char *input, | ||
62 | unsigned long input_size, | 61 | unsigned long input_size, |
63 | unsigned char *output, | 62 | unsigned char *output, |
64 | unsigned long output_size); | 63 | unsigned long output_size); |
@@ -66,8 +65,7 @@ unsigned char *choose_kernel_location(struct boot_params *params, | |||
66 | bool has_cpuflag(int flag); | 65 | bool has_cpuflag(int flag); |
67 | #else | 66 | #else |
68 | static inline | 67 | static inline |
69 | unsigned char *choose_kernel_location(struct boot_params *params, | 68 | unsigned char *choose_kernel_location(unsigned char *input, |
70 | unsigned char *input, | ||
71 | unsigned long input_size, | 69 | unsigned long input_size, |
72 | unsigned char *output, | 70 | unsigned char *output, |
73 | unsigned long output_size) | 71 | unsigned long output_size) |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 947c6bf52c33..54f60ab41c63 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -1155,7 +1155,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
1155 | src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); | 1155 | src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); |
1156 | if (!src) | 1156 | if (!src) |
1157 | return -ENOMEM; | 1157 | return -ENOMEM; |
1158 | assoc = (src + req->cryptlen + auth_tag_len); | 1158 | assoc = (src + req->cryptlen); |
1159 | scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); | 1159 | scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); |
1160 | scatterwalk_map_and_copy(assoc, req->assoc, 0, | 1160 | scatterwalk_map_and_copy(assoc, req->assoc, 0, |
1161 | req->assoclen, 0); | 1161 | req->assoclen, 0); |
@@ -1180,7 +1180,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
1180 | scatterwalk_done(&src_sg_walk, 0, 0); | 1180 | scatterwalk_done(&src_sg_walk, 0, 0); |
1181 | scatterwalk_done(&assoc_sg_walk, 0, 0); | 1181 | scatterwalk_done(&assoc_sg_walk, 0, 0); |
1182 | } else { | 1182 | } else { |
1183 | scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1); | 1183 | scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1); |
1184 | kfree(src); | 1184 | kfree(src); |
1185 | } | 1185 | } |
1186 | return retval; | 1186 | return retval; |
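The rfc4106 decrypt fix above is pointer arithmetic: the scratch buffer is laid out as the ciphertext followed immediately by the associated data, so the assoc pointer must be src + req->cryptlen rather than src + cryptlen + auth_tag_len, and the result copied back out is tempCipherLen bytes (the ciphertext minus the auth tag) rather than the full cryptlen. A tiny sketch of that layout with hypothetical sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Hypothetical request sizes: the ciphertext includes a 16-byte auth tag. */
	size_t cryptlen = 64, assoclen = 20, auth_tag_len = 16;
	size_t plaintext_len = cryptlen - auth_tag_len;   /* the "tempCipherLen" idea */

	/* One scratch allocation: [ ciphertext | associated data ]. */
	unsigned char *src = malloc(cryptlen + assoclen);
	if (!src)
		return 1;
	unsigned char *assoc = src + cryptlen;   /* starts right after the ciphertext */

	memset(src, 0xAA, cryptlen);
	memset(assoc, 0xBB, assoclen);

	printf("ciphertext at +0, assoc at +%zu, plaintext copied out: %zu bytes\n",
	       (size_t)(assoc - src), plaintext_len);
	free(src);
	return 0;
}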
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 0dbc08282291..72ba21a8b5fc 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h | |||
@@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk) | |||
370 | preempt_disable(); | 370 | preempt_disable(); |
371 | tsk->thread.fpu_counter = 0; | 371 | tsk->thread.fpu_counter = 0; |
372 | __drop_fpu(tsk); | 372 | __drop_fpu(tsk); |
373 | clear_used_math(); | 373 | clear_stopped_child_used_math(tsk); |
374 | preempt_enable(); | 374 | preempt_enable(); |
375 | } | 375 | } |
376 | 376 | ||
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 95e11f79f123..f97fbe3abb67 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h | |||
@@ -51,8 +51,6 @@ extern int devmem_is_allowed(unsigned long pagenr); | |||
51 | extern unsigned long max_low_pfn_mapped; | 51 | extern unsigned long max_low_pfn_mapped; |
52 | extern unsigned long max_pfn_mapped; | 52 | extern unsigned long max_pfn_mapped; |
53 | 53 | ||
54 | extern bool kaslr_enabled; | ||
55 | |||
56 | static inline phys_addr_t get_max_mapped(void) | 54 | static inline phys_addr_t get_max_mapped(void) |
57 | { | 55 | { |
58 | return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; | 56 | return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; |
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index fa1195dae425..164e3f8d3c3d 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h | |||
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock; | |||
93 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); | 93 | extern int (*pcibios_enable_irq)(struct pci_dev *dev); |
94 | extern void (*pcibios_disable_irq)(struct pci_dev *dev); | 94 | extern void (*pcibios_disable_irq)(struct pci_dev *dev); |
95 | 95 | ||
96 | extern bool mp_should_keep_irq(struct device *dev); | ||
97 | |||
96 | struct pci_raw_ops { | 98 | struct pci_raw_ops { |
97 | int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, | 99 | int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, |
98 | int reg, int len, u32 *val); | 100 | int reg, int len, u32 *val); |
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 44e6dd7e36a2..225b0988043a 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h | |||
@@ -7,7 +7,6 @@ | |||
7 | #define SETUP_DTB 2 | 7 | #define SETUP_DTB 2 |
8 | #define SETUP_PCI 3 | 8 | #define SETUP_PCI 3 |
9 | #define SETUP_EFI 4 | 9 | #define SETUP_EFI 4 |
10 | #define SETUP_KASLR 5 | ||
11 | 10 | ||
12 | /* ram_size flags */ | 11 | /* ram_size flags */ |
13 | #define RAMDISK_IMAGE_START_MASK 0x07FF | 12 | #define RAMDISK_IMAGE_START_MASK 0x07FF |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3d525c6124f6..803b684676ff 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -1338,6 +1338,26 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) | |||
1338 | } | 1338 | } |
1339 | 1339 | ||
1340 | /* | 1340 | /* |
1341 | * ACPI offers an alternative platform interface model that removes | ||
1342 | * ACPI hardware requirements for platforms that do not implement | ||
1343 | * the PC Architecture. | ||
1344 | * | ||
1345 | * We initialize the Hardware-reduced ACPI model here: | ||
1346 | */ | ||
1347 | static void __init acpi_reduced_hw_init(void) | ||
1348 | { | ||
1349 | if (acpi_gbl_reduced_hardware) { | ||
1350 | /* | ||
1351 | * Override x86_init functions and bypass legacy pic | ||
1352 | * in Hardware-reduced ACPI mode | ||
1353 | */ | ||
1354 | x86_init.timers.timer_init = x86_init_noop; | ||
1355 | x86_init.irqs.pre_vector_init = x86_init_noop; | ||
1356 | legacy_pic = &null_legacy_pic; | ||
1357 | } | ||
1358 | } | ||
1359 | |||
1360 | /* | ||
1341 | * If your system is blacklisted here, but you find that acpi=force | 1361 | * If your system is blacklisted here, but you find that acpi=force |
1342 | * works for you, please contact linux-acpi@vger.kernel.org | 1362 | * works for you, please contact linux-acpi@vger.kernel.org |
1343 | */ | 1363 | */ |
@@ -1536,6 +1556,11 @@ int __init early_acpi_boot_init(void) | |||
1536 | */ | 1556 | */ |
1537 | early_acpi_process_madt(); | 1557 | early_acpi_process_madt(); |
1538 | 1558 | ||
1559 | /* | ||
1560 | * Hardware-reduced ACPI mode initialization: | ||
1561 | */ | ||
1562 | acpi_reduced_hw_init(); | ||
1563 | |||
1539 | return 0; | 1564 | return 0; |
1540 | } | 1565 | } |
1541 | 1566 | ||
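acpi_reduced_hw_init() above simply swaps the legacy timer and PIC setup hooks for no-ops when the firmware advertises the hardware-reduced ACPI model. The pattern is conditional replacement of function pointers in an init table; a small sketch with hypothetical hook names follows.

#include <stdbool.h>
#include <stdio.h>

static void default_timer_init(void) { puts("programming legacy PIT"); }
static void default_irq_init(void)   { puts("setting up legacy PIC");  }
static void noop(void)               { /* intentionally does nothing */ }

/* Hypothetical equivalent of the x86_init hook table. */
struct platform_hooks {
	void (*timer_init)(void);
	void (*pre_vector_init)(void);
};

static struct platform_hooks hooks = {
	.timer_init      = default_timer_init,
	.pre_vector_init = default_irq_init,
};

static void reduced_hw_init(bool reduced_hardware)
{
	if (reduced_hardware) {
		/* Bypass legacy timer and PIC setup entirely. */
		hooks.timer_init      = noop;
		hooks.pre_vector_init = noop;
	}
}

int main(void)
{
	reduced_hw_init(true);
	hooks.pre_vector_init();
	hooks.timer_init();
	return 0;
}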
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index c2fd21fed002..017149cded07 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c | |||
@@ -37,10 +37,12 @@ static const struct apic apic_numachip; | |||
37 | static unsigned int get_apic_id(unsigned long x) | 37 | static unsigned int get_apic_id(unsigned long x) |
38 | { | 38 | { |
39 | unsigned long value; | 39 | unsigned long value; |
40 | unsigned int id; | 40 | unsigned int id = (x >> 24) & 0xff; |
41 | 41 | ||
42 | rdmsrl(MSR_FAM10H_NODE_ID, value); | 42 | if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { |
43 | id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U); | 43 | rdmsrl(MSR_FAM10H_NODE_ID, value); |
44 | id |= (value << 2) & 0xff00; | ||
45 | } | ||
44 | 46 | ||
45 | return id; | 47 | return id; |
46 | } | 48 | } |
@@ -155,10 +157,18 @@ static int __init numachip_probe(void) | |||
155 | 157 | ||
156 | static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) | 158 | static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) |
157 | { | 159 | { |
158 | if (c->phys_proc_id != node) { | 160 | u64 val; |
159 | c->phys_proc_id = node; | 161 | u32 nodes = 1; |
160 | per_cpu(cpu_llc_id, smp_processor_id()) = node; | 162 | |
163 | this_cpu_write(cpu_llc_id, node); | ||
164 | |||
165 | /* Account for nodes per socket in multi-core-module processors */ | ||
166 | if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { | ||
167 | rdmsrl(MSR_FAM10H_NODE_ID, val); | ||
168 | nodes = ((val >> 3) & 7) + 1; | ||
161 | } | 169 | } |
170 | |||
171 | c->phys_proc_id = node / nodes; | ||
162 | } | 172 | } |
163 | 173 | ||
164 | static int __init numachip_system_init(void) | 174 | static int __init numachip_system_init(void) |
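The numachip changes read the extended node bits from the NODE_ID MSR only when the CPU actually has that MSR, and then divide the node number by the nodes-per-socket count so phys_proc_id identifies the socket rather than the node. The sketch below mirrors the shift-and-mask from the hunk (((val >> 3) & 7) + 1); the MSR value itself is made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bits [5:3] of the NODE_ID MSR hold "nodes per socket - 1", per the hunk above. */
static unsigned nodes_per_socket(uint64_t node_id_msr, bool has_nodeid_msr)
{
	if (!has_nodeid_msr)
		return 1;
	return ((node_id_msr >> 3) & 7) + 1;
}

static unsigned socket_of_node(unsigned node, unsigned nodes)
{
	return node / nodes;   /* several nodes may share one physical socket */
}

int main(void)
{
	uint64_t msr = 1u << 3;                   /* pretend: two nodes per socket */
	unsigned nodes = nodes_per_socket(msr, true);

	for (unsigned node = 0; node < 4; node++)
		printf("node %u -> socket %u\n", node, socket_of_node(node, nodes));
	return 0;
}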
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 1d74d161687c..2babb393915e 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -364,12 +364,21 @@ system_call_fastpath: | |||
364 | * Has incomplete stack frame and undefined top of stack. | 364 | * Has incomplete stack frame and undefined top of stack. |
365 | */ | 365 | */ |
366 | ret_from_sys_call: | 366 | ret_from_sys_call: |
367 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | ||
368 | jnz int_ret_from_sys_call_fixup /* Go to the slow path */ | ||
369 | |||
370 | LOCKDEP_SYS_EXIT | 367 | LOCKDEP_SYS_EXIT |
371 | DISABLE_INTERRUPTS(CLBR_NONE) | 368 | DISABLE_INTERRUPTS(CLBR_NONE) |
372 | TRACE_IRQS_OFF | 369 | TRACE_IRQS_OFF |
370 | |||
371 | /* | ||
372 | * We must check ti flags with interrupts (or at least preemption) | ||
373 | * off because we must *never* return to userspace without | ||
374 | * processing exit work that is enqueued if we're preempted here. | ||
375 | * In particular, returning to userspace with any of the one-shot | ||
376 | * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is | ||
377 | * very bad. | ||
378 | */ | ||
379 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | ||
380 | jnz int_ret_from_sys_call_fixup /* Go to the slow path */ | ||
381 | |||
373 | CFI_REMEMBER_STATE | 382 | CFI_REMEMBER_STATE |
374 | /* | 383 | /* |
375 | * sysretq will re-enable interrupts: | 384 | * sysretq will re-enable interrupts: |
@@ -386,7 +395,7 @@ ret_from_sys_call: | |||
386 | 395 | ||
387 | int_ret_from_sys_call_fixup: | 396 | int_ret_from_sys_call_fixup: |
388 | FIXUP_TOP_OF_STACK %r11, -ARGOFFSET | 397 | FIXUP_TOP_OF_STACK %r11, -ARGOFFSET |
389 | jmp int_ret_from_sys_call | 398 | jmp int_ret_from_sys_call_irqs_off |
390 | 399 | ||
391 | /* Do syscall tracing */ | 400 | /* Do syscall tracing */ |
392 | tracesys: | 401 | tracesys: |
@@ -432,6 +441,7 @@ tracesys_phase2: | |||
432 | GLOBAL(int_ret_from_sys_call) | 441 | GLOBAL(int_ret_from_sys_call) |
433 | DISABLE_INTERRUPTS(CLBR_NONE) | 442 | DISABLE_INTERRUPTS(CLBR_NONE) |
434 | TRACE_IRQS_OFF | 443 | TRACE_IRQS_OFF |
444 | int_ret_from_sys_call_irqs_off: | ||
435 | movl $_TIF_ALLWORK_MASK,%edi | 445 | movl $_TIF_ALLWORK_MASK,%edi |
436 | /* edi: mask to check */ | 446 | /* edi: mask to check */ |
437 | GLOBAL(int_with_check) | 447 | GLOBAL(int_with_check) |
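The entry_64.S reordering moves the _TIF_ALLWORK_MASK test after interrupts are disabled, so no exit-work flag can be set between the check and the return to userspace. The general shape, disable interruption, then test the work flags, then take either the slow path or the fast return, is sketched in C below with placeholder primitives for the irq and flag machinery.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint thread_flags;      /* stand-in for TI_flags */
#define TIF_ALLWORK_MASK 0x0000ffffu  /* hypothetical mask of exit-work bits */

static void local_irq_disable(void) { puts("irqs off"); }  /* placeholder */
static void local_irq_enable(void)  { puts("irqs on");  }  /* placeholder */

static void return_to_user_sketch(void)
{
	/* Disable interrupts *before* looking at the flags: once we decide the
	 * fast path is safe, nothing can sneak a new work flag in behind us. */
	local_irq_disable();

	if (atomic_load(&thread_flags) & TIF_ALLWORK_MASK) {
		local_irq_enable();
		puts("slow path: process exit work first");
		return;
	}
	puts("fast path: sysret back to userspace");
}

int main(void)
{
	atomic_fetch_or(&thread_flags, 0x1);  /* pretend TIF_NOTIFY_RESUME got set */
	return_to_user_sketch();
	return 0;
}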
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 9bbb9b35c144..d1ac80b72c72 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c | |||
@@ -47,13 +47,21 @@ do { \ | |||
47 | 47 | ||
48 | #ifdef CONFIG_RANDOMIZE_BASE | 48 | #ifdef CONFIG_RANDOMIZE_BASE |
49 | static unsigned long module_load_offset; | 49 | static unsigned long module_load_offset; |
50 | static int randomize_modules = 1; | ||
50 | 51 | ||
51 | /* Mutex protects the module_load_offset. */ | 52 | /* Mutex protects the module_load_offset. */ |
52 | static DEFINE_MUTEX(module_kaslr_mutex); | 53 | static DEFINE_MUTEX(module_kaslr_mutex); |
53 | 54 | ||
55 | static int __init parse_nokaslr(char *p) | ||
56 | { | ||
57 | randomize_modules = 0; | ||
58 | return 0; | ||
59 | } | ||
60 | early_param("nokaslr", parse_nokaslr); | ||
61 | |||
54 | static unsigned long int get_module_load_offset(void) | 62 | static unsigned long int get_module_load_offset(void) |
55 | { | 63 | { |
56 | if (kaslr_enabled) { | 64 | if (randomize_modules) { |
57 | mutex_lock(&module_kaslr_mutex); | 65 | mutex_lock(&module_kaslr_mutex); |
58 | /* | 66 | /* |
59 | * Calculate the module_load_offset the first time this | 67 | * Calculate the module_load_offset the first time this |
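With the setup_data plumbing reverted, the module loader above goes back to parsing "nokaslr" itself via early_param() and keeping a local randomize_modules flag. A minimal stand-in for that idea, written as a plain command-line scan rather than the kernel's early_param machinery:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool randomize_modules = true;   /* default: randomize the load offset */

/* Crude stand-in for early_param("nokaslr", ...): scan the boot command
 * line for the option and clear the flag if it is present. */
static void parse_cmdline(const char *cmdline)
{
	if (strstr(cmdline, "nokaslr"))
		randomize_modules = false;
}

int main(void)
{
	parse_cmdline("root=/dev/sda1 nokaslr quiet");
	printf("module load offset %s\n",
	       randomize_modules ? "randomized" : "fixed at zero");
	return 0;
}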
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 98dc9317286e..0a2421cca01f 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -122,8 +122,6 @@ | |||
122 | unsigned long max_low_pfn_mapped; | 122 | unsigned long max_low_pfn_mapped; |
123 | unsigned long max_pfn_mapped; | 123 | unsigned long max_pfn_mapped; |
124 | 124 | ||
125 | bool __read_mostly kaslr_enabled = false; | ||
126 | |||
127 | #ifdef CONFIG_DMI | 125 | #ifdef CONFIG_DMI |
128 | RESERVE_BRK(dmi_alloc, 65536); | 126 | RESERVE_BRK(dmi_alloc, 65536); |
129 | #endif | 127 | #endif |
@@ -427,11 +425,6 @@ static void __init reserve_initrd(void) | |||
427 | } | 425 | } |
428 | #endif /* CONFIG_BLK_DEV_INITRD */ | 426 | #endif /* CONFIG_BLK_DEV_INITRD */ |
429 | 427 | ||
430 | static void __init parse_kaslr_setup(u64 pa_data, u32 data_len) | ||
431 | { | ||
432 | kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data)); | ||
433 | } | ||
434 | |||
435 | static void __init parse_setup_data(void) | 428 | static void __init parse_setup_data(void) |
436 | { | 429 | { |
437 | struct setup_data *data; | 430 | struct setup_data *data; |
@@ -457,9 +450,6 @@ static void __init parse_setup_data(void) | |||
457 | case SETUP_EFI: | 450 | case SETUP_EFI: |
458 | parse_efi_setup(pa_data, data_len); | 451 | parse_efi_setup(pa_data, data_len); |
459 | break; | 452 | break; |
460 | case SETUP_KASLR: | ||
461 | parse_kaslr_setup(pa_data, data_len); | ||
462 | break; | ||
463 | default: | 453 | default: |
464 | break; | 454 | break; |
465 | } | 455 | } |
@@ -842,14 +832,10 @@ static void __init trim_low_memory_range(void) | |||
842 | static int | 832 | static int |
843 | dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) | 833 | dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) |
844 | { | 834 | { |
845 | if (kaslr_enabled) | 835 | pr_emerg("Kernel Offset: 0x%lx from 0x%lx " |
846 | pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n", | 836 | "(relocation range: 0x%lx-0x%lx)\n", |
847 | (unsigned long)&_text - __START_KERNEL, | 837 | (unsigned long)&_text - __START_KERNEL, __START_KERNEL, |
848 | __START_KERNEL, | 838 | __START_KERNEL_map, MODULES_VADDR-1); |
849 | __START_KERNEL_map, | ||
850 | MODULES_VADDR-1); | ||
851 | else | ||
852 | pr_emerg("Kernel Offset: disabled\n"); | ||
853 | 839 | ||
854 | return 0; | 840 | return 0; |
855 | } | 841 | } |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 9d2073e2ecc9..4ff5d162ff9f 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) | |||
384 | goto exit; | 384 | goto exit; |
385 | conditional_sti(regs); | 385 | conditional_sti(regs); |
386 | 386 | ||
387 | if (!user_mode(regs)) | 387 | if (!user_mode_vm(regs)) |
388 | die("bounds", regs, error_code); | 388 | die("bounds", regs, error_code); |
389 | 389 | ||
390 | if (!cpu_feature_enabled(X86_FEATURE_MPX)) { | 390 | if (!cpu_feature_enabled(X86_FEATURE_MPX)) { |
@@ -637,7 +637,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) | |||
637 | * then it's very likely the result of an icebp/int01 trap. | 637 | * then it's very likely the result of an icebp/int01 trap. |
638 | * User wants a sigtrap for that. | 638 | * User wants a sigtrap for that. |
639 | */ | 639 | */ |
640 | if (!dr6 && user_mode(regs)) | 640 | if (!dr6 && user_mode_vm(regs)) |
641 | user_icebp = 1; | 641 | user_icebp = 1; |
642 | 642 | ||
643 | /* Catch kmemcheck conditions first of all! */ | 643 | /* Catch kmemcheck conditions first of all! */ |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 34f66e58a896..cdc6cf903078 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -379,7 +379,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) | |||
379 | * thread's fpu state, reconstruct fxstate from the fsave | 379 | * thread's fpu state, reconstruct fxstate from the fsave |
380 | * header. Sanitize the copied state etc. | 380 | * header. Sanitize the copied state etc. |
381 | */ | 381 | */ |
382 | struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; | 382 | struct fpu *fpu = &tsk->thread.fpu; |
383 | struct user_i387_ia32_struct env; | 383 | struct user_i387_ia32_struct env; |
384 | int err = 0; | 384 | int err = 0; |
385 | 385 | ||
@@ -393,14 +393,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) | |||
393 | */ | 393 | */ |
394 | drop_fpu(tsk); | 394 | drop_fpu(tsk); |
395 | 395 | ||
396 | if (__copy_from_user(xsave, buf_fx, state_size) || | 396 | if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) || |
397 | __copy_from_user(&env, buf, sizeof(env))) { | 397 | __copy_from_user(&env, buf, sizeof(env))) { |
398 | fpu_finit(fpu); | ||
398 | err = -1; | 399 | err = -1; |
399 | } else { | 400 | } else { |
400 | sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); | 401 | sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); |
401 | set_used_math(); | ||
402 | } | 402 | } |
403 | 403 | ||
404 | set_used_math(); | ||
404 | if (use_eager_fpu()) { | 405 | if (use_eager_fpu()) { |
405 | preempt_disable(); | 406 | preempt_disable(); |
406 | math_state_restore(); | 407 | math_state_restore(); |
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index cc31f7c06d3d..9541ba34126b 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
@@ -507,6 +507,7 @@ static int picdev_read(struct kvm_pic *s, | |||
507 | return -EOPNOTSUPP; | 507 | return -EOPNOTSUPP; |
508 | 508 | ||
509 | if (len != 1) { | 509 | if (len != 1) { |
510 | memset(val, 0, len); | ||
510 | pr_pic_unimpl("non byte read\n"); | 511 | pr_pic_unimpl("non byte read\n"); |
511 | return 0; | 512 | return 0; |
512 | } | 513 | } |
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index b1947e0f3e10..46d4449772bc 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c | |||
@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, | |||
422 | struct kvm_ioapic *ioapic, int vector, int trigger_mode) | 422 | struct kvm_ioapic *ioapic, int vector, int trigger_mode) |
423 | { | 423 | { |
424 | int i; | 424 | int i; |
425 | struct kvm_lapic *apic = vcpu->arch.apic; | ||
425 | 426 | ||
426 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { | 427 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { |
427 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; | 428 | union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; |
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, | |||
443 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); | 444 | kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); |
444 | spin_lock(&ioapic->lock); | 445 | spin_lock(&ioapic->lock); |
445 | 446 | ||
446 | if (trigger_mode != IOAPIC_LEVEL_TRIG) | 447 | if (trigger_mode != IOAPIC_LEVEL_TRIG || |
448 | kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) | ||
447 | continue; | 449 | continue; |
448 | 450 | ||
449 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); | 451 | ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index bd4e34de24c7..4ee827d7bf36 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) | |||
833 | 833 | ||
834 | static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) | 834 | static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) |
835 | { | 835 | { |
836 | if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && | 836 | if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { |
837 | kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { | ||
838 | int trigger_mode; | 837 | int trigger_mode; |
839 | if (apic_test_vector(vector, apic->regs + APIC_TMR)) | 838 | if (apic_test_vector(vector, apic->regs + APIC_TMR)) |
840 | trigger_mode = IOAPIC_LEVEL_TRIG; | 839 | trigger_mode = IOAPIC_LEVEL_TRIG; |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f7b20b417a3a..ae4f6d35d19c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -2168,7 +2168,10 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) | |||
2168 | { | 2168 | { |
2169 | unsigned long *msr_bitmap; | 2169 | unsigned long *msr_bitmap; |
2170 | 2170 | ||
2171 | if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) { | 2171 | if (is_guest_mode(vcpu)) |
2172 | msr_bitmap = vmx_msr_bitmap_nested; | ||
2173 | else if (irqchip_in_kernel(vcpu->kvm) && | ||
2174 | apic_x2apic_mode(vcpu->arch.apic)) { | ||
2172 | if (is_long_mode(vcpu)) | 2175 | if (is_long_mode(vcpu)) |
2173 | msr_bitmap = vmx_msr_bitmap_longmode_x2apic; | 2176 | msr_bitmap = vmx_msr_bitmap_longmode_x2apic; |
2174 | else | 2177 | else |
@@ -2476,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) | |||
2476 | if (enable_ept) { | 2479 | if (enable_ept) { |
2477 | /* nested EPT: emulate EPT also to L1 */ | 2480 | /* nested EPT: emulate EPT also to L1 */ |
2478 | vmx->nested.nested_vmx_secondary_ctls_high |= | 2481 | vmx->nested.nested_vmx_secondary_ctls_high |= |
2479 | SECONDARY_EXEC_ENABLE_EPT | | 2482 | SECONDARY_EXEC_ENABLE_EPT; |
2480 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | ||
2481 | vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | | 2483 | vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | |
2482 | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | | 2484 | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | |
2483 | VMX_EPT_INVEPT_BIT; | 2485 | VMX_EPT_INVEPT_BIT; |
@@ -2491,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) | |||
2491 | } else | 2493 | } else |
2492 | vmx->nested.nested_vmx_ept_caps = 0; | 2494 | vmx->nested.nested_vmx_ept_caps = 0; |
2493 | 2495 | ||
2496 | if (enable_unrestricted_guest) | ||
2497 | vmx->nested.nested_vmx_secondary_ctls_high |= | ||
2498 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | ||
2499 | |||
2494 | /* miscellaneous data */ | 2500 | /* miscellaneous data */ |
2495 | rdmsr(MSR_IA32_VMX_MISC, | 2501 | rdmsr(MSR_IA32_VMX_MISC, |
2496 | vmx->nested.nested_vmx_misc_low, | 2502 | vmx->nested.nested_vmx_misc_low, |
@@ -9218,9 +9224,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
9218 | } | 9224 | } |
9219 | 9225 | ||
9220 | if (cpu_has_vmx_msr_bitmap() && | 9226 | if (cpu_has_vmx_msr_bitmap() && |
9221 | exec_control & CPU_BASED_USE_MSR_BITMAPS && | 9227 | exec_control & CPU_BASED_USE_MSR_BITMAPS) { |
9222 | nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) { | 9228 | nested_vmx_merge_msr_bitmap(vcpu, vmcs12); |
9223 | vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested)); | 9229 | /* MSR_BITMAP will be set by following vmx_set_efer. */ |
9224 | } else | 9230 | } else |
9225 | exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; | 9231 | exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; |
9226 | 9232 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bd7a70be41b3..32bf19ef3115 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -2744,7 +2744,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
2744 | case KVM_CAP_USER_NMI: | 2744 | case KVM_CAP_USER_NMI: |
2745 | case KVM_CAP_REINJECT_CONTROL: | 2745 | case KVM_CAP_REINJECT_CONTROL: |
2746 | case KVM_CAP_IRQ_INJECT_STATUS: | 2746 | case KVM_CAP_IRQ_INJECT_STATUS: |
2747 | case KVM_CAP_IRQFD: | ||
2748 | case KVM_CAP_IOEVENTFD: | 2747 | case KVM_CAP_IOEVENTFD: |
2749 | case KVM_CAP_IOEVENTFD_NO_LENGTH: | 2748 | case KVM_CAP_IOEVENTFD_NO_LENGTH: |
2750 | case KVM_CAP_PIT2: | 2749 | case KVM_CAP_PIT2: |
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 3d2612b68694..2fb384724ebb 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -513,31 +513,6 @@ void __init pcibios_set_cache_line_size(void) | |||
513 | } | 513 | } |
514 | } | 514 | } |
515 | 515 | ||
516 | /* | ||
517 | * Some device drivers assume dev->irq won't change after calling | ||
518 | * pci_disable_device(). So delay releasing of IRQ resource to driver | ||
519 | * unbinding time. Otherwise it will break PM subsystem and drivers | ||
520 | * like xen-pciback etc. | ||
521 | */ | ||
522 | static int pci_irq_notifier(struct notifier_block *nb, unsigned long action, | ||
523 | void *data) | ||
524 | { | ||
525 | struct pci_dev *dev = to_pci_dev(data); | ||
526 | |||
527 | if (action != BUS_NOTIFY_UNBOUND_DRIVER) | ||
528 | return NOTIFY_DONE; | ||
529 | |||
530 | if (pcibios_disable_irq) | ||
531 | pcibios_disable_irq(dev); | ||
532 | |||
533 | return NOTIFY_OK; | ||
534 | } | ||
535 | |||
536 | static struct notifier_block pci_irq_nb = { | ||
537 | .notifier_call = pci_irq_notifier, | ||
538 | .priority = INT_MIN, | ||
539 | }; | ||
540 | |||
541 | int __init pcibios_init(void) | 516 | int __init pcibios_init(void) |
542 | { | 517 | { |
543 | if (!raw_pci_ops) { | 518 | if (!raw_pci_ops) { |
@@ -550,9 +525,6 @@ int __init pcibios_init(void) | |||
550 | 525 | ||
551 | if (pci_bf_sort >= pci_force_bf) | 526 | if (pci_bf_sort >= pci_force_bf) |
552 | pci_sort_breadthfirst(); | 527 | pci_sort_breadthfirst(); |
553 | |||
554 | bus_register_notifier(&pci_bus_type, &pci_irq_nb); | ||
555 | |||
556 | return 0; | 528 | return 0; |
557 | } | 529 | } |
558 | 530 | ||
@@ -711,6 +683,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) | |||
711 | return 0; | 683 | return 0; |
712 | } | 684 | } |
713 | 685 | ||
686 | void pcibios_disable_device (struct pci_dev *dev) | ||
687 | { | ||
688 | if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) | ||
689 | pcibios_disable_irq(dev); | ||
690 | } | ||
691 | |||
714 | int pci_ext_cfg_avail(void) | 692 | int pci_ext_cfg_avail(void) |
715 | { | 693 | { |
716 | if (raw_pci_ext_ops) | 694 | if (raw_pci_ext_ops) |
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index efb849323c74..852aa4c92da0 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c | |||
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) | |||
234 | 234 | ||
235 | static void intel_mid_pci_irq_disable(struct pci_dev *dev) | 235 | static void intel_mid_pci_irq_disable(struct pci_dev *dev) |
236 | { | 236 | { |
237 | if (dev->irq_managed && dev->irq > 0) { | 237 | if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed && |
238 | dev->irq > 0) { | ||
238 | mp_unmap_irq(dev->irq); | 239 | mp_unmap_irq(dev->irq); |
239 | dev->irq_managed = 0; | 240 | dev->irq_managed = 0; |
240 | dev->irq = 0; | ||
241 | } | 241 | } |
242 | } | 242 | } |
243 | 243 | ||
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index e71b3dbd87b8..5dc6ca5e1741 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -1256,9 +1256,22 @@ static int pirq_enable_irq(struct pci_dev *dev) | |||
1256 | return 0; | 1256 | return 0; |
1257 | } | 1257 | } |
1258 | 1258 | ||
1259 | bool mp_should_keep_irq(struct device *dev) | ||
1260 | { | ||
1261 | if (dev->power.is_prepared) | ||
1262 | return true; | ||
1263 | #ifdef CONFIG_PM | ||
1264 | if (dev->power.runtime_status == RPM_SUSPENDING) | ||
1265 | return true; | ||
1266 | #endif | ||
1267 | |||
1268 | return false; | ||
1269 | } | ||
1270 | |||
1259 | static void pirq_disable_irq(struct pci_dev *dev) | 1271 | static void pirq_disable_irq(struct pci_dev *dev) |
1260 | { | 1272 | { |
1261 | if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) { | 1273 | if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && |
1274 | dev->irq_managed && dev->irq) { | ||
1262 | mp_unmap_irq(dev->irq); | 1275 | mp_unmap_irq(dev->irq); |
1263 | dev->irq = 0; | 1276 | dev->irq = 0; |
1264 | dev->irq_managed = 0; | 1277 | dev->irq_managed = 0; |
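mp_should_keep_irq() gives the IRQ-teardown paths a single question to ask: is this device in the middle of suspending, either prepared for a system sleep or in a runtime-PM transition? If so, the IOAPIC pin configuration is left alone. A small sketch of using such a predicate to guard teardown, with placeholder device state:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder mirror of the power-management state consulted above. */
struct dev_pm_state {
	bool is_prepared;        /* system suspend has started */
	bool runtime_suspending; /* runtime PM transition in flight */
};

static bool should_keep_irq(const struct dev_pm_state *pm)
{
	return pm->is_prepared || pm->runtime_suspending;
}

static void disable_device_irq(const struct dev_pm_state *pm, int irq)
{
	if (should_keep_irq(pm)) {
		printf("suspending: keeping irq %d routed\n", irq);
		return;
	}
	printf("unrouting irq %d\n", irq);
}

int main(void)
{
	struct dev_pm_state suspending = { .is_prepared = true };
	struct dev_pm_state normal = { 0 };

	disable_device_irq(&suspending, 16);
	disable_device_irq(&normal, 16);
	return 0;
}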
diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S index 31776d0efc8c..d7ec4e251c0a 100644 --- a/arch/x86/vdso/vdso32/sigreturn.S +++ b/arch/x86/vdso/vdso32/sigreturn.S | |||
@@ -17,6 +17,7 @@ | |||
17 | .text | 17 | .text |
18 | .globl __kernel_sigreturn | 18 | .globl __kernel_sigreturn |
19 | .type __kernel_sigreturn,@function | 19 | .type __kernel_sigreturn,@function |
20 | nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ | ||
20 | ALIGN | 21 | ALIGN |
21 | __kernel_sigreturn: | 22 | __kernel_sigreturn: |
22 | .LSTART_sigreturn: | 23 | .LSTART_sigreturn: |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 740ae3026a14..9f93af56a5fc 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -563,7 +563,7 @@ static bool alloc_p2m(unsigned long pfn) | |||
563 | if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) | 563 | if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) |
564 | p2m_init(p2m); | 564 | p2m_init(p2m); |
565 | else | 565 | else |
566 | p2m_init_identity(p2m, pfn); | 566 | p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1)); |
567 | 567 | ||
568 | spin_lock_irqsave(&p2m_update_lock, flags); | 568 | spin_lock_irqsave(&p2m_update_lock, flags); |
569 | 569 | ||
diff --git a/block/blk-merge.c b/block/blk-merge.c index fc1ff3b1ea1f..fd3fee81c23c 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) | |||
592 | if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { | 592 | if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { |
593 | struct bio_vec *bprev; | 593 | struct bio_vec *bprev; |
594 | 594 | ||
595 | bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1]; | 595 | bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1]; |
596 | if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) | 596 | if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) |
597 | return false; | 597 | return false; |
598 | } | 598 | } |
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index d53a764b05ea..be3290cc0644 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c | |||
@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data, | |||
278 | /* | 278 | /* |
279 | * We're out of tags on this hardware queue, kick any | 279 | * We're out of tags on this hardware queue, kick any |
280 | * pending IO submits before going to sleep waiting for | 280 | * pending IO submits before going to sleep waiting for |
281 | * some to complete. | 281 | * some to complete. Note that hctx can be NULL here for |
282 | * reserved tag allocation. | ||
282 | */ | 283 | */ |
283 | blk_mq_run_hw_queue(hctx, false); | 284 | if (hctx) |
285 | blk_mq_run_hw_queue(hctx, false); | ||
284 | 286 | ||
285 | /* | 287 | /* |
286 | * Retry tag allocation after running the hardware queue, | 288 | * Retry tag allocation after running the hardware queue, |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 4f4bea21052e..b7b8933ec241 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) | |||
1938 | */ | 1938 | */ |
1939 | if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, | 1939 | if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, |
1940 | PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) | 1940 | PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) |
1941 | goto err_map; | 1941 | goto err_mq_usage; |
1942 | 1942 | ||
1943 | setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); | 1943 | setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); |
1944 | blk_queue_rq_timeout(q, 30000); | 1944 | blk_queue_rq_timeout(q, 30000); |
@@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) | |||
1981 | blk_mq_init_cpu_queues(q, set->nr_hw_queues); | 1981 | blk_mq_init_cpu_queues(q, set->nr_hw_queues); |
1982 | 1982 | ||
1983 | if (blk_mq_init_hw_queues(q, set)) | 1983 | if (blk_mq_init_hw_queues(q, set)) |
1984 | goto err_hw; | 1984 | goto err_mq_usage; |
1985 | 1985 | ||
1986 | mutex_lock(&all_q_mutex); | 1986 | mutex_lock(&all_q_mutex); |
1987 | list_add_tail(&q->all_q_node, &all_q_list); | 1987 | list_add_tail(&q->all_q_node, &all_q_list); |
@@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) | |||
1993 | 1993 | ||
1994 | return q; | 1994 | return q; |
1995 | 1995 | ||
1996 | err_hw: | 1996 | err_mq_usage: |
1997 | blk_cleanup_queue(q); | 1997 | blk_cleanup_queue(q); |
1998 | err_hctxs: | 1998 | err_hctxs: |
1999 | kfree(map); | 1999 | kfree(map); |
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 657964e8ab7e..37fb19047603 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
@@ -65,6 +65,7 @@ struct lpss_private_data; | |||
65 | 65 | ||
66 | struct lpss_device_desc { | 66 | struct lpss_device_desc { |
67 | unsigned int flags; | 67 | unsigned int flags; |
68 | const char *clk_con_id; | ||
68 | unsigned int prv_offset; | 69 | unsigned int prv_offset; |
69 | size_t prv_size_override; | 70 | size_t prv_size_override; |
70 | void (*setup)(struct lpss_private_data *pdata); | 71 | void (*setup)(struct lpss_private_data *pdata); |
@@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = { | |||
140 | 141 | ||
141 | static struct lpss_device_desc lpt_uart_dev_desc = { | 142 | static struct lpss_device_desc lpt_uart_dev_desc = { |
142 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, | 143 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, |
144 | .clk_con_id = "baudclk", | ||
143 | .prv_offset = 0x800, | 145 | .prv_offset = 0x800, |
144 | .setup = lpss_uart_setup, | 146 | .setup = lpss_uart_setup, |
145 | }; | 147 | }; |
@@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = { | |||
156 | 158 | ||
157 | static struct lpss_device_desc byt_uart_dev_desc = { | 159 | static struct lpss_device_desc byt_uart_dev_desc = { |
158 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, | 160 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, |
161 | .clk_con_id = "baudclk", | ||
159 | .prv_offset = 0x800, | 162 | .prv_offset = 0x800, |
160 | .setup = lpss_uart_setup, | 163 | .setup = lpss_uart_setup, |
161 | }; | 164 | }; |
@@ -313,7 +316,7 @@ out: | |||
313 | return PTR_ERR(clk); | 316 | return PTR_ERR(clk); |
314 | 317 | ||
315 | pdata->clk = clk; | 318 | pdata->clk = clk; |
316 | clk_register_clkdev(clk, NULL, devname); | 319 | clk_register_clkdev(clk, dev_desc->clk_con_id, devname); |
317 | return 0; | 320 | return 0; |
318 | } | 321 | } |
319 | 322 | ||
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index e7f718d6918a..b1def411c0b8 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -485,6 +485,14 @@ void acpi_pci_irq_disable(struct pci_dev *dev) | |||
485 | if (!pin || !dev->irq_managed || dev->irq <= 0) | 485 | if (!pin || !dev->irq_managed || dev->irq <= 0) |
486 | return; | 486 | return; |
487 | 487 | ||
488 | /* Keep IOAPIC pin configuration when suspending */ | ||
489 | if (dev->dev.power.is_prepared) | ||
490 | return; | ||
491 | #ifdef CONFIG_PM | ||
492 | if (dev->dev.power.runtime_status == RPM_SUSPENDING) | ||
493 | return; | ||
494 | #endif | ||
495 | |||
488 | entry = acpi_pci_irq_lookup(dev, pin); | 496 | entry = acpi_pci_irq_lookup(dev, pin); |
489 | if (!entry) | 497 | if (!entry) |
490 | return; | 498 | return; |
@@ -505,6 +513,5 @@ void acpi_pci_irq_disable(struct pci_dev *dev) | |||
505 | if (gsi >= 0) { | 513 | if (gsi >= 0) { |
506 | acpi_unregister_gsi(gsi); | 514 | acpi_unregister_gsi(gsi); |
507 | dev->irq_managed = 0; | 515 | dev->irq_managed = 0; |
508 | dev->irq = 0; | ||
509 | } | 516 | } |
510 | } | 517 | } |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 4c35f0822d06..ef150ebb4c30 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4737,7 +4737,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) | |||
4737 | return NULL; | 4737 | return NULL; |
4738 | 4738 | ||
4739 | /* libsas case */ | 4739 | /* libsas case */ |
4740 | if (!ap->scsi_host) { | 4740 | if (ap->flags & ATA_FLAG_SAS_HOST) { |
4741 | tag = ata_sas_allocate_tag(ap); | 4741 | tag = ata_sas_allocate_tag(ap); |
4742 | if (tag < 0) | 4742 | if (tag < 0) |
4743 | return NULL; | 4743 | return NULL; |
@@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) | |||
4776 | tag = qc->tag; | 4776 | tag = qc->tag; |
4777 | if (likely(ata_tag_valid(tag))) { | 4777 | if (likely(ata_tag_valid(tag))) { |
4778 | qc->tag = ATA_TAG_POISON; | 4778 | qc->tag = ATA_TAG_POISON; |
4779 | if (!ap->scsi_host) | 4779 | if (ap->flags & ATA_FLAG_SAS_HOST) |
4780 | ata_sas_free_tag(tag, ap); | 4780 | ata_sas_free_tag(tag, ap); |
4781 | } | 4781 | } |
4782 | } | 4782 | } |
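The libata change replaces the implicit "no scsi_host means libsas owns this port" test with an explicit port flag, since SAS-managed ports can legitimately carry a scsi_host of their own. The test itself is a one-liner; a hedged helper form, with an invented name:

    #include <linux/libata.h>

    /* Sketch: explicit capability flag instead of inferring ownership from a
     * pointer that may be non-NULL for SAS-attached ports as well. */
    static inline bool sketch_port_is_sas_managed(const struct ata_port *ap)
    {
            return ap->flags & ATA_FLAG_SAS_HOST;
    }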
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index f9054cd36a72..5389579c5120 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -869,6 +869,8 @@ try_offline_again: | |||
869 | */ | 869 | */ |
870 | ata_msleep(ap, 1); | 870 | ata_msleep(ap, 1); |
871 | 871 | ||
872 | sata_set_spd(link); | ||
873 | |||
872 | /* | 874 | /* |
873 | * Now, bring the host controller online again, this can take time | 875 | * Now, bring the host controller online again, this can take time |
874 | * as PHY reset and communication establishment, 1st D2H FIS and | 876 | * as PHY reset and communication establishment, 1st D2H FIS and |
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index beb8b27d4621..a13587b5c2be 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h | |||
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops; | |||
243 | extern struct regcache_ops regcache_lzo_ops; | 243 | extern struct regcache_ops regcache_lzo_ops; |
244 | extern struct regcache_ops regcache_flat_ops; | 244 | extern struct regcache_ops regcache_flat_ops; |
245 | 245 | ||
246 | static inline const char *regmap_name(const struct regmap *map) | ||
247 | { | ||
248 | if (map->dev) | ||
249 | return dev_name(map->dev); | ||
250 | |||
251 | return map->name; | ||
252 | } | ||
253 | |||
246 | #endif | 254 | #endif |
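regmap_name() gives the tracepoints (switched in the following hunks from taking map->dev to taking the map itself) something printable even for regmaps created without a struct device behind them. A small usage sketch, assuming code that already sits inside the regmap core and includes internal.h; the logging helper is invented:

    /* Sketch only: prefer the device name, fall back to the map's own name,
     * exactly what the helper above encodes.  pr_debug() is illustrative. */
    static void sketch_log_access(struct regmap *map, unsigned int reg)
    {
            pr_debug("regmap %s: accessing register %#x\n",
                     regmap_name(map), reg);
    }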
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index d453a2c98ad0..81751a49d8bf 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c | |||
@@ -307,7 +307,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, | |||
307 | if (pos == 0) { | 307 | if (pos == 0) { |
308 | memmove(blk + offset * map->cache_word_size, | 308 | memmove(blk + offset * map->cache_word_size, |
309 | blk, rbnode->blklen * map->cache_word_size); | 309 | blk, rbnode->blklen * map->cache_word_size); |
310 | bitmap_shift_right(present, present, offset, blklen); | 310 | bitmap_shift_left(present, present, offset, blklen); |
311 | } | 311 | } |
312 | 312 | ||
313 | /* update the rbnode block, its size and the base register */ | 313 | /* update the rbnode block, its size and the base register */ |
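The one-word rbtree fix above matters because inserting a register below the current block base moves every cached value to a higher index (the memmove() just before it), so the corresponding "present" bits must move toward higher bit positions too; bitmap_shift_left() does that, while the previous bitmap_shift_right() walked them the wrong way and dropped cache-present state. A tiny standalone C illustration of the direction, using a plain unsigned long instead of the kernel bitmap API:

    #include <assert.h>

    int main(void)
    {
            unsigned long present = 0x5UL;  /* entries 0 and 2 are cached    */
            unsigned int offset = 3;        /* three new registers prepended */

            /* "shift left" == move flags to higher bit positions, matching
             * the memmove() of cached values to higher block indices */
            unsigned long shifted = present << offset;

            assert(shifted == 0x28UL);      /* entry 0 -> 3, entry 2 -> 5 */
            return 0;
    }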
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index f373c35f9e1d..87db9893b463 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c | |||
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map, | |||
218 | ret = map->cache_ops->read(map, reg, value); | 218 | ret = map->cache_ops->read(map, reg, value); |
219 | 219 | ||
220 | if (ret == 0) | 220 | if (ret == 0) |
221 | trace_regmap_reg_read_cache(map->dev, reg, *value); | 221 | trace_regmap_reg_read_cache(map, reg, *value); |
222 | 222 | ||
223 | return ret; | 223 | return ret; |
224 | } | 224 | } |
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map) | |||
311 | dev_dbg(map->dev, "Syncing %s cache\n", | 311 | dev_dbg(map->dev, "Syncing %s cache\n", |
312 | map->cache_ops->name); | 312 | map->cache_ops->name); |
313 | name = map->cache_ops->name; | 313 | name = map->cache_ops->name; |
314 | trace_regcache_sync(map->dev, name, "start"); | 314 | trace_regcache_sync(map, name, "start"); |
315 | 315 | ||
316 | if (!map->cache_dirty) | 316 | if (!map->cache_dirty) |
317 | goto out; | 317 | goto out; |
@@ -346,7 +346,7 @@ out: | |||
346 | 346 | ||
347 | regmap_async_complete(map); | 347 | regmap_async_complete(map); |
348 | 348 | ||
349 | trace_regcache_sync(map->dev, name, "stop"); | 349 | trace_regcache_sync(map, name, "stop"); |
350 | 350 | ||
351 | return ret; | 351 | return ret; |
352 | } | 352 | } |
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min, | |||
381 | name = map->cache_ops->name; | 381 | name = map->cache_ops->name; |
382 | dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); | 382 | dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); |
383 | 383 | ||
384 | trace_regcache_sync(map->dev, name, "start region"); | 384 | trace_regcache_sync(map, name, "start region"); |
385 | 385 | ||
386 | if (!map->cache_dirty) | 386 | if (!map->cache_dirty) |
387 | goto out; | 387 | goto out; |
@@ -401,7 +401,7 @@ out: | |||
401 | 401 | ||
402 | regmap_async_complete(map); | 402 | regmap_async_complete(map); |
403 | 403 | ||
404 | trace_regcache_sync(map->dev, name, "stop region"); | 404 | trace_regcache_sync(map, name, "stop region"); |
405 | 405 | ||
406 | return ret; | 406 | return ret; |
407 | } | 407 | } |
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min, | |||
428 | 428 | ||
429 | map->lock(map->lock_arg); | 429 | map->lock(map->lock_arg); |
430 | 430 | ||
431 | trace_regcache_drop_region(map->dev, min, max); | 431 | trace_regcache_drop_region(map, min, max); |
432 | 432 | ||
433 | ret = map->cache_ops->drop(map, min, max); | 433 | ret = map->cache_ops->drop(map, min, max); |
434 | 434 | ||
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable) | |||
455 | map->lock(map->lock_arg); | 455 | map->lock(map->lock_arg); |
456 | WARN_ON(map->cache_bypass && enable); | 456 | WARN_ON(map->cache_bypass && enable); |
457 | map->cache_only = enable; | 457 | map->cache_only = enable; |
458 | trace_regmap_cache_only(map->dev, enable); | 458 | trace_regmap_cache_only(map, enable); |
459 | map->unlock(map->lock_arg); | 459 | map->unlock(map->lock_arg); |
460 | } | 460 | } |
461 | EXPORT_SYMBOL_GPL(regcache_cache_only); | 461 | EXPORT_SYMBOL_GPL(regcache_cache_only); |
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable) | |||
493 | map->lock(map->lock_arg); | 493 | map->lock(map->lock_arg); |
494 | WARN_ON(map->cache_only && enable); | 494 | WARN_ON(map->cache_only && enable); |
495 | map->cache_bypass = enable; | 495 | map->cache_bypass = enable; |
496 | trace_regmap_cache_bypass(map->dev, enable); | 496 | trace_regmap_cache_bypass(map, enable); |
497 | map->unlock(map->lock_arg); | 497 | map->unlock(map->lock_arg); |
498 | } | 498 | } |
499 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); | 499 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); |
@@ -608,7 +608,8 @@ static int regcache_sync_block_single(struct regmap *map, void *block, | |||
608 | for (i = start; i < end; i++) { | 608 | for (i = start; i < end; i++) { |
609 | regtmp = block_base + (i * map->reg_stride); | 609 | regtmp = block_base + (i * map->reg_stride); |
610 | 610 | ||
611 | if (!regcache_reg_present(cache_present, i)) | 611 | if (!regcache_reg_present(cache_present, i) || |
612 | !regmap_writeable(map, regtmp)) | ||
612 | continue; | 613 | continue; |
613 | 614 | ||
614 | val = regcache_get_val(map, block, i); | 615 | val = regcache_get_val(map, block, i); |
@@ -677,7 +678,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block, | |||
677 | for (i = start; i < end; i++) { | 678 | for (i = start; i < end; i++) { |
678 | regtmp = block_base + (i * map->reg_stride); | 679 | regtmp = block_base + (i * map->reg_stride); |
679 | 680 | ||
680 | if (!regcache_reg_present(cache_present, i)) { | 681 | if (!regcache_reg_present(cache_present, i) || |
682 | !regmap_writeable(map, regtmp)) { | ||
681 | ret = regcache_sync_block_raw_flush(map, &data, | 683 | ret = regcache_sync_block_raw_flush(map, &data, |
682 | base, regtmp); | 684 | base, regtmp); |
683 | if (ret != 0) | 685 | if (ret != 0) |
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 6299a50a5960..a6c3f75b4b01 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c | |||
@@ -499,7 +499,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, | |||
499 | goto err_alloc; | 499 | goto err_alloc; |
500 | } | 500 | } |
501 | 501 | ||
502 | ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags, | 502 | ret = request_threaded_irq(irq, NULL, regmap_irq_thread, |
503 | irq_flags | IRQF_ONESHOT, | ||
503 | chip->name, d); | 504 | chip->name, d); |
504 | if (ret != 0) { | 505 | if (ret != 0) { |
505 | dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", | 506 | dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", |
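The regmap-irq hunk adds IRQF_ONESHOT because the request passes a NULL primary handler: for threaded-only interrupts the core insists on IRQF_ONESHOT so the line stays masked until the thread function has finished. A minimal sketch of the same call shape with placeholder names:

    #include <linux/interrupt.h>

    /* Sketch: threaded-only IRQ registration.  With the hard-IRQ handler left
     * NULL, IRQF_ONESHOT is required so the line stays masked until
     * sketch_thread_fn() completes. */
    static int sketch_request(unsigned int irq, unsigned long user_flags,
                              irq_handler_t sketch_thread_fn, void *cookie)
    {
            return request_threaded_irq(irq, NULL, sketch_thread_fn,
                                        user_flags | IRQF_ONESHOT,
                                        "sketch-irq", cookie);
    }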
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index f99b098ddabf..dbfe6a69c3da 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
1281 | if (map->async && map->bus->async_write) { | 1281 | if (map->async && map->bus->async_write) { |
1282 | struct regmap_async *async; | 1282 | struct regmap_async *async; |
1283 | 1283 | ||
1284 | trace_regmap_async_write_start(map->dev, reg, val_len); | 1284 | trace_regmap_async_write_start(map, reg, val_len); |
1285 | 1285 | ||
1286 | spin_lock_irqsave(&map->async_lock, flags); | 1286 | spin_lock_irqsave(&map->async_lock, flags); |
1287 | async = list_first_entry_or_null(&map->async_free, | 1287 | async = list_first_entry_or_null(&map->async_free, |
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
1339 | return ret; | 1339 | return ret; |
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | trace_regmap_hw_write_start(map->dev, reg, | 1342 | trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes); |
1343 | val_len / map->format.val_bytes); | ||
1344 | 1343 | ||
1345 | /* If we're doing a single register write we can probably just | 1344 | /* If we're doing a single register write we can probably just |
1346 | * send the work_buf directly, otherwise try to do a gather | 1345 | * send the work_buf directly, otherwise try to do a gather |
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
1372 | kfree(buf); | 1371 | kfree(buf); |
1373 | } | 1372 | } |
1374 | 1373 | ||
1375 | trace_regmap_hw_write_done(map->dev, reg, | 1374 | trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); |
1376 | val_len / map->format.val_bytes); | ||
1377 | 1375 | ||
1378 | return ret; | 1376 | return ret; |
1379 | } | 1377 | } |
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg, | |||
1407 | 1405 | ||
1408 | map->format.format_write(map, reg, val); | 1406 | map->format.format_write(map, reg, val); |
1409 | 1407 | ||
1410 | trace_regmap_hw_write_start(map->dev, reg, 1); | 1408 | trace_regmap_hw_write_start(map, reg, 1); |
1411 | 1409 | ||
1412 | ret = map->bus->write(map->bus_context, map->work_buf, | 1410 | ret = map->bus->write(map->bus_context, map->work_buf, |
1413 | map->format.buf_size); | 1411 | map->format.buf_size); |
1414 | 1412 | ||
1415 | trace_regmap_hw_write_done(map->dev, reg, 1); | 1413 | trace_regmap_hw_write_done(map, reg, 1); |
1416 | 1414 | ||
1417 | return ret; | 1415 | return ret; |
1418 | } | 1416 | } |
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg, | |||
1470 | dev_info(map->dev, "%x <= %x\n", reg, val); | 1468 | dev_info(map->dev, "%x <= %x\n", reg, val); |
1471 | #endif | 1469 | #endif |
1472 | 1470 | ||
1473 | trace_regmap_reg_write(map->dev, reg, val); | 1471 | trace_regmap_reg_write(map, reg, val); |
1474 | 1472 | ||
1475 | return map->reg_write(context, reg, val); | 1473 | return map->reg_write(context, reg, val); |
1476 | } | 1474 | } |
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, | |||
1773 | for (i = 0; i < num_regs; i++) { | 1771 | for (i = 0; i < num_regs; i++) { |
1774 | int reg = regs[i].reg; | 1772 | int reg = regs[i].reg; |
1775 | int val = regs[i].def; | 1773 | int val = regs[i].def; |
1776 | trace_regmap_hw_write_start(map->dev, reg, 1); | 1774 | trace_regmap_hw_write_start(map, reg, 1); |
1777 | map->format.format_reg(u8, reg, map->reg_shift); | 1775 | map->format.format_reg(u8, reg, map->reg_shift); |
1778 | u8 += reg_bytes + pad_bytes; | 1776 | u8 += reg_bytes + pad_bytes; |
1779 | map->format.format_val(u8, val, 0); | 1777 | map->format.format_val(u8, val, 0); |
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, | |||
1788 | 1786 | ||
1789 | for (i = 0; i < num_regs; i++) { | 1787 | for (i = 0; i < num_regs; i++) { |
1790 | int reg = regs[i].reg; | 1788 | int reg = regs[i].reg; |
1791 | trace_regmap_hw_write_done(map->dev, reg, 1); | 1789 | trace_regmap_hw_write_done(map, reg, 1); |
1792 | } | 1790 | } |
1793 | return ret; | 1791 | return ret; |
1794 | } | 1792 | } |
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, | |||
2059 | */ | 2057 | */ |
2060 | u8[0] |= map->read_flag_mask; | 2058 | u8[0] |= map->read_flag_mask; |
2061 | 2059 | ||
2062 | trace_regmap_hw_read_start(map->dev, reg, | 2060 | trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); |
2063 | val_len / map->format.val_bytes); | ||
2064 | 2061 | ||
2065 | ret = map->bus->read(map->bus_context, map->work_buf, | 2062 | ret = map->bus->read(map->bus_context, map->work_buf, |
2066 | map->format.reg_bytes + map->format.pad_bytes, | 2063 | map->format.reg_bytes + map->format.pad_bytes, |
2067 | val, val_len); | 2064 | val, val_len); |
2068 | 2065 | ||
2069 | trace_regmap_hw_read_done(map->dev, reg, | 2066 | trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); |
2070 | val_len / map->format.val_bytes); | ||
2071 | 2067 | ||
2072 | return ret; | 2068 | return ret; |
2073 | } | 2069 | } |
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg, | |||
2123 | dev_info(map->dev, "%x => %x\n", reg, *val); | 2119 | dev_info(map->dev, "%x => %x\n", reg, *val); |
2124 | #endif | 2120 | #endif |
2125 | 2121 | ||
2126 | trace_regmap_reg_read(map->dev, reg, *val); | 2122 | trace_regmap_reg_read(map, reg, *val); |
2127 | 2123 | ||
2128 | if (!map->cache_bypass) | 2124 | if (!map->cache_bypass) |
2129 | regcache_write(map, reg, *val); | 2125 | regcache_write(map, reg, *val); |
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret) | |||
2480 | struct regmap *map = async->map; | 2476 | struct regmap *map = async->map; |
2481 | bool wake; | 2477 | bool wake; |
2482 | 2478 | ||
2483 | trace_regmap_async_io_complete(map->dev); | 2479 | trace_regmap_async_io_complete(map); |
2484 | 2480 | ||
2485 | spin_lock(&map->async_lock); | 2481 | spin_lock(&map->async_lock); |
2486 | list_move(&async->list, &map->async_free); | 2482 | list_move(&async->list, &map->async_free); |
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map) | |||
2525 | if (!map->bus || !map->bus->async_write) | 2521 | if (!map->bus || !map->bus->async_write) |
2526 | return 0; | 2522 | return 0; |
2527 | 2523 | ||
2528 | trace_regmap_async_complete_start(map->dev); | 2524 | trace_regmap_async_complete_start(map); |
2529 | 2525 | ||
2530 | wait_event(map->async_waitq, regmap_async_is_done(map)); | 2526 | wait_event(map->async_waitq, regmap_async_is_done(map)); |
2531 | 2527 | ||
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map) | |||
2534 | map->async_ret = 0; | 2530 | map->async_ret = 0; |
2535 | spin_unlock_irqrestore(&map->async_lock, flags); | 2531 | spin_unlock_irqrestore(&map->async_lock, flags); |
2536 | 2532 | ||
2537 | trace_regmap_async_complete_done(map->dev); | 2533 | trace_regmap_async_complete_done(map); |
2538 | 2534 | ||
2539 | return ret; | 2535 | return ret; |
2540 | } | 2536 | } |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 4bc2a5cb9935..a98c41f72c63 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -803,10 +803,6 @@ static int __init nbd_init(void) | |||
803 | return -EINVAL; | 803 | return -EINVAL; |
804 | } | 804 | } |
805 | 805 | ||
806 | nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); | ||
807 | if (!nbd_dev) | ||
808 | return -ENOMEM; | ||
809 | |||
810 | part_shift = 0; | 806 | part_shift = 0; |
811 | if (max_part > 0) { | 807 | if (max_part > 0) { |
812 | part_shift = fls(max_part); | 808 | part_shift = fls(max_part); |
@@ -828,6 +824,10 @@ static int __init nbd_init(void) | |||
828 | if (nbds_max > 1UL << (MINORBITS - part_shift)) | 824 | if (nbds_max > 1UL << (MINORBITS - part_shift)) |
829 | return -EINVAL; | 825 | return -EINVAL; |
830 | 826 | ||
827 | nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); | ||
828 | if (!nbd_dev) | ||
829 | return -ENOMEM; | ||
830 | |||
831 | for (i = 0; i < nbds_max; i++) { | 831 | for (i = 0; i < nbds_max; i++) { |
832 | struct gendisk *disk = alloc_disk(1 << part_shift); | 832 | struct gendisk *disk = alloc_disk(1 << part_shift); |
833 | if (!disk) | 833 | if (!disk) |
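The nbd change only reorders nbd_init(): the kcalloc() now happens after the module-parameter checks, so an invalid nbds_max or max_part no longer leaks the freshly allocated array on the early -EINVAL returns. A compressed sketch of the resulting validate-then-allocate shape, with invented names:

    #include <linux/err.h>
    #include <linux/slab.h>

    /* Sketch: validate parameters first, allocate afterwards, so early error
     * returns have nothing to unwind. */
    static void *sketch_alloc_devices(unsigned long nr, size_t elem_size,
                                      bool params_ok)
    {
            if (!params_ok)
                    return ERR_PTR(-EINVAL);        /* nothing allocated yet */

            return kcalloc(nr, elem_size, GFP_KERNEL);
    }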
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index ceb32dd52a6c..e23be20a3417 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c | |||
@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3003 | } | 3003 | } |
3004 | get_device(dev->device); | 3004 | get_device(dev->device); |
3005 | 3005 | ||
3006 | INIT_LIST_HEAD(&dev->node); | ||
3006 | INIT_WORK(&dev->probe_work, nvme_async_probe); | 3007 | INIT_WORK(&dev->probe_work, nvme_async_probe); |
3007 | schedule_work(&dev->probe_work); | 3008 | schedule_work(&dev->probe_work); |
3008 | return 0; | 3009 | return 0; |
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 1d278ccd751f..e096e9cddb40 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c | |||
@@ -140,24 +140,24 @@ static int tpm_dev_add_device(struct tpm_chip *chip) | |||
140 | { | 140 | { |
141 | int rc; | 141 | int rc; |
142 | 142 | ||
143 | rc = device_add(&chip->dev); | 143 | rc = cdev_add(&chip->cdev, chip->dev.devt, 1); |
144 | if (rc) { | 144 | if (rc) { |
145 | dev_err(&chip->dev, | 145 | dev_err(&chip->dev, |
146 | "unable to device_register() %s, major %d, minor %d, err=%d\n", | 146 | "unable to cdev_add() %s, major %d, minor %d, err=%d\n", |
147 | chip->devname, MAJOR(chip->dev.devt), | 147 | chip->devname, MAJOR(chip->dev.devt), |
148 | MINOR(chip->dev.devt), rc); | 148 | MINOR(chip->dev.devt), rc); |
149 | 149 | ||
150 | device_unregister(&chip->dev); | ||
150 | return rc; | 151 | return rc; |
151 | } | 152 | } |
152 | 153 | ||
153 | rc = cdev_add(&chip->cdev, chip->dev.devt, 1); | 154 | rc = device_add(&chip->dev); |
154 | if (rc) { | 155 | if (rc) { |
155 | dev_err(&chip->dev, | 156 | dev_err(&chip->dev, |
156 | "unable to cdev_add() %s, major %d, minor %d, err=%d\n", | 157 | "unable to device_register() %s, major %d, minor %d, err=%d\n", |
157 | chip->devname, MAJOR(chip->dev.devt), | 158 | chip->devname, MAJOR(chip->dev.devt), |
158 | MINOR(chip->dev.devt), rc); | 159 | MINOR(chip->dev.devt), rc); |
159 | 160 | ||
160 | device_unregister(&chip->dev); | ||
161 | return rc; | 161 | return rc; |
162 | } | 162 | } |
163 | 163 | ||
@@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip) | |||
174 | * tpm_chip_register() - create a character device for the TPM chip | 174 | * tpm_chip_register() - create a character device for the TPM chip |
175 | * @chip: TPM chip to use. | 175 | * @chip: TPM chip to use. |
176 | * | 176 | * |
177 | * Creates a character device for the TPM chip and adds sysfs interfaces for | 177 | * Creates a character device for the TPM chip and adds sysfs attributes for |
178 | * the device, PPI and TCPA. As the last step this function adds the | 178 | * the device. As the last step this function adds the chip to the list of TPM |
179 | * chip to the list of TPM chips available for use. | 179 | * chips available for in-kernel use. |
180 | * | 180 | * |
181 | * NOTE: This function should be only called after the chip initialization | 181 | * This function should be only called after the chip initialization is |
182 | * is complete. | 182 | * complete. |
183 | * | ||
184 | * Called from tpm_<specific>.c probe function only for devices | ||
185 | * the driver has determined it should claim. Prior to calling | ||
186 | * this function the specific probe function has called pci_enable_device | ||
187 | * upon errant exit from this function specific probe function should call | ||
188 | * pci_disable_device | ||
189 | */ | 183 | */ |
190 | int tpm_chip_register(struct tpm_chip *chip) | 184 | int tpm_chip_register(struct tpm_chip *chip) |
191 | { | 185 | { |
192 | int rc; | 186 | int rc; |
193 | 187 | ||
194 | rc = tpm_dev_add_device(chip); | ||
195 | if (rc) | ||
196 | return rc; | ||
197 | |||
198 | /* Populate sysfs for TPM1 devices. */ | 188 | /* Populate sysfs for TPM1 devices. */ |
199 | if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { | 189 | if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { |
200 | rc = tpm_sysfs_add_device(chip); | 190 | rc = tpm_sysfs_add_device(chip); |
@@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip) | |||
208 | chip->bios_dir = tpm_bios_log_setup(chip->devname); | 198 | chip->bios_dir = tpm_bios_log_setup(chip->devname); |
209 | } | 199 | } |
210 | 200 | ||
201 | rc = tpm_dev_add_device(chip); | ||
202 | if (rc) | ||
203 | return rc; | ||
204 | |||
211 | /* Make the chip available. */ | 205 | /* Make the chip available. */ |
212 | spin_lock(&driver_lock); | 206 | spin_lock(&driver_lock); |
213 | list_add_rcu(&chip->list, &tpm_chip_list); | 207 | list_add_rcu(&chip->list, &tpm_chip_list); |
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index b1e53e3aece5..42ffa5e7a1e0 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c | |||
@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
124 | { | 124 | { |
125 | struct ibmvtpm_dev *ibmvtpm; | 125 | struct ibmvtpm_dev *ibmvtpm; |
126 | struct ibmvtpm_crq crq; | 126 | struct ibmvtpm_crq crq; |
127 | u64 *word = (u64 *) &crq; | 127 | __be64 *word = (__be64 *)&crq; |
128 | int rc; | 128 | int rc; |
129 | 129 | ||
130 | ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); | 130 | ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); |
@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
145 | memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); | 145 | memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); |
146 | crq.valid = (u8)IBMVTPM_VALID_CMD; | 146 | crq.valid = (u8)IBMVTPM_VALID_CMD; |
147 | crq.msg = (u8)VTPM_TPM_COMMAND; | 147 | crq.msg = (u8)VTPM_TPM_COMMAND; |
148 | crq.len = (u16)count; | 148 | crq.len = cpu_to_be16(count); |
149 | crq.data = ibmvtpm->rtce_dma_handle; | 149 | crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); |
150 | 150 | ||
151 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]), | 151 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), |
152 | cpu_to_be64(word[1])); | 152 | be64_to_cpu(word[1])); |
153 | if (rc != H_SUCCESS) { | 153 | if (rc != H_SUCCESS) { |
154 | dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); | 154 | dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); |
155 | rc = 0; | 155 | rc = 0; |
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h index f595f14426bf..6af92890518f 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.h +++ b/drivers/char/tpm/tpm_ibmvtpm.h | |||
@@ -22,9 +22,9 @@ | |||
22 | struct ibmvtpm_crq { | 22 | struct ibmvtpm_crq { |
23 | u8 valid; | 23 | u8 valid; |
24 | u8 msg; | 24 | u8 msg; |
25 | u16 len; | 25 | __be16 len; |
26 | u32 data; | 26 | __be32 data; |
27 | u64 reserved; | 27 | __be64 reserved; |
28 | } __attribute__((packed, aligned(8))); | 28 | } __attribute__((packed, aligned(8))); |
29 | 29 | ||
30 | struct ibmvtpm_crq_queue { | 30 | struct ibmvtpm_crq_queue { |
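Declaring the CRQ fields as __be16/__be32/__be64 and filling them with cpu_to_be16()/cpu_to_be32() makes the big-endian wire layout explicit and lets sparse flag any unconverted assignment; the send path then only byte-swaps the two 64-bit words handed to the hypervisor call. A small hedged sketch of the same annotation pattern on an invented structure:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Sketch: a big-endian wire structure.  The __be types let sparse catch
     * stores that forget the cpu_to_be*() conversion. */
    struct sketch_crq {
            u8     valid;
            u8     msg;
            __be16 len;
            __be32 data;
            __be64 reserved;
    } __packed;

    static void sketch_fill(struct sketch_crq *crq, u16 len, u32 dma_handle)
    {
            crq->len  = cpu_to_be16(len);           /* CPU order -> wire order */
            crq->data = cpu_to_be32(dma_handle);
    }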
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index fae2dbbf5745..72d7028f779b 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -142,6 +142,7 @@ struct ports_device { | |||
142 | * notification | 142 | * notification |
143 | */ | 143 | */ |
144 | struct work_struct control_work; | 144 | struct work_struct control_work; |
145 | struct work_struct config_work; | ||
145 | 146 | ||
146 | struct list_head ports; | 147 | struct list_head ports; |
147 | 148 | ||
@@ -1837,10 +1838,21 @@ static void config_intr(struct virtio_device *vdev) | |||
1837 | 1838 | ||
1838 | portdev = vdev->priv; | 1839 | portdev = vdev->priv; |
1839 | 1840 | ||
1841 | if (!use_multiport(portdev)) | ||
1842 | schedule_work(&portdev->config_work); | ||
1843 | } | ||
1844 | |||
1845 | static void config_work_handler(struct work_struct *work) | ||
1846 | { | ||
1847 | struct ports_device *portdev; | ||
1848 | |||
1849 | portdev = container_of(work, struct ports_device, control_work); | ||
1840 | if (!use_multiport(portdev)) { | 1850 | if (!use_multiport(portdev)) { |
1851 | struct virtio_device *vdev; | ||
1841 | struct port *port; | 1852 | struct port *port; |
1842 | u16 rows, cols; | 1853 | u16 rows, cols; |
1843 | 1854 | ||
1855 | vdev = portdev->vdev; | ||
1844 | virtio_cread(vdev, struct virtio_console_config, cols, &cols); | 1856 | virtio_cread(vdev, struct virtio_console_config, cols, &cols); |
1845 | virtio_cread(vdev, struct virtio_console_config, rows, &rows); | 1857 | virtio_cread(vdev, struct virtio_console_config, rows, &rows); |
1846 | 1858 | ||
@@ -2040,12 +2052,14 @@ static int virtcons_probe(struct virtio_device *vdev) | |||
2040 | 2052 | ||
2041 | virtio_device_ready(portdev->vdev); | 2053 | virtio_device_ready(portdev->vdev); |
2042 | 2054 | ||
2055 | INIT_WORK(&portdev->config_work, &config_work_handler); | ||
2056 | INIT_WORK(&portdev->control_work, &control_work_handler); | ||
2057 | |||
2043 | if (multiport) { | 2058 | if (multiport) { |
2044 | unsigned int nr_added_bufs; | 2059 | unsigned int nr_added_bufs; |
2045 | 2060 | ||
2046 | spin_lock_init(&portdev->c_ivq_lock); | 2061 | spin_lock_init(&portdev->c_ivq_lock); |
2047 | spin_lock_init(&portdev->c_ovq_lock); | 2062 | spin_lock_init(&portdev->c_ovq_lock); |
2048 | INIT_WORK(&portdev->control_work, &control_work_handler); | ||
2049 | 2063 | ||
2050 | nr_added_bufs = fill_queue(portdev->c_ivq, | 2064 | nr_added_bufs = fill_queue(portdev->c_ivq, |
2051 | &portdev->c_ivq_lock); | 2065 | &portdev->c_ivq_lock); |
@@ -2113,6 +2127,8 @@ static void virtcons_remove(struct virtio_device *vdev) | |||
2113 | /* Finish up work that's lined up */ | 2127 | /* Finish up work that's lined up */ |
2114 | if (use_multiport(portdev)) | 2128 | if (use_multiport(portdev)) |
2115 | cancel_work_sync(&portdev->control_work); | 2129 | cancel_work_sync(&portdev->control_work); |
2130 | else | ||
2131 | cancel_work_sync(&portdev->config_work); | ||
2116 | 2132 | ||
2117 | list_for_each_entry_safe(port, port2, &portdev->ports, list) | 2133 | list_for_each_entry_safe(port, port2, &portdev->ports, list) |
2118 | unplug_port(port); | 2134 | unplug_port(port); |
@@ -2164,6 +2180,7 @@ static int virtcons_freeze(struct virtio_device *vdev) | |||
2164 | 2180 | ||
2165 | virtqueue_disable_cb(portdev->c_ivq); | 2181 | virtqueue_disable_cb(portdev->c_ivq); |
2166 | cancel_work_sync(&portdev->control_work); | 2182 | cancel_work_sync(&portdev->control_work); |
2183 | cancel_work_sync(&portdev->config_work); | ||
2167 | /* | 2184 | /* |
2168 | * Once more: if control_work_handler() was running, it would | 2185 | * Once more: if control_work_handler() was running, it would |
2169 | * enable the cb as the last step. | 2186 | * enable the cb as the last step. |
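The virtio_console change moves the config-space reads out of the interrupt callback into a dedicated work item, since virtio_cread() may sleep; config_intr() now merely schedules the work, and the remove/freeze paths cancel it. A generic sketch of that defer-to-workqueue pattern with invented names; note that container_of() must name the member that actually embeds the work_struct being handled:

    #include <linux/workqueue.h>

    struct sketch_dev {
            struct work_struct config_work; /* INIT_WORK()ed at probe time */
            /* ... device state ... */
    };

    /* Runs in process context, so sleeping (config reads, mutexes) is fine. */
    static void sketch_config_work_handler(struct work_struct *work)
    {
            struct sketch_dev *sd =
                    container_of(work, struct sketch_dev, config_work);

            /* ... re-read configuration and act on it ... */
            (void)sd;
    }

    /* Interrupt context: just queue the work and return immediately. */
    static void sketch_config_intr(struct sketch_dev *sd)
    {
            schedule_work(&sd->config_work);
    }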
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index db7f8bce7467..25006a8bb8e6 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c | |||
@@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, | |||
144 | divider->flags); | 144 | divider->flags); |
145 | } | 145 | } |
146 | 146 | ||
147 | /* | ||
148 | * The reverse of DIV_ROUND_UP: The maximum number which | ||
149 | * divided by m is r | ||
150 | */ | ||
151 | #define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1) | ||
152 | |||
153 | static bool _is_valid_table_div(const struct clk_div_table *table, | 147 | static bool _is_valid_table_div(const struct clk_div_table *table, |
154 | unsigned int div) | 148 | unsigned int div) |
155 | { | 149 | { |
@@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table, | |||
225 | unsigned long parent_rate, unsigned long rate, | 219 | unsigned long parent_rate, unsigned long rate, |
226 | unsigned long flags) | 220 | unsigned long flags) |
227 | { | 221 | { |
228 | int up, down, div; | 222 | int up, down; |
223 | unsigned long up_rate, down_rate; | ||
229 | 224 | ||
230 | up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); | 225 | up = DIV_ROUND_UP(parent_rate, rate); |
226 | down = parent_rate / rate; | ||
231 | 227 | ||
232 | if (flags & CLK_DIVIDER_POWER_OF_TWO) { | 228 | if (flags & CLK_DIVIDER_POWER_OF_TWO) { |
233 | up = __roundup_pow_of_two(div); | 229 | up = __roundup_pow_of_two(up); |
234 | down = __rounddown_pow_of_two(div); | 230 | down = __rounddown_pow_of_two(down); |
235 | } else if (table) { | 231 | } else if (table) { |
236 | up = _round_up_table(table, div); | 232 | up = _round_up_table(table, up); |
237 | down = _round_down_table(table, div); | 233 | down = _round_down_table(table, down); |
238 | } | 234 | } |
239 | 235 | ||
240 | return (up - div) <= (div - down) ? up : down; | 236 | up_rate = DIV_ROUND_UP(parent_rate, up); |
237 | down_rate = DIV_ROUND_UP(parent_rate, down); | ||
238 | |||
239 | return (rate - up_rate) <= (down_rate - rate) ? up : down; | ||
241 | } | 240 | } |
242 | 241 | ||
243 | static int _div_round(const struct clk_div_table *table, | 242 | static int _div_round(const struct clk_div_table *table, |
@@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, | |||
313 | return i; | 312 | return i; |
314 | } | 313 | } |
315 | parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), | 314 | parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), |
316 | MULT_ROUND_UP(rate, i)); | 315 | rate * i); |
317 | now = DIV_ROUND_UP(parent_rate, i); | 316 | now = DIV_ROUND_UP(parent_rate, i); |
318 | if (_is_best_div(rate, now, best, flags)) { | 317 | if (_is_best_div(rate, now, best, flags)) { |
319 | bestdiv = i; | 318 | bestdiv = i; |
@@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, | |||
353 | bestdiv = readl(divider->reg) >> divider->shift; | 352 | bestdiv = readl(divider->reg) >> divider->shift; |
354 | bestdiv &= div_mask(divider->width); | 353 | bestdiv &= div_mask(divider->width); |
355 | bestdiv = _get_div(divider->table, bestdiv, divider->flags); | 354 | bestdiv = _get_div(divider->table, bestdiv, divider->flags); |
356 | return bestdiv; | 355 | return DIV_ROUND_UP(*prate, bestdiv); |
357 | } | 356 | } |
358 | 357 | ||
359 | return divider_round_rate(hw, rate, prate, divider->table, | 358 | return divider_round_rate(hw, rate, prate, divider->table, |
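The reworked _div_round_closest() above no longer picks whichever divider sits closest to DIV_ROUND_CLOSEST(parent_rate, rate); it rounds the candidate divider both up and down, converts each back into the rate it would actually produce, and chooses the divider whose rate is nearer the request, which matters once power-of-two or table rounding pushes the two candidates apart asymmetrically. A standalone arithmetic sketch of that comparison, in plain C with made-up numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long parent = 48000000, want = 7000000;

            unsigned long up   = DIV_ROUND_UP(parent, want);  /* 7: slower clock */
            unsigned long down = parent / want;               /* 6: faster clock */

            unsigned long up_rate   = DIV_ROUND_UP(parent, up);   /* ~6.86 MHz */
            unsigned long down_rate = DIV_ROUND_UP(parent, down); /*  8.00 MHz */

            /* pick the divider whose achievable rate is nearer the request */
            unsigned long best =
                    (want - up_rate) <= (down_rate - want) ? up : down;

            printf("best divider %lu gives %lu Hz\n",
                   best, DIV_ROUND_UP(parent, best));
            return 0;
    }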
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index eb0152961d3c..237f23f68bfc 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk) | |||
1350 | 1350 | ||
1351 | return rate; | 1351 | return rate; |
1352 | } | 1352 | } |
1353 | EXPORT_SYMBOL_GPL(clk_core_get_rate); | ||
1354 | 1353 | ||
1355 | /** | 1354 | /** |
1356 | * clk_get_rate - return the rate of clk | 1355 | * clk_get_rate - return the rate of clk |
@@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk) | |||
2171 | } | 2170 | } |
2172 | 2171 | ||
2173 | /** | 2172 | /** |
2173 | * clk_is_match - check if two clk's point to the same hardware clock | ||
2174 | * @p: clk compared against q | ||
2175 | * @q: clk compared against p | ||
2176 | * | ||
2177 | * Returns true if the two struct clk pointers both point to the same hardware | ||
2178 | * clock node. Put differently, returns true if struct clk *p and struct clk *q | ||
2179 | * share the same struct clk_core object. | ||
2180 | * | ||
2181 | * Returns false otherwise. Note that two NULL clks are treated as matching. | ||
2182 | */ | ||
2183 | bool clk_is_match(const struct clk *p, const struct clk *q) | ||
2184 | { | ||
2185 | /* trivial case: identical struct clk's or both NULL */ | ||
2186 | if (p == q) | ||
2187 | return true; | ||
2188 | |||
2189 | /* true if clk->core pointers match. Avoid derefing garbage */ | ||
2190 | if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) | ||
2191 | if (p->core == q->core) | ||
2192 | return true; | ||
2193 | |||
2194 | return false; | ||
2195 | } | ||
2196 | EXPORT_SYMBOL_GPL(clk_is_match); | ||
2197 | |||
2198 | /** | ||
2174 | * __clk_init - initialize the data structures in a struct clk | 2199 | * __clk_init - initialize the data structures in a struct clk |
2175 | * @dev: device initializing this clk, placeholder for now | 2200 | * @dev: device initializing this clk, placeholder for now |
2176 | * @clk: clk being initialized | 2201 | * @clk: clk being initialized |
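clk_is_match() exists because, with per-user struct clk handles, two clk_get() calls for the same hardware clock return distinct pointers, so a plain pointer comparison no longer answers "is this the same clock". A hedged usage sketch; the con_ids are invented:

    #include <linux/clk.h>
    #include <linux/err.h>

    /* Sketch: compare two consumer handles by the underlying hardware clock
     * rather than by pointer identity. */
    static bool sketch_same_clock(struct device *dev)
    {
            struct clk *a = clk_get(dev, "mux_in");
            struct clk *b = clk_get(dev, "ref");
            bool same = false;

            if (!IS_ERR(a) && !IS_ERR(b))
                    same = clk_is_match(a, b);

            if (!IS_ERR(a))
                    clk_put(a);
            if (!IS_ERR(b))
                    clk_put(b);
            return same;
    }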
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index b0b562b9ce0e..e60feffc10a1 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c | |||
@@ -48,6 +48,17 @@ static struct clk_pll pll3 = { | |||
48 | }, | 48 | }, |
49 | }; | 49 | }; |
50 | 50 | ||
51 | static struct clk_regmap pll4_vote = { | ||
52 | .enable_reg = 0x34c0, | ||
53 | .enable_mask = BIT(4), | ||
54 | .hw.init = &(struct clk_init_data){ | ||
55 | .name = "pll4_vote", | ||
56 | .parent_names = (const char *[]){ "pll4" }, | ||
57 | .num_parents = 1, | ||
58 | .ops = &clk_pll_vote_ops, | ||
59 | }, | ||
60 | }; | ||
61 | |||
51 | static struct clk_pll pll8 = { | 62 | static struct clk_pll pll8 = { |
52 | .l_reg = 0x3144, | 63 | .l_reg = 0x3144, |
53 | .m_reg = 0x3148, | 64 | .m_reg = 0x3148, |
@@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = { | |||
3023 | 3034 | ||
3024 | static struct clk_regmap *gcc_msm8960_clks[] = { | 3035 | static struct clk_regmap *gcc_msm8960_clks[] = { |
3025 | [PLL3] = &pll3.clkr, | 3036 | [PLL3] = &pll3.clkr, |
3037 | [PLL4_VOTE] = &pll4_vote, | ||
3026 | [PLL8] = &pll8.clkr, | 3038 | [PLL8] = &pll8.clkr, |
3027 | [PLL8_VOTE] = &pll8_vote, | 3039 | [PLL8_VOTE] = &pll8_vote, |
3028 | [PLL14] = &pll14.clkr, | 3040 | [PLL14] = &pll14.clkr, |
@@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = { | |||
3247 | 3259 | ||
3248 | static struct clk_regmap *gcc_apq8064_clks[] = { | 3260 | static struct clk_regmap *gcc_apq8064_clks[] = { |
3249 | [PLL3] = &pll3.clkr, | 3261 | [PLL3] = &pll3.clkr, |
3262 | [PLL4_VOTE] = &pll4_vote, | ||
3250 | [PLL8] = &pll8.clkr, | 3263 | [PLL8] = &pll8.clkr, |
3251 | [PLL8_VOTE] = &pll8_vote, | 3264 | [PLL8_VOTE] = &pll8_vote, |
3252 | [PLL14] = &pll14.clkr, | 3265 | [PLL14] = &pll14.clkr, |
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c index 121ffde25dc3..c9ff27b4648b 100644 --- a/drivers/clk/qcom/lcc-ipq806x.c +++ b/drivers/clk/qcom/lcc-ipq806x.c | |||
@@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = { | |||
462 | .remove = lcc_ipq806x_remove, | 462 | .remove = lcc_ipq806x_remove, |
463 | .driver = { | 463 | .driver = { |
464 | .name = "lcc-ipq806x", | 464 | .name = "lcc-ipq806x", |
465 | .owner = THIS_MODULE, | ||
466 | .of_match_table = lcc_ipq806x_match_table, | 465 | .of_match_table = lcc_ipq806x_match_table, |
467 | }, | 466 | }, |
468 | }; | 467 | }; |
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c index a75a408cfccd..e2c863295f00 100644 --- a/drivers/clk/qcom/lcc-msm8960.c +++ b/drivers/clk/qcom/lcc-msm8960.c | |||
@@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = { | |||
417 | .mnctr_en_bit = 8, | 417 | .mnctr_en_bit = 8, |
418 | .mnctr_reset_bit = 7, | 418 | .mnctr_reset_bit = 7, |
419 | .mnctr_mode_shift = 5, | 419 | .mnctr_mode_shift = 5, |
420 | .n_val_shift = 16, | 420 | .n_val_shift = 24, |
421 | .m_val_shift = 16, | 421 | .m_val_shift = 8, |
422 | .width = 8, | 422 | .width = 8, |
423 | }, | 423 | }, |
424 | .p = { | 424 | .p = { |
@@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev) | |||
547 | return PTR_ERR(regmap); | 547 | return PTR_ERR(regmap); |
548 | 548 | ||
549 | /* Use the correct frequency plan depending on speed of PLL4 */ | 549 | /* Use the correct frequency plan depending on speed of PLL4 */ |
550 | val = regmap_read(regmap, 0x4, &val); | 550 | regmap_read(regmap, 0x4, &val); |
551 | if (val == 0x12) { | 551 | if (val == 0x12) { |
552 | slimbus_src.freq_tbl = clk_tbl_aif_osr_492; | 552 | slimbus_src.freq_tbl = clk_tbl_aif_osr_492; |
553 | mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; | 553 | mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; |
@@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = { | |||
574 | .remove = lcc_msm8960_remove, | 574 | .remove = lcc_msm8960_remove, |
575 | .driver = { | 575 | .driver = { |
576 | .name = "lcc-msm8960", | 576 | .name = "lcc-msm8960", |
577 | .owner = THIS_MODULE, | ||
578 | .of_match_table = lcc_msm8960_match_table, | 577 | .of_match_table = lcc_msm8960_match_table, |
579 | }, | 578 | }, |
580 | }; | 579 | }; |
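The lcc-msm8960 fix above deserves spelling out: regmap_read() returns 0 or a negative error code and delivers the register contents through its third argument, so assigning its return value to val immediately clobbered the value that the following "if (val == 0x12)" test relies on. A minimal sketch of the corrected call pattern; the register offset is taken from the hunk, the function name is invented:

    #include <linux/regmap.h>

    /* Sketch: status comes back in the return value, data in *mode --
     * never assign the return value to the data variable. */
    static int sketch_read_pll_mode(struct regmap *regmap, unsigned int *mode)
    {
            int ret = regmap_read(regmap, 0x4, mode);

            if (ret)
                    return ret;     /* bus or cache error */

            return 0;               /* *mode now holds the register value */
    }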
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 6ef89639a9f6..d21640634adf 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c | |||
@@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw) | |||
84 | struct fapll_data *fd = to_fapll(hw); | 84 | struct fapll_data *fd = to_fapll(hw); |
85 | u32 v = readl_relaxed(fd->base); | 85 | u32 v = readl_relaxed(fd->base); |
86 | 86 | ||
87 | v |= (1 << FAPLL_MAIN_PLLEN); | 87 | v |= FAPLL_MAIN_PLLEN; |
88 | writel_relaxed(v, fd->base); | 88 | writel_relaxed(v, fd->base); |
89 | 89 | ||
90 | return 0; | 90 | return 0; |
@@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw) | |||
95 | struct fapll_data *fd = to_fapll(hw); | 95 | struct fapll_data *fd = to_fapll(hw); |
96 | u32 v = readl_relaxed(fd->base); | 96 | u32 v = readl_relaxed(fd->base); |
97 | 97 | ||
98 | v &= ~(1 << FAPLL_MAIN_PLLEN); | 98 | v &= ~FAPLL_MAIN_PLLEN; |
99 | writel_relaxed(v, fd->base); | 99 | writel_relaxed(v, fd->base); |
100 | } | 100 | } |
101 | 101 | ||
@@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw) | |||
104 | struct fapll_data *fd = to_fapll(hw); | 104 | struct fapll_data *fd = to_fapll(hw); |
105 | u32 v = readl_relaxed(fd->base); | 105 | u32 v = readl_relaxed(fd->base); |
106 | 106 | ||
107 | return v & (1 << FAPLL_MAIN_PLLEN); | 107 | return v & FAPLL_MAIN_PLLEN; |
108 | } | 108 | } |
109 | 109 | ||
110 | static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, | 110 | static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 68161f7a07d6..a0b036ccb118 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI | |||
192 | config SH_TIMER_CMT | 192 | config SH_TIMER_CMT |
193 | bool "Renesas CMT timer driver" if COMPILE_TEST | 193 | bool "Renesas CMT timer driver" if COMPILE_TEST |
194 | depends on GENERIC_CLOCKEVENTS | 194 | depends on GENERIC_CLOCKEVENTS |
195 | depends on HAS_IOMEM | ||
195 | default SYS_SUPPORTS_SH_CMT | 196 | default SYS_SUPPORTS_SH_CMT |
196 | help | 197 | help |
197 | This enables build of a clocksource and clockevent driver for | 198 | This enables build of a clocksource and clockevent driver for |
@@ -201,6 +202,7 @@ config SH_TIMER_CMT | |||
201 | config SH_TIMER_MTU2 | 202 | config SH_TIMER_MTU2 |
202 | bool "Renesas MTU2 timer driver" if COMPILE_TEST | 203 | bool "Renesas MTU2 timer driver" if COMPILE_TEST |
203 | depends on GENERIC_CLOCKEVENTS | 204 | depends on GENERIC_CLOCKEVENTS |
205 | depends on HAS_IOMEM | ||
204 | default SYS_SUPPORTS_SH_MTU2 | 206 | default SYS_SUPPORTS_SH_MTU2 |
205 | help | 207 | help |
206 | This enables build of a clockevent driver for the Multi-Function | 208 | This enables build of a clockevent driver for the Multi-Function |
@@ -210,6 +212,7 @@ config SH_TIMER_MTU2 | |||
210 | config SH_TIMER_TMU | 212 | config SH_TIMER_TMU |
211 | bool "Renesas TMU timer driver" if COMPILE_TEST | 213 | bool "Renesas TMU timer driver" if COMPILE_TEST |
212 | depends on GENERIC_CLOCKEVENTS | 214 | depends on GENERIC_CLOCKEVENTS |
215 | depends on HAS_IOMEM | ||
213 | default SYS_SUPPORTS_SH_TMU | 216 | default SYS_SUPPORTS_SH_TMU |
214 | help | 217 | help |
215 | This enables build of a clocksource and clockevent driver for | 218 | This enables build of a clocksource and clockevent driver for |
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c index bba62f9deefb..ec57ba2bbd87 100644 --- a/drivers/clocksource/time-efm32.c +++ b/drivers/clocksource/time-efm32.c | |||
@@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np) | |||
225 | clock_event_ddata.base = base; | 225 | clock_event_ddata.base = base; |
226 | clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); | 226 | clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); |
227 | 227 | ||
228 | setup_irq(irq, &efm32_clock_event_irq); | ||
229 | |||
230 | clockevents_config_and_register(&clock_event_ddata.evtdev, | 228 | clockevents_config_and_register(&clock_event_ddata.evtdev, |
231 | DIV_ROUND_CLOSEST(rate, 1024), | 229 | DIV_ROUND_CLOSEST(rate, 1024), |
232 | 0xf, 0xffff); | 230 | 0xf, 0xffff); |
233 | 231 | ||
232 | setup_irq(irq, &efm32_clock_event_irq); | ||
233 | |||
234 | return 0; | 234 | return 0; |
235 | 235 | ||
236 | err_get_irq: | 236 | err_get_irq: |
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 02268448dc85..58597fbcc046 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
18 | #include <linux/irqreturn.h> | 18 | #include <linux/irqreturn.h> |
19 | #include <linux/reset.h> | 19 | #include <linux/reset.h> |
20 | #include <linux/sched_clock.h> | ||
21 | #include <linux/of.h> | 20 | #include <linux/of.h> |
22 | #include <linux/of_address.h> | 21 | #include <linux/of_address.h> |
23 | #include <linux/of_irq.h> | 22 | #include <linux/of_irq.h> |
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = { | |||
137 | .dev_id = &sun5i_clockevent, | 136 | .dev_id = &sun5i_clockevent, |
138 | }; | 137 | }; |
139 | 138 | ||
140 | static u64 sun5i_timer_sched_read(void) | ||
141 | { | ||
142 | return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1)); | ||
143 | } | ||
144 | |||
145 | static void __init sun5i_timer_init(struct device_node *node) | 139 | static void __init sun5i_timer_init(struct device_node *node) |
146 | { | 140 | { |
147 | struct reset_control *rstc; | 141 | struct reset_control *rstc; |
@@ -172,16 +166,11 @@ static void __init sun5i_timer_init(struct device_node *node) | |||
172 | writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, | 166 | writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, |
173 | timer_base + TIMER_CTL_REG(1)); | 167 | timer_base + TIMER_CTL_REG(1)); |
174 | 168 | ||
175 | sched_clock_register(sun5i_timer_sched_read, 32, rate); | ||
176 | clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, | 169 | clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, |
177 | rate, 340, 32, clocksource_mmio_readl_down); | 170 | rate, 340, 32, clocksource_mmio_readl_down); |
178 | 171 | ||
179 | ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); | 172 | ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); |
180 | 173 | ||
181 | ret = setup_irq(irq, &sun5i_timer_irq); | ||
182 | if (ret) | ||
183 | pr_warn("failed to setup irq %d\n", irq); | ||
184 | |||
185 | /* Enable timer0 interrupt */ | 174 | /* Enable timer0 interrupt */ |
186 | val = readl(timer_base + TIMER_IRQ_EN_REG); | 175 | val = readl(timer_base + TIMER_IRQ_EN_REG); |
187 | writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); | 176 | writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); |
@@ -191,6 +180,10 @@ static void __init sun5i_timer_init(struct device_node *node) | |||
191 | 180 | ||
192 | clockevents_config_and_register(&sun5i_clockevent, rate, | 181 | clockevents_config_and_register(&sun5i_clockevent, rate, |
193 | TIMER_SYNC_TICKS, 0xffffffff); | 182 | TIMER_SYNC_TICKS, 0xffffffff); |
183 | |||
184 | ret = setup_irq(irq, &sun5i_timer_irq); | ||
185 | if (ret) | ||
186 | pr_warn("failed to setup irq %d\n", irq); | ||
194 | } | 187 | } |
195 | CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", | 188 | CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", |
196 | sun5i_timer_init); | 189 | sun5i_timer_init); |
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c index 38e68618513a..980151f34707 100644 --- a/drivers/cpuidle/cpuidle-mvebu-v7.c +++ b/drivers/cpuidle/cpuidle-mvebu-v7.c | |||
@@ -37,11 +37,11 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev, | |||
37 | deepidle = true; | 37 | deepidle = true; |
38 | 38 | ||
39 | ret = mvebu_v7_cpu_suspend(deepidle); | 39 | ret = mvebu_v7_cpu_suspend(deepidle); |
40 | cpu_pm_exit(); | ||
41 | |||
40 | if (ret) | 42 | if (ret) |
41 | return ret; | 43 | return ret; |
42 | 44 | ||
43 | cpu_pm_exit(); | ||
44 | |||
45 | return index; | 45 | return index; |
46 | } | 46 | } |
47 | 47 | ||
@@ -50,17 +50,17 @@ static struct cpuidle_driver armadaxp_idle_driver = { | |||
50 | .states[0] = ARM_CPUIDLE_WFI_STATE, | 50 | .states[0] = ARM_CPUIDLE_WFI_STATE, |
51 | .states[1] = { | 51 | .states[1] = { |
52 | .enter = mvebu_v7_enter_idle, | 52 | .enter = mvebu_v7_enter_idle, |
53 | .exit_latency = 10, | 53 | .exit_latency = 100, |
54 | .power_usage = 50, | 54 | .power_usage = 50, |
55 | .target_residency = 100, | 55 | .target_residency = 1000, |
56 | .name = "MV CPU IDLE", | 56 | .name = "MV CPU IDLE", |
57 | .desc = "CPU power down", | 57 | .desc = "CPU power down", |
58 | }, | 58 | }, |
59 | .states[2] = { | 59 | .states[2] = { |
60 | .enter = mvebu_v7_enter_idle, | 60 | .enter = mvebu_v7_enter_idle, |
61 | .exit_latency = 100, | 61 | .exit_latency = 1000, |
62 | .power_usage = 5, | 62 | .power_usage = 5, |
63 | .target_residency = 1000, | 63 | .target_residency = 10000, |
64 | .flags = MVEBU_V7_FLAG_DEEP_IDLE, | 64 | .flags = MVEBU_V7_FLAG_DEEP_IDLE, |
65 | .name = "MV CPU DEEP IDLE", | 65 | .name = "MV CPU DEEP IDLE", |
66 | .desc = "CPU and L2 Fabric power down", | 66 | .desc = "CPU and L2 Fabric power down", |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 4a5fd245014e..83aa55d6fa5d 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -97,6 +97,12 @@ | |||
97 | 97 | ||
98 | #define DRIVER_NAME "pl08xdmac" | 98 | #define DRIVER_NAME "pl08xdmac" |
99 | 99 | ||
100 | #define PL80X_DMA_BUSWIDTHS \ | ||
101 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | ||
102 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
103 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
104 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
105 | |||
100 | static struct amba_driver pl08x_amba_driver; | 106 | static struct amba_driver pl08x_amba_driver; |
101 | struct pl08x_driver_data; | 107 | struct pl08x_driver_data; |
102 | 108 | ||
@@ -2070,6 +2076,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2070 | pl08x->memcpy.device_pause = pl08x_pause; | 2076 | pl08x->memcpy.device_pause = pl08x_pause; |
2071 | pl08x->memcpy.device_resume = pl08x_resume; | 2077 | pl08x->memcpy.device_resume = pl08x_resume; |
2072 | pl08x->memcpy.device_terminate_all = pl08x_terminate_all; | 2078 | pl08x->memcpy.device_terminate_all = pl08x_terminate_all; |
2079 | pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS; | ||
2080 | pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS; | ||
2081 | pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM); | ||
2082 | pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
2073 | 2083 | ||
2074 | /* Initialize slave engine */ | 2084 | /* Initialize slave engine */ |
2075 | dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); | 2085 | dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); |
@@ -2086,6 +2096,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2086 | pl08x->slave.device_pause = pl08x_pause; | 2096 | pl08x->slave.device_pause = pl08x_pause; |
2087 | pl08x->slave.device_resume = pl08x_resume; | 2097 | pl08x->slave.device_resume = pl08x_resume; |
2088 | pl08x->slave.device_terminate_all = pl08x_terminate_all; | 2098 | pl08x->slave.device_terminate_all = pl08x_terminate_all; |
2099 | pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS; | ||
2100 | pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS; | ||
2101 | pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
2102 | pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
2089 | 2103 | ||
2090 | /* Get the platform data */ | 2104 | /* Get the platform data */ |
2091 | pl08x->pd = dev_get_platdata(&adev->dev); | 2105 | pl08x->pd = dev_get_platdata(&adev->dev); |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 1e1a4c567542..0b4fc6fb48ce 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -238,93 +238,126 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) | |||
238 | } | 238 | } |
239 | 239 | ||
240 | /* | 240 | /* |
241 | * atc_get_current_descriptors - | 241 | * atc_get_desc_by_cookie - get the descriptor of a cookie |
242 | * locate the descriptor which equal to physical address in DSCR | 242 | * @atchan: the DMA channel |
243 | * @atchan: the channel we want to start | 243 | * @cookie: the cookie to get the descriptor for |
244 | * @dscr_addr: physical descriptor address in DSCR | ||
245 | */ | 244 | */ |
246 | static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, | 245 | static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, |
247 | u32 dscr_addr) | 246 | dma_cookie_t cookie) |
248 | { | 247 | { |
249 | struct at_desc *desc, *_desc, *child, *desc_cur = NULL; | 248 | struct at_desc *desc, *_desc; |
250 | 249 | ||
251 | list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { | 250 | list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) { |
252 | if (desc->lli.dscr == dscr_addr) { | 251 | if (desc->txd.cookie == cookie) |
253 | desc_cur = desc; | 252 | return desc; |
254 | break; | 253 | } |
255 | } | ||
256 | 254 | ||
257 | list_for_each_entry(child, &desc->tx_list, desc_node) { | 255 | list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { |
258 | if (child->lli.dscr == dscr_addr) { | 256 | if (desc->txd.cookie == cookie) |
259 | desc_cur = child; | 257 | return desc; |
260 | break; | ||
261 | } | ||
262 | } | ||
263 | } | 258 | } |
264 | 259 | ||
265 | return desc_cur; | 260 | return NULL; |
266 | } | 261 | } |
267 | 262 | ||
268 | /* | 263 | /** |
269 | * atc_get_bytes_left - | 264 | * atc_calc_bytes_left - calculates the number of bytes left according to the |
270 | * Get the number of bytes residue in dma buffer, | 265 | * value read from CTRLA. |
271 | * @chan: the channel we want to start | 266 | * |
267 | * @current_len: the number of bytes left before reading CTRLA | ||
268 | * @ctrla: the value of CTRLA | ||
269 | * @desc: the descriptor containing the transfer width | ||
270 | */ | ||
271 | static inline int atc_calc_bytes_left(int current_len, u32 ctrla, | ||
272 | struct at_desc *desc) | ||
273 | { | ||
274 | return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * atc_calc_bytes_left_from_reg - calculates the number of bytes left according | ||
279 | * to the current value of CTRLA. | ||
280 | * | ||
281 | * @current_len: the number of bytes left before reading CTRLA | ||
282 | * @atchan: the channel to read CTRLA for | ||
283 | * @desc: the descriptor containing the transfer width | ||
284 | */ | ||
285 | static inline int atc_calc_bytes_left_from_reg(int current_len, | ||
286 | struct at_dma_chan *atchan, struct at_desc *desc) | ||
287 | { | ||
288 | u32 ctrla = channel_readl(atchan, CTRLA); | ||
289 | |||
290 | return atc_calc_bytes_left(current_len, ctrla, desc); | ||
291 | } | ||
292 | |||
293 | /** | ||
294 | * atc_get_bytes_left - get the number of bytes residue for a cookie | ||
295 | * @chan: DMA channel | ||
296 | * @cookie: transaction identifier to check status of | ||
272 | */ | 297 | */ |
273 | static int atc_get_bytes_left(struct dma_chan *chan) | 298 | static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) |
274 | { | 299 | { |
275 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 300 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
276 | struct at_dma *atdma = to_at_dma(chan->device); | ||
277 | int chan_id = atchan->chan_common.chan_id; | ||
278 | struct at_desc *desc_first = atc_first_active(atchan); | 301 | struct at_desc *desc_first = atc_first_active(atchan); |
279 | struct at_desc *desc_cur; | 302 | struct at_desc *desc; |
280 | int ret = 0, count = 0; | 303 | int ret; |
304 | u32 ctrla, dscr; | ||
281 | 305 | ||
282 | /* | 306 | /* |
283 | * Initialize necessary values in the first time. | 307 | * If the cookie doesn't match to the currently running transfer then |
284 | * remain_desc record remain desc length. | 308 | * we can return the total length of the associated DMA transfer, |
309 | * because it is still queued. | ||
285 | */ | 310 | */ |
286 | if (atchan->remain_desc == 0) | 311 | desc = atc_get_desc_by_cookie(atchan, cookie); |
287 | /* First descriptor embedds the transaction length */ | 312 | if (desc == NULL) |
288 | atchan->remain_desc = desc_first->len; | 313 | return -EINVAL; |
314 | else if (desc != desc_first) | ||
315 | return desc->total_len; | ||
289 | 316 | ||
290 | /* | 317 | /* cookie matches to the currently running transfer */ |
291 | * This happens when current descriptor transfer complete. | 318 | ret = desc_first->total_len; |
292 | * The residual buffer size should reduce current descriptor length. | ||
293 | */ | ||
294 | if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) { | ||
295 | clear_bit(ATC_IS_BTC, &atchan->status); | ||
296 | desc_cur = atc_get_current_descriptors(atchan, | ||
297 | channel_readl(atchan, DSCR)); | ||
298 | if (!desc_cur) { | ||
299 | ret = -EINVAL; | ||
300 | goto out; | ||
301 | } | ||
302 | 319 | ||
303 | count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) | 320 | if (desc_first->lli.dscr) { |
304 | << desc_first->tx_width; | 321 | /* hardware linked list transfer */ |
305 | if (atchan->remain_desc < count) { | 322 | |
306 | ret = -EINVAL; | 323 | /* |
307 | goto out; | 324 | * Calculate the residue by removing the length of the child |
325 | * descriptors already transferred from the total length. | ||
326 | * To get the current child descriptor we can use the value of | ||
327 | * the channel's DSCR register and compare it against the value | ||
328 | * of the hardware linked list structure of each child | ||
329 | * descriptor. | ||
330 | */ | ||
331 | |||
332 | ctrla = channel_readl(atchan, CTRLA); | ||
333 | rmb(); /* ensure CTRLA is read before DSCR */ | ||
334 | dscr = channel_readl(atchan, DSCR); | ||
335 | |||
336 | /* for the first descriptor we can be more accurate */ | ||
337 | if (desc_first->lli.dscr == dscr) | ||
338 | return atc_calc_bytes_left(ret, ctrla, desc_first); | ||
339 | |||
340 | ret -= desc_first->len; | ||
341 | list_for_each_entry(desc, &desc_first->tx_list, desc_node) { | ||
342 | if (desc->lli.dscr == dscr) | ||
343 | break; | ||
344 | |||
345 | ret -= desc->len; | ||
308 | } | 346 | } |
309 | 347 | ||
310 | atchan->remain_desc -= count; | ||
311 | ret = atchan->remain_desc; | ||
312 | } else { | ||
313 | /* | 348 | /* |
314 | * Get residual bytes when current | 349 | * For the last descriptor in the chain we can calculate |
315 | * descriptor transfer in progress. | 350 | * the remaining bytes using the channel's register. |
351 | * Note that the transfer width of the first and last | ||
352 | * descriptor may differ. | ||
316 | */ | 353 | */ |
317 | count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) | 354 | if (!desc->lli.dscr) |
318 | << (desc_first->tx_width); | 355 | ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); |
319 | ret = atchan->remain_desc - count; | 356 | } else { |
357 | /* single transfer */ | ||
358 | ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); | ||
320 | } | 359 | } |
321 | /* | ||
322 | * Check fifo empty. | ||
323 | */ | ||
324 | if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) | ||
325 | atc_issue_pending(chan); | ||
326 | 360 | ||
327 | out: | ||
328 | return ret; | 361 | return ret; |
329 | } | 362 | } |
330 | 363 | ||
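As a side note, the hunk above replaces the old remain_desc bookkeeping with a walk of the hardware linked list: the residue starts at the transfer's total length and every child descriptor already passed (identified by comparing its link field against the channel's DSCR register) is subtracted. A minimal user-space sketch of that walk, assuming a simplified descriptor model whose struct and field names are illustrative and not the driver's:

#include <stddef.h>
#include <stdio.h>

/* illustrative stand-in for the driver's descriptor chain */
struct demo_desc {
        size_t len;                     /* bytes covered by this descriptor */
        const struct demo_desc *next;   /* mimics lli.dscr: link to the next descriptor */
};

/* Subtract every descriptor that comes before the one the controller is
 * currently processing; what is left is the residue at descriptor granularity. */
static size_t residue(const struct demo_desc *first, size_t total_len,
                      const struct demo_desc *current)
{
        size_t left = total_len;
        const struct demo_desc *d;

        for (d = first; d && d != current; d = d->next)
                left -= d->len;
        return left;
}

int main(void)
{
        struct demo_desc d2 = { .len = 256, .next = NULL };
        struct demo_desc d1 = { .len = 512, .next = &d2 };
        struct demo_desc d0 = { .len = 512, .next = &d1 };

        /* controller is on d2: d0 and d1 are done, at most 256 bytes remain */
        printf("%zu bytes left\n", residue(&d0, 1280, &d2));
        return 0;
}

In the real driver the in-flight descriptor is refined further using CTRLA's remaining byte count shifted by the transfer width; the sketch stops at whole-descriptor granularity.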
@@ -539,8 +572,6 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) | |||
539 | /* Give information to tasklet */ | 572 | /* Give information to tasklet */ |
540 | set_bit(ATC_IS_ERROR, &atchan->status); | 573 | set_bit(ATC_IS_ERROR, &atchan->status); |
541 | } | 574 | } |
542 | if (pending & AT_DMA_BTC(i)) | ||
543 | set_bit(ATC_IS_BTC, &atchan->status); | ||
544 | tasklet_schedule(&atchan->tasklet); | 575 | tasklet_schedule(&atchan->tasklet); |
545 | ret = IRQ_HANDLED; | 576 | ret = IRQ_HANDLED; |
546 | } | 577 | } |
@@ -653,14 +684,18 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
653 | desc->lli.ctrlb = ctrlb; | 684 | desc->lli.ctrlb = ctrlb; |
654 | 685 | ||
655 | desc->txd.cookie = 0; | 686 | desc->txd.cookie = 0; |
687 | desc->len = xfer_count << src_width; | ||
656 | 688 | ||
657 | atc_desc_chain(&first, &prev, desc); | 689 | atc_desc_chain(&first, &prev, desc); |
658 | } | 690 | } |
659 | 691 | ||
660 | /* First descriptor of the chain embeds additional information */ | 692 | /* First descriptor of the chain embeds additional information */ |
661 | first->txd.cookie = -EBUSY; | 693 | first->txd.cookie = -EBUSY; |
662 | first->len = len; | 694 | first->total_len = len; |
695 | |||
696 | /* set transfer width for the calculation of the residue */ | ||
663 | first->tx_width = src_width; | 697 | first->tx_width = src_width; |
698 | prev->tx_width = src_width; | ||
664 | 699 | ||
665 | /* set end-of-link to the last link descriptor of list */ | 700 | /* set end-of-link to the last link descriptor of list */ |
666 | set_desc_eol(desc); | 701 | set_desc_eol(desc); |
@@ -752,6 +787,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
752 | | ATC_SRC_WIDTH(mem_width) | 787 | | ATC_SRC_WIDTH(mem_width) |
753 | | len >> mem_width; | 788 | | len >> mem_width; |
754 | desc->lli.ctrlb = ctrlb; | 789 | desc->lli.ctrlb = ctrlb; |
790 | desc->len = len; | ||
755 | 791 | ||
756 | atc_desc_chain(&first, &prev, desc); | 792 | atc_desc_chain(&first, &prev, desc); |
757 | total_len += len; | 793 | total_len += len; |
@@ -792,6 +828,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
792 | | ATC_DST_WIDTH(mem_width) | 828 | | ATC_DST_WIDTH(mem_width) |
793 | | len >> reg_width; | 829 | | len >> reg_width; |
794 | desc->lli.ctrlb = ctrlb; | 830 | desc->lli.ctrlb = ctrlb; |
831 | desc->len = len; | ||
795 | 832 | ||
796 | atc_desc_chain(&first, &prev, desc); | 833 | atc_desc_chain(&first, &prev, desc); |
797 | total_len += len; | 834 | total_len += len; |
@@ -806,8 +843,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
806 | 843 | ||
807 | /* First descriptor of the chain embeds additional information */ | 844 | /* First descriptor of the chain embeds additional information */ |
808 | first->txd.cookie = -EBUSY; | 845 | first->txd.cookie = -EBUSY; |
809 | first->len = total_len; | 846 | first->total_len = total_len; |
847 | |||
848 | /* set transfer width for the calculation of the residue */ | ||
810 | first->tx_width = reg_width; | 849 | first->tx_width = reg_width; |
850 | prev->tx_width = reg_width; | ||
811 | 851 | ||
812 | /* first link descriptor of list is responsible for flags */ | 852 | /* first link descriptor of list is responsible for flags */ |
813 | first->txd.flags = flags; /* client is in control of this ack */ | 853 | first->txd.flags = flags; /* client is in control of this ack */ |
@@ -872,6 +912,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | |||
872 | | ATC_FC_MEM2PER | 912 | | ATC_FC_MEM2PER |
873 | | ATC_SIF(atchan->mem_if) | 913 | | ATC_SIF(atchan->mem_if) |
874 | | ATC_DIF(atchan->per_if); | 914 | | ATC_DIF(atchan->per_if); |
915 | desc->len = period_len; | ||
875 | break; | 916 | break; |
876 | 917 | ||
877 | case DMA_DEV_TO_MEM: | 918 | case DMA_DEV_TO_MEM: |
@@ -883,6 +924,7 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | |||
883 | | ATC_FC_PER2MEM | 924 | | ATC_FC_PER2MEM |
884 | | ATC_SIF(atchan->per_if) | 925 | | ATC_SIF(atchan->per_if) |
885 | | ATC_DIF(atchan->mem_if); | 926 | | ATC_DIF(atchan->mem_if); |
927 | desc->len = period_len; | ||
886 | break; | 928 | break; |
887 | 929 | ||
888 | default: | 930 | default: |
@@ -964,7 +1006,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
964 | 1006 | ||
965 | /* First descriptor of the chain embeds additional information */ | 1007 | /* First descriptor of the chain embeds additional information */ |
966 | first->txd.cookie = -EBUSY; | 1008 | first->txd.cookie = -EBUSY; |
967 | first->len = buf_len; | 1009 | first->total_len = buf_len; |
968 | first->tx_width = reg_width; | 1010 | first->tx_width = reg_width; |
969 | 1011 | ||
970 | return &first->txd; | 1012 | return &first->txd; |
@@ -1118,7 +1160,7 @@ atc_tx_status(struct dma_chan *chan, | |||
1118 | spin_lock_irqsave(&atchan->lock, flags); | 1160 | spin_lock_irqsave(&atchan->lock, flags); |
1119 | 1161 | ||
1120 | /* Get number of bytes left in the active transactions */ | 1162 | /* Get number of bytes left in the active transactions */ |
1121 | bytes = atc_get_bytes_left(chan); | 1163 | bytes = atc_get_bytes_left(chan, cookie); |
1122 | 1164 | ||
1123 | spin_unlock_irqrestore(&atchan->lock, flags); | 1165 | spin_unlock_irqrestore(&atchan->lock, flags); |
1124 | 1166 | ||
@@ -1214,7 +1256,6 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) | |||
1214 | 1256 | ||
1215 | spin_lock_irqsave(&atchan->lock, flags); | 1257 | spin_lock_irqsave(&atchan->lock, flags); |
1216 | atchan->descs_allocated = i; | 1258 | atchan->descs_allocated = i; |
1217 | atchan->remain_desc = 0; | ||
1218 | list_splice(&tmp_list, &atchan->free_list); | 1259 | list_splice(&tmp_list, &atchan->free_list); |
1219 | dma_cookie_init(chan); | 1260 | dma_cookie_init(chan); |
1220 | spin_unlock_irqrestore(&atchan->lock, flags); | 1261 | spin_unlock_irqrestore(&atchan->lock, flags); |
@@ -1257,7 +1298,6 @@ static void atc_free_chan_resources(struct dma_chan *chan) | |||
1257 | list_splice_init(&atchan->free_list, &list); | 1298 | list_splice_init(&atchan->free_list, &list); |
1258 | atchan->descs_allocated = 0; | 1299 | atchan->descs_allocated = 0; |
1259 | atchan->status = 0; | 1300 | atchan->status = 0; |
1260 | atchan->remain_desc = 0; | ||
1261 | 1301 | ||
1262 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); | 1302 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); |
1263 | } | 1303 | } |
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index d6bba6c636c2..2727ca560572 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -181,8 +181,9 @@ struct at_lli { | |||
181 | * @at_lli: hardware lli structure | 181 | * @at_lli: hardware lli structure |
182 | * @txd: support for the async_tx api | 182 | * @txd: support for the async_tx api |
183 | * @desc_node: node on the channel descriptors list | 183 | * @desc_node: node on the channel descriptors list |
184 | * @len: total transaction bytecount | 184 | * @len: descriptor byte count |
185 | * @tx_width: transfer width | 185 | * @tx_width: transfer width |
186 | * @total_len: total transaction byte count | ||
186 | */ | 187 | */ |
187 | struct at_desc { | 188 | struct at_desc { |
188 | /* FIRST values the hardware uses */ | 189 | /* FIRST values the hardware uses */ |
@@ -194,6 +195,7 @@ struct at_desc { | |||
194 | struct list_head desc_node; | 195 | struct list_head desc_node; |
195 | size_t len; | 196 | size_t len; |
196 | u32 tx_width; | 197 | u32 tx_width; |
198 | size_t total_len; | ||
197 | }; | 199 | }; |
198 | 200 | ||
199 | static inline struct at_desc * | 201 | static inline struct at_desc * |
@@ -213,7 +215,6 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd) | |||
213 | enum atc_status { | 215 | enum atc_status { |
214 | ATC_IS_ERROR = 0, | 216 | ATC_IS_ERROR = 0, |
215 | ATC_IS_PAUSED = 1, | 217 | ATC_IS_PAUSED = 1, |
216 | ATC_IS_BTC = 2, | ||
217 | ATC_IS_CYCLIC = 24, | 218 | ATC_IS_CYCLIC = 24, |
218 | }; | 219 | }; |
219 | 220 | ||
@@ -231,7 +232,6 @@ enum atc_status { | |||
231 | * @save_cfg: configuration register that is saved on suspend/resume cycle | 232 | * @save_cfg: configuration register that is saved on suspend/resume cycle |
232 | * @save_dscr: for cyclic operations, preserve next descriptor address in | 233 | * @save_dscr: for cyclic operations, preserve next descriptor address in |
233 | * the cyclic list on suspend/resume cycle | 234 | * the cyclic list on suspend/resume cycle |
234 | * @remain_desc: to save remain desc length | ||
235 | * @dma_sconfig: configuration for slave transfers, passed via | 235 | * @dma_sconfig: configuration for slave transfers, passed via |
236 | * .device_config | 236 | * .device_config |
237 | * @lock: serializes enqueue/dequeue operations to descriptors lists | 237 | * @lock: serializes enqueue/dequeue operations to descriptors lists |
@@ -251,7 +251,6 @@ struct at_dma_chan { | |||
251 | struct tasklet_struct tasklet; | 251 | struct tasklet_struct tasklet; |
252 | u32 save_cfg; | 252 | u32 save_cfg; |
253 | u32 save_dscr; | 253 | u32 save_dscr; |
254 | u32 remain_desc; | ||
255 | struct dma_slave_config dma_sconfig; | 254 | struct dma_slave_config dma_sconfig; |
256 | 255 | ||
257 | spinlock_t lock; | 256 | spinlock_t lock; |
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 6565a361e7e5..b2c3ae071429 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c | |||
@@ -26,6 +26,8 @@ | |||
26 | 26 | ||
27 | #include "internal.h" | 27 | #include "internal.h" |
28 | 28 | ||
29 | #define DRV_NAME "dw_dmac" | ||
30 | |||
29 | static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, | 31 | static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, |
30 | struct of_dma *ofdma) | 32 | struct of_dma *ofdma) |
31 | { | 33 | { |
@@ -284,7 +286,7 @@ static struct platform_driver dw_driver = { | |||
284 | .remove = dw_remove, | 286 | .remove = dw_remove, |
285 | .shutdown = dw_shutdown, | 287 | .shutdown = dw_shutdown, |
286 | .driver = { | 288 | .driver = { |
287 | .name = "dw_dmac", | 289 | .name = DRV_NAME, |
288 | .pm = &dw_dev_pm_ops, | 290 | .pm = &dw_dev_pm_ops, |
289 | .of_match_table = of_match_ptr(dw_dma_of_id_table), | 291 | .of_match_table = of_match_ptr(dw_dma_of_id_table), |
290 | .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), | 292 | .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), |
@@ -305,3 +307,4 @@ module_exit(dw_exit); | |||
305 | 307 | ||
306 | MODULE_LICENSE("GPL v2"); | 308 | MODULE_LICENSE("GPL v2"); |
307 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); | 309 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); |
310 | MODULE_ALIAS("platform:" DRV_NAME); | ||
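The change above hoists the driver name into a single DRV_NAME define so that the platform driver's .name and the module alias cannot drift apart; the "platform:" alias is what lets udev autoload the module when a matching platform device appears. A condensed sketch of the pattern, with generic names and only buildable inside a kernel tree:

#include <linux/module.h>
#include <linux/platform_device.h>

#define DRV_NAME "example_dmac"          /* single source of truth for the name */

static struct platform_driver example_driver = {
        .driver = {
                .name = DRV_NAME,        /* matched against platform device names */
        },
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL v2");
/* "platform:" prefix + name allows automatic modprobe on device creation */
MODULE_ALIAS("platform:" DRV_NAME);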
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 18c0a131e4e4..66a0efb9651d 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma) | |||
531 | dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); | 531 | dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); |
532 | } | 532 | } |
533 | 533 | ||
534 | /* Set bits of CONFIG register with dynamic context switching */ | ||
535 | if (readl(sdma->regs + SDMA_H_CONFIG) == 0) | ||
536 | writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | ||
537 | |||
534 | return ret ? 0 : -ETIMEDOUT; | 538 | return ret ? 0 : -ETIMEDOUT; |
535 | } | 539 | } |
536 | 540 | ||
@@ -1394,9 +1398,6 @@ static int sdma_init(struct sdma_engine *sdma) | |||
1394 | 1398 | ||
1395 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); | 1399 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); |
1396 | 1400 | ||
1397 | /* Set bits of CONFIG register with given context switching mode */ | ||
1398 | writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | ||
1399 | |||
1400 | /* Initializes channel's priorities */ | 1401 | /* Initializes channel's priorities */ |
1401 | sdma_set_channel_priority(&sdma->channel[0], 7); | 1402 | sdma_set_channel_priority(&sdma->channel[0], 7); |
1402 | 1403 | ||
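The relocation above defers programming SDMA_H_CONFIG until channel 0 first runs, and only writes the context-switch mode when the register still reads zero, so a value set earlier (for instance across a resume path) is left untouched. A rough stand-alone sketch of that "program only if still at the reset value" idiom, using placeholder register accessors rather than the SDMA ones:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;       /* stands in for the memory-mapped CONFIG register */

static uint32_t reg_read(void)      { return fake_reg; }
static void reg_write(uint32_t v)   { fake_reg = v; }

#define CONFIG_CSM 0x3  /* placeholder for the dynamic context-switch mode bits */

static void maybe_enable_csm(void)
{
        /* only touch the register if it is still at its reset value */
        if (reg_read() == 0)
                reg_write(CONFIG_CSM);
}

int main(void)
{
        maybe_enable_csm();     /* programs CSM */
        maybe_enable_csm();     /* second call is a no-op */
        printf("CONFIG = 0x%x\n", (unsigned)reg_read());
        return 0;
}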
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 910ff8ab9c9c..d8135adb2238 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | |||
@@ -645,6 +645,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, | |||
645 | pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); | 645 | pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); |
646 | pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); | 646 | pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); |
647 | 647 | ||
648 | init_sdma_vm(dqm, q, qpd); | ||
648 | retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, | 649 | retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, |
649 | &q->gart_mqd_addr, &q->properties); | 650 | &q->gart_mqd_addr, &q->properties); |
650 | if (retval != 0) { | 651 | if (retval != 0) { |
@@ -652,7 +653,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, | |||
652 | return retval; | 653 | return retval; |
653 | } | 654 | } |
654 | 655 | ||
655 | init_sdma_vm(dqm, q, qpd); | 656 | retval = mqd->load_mqd(mqd, q->mqd, 0, |
657 | 0, NULL); | ||
658 | if (retval != 0) { | ||
659 | deallocate_sdma_queue(dqm, q->sdma_id); | ||
660 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); | ||
661 | return retval; | ||
662 | } | ||
663 | |||
656 | return 0; | 664 | return 0; |
657 | } | 665 | } |
658 | 666 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index e415a2a9207e..c7d298e62c96 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | |||
@@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, | |||
44 | BUG_ON(!kq || !dev); | 44 | BUG_ON(!kq || !dev); |
45 | BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); | 45 | BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); |
46 | 46 | ||
47 | pr_debug("kfd: In func %s initializing queue type %d size %d\n", | 47 | pr_debug("amdkfd: In func %s initializing queue type %d size %d\n", |
48 | __func__, KFD_QUEUE_TYPE_HIQ, queue_size); | 48 | __func__, KFD_QUEUE_TYPE_HIQ, queue_size); |
49 | 49 | ||
50 | nop.opcode = IT_NOP; | 50 | nop.opcode = IT_NOP; |
@@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, | |||
69 | 69 | ||
70 | prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); | 70 | prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); |
71 | 71 | ||
72 | if (prop.doorbell_ptr == NULL) | 72 | if (prop.doorbell_ptr == NULL) { |
73 | pr_err("amdkfd: error init doorbell"); | ||
73 | goto err_get_kernel_doorbell; | 74 | goto err_get_kernel_doorbell; |
75 | } | ||
74 | 76 | ||
75 | retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); | 77 | retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); |
76 | if (retval != 0) | 78 | if (retval != 0) { |
79 | pr_err("amdkfd: error init pq queues size (%d)\n", queue_size); | ||
77 | goto err_pq_allocate_vidmem; | 80 | goto err_pq_allocate_vidmem; |
81 | } | ||
78 | 82 | ||
79 | kq->pq_kernel_addr = kq->pq->cpu_ptr; | 83 | kq->pq_kernel_addr = kq->pq->cpu_ptr; |
80 | kq->pq_gpu_addr = kq->pq->gpu_addr; | 84 | kq->pq_gpu_addr = kq->pq->gpu_addr; |
@@ -165,10 +169,8 @@ err_rptr_allocate_vidmem: | |||
165 | err_eop_allocate_vidmem: | 169 | err_eop_allocate_vidmem: |
166 | kfd_gtt_sa_free(dev, kq->pq); | 170 | kfd_gtt_sa_free(dev, kq->pq); |
167 | err_pq_allocate_vidmem: | 171 | err_pq_allocate_vidmem: |
168 | pr_err("kfd: error init pq\n"); | ||
169 | kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); | 172 | kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); |
170 | err_get_kernel_doorbell: | 173 | err_get_kernel_doorbell: |
171 | pr_err("kfd: error init doorbell"); | ||
172 | return false; | 174 | return false; |
173 | 175 | ||
174 | } | 176 | } |
@@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq) | |||
187 | else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) | 189 | else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) |
188 | kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); | 190 | kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); |
189 | 191 | ||
192 | kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj); | ||
193 | |||
190 | kfd_gtt_sa_free(kq->dev, kq->rptr_mem); | 194 | kfd_gtt_sa_free(kq->dev, kq->rptr_mem); |
191 | kfd_gtt_sa_free(kq->dev, kq->wptr_mem); | 195 | kfd_gtt_sa_free(kq->dev, kq->wptr_mem); |
192 | kq->ops_asic_specific.uninitialize(kq); | 196 | kq->ops_asic_specific.uninitialize(kq); |
@@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq, | |||
211 | queue_address = (unsigned int *)kq->pq_kernel_addr; | 215 | queue_address = (unsigned int *)kq->pq_kernel_addr; |
212 | queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); | 216 | queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); |
213 | 217 | ||
214 | pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", | 218 | pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", |
215 | __func__, rptr, wptr, queue_address); | 219 | __func__, rptr, wptr, queue_address); |
216 | 220 | ||
217 | available_size = (rptr - 1 - wptr + queue_size_dwords) % | 221 | available_size = (rptr - 1 - wptr + queue_size_dwords) % |
@@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, | |||
296 | } | 300 | } |
297 | 301 | ||
298 | if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { | 302 | if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { |
299 | pr_err("kfd: failed to init kernel queue\n"); | 303 | pr_err("amdkfd: failed to init kernel queue\n"); |
300 | kfree(kq); | 304 | kfree(kq); |
301 | return NULL; | 305 | return NULL; |
302 | } | 306 | } |
@@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) | |||
319 | 323 | ||
320 | BUG_ON(!dev); | 324 | BUG_ON(!dev); |
321 | 325 | ||
322 | pr_err("kfd: starting kernel queue test\n"); | 326 | pr_err("amdkfd: starting kernel queue test\n"); |
323 | 327 | ||
324 | kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); | 328 | kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); |
325 | BUG_ON(!kq); | 329 | BUG_ON(!kq); |
@@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) | |||
330 | buffer[i] = kq->nop_packet; | 334 | buffer[i] = kq->nop_packet; |
331 | kq->ops.submit_packet(kq); | 335 | kq->ops.submit_packet(kq); |
332 | 336 | ||
333 | pr_err("kfd: ending kernel queue test\n"); | 337 | pr_err("amdkfd: ending kernel queue test\n"); |
334 | } | 338 | } |
335 | 339 | ||
336 | 340 | ||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 6b6b07ff720b..679b10e34fb5 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -43,9 +43,10 @@ | |||
43 | #include "drm_crtc_internal.h" | 43 | #include "drm_crtc_internal.h" |
44 | #include "drm_internal.h" | 44 | #include "drm_internal.h" |
45 | 45 | ||
46 | static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | 46 | static struct drm_framebuffer * |
47 | struct drm_mode_fb_cmd2 *r, | 47 | internal_framebuffer_create(struct drm_device *dev, |
48 | struct drm_file *file_priv); | 48 | struct drm_mode_fb_cmd2 *r, |
49 | struct drm_file *file_priv); | ||
49 | 50 | ||
50 | /* Avoid boilerplate. I'm tired of typing. */ | 51 | /* Avoid boilerplate. I'm tired of typing. */ |
51 | #define DRM_ENUM_NAME_FN(fnname, list) \ | 52 | #define DRM_ENUM_NAME_FN(fnname, list) \ |
@@ -524,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb) | |||
524 | } | 525 | } |
525 | EXPORT_SYMBOL(drm_framebuffer_reference); | 526 | EXPORT_SYMBOL(drm_framebuffer_reference); |
526 | 527 | ||
527 | static void drm_framebuffer_free_bug(struct kref *kref) | ||
528 | { | ||
529 | BUG(); | ||
530 | } | ||
531 | |||
532 | static void __drm_framebuffer_unreference(struct drm_framebuffer *fb) | ||
533 | { | ||
534 | DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount)); | ||
535 | kref_put(&fb->refcount, drm_framebuffer_free_bug); | ||
536 | } | ||
537 | |||
538 | /** | 528 | /** |
539 | * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr | 529 | * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr |
540 | * @fb: fb to unregister | 530 | * @fb: fb to unregister |
@@ -1319,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane) | |||
1319 | return; | 1309 | return; |
1320 | } | 1310 | } |
1321 | /* disconnect the plane from the fb and crtc: */ | 1311 | /* disconnect the plane from the fb and crtc: */ |
1322 | __drm_framebuffer_unreference(plane->old_fb); | 1312 | drm_framebuffer_unreference(plane->old_fb); |
1323 | plane->old_fb = NULL; | 1313 | plane->old_fb = NULL; |
1324 | plane->fb = NULL; | 1314 | plane->fb = NULL; |
1325 | plane->crtc = NULL; | 1315 | plane->crtc = NULL; |
@@ -2908,13 +2898,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, | |||
2908 | */ | 2898 | */ |
2909 | if (req->flags & DRM_MODE_CURSOR_BO) { | 2899 | if (req->flags & DRM_MODE_CURSOR_BO) { |
2910 | if (req->handle) { | 2900 | if (req->handle) { |
2911 | fb = add_framebuffer_internal(dev, &fbreq, file_priv); | 2901 | fb = internal_framebuffer_create(dev, &fbreq, file_priv); |
2912 | if (IS_ERR(fb)) { | 2902 | if (IS_ERR(fb)) { |
2913 | DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); | 2903 | DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); |
2914 | return PTR_ERR(fb); | 2904 | return PTR_ERR(fb); |
2915 | } | 2905 | } |
2916 | |||
2917 | drm_framebuffer_reference(fb); | ||
2918 | } else { | 2906 | } else { |
2919 | fb = NULL; | 2907 | fb = NULL; |
2920 | } | 2908 | } |
@@ -3267,9 +3255,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) | |||
3267 | return 0; | 3255 | return 0; |
3268 | } | 3256 | } |
3269 | 3257 | ||
3270 | static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | 3258 | static struct drm_framebuffer * |
3271 | struct drm_mode_fb_cmd2 *r, | 3259 | internal_framebuffer_create(struct drm_device *dev, |
3272 | struct drm_file *file_priv) | 3260 | struct drm_mode_fb_cmd2 *r, |
3261 | struct drm_file *file_priv) | ||
3273 | { | 3262 | { |
3274 | struct drm_mode_config *config = &dev->mode_config; | 3263 | struct drm_mode_config *config = &dev->mode_config; |
3275 | struct drm_framebuffer *fb; | 3264 | struct drm_framebuffer *fb; |
@@ -3301,12 +3290,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | |||
3301 | return fb; | 3290 | return fb; |
3302 | } | 3291 | } |
3303 | 3292 | ||
3304 | mutex_lock(&file_priv->fbs_lock); | ||
3305 | r->fb_id = fb->base.id; | ||
3306 | list_add(&fb->filp_head, &file_priv->fbs); | ||
3307 | DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); | ||
3308 | mutex_unlock(&file_priv->fbs_lock); | ||
3309 | |||
3310 | return fb; | 3293 | return fb; |
3311 | } | 3294 | } |
3312 | 3295 | ||
@@ -3328,15 +3311,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | |||
3328 | int drm_mode_addfb2(struct drm_device *dev, | 3311 | int drm_mode_addfb2(struct drm_device *dev, |
3329 | void *data, struct drm_file *file_priv) | 3312 | void *data, struct drm_file *file_priv) |
3330 | { | 3313 | { |
3314 | struct drm_mode_fb_cmd2 *r = data; | ||
3331 | struct drm_framebuffer *fb; | 3315 | struct drm_framebuffer *fb; |
3332 | 3316 | ||
3333 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 3317 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
3334 | return -EINVAL; | 3318 | return -EINVAL; |
3335 | 3319 | ||
3336 | fb = add_framebuffer_internal(dev, data, file_priv); | 3320 | fb = internal_framebuffer_create(dev, r, file_priv); |
3337 | if (IS_ERR(fb)) | 3321 | if (IS_ERR(fb)) |
3338 | return PTR_ERR(fb); | 3322 | return PTR_ERR(fb); |
3339 | 3323 | ||
3324 | /* Transfer ownership to the filp for reaping on close */ | ||
3325 | |||
3326 | DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); | ||
3327 | mutex_lock(&file_priv->fbs_lock); | ||
3328 | r->fb_id = fb->base.id; | ||
3329 | list_add(&fb->filp_head, &file_priv->fbs); | ||
3330 | mutex_unlock(&file_priv->fbs_lock); | ||
3331 | |||
3340 | return 0; | 3332 | return 0; |
3341 | } | 3333 | } |
3342 | 3334 | ||
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 9a5b68717ec8..379ab4555756 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, | |||
733 | struct drm_dp_sideband_msg_tx *txmsg) | 733 | struct drm_dp_sideband_msg_tx *txmsg) |
734 | { | 734 | { |
735 | bool ret; | 735 | bool ret; |
736 | mutex_lock(&mgr->qlock); | 736 | |
737 | /* | ||
738 | * All updates to txmsg->state are protected by mgr->qlock, and the two | ||
739 | * cases we check here are terminal states. For those the barriers | ||
740 | * provided by the wake_up/wait_event pair are enough. | ||
741 | */ | ||
737 | ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || | 742 | ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || |
738 | txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); | 743 | txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); |
739 | mutex_unlock(&mgr->qlock); | ||
740 | return ret; | 744 | return ret; |
741 | } | 745 | } |
742 | 746 | ||
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, | |||
1363 | return 0; | 1367 | return 0; |
1364 | } | 1368 | } |
1365 | 1369 | ||
1366 | /* must be called holding qlock */ | ||
1367 | static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) | 1370 | static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) |
1368 | { | 1371 | { |
1369 | struct drm_dp_sideband_msg_tx *txmsg; | 1372 | struct drm_dp_sideband_msg_tx *txmsg; |
1370 | int ret; | 1373 | int ret; |
1371 | 1374 | ||
1375 | WARN_ON(!mutex_is_locked(&mgr->qlock)); | ||
1376 | |||
1372 | /* construct a chunk from the first msg in the tx_msg queue */ | 1377 | /* construct a chunk from the first msg in the tx_msg queue */ |
1373 | if (list_empty(&mgr->tx_msg_downq)) { | 1378 | if (list_empty(&mgr->tx_msg_downq)) { |
1374 | mgr->tx_down_in_progress = false; | 1379 | mgr->tx_down_in_progress = false; |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 7fc6f8bd4821..1134526286c8 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -403,7 +403,7 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) | |||
403 | unsigned rem; | 403 | unsigned rem; |
404 | 404 | ||
405 | rem = do_div(tmp, alignment); | 405 | rem = do_div(tmp, alignment); |
406 | if (tmp) | 406 | if (rem) |
407 | start += alignment - rem; | 407 | start += alignment - rem; |
408 | } | 408 | } |
409 | 409 | ||
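The one-character fix above matters because do_div() leaves the quotient in its first argument and returns the remainder, and rounding up to an alignment boundary must key off the remainder; testing the quotient adds a bogus offset whenever the start address is larger than the alignment. A plain C sketch of the intended behaviour, using ordinary 64-bit division instead of the kernel's do_div helper:

#include <stdint.h>
#include <stdio.h>

/* Round 'start' up to the next multiple of 'alignment' (alignment > 0). */
static uint64_t align_up(uint64_t start, unsigned alignment)
{
        uint64_t rem = start % alignment;

        if (rem)                        /* only adjust when not already aligned */
                start += alignment - rem;
        return start;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)align_up(4096, 4096)); /* 4096 */
        printf("%llu\n", (unsigned long long)align_up(4097, 4096)); /* 8192 */
        return 0;
}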
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index a5e74612100e..0a6780367d28 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig | |||
@@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI | |||
50 | 50 | ||
51 | config DRM_EXYNOS_DP | 51 | config DRM_EXYNOS_DP |
52 | bool "EXYNOS DRM DP driver support" | 52 | bool "EXYNOS DRM DP driver support" |
53 | depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) | 53 | depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) |
54 | default DRM_EXYNOS | 54 | default DRM_EXYNOS |
55 | select DRM_PANEL | 55 | select DRM_PANEL |
56 | help | 56 | help |
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 63f02e2380ae..970046199608 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c | |||
@@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev) | |||
888 | of_node_put(i80_if_timings); | 888 | of_node_put(i80_if_timings); |
889 | 889 | ||
890 | ctx->regs = of_iomap(dev->of_node, 0); | 890 | ctx->regs = of_iomap(dev->of_node, 0); |
891 | if (IS_ERR(ctx->regs)) { | 891 | if (!ctx->regs) { |
892 | ret = PTR_ERR(ctx->regs); | 892 | ret = -ENOMEM; |
893 | goto err_del_component; | 893 | goto err_del_component; |
894 | } | 894 | } |
895 | 895 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c deleted file mode 100644 index ba9b3d5ed672..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ /dev/null | |||
@@ -1,245 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
3 | * Authors: | ||
4 | * Inki Dae <inki.dae@samsung.com> | ||
5 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
6 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <drm/drmP.h> | ||
15 | #include <drm/drm_crtc_helper.h> | ||
16 | |||
17 | #include <drm/exynos_drm.h> | ||
18 | #include "exynos_drm_drv.h" | ||
19 | #include "exynos_drm_encoder.h" | ||
20 | #include "exynos_drm_connector.h" | ||
21 | |||
22 | #define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ | ||
23 | drm_connector) | ||
24 | |||
25 | struct exynos_drm_connector { | ||
26 | struct drm_connector drm_connector; | ||
27 | uint32_t encoder_id; | ||
28 | struct exynos_drm_display *display; | ||
29 | }; | ||
30 | |||
31 | static int exynos_drm_connector_get_modes(struct drm_connector *connector) | ||
32 | { | ||
33 | struct exynos_drm_connector *exynos_connector = | ||
34 | to_exynos_connector(connector); | ||
35 | struct exynos_drm_display *display = exynos_connector->display; | ||
36 | struct edid *edid = NULL; | ||
37 | unsigned int count = 0; | ||
38 | int ret; | ||
39 | |||
40 | /* | ||
41 | * if get_edid() exists then get_edid() callback of hdmi side | ||
42 | * is called to get edid data through i2c interface else | ||
43 | * get timing from the FIMD driver(display controller). | ||
44 | * | ||
45 | * P.S. in case of lcd panel, count is always 1 if success | ||
46 | * because lcd panel has only one mode. | ||
47 | */ | ||
48 | if (display->ops->get_edid) { | ||
49 | edid = display->ops->get_edid(display, connector); | ||
50 | if (IS_ERR_OR_NULL(edid)) { | ||
51 | ret = PTR_ERR(edid); | ||
52 | edid = NULL; | ||
53 | DRM_ERROR("Panel operation get_edid failed %d\n", ret); | ||
54 | goto out; | ||
55 | } | ||
56 | |||
57 | count = drm_add_edid_modes(connector, edid); | ||
58 | if (!count) { | ||
59 | DRM_ERROR("Add edid modes failed %d\n", count); | ||
60 | goto out; | ||
61 | } | ||
62 | |||
63 | drm_mode_connector_update_edid_property(connector, edid); | ||
64 | } else { | ||
65 | struct exynos_drm_panel_info *panel; | ||
66 | struct drm_display_mode *mode = drm_mode_create(connector->dev); | ||
67 | if (!mode) { | ||
68 | DRM_ERROR("failed to create a new display mode.\n"); | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | if (display->ops->get_panel) | ||
73 | panel = display->ops->get_panel(display); | ||
74 | else { | ||
75 | drm_mode_destroy(connector->dev, mode); | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | drm_display_mode_from_videomode(&panel->vm, mode); | ||
80 | mode->width_mm = panel->width_mm; | ||
81 | mode->height_mm = panel->height_mm; | ||
82 | connector->display_info.width_mm = mode->width_mm; | ||
83 | connector->display_info.height_mm = mode->height_mm; | ||
84 | |||
85 | mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; | ||
86 | drm_mode_set_name(mode); | ||
87 | drm_mode_probed_add(connector, mode); | ||
88 | |||
89 | count = 1; | ||
90 | } | ||
91 | |||
92 | out: | ||
93 | kfree(edid); | ||
94 | return count; | ||
95 | } | ||
96 | |||
97 | static int exynos_drm_connector_mode_valid(struct drm_connector *connector, | ||
98 | struct drm_display_mode *mode) | ||
99 | { | ||
100 | struct exynos_drm_connector *exynos_connector = | ||
101 | to_exynos_connector(connector); | ||
102 | struct exynos_drm_display *display = exynos_connector->display; | ||
103 | int ret = MODE_BAD; | ||
104 | |||
105 | DRM_DEBUG_KMS("%s\n", __FILE__); | ||
106 | |||
107 | if (display->ops->check_mode) | ||
108 | if (!display->ops->check_mode(display, mode)) | ||
109 | ret = MODE_OK; | ||
110 | |||
111 | return ret; | ||
112 | } | ||
113 | |||
114 | static struct drm_encoder *exynos_drm_best_encoder( | ||
115 | struct drm_connector *connector) | ||
116 | { | ||
117 | struct drm_device *dev = connector->dev; | ||
118 | struct exynos_drm_connector *exynos_connector = | ||
119 | to_exynos_connector(connector); | ||
120 | return drm_encoder_find(dev, exynos_connector->encoder_id); | ||
121 | } | ||
122 | |||
123 | static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { | ||
124 | .get_modes = exynos_drm_connector_get_modes, | ||
125 | .mode_valid = exynos_drm_connector_mode_valid, | ||
126 | .best_encoder = exynos_drm_best_encoder, | ||
127 | }; | ||
128 | |||
129 | static int exynos_drm_connector_fill_modes(struct drm_connector *connector, | ||
130 | unsigned int max_width, unsigned int max_height) | ||
131 | { | ||
132 | struct exynos_drm_connector *exynos_connector = | ||
133 | to_exynos_connector(connector); | ||
134 | struct exynos_drm_display *display = exynos_connector->display; | ||
135 | unsigned int width, height; | ||
136 | |||
137 | width = max_width; | ||
138 | height = max_height; | ||
139 | |||
140 | /* | ||
141 | * if specific driver want to find desired_mode using maxmum | ||
142 | * resolution then get max width and height from that driver. | ||
143 | */ | ||
144 | if (display->ops->get_max_resol) | ||
145 | display->ops->get_max_resol(display, &width, &height); | ||
146 | |||
147 | return drm_helper_probe_single_connector_modes(connector, width, | ||
148 | height); | ||
149 | } | ||
150 | |||
151 | /* get detection status of display device. */ | ||
152 | static enum drm_connector_status | ||
153 | exynos_drm_connector_detect(struct drm_connector *connector, bool force) | ||
154 | { | ||
155 | struct exynos_drm_connector *exynos_connector = | ||
156 | to_exynos_connector(connector); | ||
157 | struct exynos_drm_display *display = exynos_connector->display; | ||
158 | enum drm_connector_status status = connector_status_disconnected; | ||
159 | |||
160 | if (display->ops->is_connected) { | ||
161 | if (display->ops->is_connected(display)) | ||
162 | status = connector_status_connected; | ||
163 | else | ||
164 | status = connector_status_disconnected; | ||
165 | } | ||
166 | |||
167 | return status; | ||
168 | } | ||
169 | |||
170 | static void exynos_drm_connector_destroy(struct drm_connector *connector) | ||
171 | { | ||
172 | struct exynos_drm_connector *exynos_connector = | ||
173 | to_exynos_connector(connector); | ||
174 | |||
175 | drm_connector_unregister(connector); | ||
176 | drm_connector_cleanup(connector); | ||
177 | kfree(exynos_connector); | ||
178 | } | ||
179 | |||
180 | static struct drm_connector_funcs exynos_connector_funcs = { | ||
181 | .dpms = drm_helper_connector_dpms, | ||
182 | .fill_modes = exynos_drm_connector_fill_modes, | ||
183 | .detect = exynos_drm_connector_detect, | ||
184 | .destroy = exynos_drm_connector_destroy, | ||
185 | }; | ||
186 | |||
187 | struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, | ||
188 | struct drm_encoder *encoder) | ||
189 | { | ||
190 | struct exynos_drm_connector *exynos_connector; | ||
191 | struct exynos_drm_display *display = exynos_drm_get_display(encoder); | ||
192 | struct drm_connector *connector; | ||
193 | int type; | ||
194 | int err; | ||
195 | |||
196 | exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); | ||
197 | if (!exynos_connector) | ||
198 | return NULL; | ||
199 | |||
200 | connector = &exynos_connector->drm_connector; | ||
201 | |||
202 | switch (display->type) { | ||
203 | case EXYNOS_DISPLAY_TYPE_HDMI: | ||
204 | type = DRM_MODE_CONNECTOR_HDMIA; | ||
205 | connector->interlace_allowed = true; | ||
206 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
207 | break; | ||
208 | case EXYNOS_DISPLAY_TYPE_VIDI: | ||
209 | type = DRM_MODE_CONNECTOR_VIRTUAL; | ||
210 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
211 | break; | ||
212 | default: | ||
213 | type = DRM_MODE_CONNECTOR_Unknown; | ||
214 | break; | ||
215 | } | ||
216 | |||
217 | drm_connector_init(dev, connector, &exynos_connector_funcs, type); | ||
218 | drm_connector_helper_add(connector, &exynos_connector_helper_funcs); | ||
219 | |||
220 | err = drm_connector_register(connector); | ||
221 | if (err) | ||
222 | goto err_connector; | ||
223 | |||
224 | exynos_connector->encoder_id = encoder->base.id; | ||
225 | exynos_connector->display = display; | ||
226 | connector->dpms = DRM_MODE_DPMS_OFF; | ||
227 | connector->encoder = encoder; | ||
228 | |||
229 | err = drm_mode_connector_attach_encoder(connector, encoder); | ||
230 | if (err) { | ||
231 | DRM_ERROR("failed to attach a connector to a encoder\n"); | ||
232 | goto err_sysfs; | ||
233 | } | ||
234 | |||
235 | DRM_DEBUG_KMS("connector has been created\n"); | ||
236 | |||
237 | return connector; | ||
238 | |||
239 | err_sysfs: | ||
240 | drm_connector_unregister(connector); | ||
241 | err_connector: | ||
242 | drm_connector_cleanup(connector); | ||
243 | kfree(exynos_connector); | ||
244 | return NULL; | ||
245 | } | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h deleted file mode 100644 index 4eb20d78379a..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.h +++ /dev/null | |||
@@ -1,20 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
3 | * Authors: | ||
4 | * Inki Dae <inki.dae@samsung.com> | ||
5 | * Joonyoung Shim <jy0922.shim@samsung.com> | ||
6 | * Seung-Woo Kim <sw0312.kim@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #ifndef _EXYNOS_DRM_CONNECTOR_H_ | ||
15 | #define _EXYNOS_DRM_CONNECTOR_H_ | ||
16 | |||
17 | struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, | ||
18 | struct drm_encoder *encoder); | ||
19 | |||
20 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 925fc69af1a0..c300e22da8ac 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -284,14 +284,9 @@ static void fimd_clear_channel(struct fimd_context *ctx) | |||
284 | } | 284 | } |
285 | } | 285 | } |
286 | 286 | ||
287 | static int fimd_ctx_initialize(struct fimd_context *ctx, | 287 | static int fimd_iommu_attach_devices(struct fimd_context *ctx, |
288 | struct drm_device *drm_dev) | 288 | struct drm_device *drm_dev) |
289 | { | 289 | { |
290 | struct exynos_drm_private *priv; | ||
291 | priv = drm_dev->dev_private; | ||
292 | |||
293 | ctx->drm_dev = drm_dev; | ||
294 | ctx->pipe = priv->pipe++; | ||
295 | 290 | ||
296 | /* attach this sub driver to iommu mapping if supported. */ | 291 | /* attach this sub driver to iommu mapping if supported. */ |
297 | if (is_drm_iommu_supported(ctx->drm_dev)) { | 292 | if (is_drm_iommu_supported(ctx->drm_dev)) { |
@@ -313,7 +308,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx, | |||
313 | return 0; | 308 | return 0; |
314 | } | 309 | } |
315 | 310 | ||
316 | static void fimd_ctx_remove(struct fimd_context *ctx) | 311 | static void fimd_iommu_detach_devices(struct fimd_context *ctx) |
317 | { | 312 | { |
318 | /* detach this sub driver from iommu mapping if supported. */ | 313 | /* detach this sub driver from iommu mapping if supported. */ |
319 | if (is_drm_iommu_supported(ctx->drm_dev)) | 314 | if (is_drm_iommu_supported(ctx->drm_dev)) |
@@ -1056,25 +1051,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) | |||
1056 | { | 1051 | { |
1057 | struct fimd_context *ctx = dev_get_drvdata(dev); | 1052 | struct fimd_context *ctx = dev_get_drvdata(dev); |
1058 | struct drm_device *drm_dev = data; | 1053 | struct drm_device *drm_dev = data; |
1054 | struct exynos_drm_private *priv = drm_dev->dev_private; | ||
1059 | int ret; | 1055 | int ret; |
1060 | 1056 | ||
1061 | ret = fimd_ctx_initialize(ctx, drm_dev); | 1057 | ctx->drm_dev = drm_dev; |
1062 | if (ret) { | 1058 | ctx->pipe = priv->pipe++; |
1063 | DRM_ERROR("fimd_ctx_initialize failed.\n"); | ||
1064 | return ret; | ||
1065 | } | ||
1066 | 1059 | ||
1067 | ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, | 1060 | ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, |
1068 | EXYNOS_DISPLAY_TYPE_LCD, | 1061 | EXYNOS_DISPLAY_TYPE_LCD, |
1069 | &fimd_crtc_ops, ctx); | 1062 | &fimd_crtc_ops, ctx); |
1070 | if (IS_ERR(ctx->crtc)) { | ||
1071 | fimd_ctx_remove(ctx); | ||
1072 | return PTR_ERR(ctx->crtc); | ||
1073 | } | ||
1074 | 1063 | ||
1075 | if (ctx->display) | 1064 | if (ctx->display) |
1076 | exynos_drm_create_enc_conn(drm_dev, ctx->display); | 1065 | exynos_drm_create_enc_conn(drm_dev, ctx->display); |
1077 | 1066 | ||
1067 | ret = fimd_iommu_attach_devices(ctx, drm_dev); | ||
1068 | if (ret) | ||
1069 | return ret; | ||
1070 | |||
1078 | return 0; | 1071 | return 0; |
1079 | 1072 | ||
1080 | } | 1073 | } |
@@ -1086,10 +1079,10 @@ static void fimd_unbind(struct device *dev, struct device *master, | |||
1086 | 1079 | ||
1087 | fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); | 1080 | fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); |
1088 | 1081 | ||
1082 | fimd_iommu_detach_devices(ctx); | ||
1083 | |||
1089 | if (ctx->display) | 1084 | if (ctx->display) |
1090 | exynos_dpi_remove(ctx->display); | 1085 | exynos_dpi_remove(ctx->display); |
1091 | |||
1092 | fimd_ctx_remove(ctx); | ||
1093 | } | 1086 | } |
1094 | 1087 | ||
1095 | static const struct component_ops fimd_component_ops = { | 1088 | static const struct component_ops fimd_component_ops = { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index a5616872eee7..8ad5b7294eb4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c | |||
@@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane) | |||
175 | struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); | 175 | struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); |
176 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); | 176 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); |
177 | 177 | ||
178 | if (exynos_crtc->ops->win_disable) | 178 | if (exynos_crtc && exynos_crtc->ops->win_disable) |
179 | exynos_crtc->ops->win_disable(exynos_crtc, | 179 | exynos_crtc->ops->win_disable(exynos_crtc, |
180 | exynos_plane->zpos); | 180 | exynos_plane->zpos); |
181 | 181 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e5daad5f75fb..27ea6bdebce7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) | |||
2737 | 2737 | ||
2738 | WARN_ON(i915_verify_lists(ring->dev)); | 2738 | WARN_ON(i915_verify_lists(ring->dev)); |
2739 | 2739 | ||
2740 | /* Move any buffers on the active list that are no longer referenced | 2740 | /* Retire requests first as we use it above for the early return. |
2741 | * by the ringbuffer to the flushing/inactive lists as appropriate, | 2741 | * If we retire requests last, we may use a later seqno and so clear |
2742 | * before we free the context associated with the requests. | 2742 | * the requests lists without clearing the active list, leading to |
2743 | * confusion. | ||
2743 | */ | 2744 | */ |
2744 | while (!list_empty(&ring->active_list)) { | ||
2745 | struct drm_i915_gem_object *obj; | ||
2746 | |||
2747 | obj = list_first_entry(&ring->active_list, | ||
2748 | struct drm_i915_gem_object, | ||
2749 | ring_list); | ||
2750 | |||
2751 | if (!i915_gem_request_completed(obj->last_read_req, true)) | ||
2752 | break; | ||
2753 | |||
2754 | i915_gem_object_move_to_inactive(obj); | ||
2755 | } | ||
2756 | |||
2757 | |||
2758 | while (!list_empty(&ring->request_list)) { | 2745 | while (!list_empty(&ring->request_list)) { |
2759 | struct drm_i915_gem_request *request; | 2746 | struct drm_i915_gem_request *request; |
2760 | struct intel_ringbuffer *ringbuf; | 2747 | struct intel_ringbuffer *ringbuf; |
@@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) | |||
2789 | i915_gem_free_request(request); | 2776 | i915_gem_free_request(request); |
2790 | } | 2777 | } |
2791 | 2778 | ||
2779 | /* Move any buffers on the active list that are no longer referenced | ||
2780 | * by the ringbuffer to the flushing/inactive lists as appropriate, | ||
2781 | * before we free the context associated with the requests. | ||
2782 | */ | ||
2783 | while (!list_empty(&ring->active_list)) { | ||
2784 | struct drm_i915_gem_object *obj; | ||
2785 | |||
2786 | obj = list_first_entry(&ring->active_list, | ||
2787 | struct drm_i915_gem_object, | ||
2788 | ring_list); | ||
2789 | |||
2790 | if (!i915_gem_request_completed(obj->last_read_req, true)) | ||
2791 | break; | ||
2792 | |||
2793 | i915_gem_object_move_to_inactive(obj); | ||
2794 | } | ||
2795 | |||
2792 | if (unlikely(ring->trace_irq_req && | 2796 | if (unlikely(ring->trace_irq_req && |
2793 | i915_gem_request_completed(ring->trace_irq_req, true))) { | 2797 | i915_gem_request_completed(ring->trace_irq_req, true))) { |
2794 | ring->irq_put(ring); | 2798 | ring->irq_put(ring); |
@@ -2936,9 +2940,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2936 | req = obj->last_read_req; | 2940 | req = obj->last_read_req; |
2937 | 2941 | ||
2938 | /* Do this after OLR check to make sure we make forward progress polling | 2942 | /* Do this after OLR check to make sure we make forward progress polling |
2939 | * on this IOCTL with a timeout <=0 (like busy ioctl) | 2943 | * on this IOCTL with a timeout == 0 (like busy ioctl) |
2940 | */ | 2944 | */ |
2941 | if (args->timeout_ns <= 0) { | 2945 | if (args->timeout_ns == 0) { |
2942 | ret = -ETIME; | 2946 | ret = -ETIME; |
2943 | goto out; | 2947 | goto out; |
2944 | } | 2948 | } |
@@ -2948,7 +2952,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2948 | i915_gem_request_reference(req); | 2952 | i915_gem_request_reference(req); |
2949 | mutex_unlock(&dev->struct_mutex); | 2953 | mutex_unlock(&dev->struct_mutex); |
2950 | 2954 | ||
2951 | ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, | 2955 | ret = __i915_wait_request(req, reset_counter, true, |
2956 | args->timeout_ns > 0 ? &args->timeout_ns : NULL, | ||
2952 | file->driver_priv); | 2957 | file->driver_priv); |
2953 | mutex_lock(&dev->struct_mutex); | 2958 | mutex_lock(&dev->struct_mutex); |
2954 | i915_gem_request_unreference(req); | 2959 | i915_gem_request_unreference(req); |
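The two changes above split the timeout handling: a timeout of exactly zero still returns -ETIME straight away (busy-ioctl style polling), while a negative value now means an unbounded wait, expressed by handing __i915_wait_request() a NULL timeout pointer. A small sketch of that mapping, with a hypothetical wait helper standing in for the real one:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: a NULL timeout means wait without limit. */
static int wait_for_request(const int64_t *timeout_ns)
{
        if (timeout_ns)
                printf("waiting up to %lld ns\n", (long long)*timeout_ns);
        else
                printf("waiting with no time limit\n");
        return 0;
}

static int do_wait_ioctl(int64_t timeout_ns)
{
        if (timeout_ns == 0)
                return -ETIME;  /* poll-style call and the request is still busy */

        /* negative timeout -> unbounded wait, positive -> bounded wait */
        return wait_for_request(timeout_ns > 0 ? &timeout_ns : NULL);
}

int main(void)
{
        do_wait_ioctl(0);
        do_wait_ioctl(-1);
        do_wait_ioctl(1000000);
        return 0;
}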
@@ -4792,6 +4797,9 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4792 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | 4797 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
4793 | return -EIO; | 4798 | return -EIO; |
4794 | 4799 | ||
4800 | /* Double layer security blanket, see i915_gem_init() */ | ||
4801 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
4802 | |||
4795 | if (dev_priv->ellc_size) | 4803 | if (dev_priv->ellc_size) |
4796 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); | 4804 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); |
4797 | 4805 | ||
@@ -4824,7 +4832,7 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4824 | for_each_ring(ring, dev_priv, i) { | 4832 | for_each_ring(ring, dev_priv, i) { |
4825 | ret = ring->init_hw(ring); | 4833 | ret = ring->init_hw(ring); |
4826 | if (ret) | 4834 | if (ret) |
4827 | return ret; | 4835 | goto out; |
4828 | } | 4836 | } |
4829 | 4837 | ||
4830 | for (i = 0; i < NUM_L3_SLICES(dev); i++) | 4838 | for (i = 0; i < NUM_L3_SLICES(dev); i++) |
@@ -4841,9 +4849,11 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4841 | DRM_ERROR("Context enable failed %d\n", ret); | 4849 | DRM_ERROR("Context enable failed %d\n", ret); |
4842 | i915_gem_cleanup_ringbuffer(dev); | 4850 | i915_gem_cleanup_ringbuffer(dev); |
4843 | 4851 | ||
4844 | return ret; | 4852 | goto out; |
4845 | } | 4853 | } |
4846 | 4854 | ||
4855 | out: | ||
4856 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
4847 | return ret; | 4857 | return ret; |
4848 | } | 4858 | } |
4849 | 4859 | ||
@@ -4877,6 +4887,14 @@ int i915_gem_init(struct drm_device *dev) | |||
4877 | dev_priv->gt.stop_ring = intel_logical_ring_stop; | 4887 | dev_priv->gt.stop_ring = intel_logical_ring_stop; |
4878 | } | 4888 | } |
4879 | 4889 | ||
4890 | /* This is just a security blanket to placate dragons. | ||
4891 | * On some systems, we very sporadically observe that the first TLBs | ||
4892 | * used by the CS may be stale, despite us poking the TLB reset. If | ||
4893 | * we hold the forcewake during initialisation these problems | ||
4894 | * just magically go away. | ||
4895 | */ | ||
4896 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
4897 | |||
4880 | ret = i915_gem_init_userptr(dev); | 4898 | ret = i915_gem_init_userptr(dev); |
4881 | if (ret) | 4899 | if (ret) |
4882 | goto out_unlock; | 4900 | goto out_unlock; |
@@ -4903,6 +4921,7 @@ int i915_gem_init(struct drm_device *dev) | |||
4903 | } | 4921 | } |
4904 | 4922 | ||
4905 | out_unlock: | 4923 | out_unlock: |
4924 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
4906 | mutex_unlock(&dev->struct_mutex); | 4925 | mutex_unlock(&dev->struct_mutex); |
4907 | 4926 | ||
4908 | return ret; | 4927 | return ret; |
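Taken together, the i915_gem_init()/i915_gem_init_hw() hunks bracket the whole of hardware bring-up in a forcewake get/put pair and convert early returns into goto out so the reference is always dropped. A generic sketch of that acquire/release-with-single-exit shape, where power_get/power_put are placeholders and not the i915 API:

#include <errno.h>
#include <stdio.h>

static void power_get(void) { puts("power: held"); }
static void power_put(void) { puts("power: released"); }

static int init_engines(int fail) { return fail ? -EIO : 0; }

static int hw_init(int fail)
{
        int ret;

        power_get();                    /* hold the domain for all init steps */

        ret = init_engines(fail);
        if (ret)
                goto out;               /* error path still releases below */

        puts("hw ready");
out:
        power_put();                    /* single release point for all paths */
        return ret;
}

int main(void)
{
        hw_init(0);
        hw_init(1);
        return 0;
}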
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e730789b53b7..f75173c20f47 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <drm/i915_drm.h> | 37 | #include <drm/i915_drm.h> |
38 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
39 | #include "i915_trace.h" | 39 | #include "i915_trace.h" |
40 | #include <drm/drm_atomic.h> | ||
40 | #include <drm/drm_atomic_helper.h> | 41 | #include <drm/drm_atomic_helper.h> |
41 | #include <drm/drm_dp_helper.h> | 42 | #include <drm/drm_dp_helper.h> |
42 | #include <drm/drm_crtc_helper.h> | 43 | #include <drm/drm_crtc_helper.h> |
@@ -2416,6 +2417,14 @@ out_unref_obj: | |||
2416 | return false; | 2417 | return false; |
2417 | } | 2418 | } |
2418 | 2419 | ||
2420 | /* Update plane->state->fb to match plane->fb after driver-internal updates */ | ||
2421 | static void | ||
2422 | update_state_fb(struct drm_plane *plane) | ||
2423 | { | ||
2424 | if (plane->fb != plane->state->fb) | ||
2425 | drm_atomic_set_fb_for_plane(plane->state, plane->fb); | ||
2426 | } | ||
2427 | |||
2419 | static void | 2428 | static void |
2420 | intel_find_plane_obj(struct intel_crtc *intel_crtc, | 2429 | intel_find_plane_obj(struct intel_crtc *intel_crtc, |
2421 | struct intel_initial_plane_config *plane_config) | 2430 | struct intel_initial_plane_config *plane_config) |
@@ -2429,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, | |||
2429 | if (!intel_crtc->base.primary->fb) | 2438 | if (!intel_crtc->base.primary->fb) |
2430 | return; | 2439 | return; |
2431 | 2440 | ||
2432 | if (intel_alloc_plane_obj(intel_crtc, plane_config)) | 2441 | if (intel_alloc_plane_obj(intel_crtc, plane_config)) { |
2442 | struct drm_plane *primary = intel_crtc->base.primary; | ||
2443 | |||
2444 | primary->state->crtc = &intel_crtc->base; | ||
2445 | primary->crtc = &intel_crtc->base; | ||
2446 | update_state_fb(primary); | ||
2447 | |||
2433 | return; | 2448 | return; |
2449 | } | ||
2434 | 2450 | ||
2435 | kfree(intel_crtc->base.primary->fb); | 2451 | kfree(intel_crtc->base.primary->fb); |
2436 | intel_crtc->base.primary->fb = NULL; | 2452 | intel_crtc->base.primary->fb = NULL; |
@@ -2453,15 +2469,21 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, | |||
2453 | continue; | 2469 | continue; |
2454 | 2470 | ||
2455 | if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { | 2471 | if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { |
2472 | struct drm_plane *primary = intel_crtc->base.primary; | ||
2473 | |||
2456 | if (obj->tiling_mode != I915_TILING_NONE) | 2474 | if (obj->tiling_mode != I915_TILING_NONE) |
2457 | dev_priv->preserve_bios_swizzle = true; | 2475 | dev_priv->preserve_bios_swizzle = true; |
2458 | 2476 | ||
2459 | drm_framebuffer_reference(c->primary->fb); | 2477 | drm_framebuffer_reference(c->primary->fb); |
2460 | intel_crtc->base.primary->fb = c->primary->fb; | 2478 | primary->fb = c->primary->fb; |
2479 | primary->state->crtc = &intel_crtc->base; | ||
2480 | primary->crtc = &intel_crtc->base; | ||
2461 | obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); | 2481 | obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); |
2462 | break; | 2482 | break; |
2463 | } | 2483 | } |
2464 | } | 2484 | } |
2485 | |||
2486 | update_state_fb(intel_crtc->base.primary); | ||
2465 | } | 2487 | } |
2466 | 2488 | ||
2467 | static void i9xx_update_primary_plane(struct drm_crtc *crtc, | 2489 | static void i9xx_update_primary_plane(struct drm_crtc *crtc, |
@@ -6602,6 +6624,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
6602 | struct drm_framebuffer *fb; | 6624 | struct drm_framebuffer *fb; |
6603 | struct intel_framebuffer *intel_fb; | 6625 | struct intel_framebuffer *intel_fb; |
6604 | 6626 | ||
6627 | val = I915_READ(DSPCNTR(plane)); | ||
6628 | if (!(val & DISPLAY_PLANE_ENABLE)) | ||
6629 | return; | ||
6630 | |||
6605 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 6631 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
6606 | if (!intel_fb) { | 6632 | if (!intel_fb) { |
6607 | DRM_DEBUG_KMS("failed to alloc fb\n"); | 6633 | DRM_DEBUG_KMS("failed to alloc fb\n"); |
@@ -6610,8 +6636,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
6610 | 6636 | ||
6611 | fb = &intel_fb->base; | 6637 | fb = &intel_fb->base; |
6612 | 6638 | ||
6613 | val = I915_READ(DSPCNTR(plane)); | ||
6614 | |||
6615 | if (INTEL_INFO(dev)->gen >= 4) | 6639 | if (INTEL_INFO(dev)->gen >= 4) |
6616 | if (val & DISPPLANE_TILED) | 6640 | if (val & DISPPLANE_TILED) |
6617 | plane_config->tiling = I915_TILING_X; | 6641 | plane_config->tiling = I915_TILING_X; |
@@ -7643,6 +7667,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7643 | fb = &intel_fb->base; | 7667 | fb = &intel_fb->base; |
7644 | 7668 | ||
7645 | val = I915_READ(PLANE_CTL(pipe, 0)); | 7669 | val = I915_READ(PLANE_CTL(pipe, 0)); |
7670 | if (!(val & PLANE_CTL_ENABLE)) | ||
7671 | goto error; | ||
7672 | |||
7646 | if (val & PLANE_CTL_TILED_MASK) | 7673 | if (val & PLANE_CTL_TILED_MASK) |
7647 | plane_config->tiling = I915_TILING_X; | 7674 | plane_config->tiling = I915_TILING_X; |
7648 | 7675 | ||
@@ -7730,6 +7757,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7730 | struct drm_framebuffer *fb; | 7757 | struct drm_framebuffer *fb; |
7731 | struct intel_framebuffer *intel_fb; | 7758 | struct intel_framebuffer *intel_fb; |
7732 | 7759 | ||
7760 | val = I915_READ(DSPCNTR(pipe)); | ||
7761 | if (!(val & DISPLAY_PLANE_ENABLE)) | ||
7762 | return; | ||
7763 | |||
7733 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | 7764 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
7734 | if (!intel_fb) { | 7765 | if (!intel_fb) { |
7735 | DRM_DEBUG_KMS("failed to alloc fb\n"); | 7766 | DRM_DEBUG_KMS("failed to alloc fb\n"); |
@@ -7738,8 +7769,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, | |||
7738 | 7769 | ||
7739 | fb = &intel_fb->base; | 7770 | fb = &intel_fb->base; |
7740 | 7771 | ||
7741 | val = I915_READ(DSPCNTR(pipe)); | ||
7742 | |||
7743 | if (INTEL_INFO(dev)->gen >= 4) | 7772 | if (INTEL_INFO(dev)->gen >= 4) |
7744 | if (val & DISPPLANE_TILED) | 7773 | if (val & DISPPLANE_TILED) |
7745 | plane_config->tiling = I915_TILING_X; | 7774 | plane_config->tiling = I915_TILING_X; |
@@ -9716,7 +9745,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) | |||
9716 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 9745 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
9717 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9746 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
9718 | 9747 | ||
9719 | WARN_ON(!in_irq()); | 9748 | WARN_ON(!in_interrupt()); |
9720 | 9749 | ||
9721 | if (crtc == NULL) | 9750 | if (crtc == NULL) |
9722 | return; | 9751 | return; |
@@ -9816,6 +9845,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9816 | drm_gem_object_reference(&obj->base); | 9845 | drm_gem_object_reference(&obj->base); |
9817 | 9846 | ||
9818 | crtc->primary->fb = fb; | 9847 | crtc->primary->fb = fb; |
9848 | update_state_fb(crtc->primary); | ||
9819 | 9849 | ||
9820 | work->pending_flip_obj = obj; | 9850 | work->pending_flip_obj = obj; |
9821 | 9851 | ||
@@ -9884,6 +9914,7 @@ cleanup_unpin: | |||
9884 | cleanup_pending: | 9914 | cleanup_pending: |
9885 | atomic_dec(&intel_crtc->unpin_work_count); | 9915 | atomic_dec(&intel_crtc->unpin_work_count); |
9886 | crtc->primary->fb = old_fb; | 9916 | crtc->primary->fb = old_fb; |
9917 | update_state_fb(crtc->primary); | ||
9887 | drm_gem_object_unreference(&work->old_fb_obj->base); | 9918 | drm_gem_object_unreference(&work->old_fb_obj->base); |
9888 | drm_gem_object_unreference(&obj->base); | 9919 | drm_gem_object_unreference(&obj->base); |
9889 | mutex_unlock(&dev->struct_mutex); | 9920 | mutex_unlock(&dev->struct_mutex); |
@@ -13718,6 +13749,7 @@ void intel_modeset_gem_init(struct drm_device *dev) | |||
13718 | to_intel_crtc(c)->pipe); | 13749 | to_intel_crtc(c)->pipe); |
13719 | drm_framebuffer_unreference(c->primary->fb); | 13750 | drm_framebuffer_unreference(c->primary->fb); |
13720 | c->primary->fb = NULL; | 13751 | c->primary->fb = NULL; |
13752 | update_state_fb(c->primary); | ||
13721 | } | 13753 | } |
13722 | } | 13754 | } |
13723 | mutex_unlock(&dev->struct_mutex); | 13755 | mutex_unlock(&dev->struct_mutex); |
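Note on the intel_display.c hunks above: every site that reassigns crtc->primary->fb now also calls update_state_fb() so the legacy plane->fb pointer and the atomic plane->state->fb stay in sync, and the *_get_initial_plane_config() paths return early when the BIOS left the plane disabled instead of allocating a framebuffer for it. The body of update_state_fb() is not part of these hunks; a minimal sketch of what a helper with that role could look like (an assumption for illustration, not necessarily the committed implementation):

    static void update_state_fb(struct drm_plane *plane)
    {
        if (plane->fb == plane->state->fb)
            return;

        /* move the plane-state reference over to the new legacy fb */
        if (plane->state->fb)
            drm_framebuffer_unreference(plane->state->fb);
        plane->state->fb = plane->fb;
        if (plane->state->fb)
            drm_framebuffer_reference(plane->state->fb);
    }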
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c47a3baa53d5..4e8fb891d4ea 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) | |||
1048 | 1048 | ||
1049 | /* We need to init first for ECOBUS access and then | 1049 | /* We need to init first for ECOBUS access and then |
1050 | * determine later if we want to reinit, in case of MT access is | 1050 | * determine later if we want to reinit, in case of MT access is |
1051 | * not working | 1051 | * not working. In this stage we don't know which flavour this |
1052 | * ivb is, so it is better to reset also the gen6 fw registers | ||
1053 | * before the ecobus check. | ||
1052 | */ | 1054 | */ |
1055 | |||
1056 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); | ||
1057 | __raw_posting_read(dev_priv, ECOBUS); | ||
1058 | |||
1053 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, | 1059 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
1054 | FORCEWAKE_MT, FORCEWAKE_MT_ACK); | 1060 | FORCEWAKE_MT, FORCEWAKE_MT_ACK); |
1055 | 1061 | ||
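The two lines added before fw_domain_init() above use the common MMIO write-then-posting-read idiom: reading back a register from the same block forces the preceding write out to the hardware before the ECOBUS check that follows depends on it. Generic shape of the idiom, with hypothetical register names rather than more i915 code:

    writel(0, base + REG_CTRL);          /* request the reset */
    (void)readl(base + REG_STATUS);      /* posting read: flush the write
                                          * before relying on its effect */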
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 29bd539af183..6efa8f38ff54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | |||
@@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, | |||
340 | 340 | ||
341 | /* switch mmio to cpu's native endianness */ | 341 | /* switch mmio to cpu's native endianness */ |
342 | #ifndef __BIG_ENDIAN | 342 | #ifndef __BIG_ENDIAN |
343 | if (ioread32_native(map + 0x000004) != 0x00000000) | 343 | if (ioread32_native(map + 0x000004) != 0x00000000) { |
344 | #else | 344 | #else |
345 | if (ioread32_native(map + 0x000004) == 0x00000000) | 345 | if (ioread32_native(map + 0x000004) == 0x00000000) { |
346 | #endif | 346 | #endif |
347 | iowrite32_native(0x01000001, map + 0x000004); | 347 | iowrite32_native(0x01000001, map + 0x000004); |
348 | ioread32_native(map); | ||
349 | } | ||
348 | 350 | ||
349 | /* read boot0 and strapping information */ | 351 | /* read boot0 and strapping information */ |
350 | boot0 = ioread32_native(map + 0x000000); | 352 | boot0 = ioread32_native(map + 0x000000); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c index 539561ed3281..108d048da764 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c | |||
@@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device) | |||
142 | device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; | 142 | device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; |
143 | #endif | 143 | #endif |
144 | break; | 144 | break; |
145 | case 0x126: | ||
146 | device->cname = "GM206"; | ||
147 | device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass; | ||
148 | device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass; | ||
149 | device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass; | ||
150 | device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass; | ||
151 | #if 0 | ||
152 | /* looks to be some non-trivial changes */ | ||
153 | device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass; | ||
154 | /* priv ring says no to 0x10eb14 writes */ | ||
155 | device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass; | ||
156 | #endif | ||
157 | device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; | ||
158 | device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass; | ||
159 | device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; | ||
160 | device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass; | ||
161 | device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; | ||
162 | device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass; | ||
163 | device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass; | ||
164 | device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass; | ||
165 | device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; | ||
166 | device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass; | ||
167 | device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass; | ||
168 | device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass; | ||
169 | #if 0 | ||
170 | device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; | ||
171 | #endif | ||
172 | device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass; | ||
173 | #if 0 | ||
174 | device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass; | ||
175 | device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass; | ||
176 | device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass; | ||
177 | #endif | ||
178 | device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass; | ||
179 | #if 0 | ||
180 | device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass; | ||
181 | device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass; | ||
182 | device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass; | ||
183 | device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass; | ||
184 | device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass; | ||
185 | device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; | ||
186 | #endif | ||
187 | break; | ||
145 | default: | 188 | default: |
146 | nv_fatal(device, "unknown Maxwell chipset\n"); | 189 | nv_fatal(device, "unknown Maxwell chipset\n"); |
147 | return -EINVAL; | 190 | return -EINVAL; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c index b038b6eb51db..043e4296084c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c | |||
@@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev) | |||
502 | { | 502 | { |
503 | struct nvkm_device *device = nv_device(subdev); | 503 | struct nvkm_device *device = nv_device(subdev); |
504 | struct nv04_fifo_priv *priv = (void *)subdev; | 504 | struct nv04_fifo_priv *priv = (void *)subdev; |
505 | uint32_t status, reassign; | 505 | u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0); |
506 | int cnt = 0; | 506 | u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask; |
507 | u32 reassign, chid, get, sem; | ||
507 | 508 | ||
508 | reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; | 509 | reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; |
509 | while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { | 510 | nv_wr32(priv, NV03_PFIFO_CACHES, 0); |
510 | uint32_t chid, get; | ||
511 | |||
512 | nv_wr32(priv, NV03_PFIFO_CACHES, 0); | ||
513 | |||
514 | chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; | ||
515 | get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); | ||
516 | 511 | ||
517 | if (status & NV_PFIFO_INTR_CACHE_ERROR) { | 512 | chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; |
518 | nv04_fifo_cache_error(device, priv, chid, get); | 513 | get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); |
519 | status &= ~NV_PFIFO_INTR_CACHE_ERROR; | ||
520 | } | ||
521 | 514 | ||
522 | if (status & NV_PFIFO_INTR_DMA_PUSHER) { | 515 | if (stat & NV_PFIFO_INTR_CACHE_ERROR) { |
523 | nv04_fifo_dma_pusher(device, priv, chid); | 516 | nv04_fifo_cache_error(device, priv, chid, get); |
524 | status &= ~NV_PFIFO_INTR_DMA_PUSHER; | 517 | stat &= ~NV_PFIFO_INTR_CACHE_ERROR; |
525 | } | 518 | } |
526 | 519 | ||
527 | if (status & NV_PFIFO_INTR_SEMAPHORE) { | 520 | if (stat & NV_PFIFO_INTR_DMA_PUSHER) { |
528 | uint32_t sem; | 521 | nv04_fifo_dma_pusher(device, priv, chid); |
522 | stat &= ~NV_PFIFO_INTR_DMA_PUSHER; | ||
523 | } | ||
529 | 524 | ||
530 | status &= ~NV_PFIFO_INTR_SEMAPHORE; | 525 | if (stat & NV_PFIFO_INTR_SEMAPHORE) { |
531 | nv_wr32(priv, NV03_PFIFO_INTR_0, | 526 | stat &= ~NV_PFIFO_INTR_SEMAPHORE; |
532 | NV_PFIFO_INTR_SEMAPHORE); | 527 | nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); |
533 | 528 | ||
534 | sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); | 529 | sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); |
535 | nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); | 530 | nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); |
536 | 531 | ||
537 | nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); | 532 | nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); |
538 | nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); | 533 | nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); |
539 | } | 534 | } |
540 | 535 | ||
541 | if (device->card_type == NV_50) { | 536 | if (device->card_type == NV_50) { |
542 | if (status & 0x00000010) { | 537 | if (stat & 0x00000010) { |
543 | status &= ~0x00000010; | 538 | stat &= ~0x00000010; |
544 | nv_wr32(priv, 0x002100, 0x00000010); | 539 | nv_wr32(priv, 0x002100, 0x00000010); |
545 | } | ||
546 | |||
547 | if (status & 0x40000000) { | ||
548 | nv_wr32(priv, 0x002100, 0x40000000); | ||
549 | nvkm_fifo_uevent(&priv->base); | ||
550 | status &= ~0x40000000; | ||
551 | } | ||
552 | } | 540 | } |
553 | 541 | ||
554 | if (status) { | 542 | if (stat & 0x40000000) { |
555 | nv_warn(priv, "unknown intr 0x%08x, ch %d\n", | 543 | nv_wr32(priv, 0x002100, 0x40000000); |
556 | status, chid); | 544 | nvkm_fifo_uevent(&priv->base); |
557 | nv_wr32(priv, NV03_PFIFO_INTR_0, status); | 545 | stat &= ~0x40000000; |
558 | status = 0; | ||
559 | } | 546 | } |
560 | |||
561 | nv_wr32(priv, NV03_PFIFO_CACHES, reassign); | ||
562 | } | 547 | } |
563 | 548 | ||
564 | if (status) { | 549 | if (stat) { |
565 | nv_error(priv, "still angry after %d spins, halt\n", cnt); | 550 | nv_warn(priv, "unknown intr 0x%08x\n", stat); |
566 | nv_wr32(priv, 0x002140, 0); | 551 | nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); |
567 | nv_wr32(priv, 0x000140, 0); | 552 | nv_wr32(priv, NV03_PFIFO_INTR_0, stat); |
568 | } | 553 | } |
569 | 554 | ||
570 | nv_wr32(priv, 0x000100, 0x00000100); | 555 | nv_wr32(priv, NV03_PFIFO_CACHES, reassign); |
571 | } | 556 | } |
572 | 557 | ||
573 | static int | 558 | static int |
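The reworked nv04_fifo_intr() above drops the bounded retry loop in favour of a single pass: read the interrupt-enable mask, AND it into the raw status, clear each recognised bit as it is handled, and finally disable plus acknowledge whatever is left so an unrecognised source cannot keep the handler spinning. A stripped-down sketch of that shape, with hypothetical register and helper names (not nouveau code):

    static void demo_fifo_intr(struct demo_priv *priv)
    {
        u32 mask = demo_rd32(priv, DEMO_INTR_EN);       /* enabled sources */
        u32 stat = demo_rd32(priv, DEMO_INTR) & mask;   /* pending & enabled */

        if (stat & DEMO_INTR_CACHE_ERROR) {
            demo_handle_cache_error(priv);
            stat &= ~DEMO_INTR_CACHE_ERROR;
        }

        if (stat) {
            /* unknown bits: mask them off and ack them so they stop firing */
            demo_wr32(priv, DEMO_INTR_EN, mask & ~stat);
            demo_wr32(priv, DEMO_INTR, stat);
        }
    }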
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c index 2e7ec389eea7..57e2c5b13123 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | |||
@@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info) | |||
1032 | const int s = 8; | 1032 | const int s = 8; |
1033 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); | 1033 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
1034 | mmio_refn(info, 0x408004, 0x00000000, s, b); | 1034 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
1035 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); | 1035 | mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); |
1036 | mmio_refn(info, 0x418808, 0x00000000, s, b); | 1036 | mmio_refn(info, 0x418808, 0x00000000, s, b); |
1037 | mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); | 1037 | mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); |
1038 | } | 1038 | } |
1039 | 1039 | ||
1040 | void | 1040 | void |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index b52300d8861a..5e9454ba158f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | |||
@@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info) | |||
851 | const int s = 8; | 851 | const int s = 8; |
852 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); | 852 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
853 | mmio_refn(info, 0x408004, 0x00000000, s, b); | 853 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
854 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); | 854 | mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); |
855 | mmio_refn(info, 0x418808, 0x00000000, s, b); | 855 | mmio_refn(info, 0x418808, 0x00000000, s, b); |
856 | mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); | 856 | mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); |
857 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); | 857 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); |
858 | } | 858 | } |
859 | 859 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c index 956f4dce960c..b2fae6e389e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | |||
@@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info) | |||
871 | const int s = 8; | 871 | const int s = 8; |
872 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); | 872 | const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); |
873 | mmio_refn(info, 0x408004, 0x00000000, s, b); | 873 | mmio_refn(info, 0x408004, 0x00000000, s, b); |
874 | mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); | 874 | mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); |
875 | mmio_refn(info, 0x418e24, 0x00000000, s, b); | 875 | mmio_refn(info, 0x418e24, 0x00000000, s, b); |
876 | mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); | 876 | mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s)); |
877 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); | 877 | mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); |
878 | } | 878 | } |
879 | 879 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c index d1a89b2bd5c1..c4e1f085ee10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c | |||
@@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info) | |||
74 | u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); | 74 | u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); |
75 | if (ent) { | 75 | if (ent) { |
76 | if (ver >= 0x41) { | 76 | if (ver >= 0x41) { |
77 | if (!(nv_ro32(bios, ent) & 0x80000000)) | 77 | u32 ent_value = nv_ro32(bios, ent); |
78 | u8 i2c_port = (ent_value >> 27) & 0x1f; | ||
79 | u8 dpaux_port = (ent_value >> 22) & 0x1f; | ||
80 | /* value 0x1f means unused according to DCB 4.x spec */ | ||
81 | if (i2c_port == 0x1f && dpaux_port == 0x1f) | ||
78 | info->type = DCB_I2C_UNUSED; | 82 | info->type = DCB_I2C_UNUSED; |
79 | else | 83 | else |
80 | info->type = DCB_I2C_PMGR; | 84 | info->type = DCB_I2C_PMGR; |
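Worked example for the DCB 4.1 parsing change above (the entry value is invented for illustration):

    /* ent_value = 0xffc00000
     *   i2c_port   = (ent_value >> 27) & 0x1f = 0x1f   -> unused
     *   dpaux_port = (ent_value >> 22) & 0x1f = 0x1f   -> unused
     * Both ports carry the 0x1f "unused" sentinel, so the entry becomes
     * DCB_I2C_UNUSED; any other combination keeps it as DCB_I2C_PMGR.
     */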
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index d13d1b5a859f..df09ca7c4889 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence) | |||
1030 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); | 1030 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | struct radeon_wait_cb { | ||
1034 | struct fence_cb base; | ||
1035 | struct task_struct *task; | ||
1036 | }; | ||
1037 | |||
1038 | static void | ||
1039 | radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) | ||
1040 | { | ||
1041 | struct radeon_wait_cb *wait = | ||
1042 | container_of(cb, struct radeon_wait_cb, base); | ||
1043 | |||
1044 | wake_up_process(wait->task); | ||
1045 | } | ||
1046 | |||
1033 | static signed long radeon_fence_default_wait(struct fence *f, bool intr, | 1047 | static signed long radeon_fence_default_wait(struct fence *f, bool intr, |
1034 | signed long t) | 1048 | signed long t) |
1035 | { | 1049 | { |
1036 | struct radeon_fence *fence = to_radeon_fence(f); | 1050 | struct radeon_fence *fence = to_radeon_fence(f); |
1037 | struct radeon_device *rdev = fence->rdev; | 1051 | struct radeon_device *rdev = fence->rdev; |
1038 | bool signaled; | 1052 | struct radeon_wait_cb cb; |
1039 | 1053 | ||
1040 | fence_enable_sw_signaling(&fence->base); | 1054 | cb.task = current; |
1041 | 1055 | ||
1042 | /* | 1056 | if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) |
1043 | * This function has to return -EDEADLK, but cannot hold | 1057 | return t; |
1044 | * exclusive_lock during the wait because some callers | 1058 | |
1045 | * may already hold it. This means checking needs_reset without | 1059 | while (t > 0) { |
1046 | * lock, and not fiddling with any gpu internals. | 1060 | if (intr) |
1047 | * | 1061 | set_current_state(TASK_INTERRUPTIBLE); |
1048 | * The callback installed with fence_enable_sw_signaling will | 1062 | else |
1049 | * run before our wait_event_*timeout call, so we will see | 1063 | set_current_state(TASK_UNINTERRUPTIBLE); |
1050 | * both the signaled fence and the changes to needs_reset. | 1064 | |
1051 | */ | 1065 | /* |
1066 | * radeon_test_signaled must be called after | ||
1067 | * set_current_state to prevent a race with wake_up_process | ||
1068 | */ | ||
1069 | if (radeon_test_signaled(fence)) | ||
1070 | break; | ||
1071 | |||
1072 | if (rdev->needs_reset) { | ||
1073 | t = -EDEADLK; | ||
1074 | break; | ||
1075 | } | ||
1076 | |||
1077 | t = schedule_timeout(t); | ||
1078 | |||
1079 | if (t > 0 && intr && signal_pending(current)) | ||
1080 | t = -ERESTARTSYS; | ||
1081 | } | ||
1082 | |||
1083 | __set_current_state(TASK_RUNNING); | ||
1084 | fence_remove_callback(f, &cb.base); | ||
1052 | 1085 | ||
1053 | if (intr) | ||
1054 | t = wait_event_interruptible_timeout(rdev->fence_queue, | ||
1055 | ((signaled = radeon_test_signaled(fence)) || | ||
1056 | rdev->needs_reset), t); | ||
1057 | else | ||
1058 | t = wait_event_timeout(rdev->fence_queue, | ||
1059 | ((signaled = radeon_test_signaled(fence)) || | ||
1060 | rdev->needs_reset), t); | ||
1061 | |||
1062 | if (t > 0 && !signaled) | ||
1063 | return -EDEADLK; | ||
1064 | return t; | 1086 | return t; |
1065 | } | 1087 | } |
1066 | 1088 | ||
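The rewritten radeon_fence_default_wait() above is the standard fence-callback wait pattern: the callback does nothing but wake_up_process(), and the loop sets the task state before testing the condition so a wake-up cannot be lost between the test and the sleep. Schematically (the condition name is a placeholder):

    for (;;) {
        set_current_state(TASK_UNINTERRUPTIBLE);   /* 1: mark "about to sleep" */
        if (condition_is_true())                   /* 2: test *after* step 1 */
            break;
        t = schedule_timeout(t);                   /* 3: a wake_up_process()
                                                    * issued between 1 and 3
                                                    * resets the task to
                                                    * TASK_RUNNING, so this
                                                    * returns promptly instead
                                                    * of sleeping out t */
        if (t <= 0)
            break;
    }
    __set_current_state(TASK_RUNNING);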
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 061eaa9c19c7..122eb5693ba1 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c | |||
@@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev) | |||
153 | .compute_vmid_bitmap = 0xFF00, | 153 | .compute_vmid_bitmap = 0xFF00, |
154 | 154 | ||
155 | .first_compute_pipe = 1, | 155 | .first_compute_pipe = 1, |
156 | .compute_pipe_count = 8 - 1, | 156 | .compute_pipe_count = 4 - 1, |
157 | }; | 157 | }; |
158 | 158 | ||
159 | radeon_doorbell_get_kfd_info(rdev, | 159 | radeon_doorbell_get_kfd_info(rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 43e09942823e..318165d4855c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) | |||
173 | else | 173 | else |
174 | rbo->placements[i].lpfn = 0; | 174 | rbo->placements[i].lpfn = 0; |
175 | } | 175 | } |
176 | |||
177 | /* | ||
178 | * Use two-ended allocation depending on the buffer size to | ||
179 | * improve fragmentation quality. | ||
180 | * 512kb was measured as the most optimal number. | ||
181 | */ | ||
182 | if (rbo->tbo.mem.size > 512 * 1024) { | ||
183 | for (i = 0; i < c; i++) { | ||
184 | rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; | ||
185 | } | ||
186 | } | ||
187 | } | 176 | } |
188 | 177 | ||
189 | int radeon_bo_create(struct radeon_device *rdev, | 178 | int radeon_bo_create(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index e088e5558da0..a7fb2735d4a9 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -7130,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | |||
7130 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); | 7130 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); |
7131 | 7131 | ||
7132 | if (!vclk || !dclk) { | 7132 | if (!vclk || !dclk) { |
7133 | /* keep the Bypass mode, put PLL to sleep */ | 7133 | /* keep the Bypass mode */ |
7134 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); | ||
7135 | return 0; | 7134 | return 0; |
7136 | } | 7135 | } |
7137 | 7136 | ||
@@ -7147,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | |||
7147 | /* set VCO_MODE to 1 */ | 7146 | /* set VCO_MODE to 1 */ |
7148 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); | 7147 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); |
7149 | 7148 | ||
7150 | /* toggle UPLL_SLEEP to 1 then back to 0 */ | 7149 | /* disable sleep mode */ |
7151 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); | ||
7152 | WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); | 7150 | WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); |
7153 | 7151 | ||
7154 | /* deassert UPLL_RESET */ | 7152 | /* deassert UPLL_RESET */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6c6b655defcf..e13b9cbc304e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
725 | goto out_err1; | 725 | goto out_err1; |
726 | } | 726 | } |
727 | 727 | ||
728 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, | ||
729 | (dev_priv->vram_size >> PAGE_SHIFT)); | ||
730 | if (unlikely(ret != 0)) { | ||
731 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); | ||
732 | goto out_err2; | ||
733 | } | ||
734 | |||
735 | dev_priv->has_gmr = true; | ||
736 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | ||
737 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | ||
738 | VMW_PL_GMR) != 0) { | ||
739 | DRM_INFO("No GMR memory available. " | ||
740 | "Graphics memory resources are very limited.\n"); | ||
741 | dev_priv->has_gmr = false; | ||
742 | } | ||
743 | |||
744 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
745 | dev_priv->has_mob = true; | ||
746 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
747 | VMW_PL_MOB) != 0) { | ||
748 | DRM_INFO("No MOB memory available. " | ||
749 | "3D will be disabled.\n"); | ||
750 | dev_priv->has_mob = false; | ||
751 | } | ||
752 | } | ||
753 | |||
754 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, | 728 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, |
755 | dev_priv->mmio_size); | 729 | dev_priv->mmio_size); |
756 | 730 | ||
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
813 | goto out_no_fman; | 787 | goto out_no_fman; |
814 | } | 788 | } |
815 | 789 | ||
790 | |||
791 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, | ||
792 | (dev_priv->vram_size >> PAGE_SHIFT)); | ||
793 | if (unlikely(ret != 0)) { | ||
794 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); | ||
795 | goto out_no_vram; | ||
796 | } | ||
797 | |||
798 | dev_priv->has_gmr = true; | ||
799 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | ||
800 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | ||
801 | VMW_PL_GMR) != 0) { | ||
802 | DRM_INFO("No GMR memory available. " | ||
803 | "Graphics memory resources are very limited.\n"); | ||
804 | dev_priv->has_gmr = false; | ||
805 | } | ||
806 | |||
807 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
808 | dev_priv->has_mob = true; | ||
809 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
810 | VMW_PL_MOB) != 0) { | ||
811 | DRM_INFO("No MOB memory available. " | ||
812 | "3D will be disabled.\n"); | ||
813 | dev_priv->has_mob = false; | ||
814 | } | ||
815 | } | ||
816 | |||
816 | vmw_kms_save_vga(dev_priv); | 817 | vmw_kms_save_vga(dev_priv); |
817 | 818 | ||
818 | /* Start kms and overlay systems, needs fifo. */ | 819 | /* Start kms and overlay systems, needs fifo. */ |
@@ -838,6 +839,12 @@ out_no_fifo: | |||
838 | vmw_kms_close(dev_priv); | 839 | vmw_kms_close(dev_priv); |
839 | out_no_kms: | 840 | out_no_kms: |
840 | vmw_kms_restore_vga(dev_priv); | 841 | vmw_kms_restore_vga(dev_priv); |
842 | if (dev_priv->has_mob) | ||
843 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
844 | if (dev_priv->has_gmr) | ||
845 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
846 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
847 | out_no_vram: | ||
841 | vmw_fence_manager_takedown(dev_priv->fman); | 848 | vmw_fence_manager_takedown(dev_priv->fman); |
842 | out_no_fman: | 849 | out_no_fman: |
843 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 850 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
@@ -853,12 +860,6 @@ out_err4: | |||
853 | iounmap(dev_priv->mmio_virt); | 860 | iounmap(dev_priv->mmio_virt); |
854 | out_err3: | 861 | out_err3: |
855 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 862 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
856 | if (dev_priv->has_mob) | ||
857 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
858 | if (dev_priv->has_gmr) | ||
859 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
860 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
861 | out_err2: | ||
862 | (void)ttm_bo_device_release(&dev_priv->bdev); | 863 | (void)ttm_bo_device_release(&dev_priv->bdev); |
863 | out_err1: | 864 | out_err1: |
864 | vmw_ttm_global_release(dev_priv); | 865 | vmw_ttm_global_release(dev_priv); |
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
887 | } | 888 | } |
888 | vmw_kms_close(dev_priv); | 889 | vmw_kms_close(dev_priv); |
889 | vmw_overlay_close(dev_priv); | 890 | vmw_overlay_close(dev_priv); |
891 | |||
892 | if (dev_priv->has_mob) | ||
893 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
894 | if (dev_priv->has_gmr) | ||
895 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
896 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
897 | |||
890 | vmw_fence_manager_takedown(dev_priv->fman); | 898 | vmw_fence_manager_takedown(dev_priv->fman); |
891 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 899 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
892 | drm_irq_uninstall(dev_priv->dev); | 900 | drm_irq_uninstall(dev_priv->dev); |
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
898 | ttm_object_device_release(&dev_priv->tdev); | 906 | ttm_object_device_release(&dev_priv->tdev); |
899 | iounmap(dev_priv->mmio_virt); | 907 | iounmap(dev_priv->mmio_virt); |
900 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 908 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
901 | if (dev_priv->has_mob) | ||
902 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
903 | if (dev_priv->has_gmr) | ||
904 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
905 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
906 | (void)ttm_bo_device_release(&dev_priv->bdev); | 909 | (void)ttm_bo_device_release(&dev_priv->bdev); |
907 | vmw_ttm_global_release(dev_priv); | 910 | vmw_ttm_global_release(dev_priv); |
908 | 911 | ||
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev) | |||
1235 | { | 1238 | { |
1236 | struct drm_device *dev = pci_get_drvdata(pdev); | 1239 | struct drm_device *dev = pci_get_drvdata(pdev); |
1237 | 1240 | ||
1241 | pci_disable_device(pdev); | ||
1238 | drm_put_dev(dev); | 1242 | drm_put_dev(dev); |
1239 | } | 1243 | } |
1240 | 1244 | ||
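The vmwgfx_drv.c reordering above works because the driver unwinds with the usual goto-chain idiom: cleanup labels release resources in exactly the reverse order of acquisition, so moving the VRAM/GMR/MOB setup after the fence manager also means moving its teardown out of the late out_err labels and mirroring it in vmw_driver_unload(). A minimal illustration of the idiom, with placeholder names:

    static int demo_load(struct demo_dev *dev)
    {
        int ret;

        ret = init_fence_manager(dev);
        if (ret)
            goto out_no_fman;

        ret = init_memory_managers(dev);      /* now initialised later ... */
        if (ret)
            goto out_no_vram;

        ret = init_kms(dev);
        if (ret)
            goto out_no_kms;

        return 0;

    out_no_kms:
        release_memory_managers(dev);         /* ... so released earlier */
    out_no_vram:
        release_fence_manager(dev);
    out_no_fman:
        return ret;
    }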
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 33176d05db35..654c8daeb5ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
890 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 890 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
891 | if (unlikely(ret != 0)) { | 891 | if (unlikely(ret != 0)) { |
892 | DRM_ERROR("Could not find or use MOB buffer.\n"); | 892 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
893 | return -EINVAL; | 893 | ret = -EINVAL; |
894 | goto out_no_reloc; | ||
894 | } | 895 | } |
895 | bo = &vmw_bo->base; | 896 | bo = &vmw_bo->base; |
896 | 897 | ||
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
914 | 915 | ||
915 | out_no_reloc: | 916 | out_no_reloc: |
916 | vmw_dmabuf_unreference(&vmw_bo); | 917 | vmw_dmabuf_unreference(&vmw_bo); |
917 | vmw_bo_p = NULL; | 918 | *vmw_bo_p = NULL; |
918 | return ret; | 919 | return ret; |
919 | } | 920 | } |
920 | 921 | ||
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
951 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 952 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
952 | if (unlikely(ret != 0)) { | 953 | if (unlikely(ret != 0)) { |
953 | DRM_ERROR("Could not find or use GMR region.\n"); | 954 | DRM_ERROR("Could not find or use GMR region.\n"); |
954 | return -EINVAL; | 955 | ret = -EINVAL; |
956 | goto out_no_reloc; | ||
955 | } | 957 | } |
956 | bo = &vmw_bo->base; | 958 | bo = &vmw_bo->base; |
957 | 959 | ||
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
974 | 976 | ||
975 | out_no_reloc: | 977 | out_no_reloc: |
976 | vmw_dmabuf_unreference(&vmw_bo); | 978 | vmw_dmabuf_unreference(&vmw_bo); |
977 | vmw_bo_p = NULL; | 979 | *vmw_bo_p = NULL; |
978 | return ret; | 980 | return ret; |
979 | } | 981 | } |
980 | 982 | ||
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
2780 | NULL, arg->command_size, arg->throttle_us, | 2782 | NULL, arg->command_size, arg->throttle_us, |
2781 | (void __user *)(unsigned long)arg->fence_rep, | 2783 | (void __user *)(unsigned long)arg->fence_rep, |
2782 | NULL); | 2784 | NULL); |
2783 | 2785 | ttm_read_unlock(&dev_priv->reservation_sem); | |
2784 | if (unlikely(ret != 0)) | 2786 | if (unlikely(ret != 0)) |
2785 | goto out_unlock; | 2787 | return ret; |
2786 | 2788 | ||
2787 | vmw_kms_cursor_post_execbuf(dev_priv); | 2789 | vmw_kms_cursor_post_execbuf(dev_priv); |
2788 | 2790 | ||
2789 | out_unlock: | 2791 | return 0; |
2790 | ttm_read_unlock(&dev_priv->reservation_sem); | ||
2791 | return ret; | ||
2792 | } | 2792 | } |
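The *vmw_bo_p = NULL change in vmwgfx_execbuf.c above fixes a classic output-parameter slip: assigning NULL to the parameter itself only changes the callee's local copy, while the caller's pointer keeps its stale value. A standalone illustration of the bug class (all names invented):

    #include <stddef.h>

    struct buf;
    struct buf *find_buf(int handle);     /* assumed lookup helper */

    static int lookup_buf(int handle, struct buf **out)
    {
        struct buf *b = find_buf(handle);

        if (!b) {
            *out = NULL;   /* correct: clears what the caller sees; plain
                            * "out = NULL;" would only clear the local copy */
            return -1;
        }
        *out = b;
        return 0;
    }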
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8725b79e7847..07cda8cbbddb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2033 | int i; | 2033 | int i; |
2034 | struct drm_mode_config *mode_config = &dev->mode_config; | 2034 | struct drm_mode_config *mode_config = &dev->mode_config; |
2035 | 2035 | ||
2036 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); | ||
2037 | if (unlikely(ret != 0)) | ||
2038 | return ret; | ||
2039 | |||
2040 | if (!arg->num_outputs) { | 2036 | if (!arg->num_outputs) { |
2041 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; | 2037 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; |
2042 | vmw_du_update_layout(dev_priv, 1, &def_rect); | 2038 | vmw_du_update_layout(dev_priv, 1, &def_rect); |
2043 | goto out_unlock; | 2039 | return 0; |
2044 | } | 2040 | } |
2045 | 2041 | ||
2046 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); | 2042 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); |
2047 | rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), | 2043 | rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), |
2048 | GFP_KERNEL); | 2044 | GFP_KERNEL); |
2049 | if (unlikely(!rects)) { | 2045 | if (unlikely(!rects)) |
2050 | ret = -ENOMEM; | 2046 | return -ENOMEM; |
2051 | goto out_unlock; | ||
2052 | } | ||
2053 | 2047 | ||
2054 | user_rects = (void __user *)(unsigned long)arg->rects; | 2048 | user_rects = (void __user *)(unsigned long)arg->rects; |
2055 | ret = copy_from_user(rects, user_rects, rects_size); | 2049 | ret = copy_from_user(rects, user_rects, rects_size); |
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2074 | 2068 | ||
2075 | out_free: | 2069 | out_free: |
2076 | kfree(rects); | 2070 | kfree(rects); |
2077 | out_unlock: | ||
2078 | ttm_read_unlock(&dev_priv->reservation_sem); | ||
2079 | return ret; | 2071 | return ret; |
2080 | } | 2072 | } |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 7c669c328c4c..56ce8c2b5530 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1959,6 +1959,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1959 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, | 1959 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, |
1960 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, | 1960 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, |
1961 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, | 1961 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, |
1962 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, | ||
1962 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, | 1963 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, |
1963 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, | 1964 | { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, |
1964 | { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, | 1965 | { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 204312bfab2c..9c4786759f16 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -586,6 +586,7 @@ | |||
586 | #define USB_VENDOR_ID_LOGITECH 0x046d | 586 | #define USB_VENDOR_ID_LOGITECH 0x046d |
587 | #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e | 587 | #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e |
588 | #define USB_DEVICE_ID_LOGITECH_T651 0xb00c | 588 | #define USB_DEVICE_ID_LOGITECH_T651 0xb00c |
589 | #define USB_DEVICE_ID_LOGITECH_C077 0xc007 | ||
589 | #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 | 590 | #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 |
590 | #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 | 591 | #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 |
591 | #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f | 592 | #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f |
@@ -898,6 +899,7 @@ | |||
898 | #define USB_VENDOR_ID_TIVO 0x150a | 899 | #define USB_VENDOR_ID_TIVO 0x150a |
899 | #define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 | 900 | #define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 |
900 | #define USB_DEVICE_ID_TIVO_SLIDE 0x1201 | 901 | #define USB_DEVICE_ID_TIVO_SLIDE 0x1201 |
902 | #define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203 | ||
901 | 903 | ||
902 | #define USB_VENDOR_ID_TOPSEED 0x0766 | 904 | #define USB_VENDOR_ID_TOPSEED 0x0766 |
903 | #define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 | 905 | #define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 |
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c index d790d8d71f7f..d98696927453 100644 --- a/drivers/hid/hid-tivo.c +++ b/drivers/hid/hid-tivo.c | |||
@@ -64,6 +64,7 @@ static const struct hid_device_id tivo_devices[] = { | |||
64 | /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ | 64 | /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ |
65 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, | 65 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, |
66 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, | 66 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, |
67 | { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, | ||
67 | { } | 68 | { } |
68 | }; | 69 | }; |
69 | MODULE_DEVICE_TABLE(hid, tivo_devices); | 70 | MODULE_DEVICE_TABLE(hid, tivo_devices); |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 9be99a67bfe2..a82127753461 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -78,6 +78,7 @@ static const struct hid_blacklist { | |||
78 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, | 78 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, |
79 | { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, | 79 | { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, |
80 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, | 80 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, |
81 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, | ||
81 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, | 82 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, |
82 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, | 83 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, |
83 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, | 84 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 046351cf17f3..bbe32d66e500 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
@@ -551,9 +551,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
551 | (features->type == CINTIQ && !(data[1] & 0x40))) | 551 | (features->type == CINTIQ && !(data[1] & 0x40))) |
552 | return 1; | 552 | return 1; |
553 | 553 | ||
554 | if (features->quirks & WACOM_QUIRK_MULTI_INPUT) | 554 | if (wacom->shared) { |
555 | wacom->shared->stylus_in_proximity = true; | 555 | wacom->shared->stylus_in_proximity = true; |
556 | 556 | ||
557 | if (wacom->shared->touch_down) | ||
558 | return 1; | ||
559 | } | ||
560 | |||
557 | /* in Range while exiting */ | 561 | /* in Range while exiting */ |
558 | if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) { | 562 | if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) { |
559 | input_report_key(input, BTN_TOUCH, 0); | 563 | input_report_key(input, BTN_TOUCH, 0); |
@@ -1043,27 +1047,28 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom) | |||
1043 | struct input_dev *input = wacom->input; | 1047 | struct input_dev *input = wacom->input; |
1044 | unsigned char *data = wacom->data; | 1048 | unsigned char *data = wacom->data; |
1045 | int i; | 1049 | int i; |
1046 | int current_num_contacts = 0; | 1050 | int current_num_contacts = data[61]; |
1047 | int contacts_to_send = 0; | 1051 | int contacts_to_send = 0; |
1048 | int num_contacts_left = 4; /* maximum contacts per packet */ | 1052 | int num_contacts_left = 4; /* maximum contacts per packet */ |
1049 | int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; | 1053 | int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; |
1050 | int y_offset = 2; | 1054 | int y_offset = 2; |
1055 | static int contact_with_no_pen_down_count = 0; | ||
1051 | 1056 | ||
1052 | if (wacom->features.type == WACOM_27QHDT) { | 1057 | if (wacom->features.type == WACOM_27QHDT) { |
1053 | current_num_contacts = data[63]; | 1058 | current_num_contacts = data[63]; |
1054 | num_contacts_left = 10; | 1059 | num_contacts_left = 10; |
1055 | byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; | 1060 | byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; |
1056 | y_offset = 0; | 1061 | y_offset = 0; |
1057 | } else { | ||
1058 | current_num_contacts = data[61]; | ||
1059 | } | 1062 | } |
1060 | 1063 | ||
1061 | /* | 1064 | /* |
1062 | * First packet resets the counter since only the first | 1065 | * First packet resets the counter since only the first |
1063 | * packet in series will have non-zero current_num_contacts. | 1066 | * packet in series will have non-zero current_num_contacts. |
1064 | */ | 1067 | */ |
1065 | if (current_num_contacts) | 1068 | if (current_num_contacts) { |
1066 | wacom->num_contacts_left = current_num_contacts; | 1069 | wacom->num_contacts_left = current_num_contacts; |
1070 | contact_with_no_pen_down_count = 0; | ||
1071 | } | ||
1067 | 1072 | ||
1068 | contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); | 1073 | contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); |
1069 | 1074 | ||
@@ -1096,15 +1101,16 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom) | |||
1096 | input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); | 1101 | input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); |
1097 | input_report_abs(input, ABS_MT_ORIENTATION, w > h); | 1102 | input_report_abs(input, ABS_MT_ORIENTATION, w > h); |
1098 | } | 1103 | } |
1104 | contact_with_no_pen_down_count++; | ||
1099 | } | 1105 | } |
1100 | } | 1106 | } |
1101 | input_mt_report_pointer_emulation(input, true); | 1107 | input_mt_report_pointer_emulation(input, true); |
1102 | 1108 | ||
1103 | wacom->num_contacts_left -= contacts_to_send; | 1109 | wacom->num_contacts_left -= contacts_to_send; |
1104 | if (wacom->num_contacts_left <= 0) | 1110 | if (wacom->num_contacts_left <= 0) { |
1105 | wacom->num_contacts_left = 0; | 1111 | wacom->num_contacts_left = 0; |
1106 | 1112 | wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); | |
1107 | wacom->shared->touch_down = (wacom->num_contacts_left > 0); | 1113 | } |
1108 | return 1; | 1114 | return 1; |
1109 | } | 1115 | } |
1110 | 1116 | ||
@@ -1116,6 +1122,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom) | |||
1116 | int current_num_contacts = data[2]; | 1122 | int current_num_contacts = data[2]; |
1117 | int contacts_to_send = 0; | 1123 | int contacts_to_send = 0; |
1118 | int x_offset = 0; | 1124 | int x_offset = 0; |
1125 | static int contact_with_no_pen_down_count = 0; | ||
1119 | 1126 | ||
1120 | /* MTTPC does not support Height and Width */ | 1127 | /* MTTPC does not support Height and Width */ |
1121 | if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) | 1128 | if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) |
@@ -1125,8 +1132,10 @@ static int wacom_mt_touch(struct wacom_wac *wacom) | |||
1125 | * First packet resets the counter since only the first | 1132 | * First packet resets the counter since only the first |
1126 | * packet in series will have non-zero current_num_contacts. | 1133 | * packet in series will have non-zero current_num_contacts. |
1127 | */ | 1134 | */ |
1128 | if (current_num_contacts) | 1135 | if (current_num_contacts) { |
1129 | wacom->num_contacts_left = current_num_contacts; | 1136 | wacom->num_contacts_left = current_num_contacts; |
1137 | contact_with_no_pen_down_count = 0; | ||
1138 | } | ||
1130 | 1139 | ||
1131 | /* There are at most 5 contacts per packet */ | 1140 | /* There are at most 5 contacts per packet */ |
1132 | contacts_to_send = min(5, wacom->num_contacts_left); | 1141 | contacts_to_send = min(5, wacom->num_contacts_left); |
@@ -1147,15 +1156,16 @@ static int wacom_mt_touch(struct wacom_wac *wacom) | |||
1147 | int y = get_unaligned_le16(&data[offset + x_offset + 9]); | 1156 | int y = get_unaligned_le16(&data[offset + x_offset + 9]); |
1148 | input_report_abs(input, ABS_MT_POSITION_X, x); | 1157 | input_report_abs(input, ABS_MT_POSITION_X, x); |
1149 | input_report_abs(input, ABS_MT_POSITION_Y, y); | 1158 | input_report_abs(input, ABS_MT_POSITION_Y, y); |
1159 | contact_with_no_pen_down_count++; | ||
1150 | } | 1160 | } |
1151 | } | 1161 | } |
1152 | input_mt_report_pointer_emulation(input, true); | 1162 | input_mt_report_pointer_emulation(input, true); |
1153 | 1163 | ||
1154 | wacom->num_contacts_left -= contacts_to_send; | 1164 | wacom->num_contacts_left -= contacts_to_send; |
1155 | if (wacom->num_contacts_left < 0) | 1165 | if (wacom->num_contacts_left <= 0) { |
1156 | wacom->num_contacts_left = 0; | 1166 | wacom->num_contacts_left = 0; |
1157 | 1167 | wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); | |
1158 | wacom->shared->touch_down = (wacom->num_contacts_left > 0); | 1168 | } |
1159 | return 1; | 1169 | return 1; |
1160 | } | 1170 | } |
1161 | 1171 | ||
@@ -1193,29 +1203,25 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len) | |||
1193 | { | 1203 | { |
1194 | unsigned char *data = wacom->data; | 1204 | unsigned char *data = wacom->data; |
1195 | struct input_dev *input = wacom->input; | 1205 | struct input_dev *input = wacom->input; |
1196 | bool prox; | 1206 | bool prox = !wacom->shared->stylus_in_proximity; |
1197 | int x = 0, y = 0; | 1207 | int x = 0, y = 0; |
1198 | 1208 | ||
1199 | if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) | 1209 | if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) |
1200 | return 0; | 1210 | return 0; |
1201 | 1211 | ||
1202 | if (!wacom->shared->stylus_in_proximity) { | 1212 | if (len == WACOM_PKGLEN_TPC1FG) { |
1203 | if (len == WACOM_PKGLEN_TPC1FG) { | 1213 | prox = prox && (data[0] & 0x01); |
1204 | prox = data[0] & 0x01; | 1214 | x = get_unaligned_le16(&data[1]); |
1205 | x = get_unaligned_le16(&data[1]); | 1215 | y = get_unaligned_le16(&data[3]); |
1206 | y = get_unaligned_le16(&data[3]); | 1216 | } else if (len == WACOM_PKGLEN_TPC1FG_B) { |
1207 | } else if (len == WACOM_PKGLEN_TPC1FG_B) { | 1217 | prox = prox && (data[2] & 0x01); |
1208 | prox = data[2] & 0x01; | 1218 | x = get_unaligned_le16(&data[3]); |
1209 | x = get_unaligned_le16(&data[3]); | 1219 | y = get_unaligned_le16(&data[5]); |
1210 | y = get_unaligned_le16(&data[5]); | 1220 | } else { |
1211 | } else { | 1221 | prox = prox && (data[1] & 0x01); |
1212 | prox = data[1] & 0x01; | 1222 | x = le16_to_cpup((__le16 *)&data[2]); |
1213 | x = le16_to_cpup((__le16 *)&data[2]); | 1223 | y = le16_to_cpup((__le16 *)&data[4]); |
1214 | y = le16_to_cpup((__le16 *)&data[4]); | 1224 | } |
1215 | } | ||
1216 | } else | ||
1217 | /* force touch out when pen is in prox */ | ||
1218 | prox = 0; | ||
1219 | 1225 | ||
1220 | if (prox) { | 1226 | if (prox) { |
1221 | input_report_abs(input, ABS_X, x); | 1227 | input_report_abs(input, ABS_X, x); |
@@ -1613,6 +1619,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) | |||
1613 | struct input_dev *pad_input = wacom->pad_input; | 1619 | struct input_dev *pad_input = wacom->pad_input; |
1614 | unsigned char *data = wacom->data; | 1620 | unsigned char *data = wacom->data; |
1615 | int i; | 1621 | int i; |
1622 | int contact_with_no_pen_down_count = 0; | ||
1616 | 1623 | ||
1617 | if (data[0] != 0x02) | 1624 | if (data[0] != 0x02) |
1618 | return 0; | 1625 | return 0; |
@@ -1640,6 +1647,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) | |||
1640 | } | 1647 | } |
1641 | input_report_abs(input, ABS_MT_POSITION_X, x); | 1648 | input_report_abs(input, ABS_MT_POSITION_X, x); |
1642 | input_report_abs(input, ABS_MT_POSITION_Y, y); | 1649 | input_report_abs(input, ABS_MT_POSITION_Y, y); |
1650 | contact_with_no_pen_down_count++; | ||
1643 | } | 1651 | } |
1644 | } | 1652 | } |
1645 | 1653 | ||
@@ -1649,11 +1657,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) | |||
1649 | input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); | 1657 | input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); |
1650 | input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); | 1658 | input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); |
1651 | input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); | 1659 | input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); |
1660 | wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); | ||
1652 | 1661 | ||
1653 | return 1; | 1662 | return 1; |
1654 | } | 1663 | } |
1655 | 1664 | ||
1656 | static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) | 1665 | static int wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data, int last_touch_count) |
1657 | { | 1666 | { |
1658 | struct wacom_features *features = &wacom->features; | 1667 | struct wacom_features *features = &wacom->features; |
1659 | struct input_dev *input = wacom->input; | 1668 | struct input_dev *input = wacom->input; |
@@ -1661,7 +1670,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) | |||
1661 | int slot = input_mt_get_slot_by_key(input, data[0]); | 1670 | int slot = input_mt_get_slot_by_key(input, data[0]); |
1662 | 1671 | ||
1663 | if (slot < 0) | 1672 | if (slot < 0) |
1664 | return; | 1673 | return 0; |
1665 | 1674 | ||
1666 | touch = touch && !wacom->shared->stylus_in_proximity; | 1675 | touch = touch && !wacom->shared->stylus_in_proximity; |
1667 | 1676 | ||
@@ -1693,7 +1702,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) | |||
1693 | input_report_abs(input, ABS_MT_POSITION_Y, y); | 1702 | input_report_abs(input, ABS_MT_POSITION_Y, y); |
1694 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); | 1703 | input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); |
1695 | input_report_abs(input, ABS_MT_TOUCH_MINOR, height); | 1704 | input_report_abs(input, ABS_MT_TOUCH_MINOR, height); |
1705 | last_touch_count++; | ||
1696 | } | 1706 | } |
1707 | return last_touch_count; | ||
1697 | } | 1708 | } |
1698 | 1709 | ||
1699 | static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) | 1710 | static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) |
@@ -1718,6 +1729,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) | |||
1718 | unsigned char *data = wacom->data; | 1729 | unsigned char *data = wacom->data; |
1719 | int count = data[1] & 0x07; | 1730 | int count = data[1] & 0x07; |
1720 | int i; | 1731 | int i; |
1732 | int contact_with_no_pen_down_count = 0; | ||
1721 | 1733 | ||
1722 | if (data[0] != 0x02) | 1734 | if (data[0] != 0x02) |
1723 | return 0; | 1735 | return 0; |
@@ -1728,12 +1740,15 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) | |||
1728 | int msg_id = data[offset]; | 1740 | int msg_id = data[offset]; |
1729 | 1741 | ||
1730 | if (msg_id >= 2 && msg_id <= 17) | 1742 | if (msg_id >= 2 && msg_id <= 17) |
1731 | wacom_bpt3_touch_msg(wacom, data + offset); | 1743 | contact_with_no_pen_down_count = |
1744 | wacom_bpt3_touch_msg(wacom, data + offset, | ||
1745 | contact_with_no_pen_down_count); | ||
1732 | else if (msg_id == 128) | 1746 | else if (msg_id == 128) |
1733 | wacom_bpt3_button_msg(wacom, data + offset); | 1747 | wacom_bpt3_button_msg(wacom, data + offset); |
1734 | 1748 | ||
1735 | } | 1749 | } |
1736 | input_mt_report_pointer_emulation(input, true); | 1750 | input_mt_report_pointer_emulation(input, true); |
1751 | wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); | ||
1737 | 1752 | ||
1738 | return 1; | 1753 | return 1; |
1739 | } | 1754 | } |
@@ -1759,6 +1774,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom) | |||
1759 | return 0; | 1774 | return 0; |
1760 | } | 1775 | } |
1761 | 1776 | ||
1777 | if (wacom->shared->touch_down) | ||
1778 | return 0; | ||
1779 | |||
1762 | prox = (data[1] & 0x20) == 0x20; | 1780 | prox = (data[1] & 0x20) == 0x20; |
1763 | 1781 | ||
1764 | /* | 1782 | /* |
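The wacom_wac.c hunks above tighten pen/touch arbitration through the shared state: touch handlers count only contacts seen while no pen is in proximity and record the result in shared->touch_down, while pen paths back off when a finger is already down. A simplified sketch of that arbitration, with placeholder helpers rather than the driver's full packet parsing:

    static int demo_pen_event(struct demo_wac *w)
    {
        w->shared->stylus_in_proximity = pen_in_prox(w);
        if (w->shared->touch_down)
            return 0;                    /* a finger owns the surface */
        report_pen(w);
        return 1;
    }

    static int demo_touch_event(struct demo_wac *w)
    {
        int contacts = count_contacts(w);

        if (w->shared->stylus_in_proximity)
            contacts = 0;                /* palm rejection: the pen wins */
        w->shared->touch_down = contacts > 0;
        if (contacts)
            report_touches(w);
        return 1;
    }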
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 210cf4874cb7..edf274cabe81 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev) | |||
679 | status = driver->remove(client); | 679 | status = driver->remove(client); |
680 | } | 680 | } |
681 | 681 | ||
682 | if (dev->of_node) | ||
683 | irq_dispose_mapping(client->irq); | ||
684 | |||
685 | dev_pm_domain_detach(&client->dev, true); | 682 | dev_pm_domain_detach(&client->dev, true); |
686 | return status; | 683 | return status; |
687 | } | 684 | } |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 1793aea4a7d2..6eb738ca6d2f 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -1793,11 +1793,11 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) | |||
1793 | tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN, | 1793 | tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN, |
1794 | IDETAPE_DSC_RW_MAX); | 1794 | IDETAPE_DSC_RW_MAX); |
1795 | printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " | 1795 | printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " |
1796 | "%lums tDSC%s\n", | 1796 | "%ums tDSC%s\n", |
1797 | drive->name, tape->name, *(u16 *)&tape->caps[14], | 1797 | drive->name, tape->name, *(u16 *)&tape->caps[14], |
1798 | (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, | 1798 | (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, |
1799 | tape->buffer_size / 1024, | 1799 | tape->buffer_size / 1024, |
1800 | tape->best_dsc_rw_freq * 1000 / HZ, | 1800 | jiffies_to_msecs(tape->best_dsc_rw_freq), |
1801 | (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : ""); | 1801 | (drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : ""); |
1802 | 1802 | ||
1803 | ide_proc_register_driver(drive, tape->driver); | 1803 | ide_proc_register_driver(drive, tape->driver); |
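The ide-tape hunk fixes two things at once: jiffies_to_msecs() replaces the open-coded `* 1000 / HZ`, and since it returns an unsigned int the format specifier changes from %lu to %u. A short sketch of the corrected form, assuming a kernel context (report_freq() is only an illustrative wrapper):

#include <linux/jiffies.h>
#include <linux/printk.h>

static void report_freq(unsigned long freq_jiffies)
{
    /* jiffies_to_msecs() returns unsigned int, hence %u */
    pr_info("best DSC r/w frequency: %ums\n",
            jiffies_to_msecs(freq_jiffies));
}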
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index c7619716c31d..59040265e361 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -64,6 +64,14 @@ enum { | |||
64 | #define GUID_TBL_BLK_NUM_ENTRIES 8 | 64 | #define GUID_TBL_BLK_NUM_ENTRIES 8 |
65 | #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) | 65 | #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) |
66 | 66 | ||
67 | /* Counters should saturate once they reach their maximum value */ | ||
68 | #define ASSIGN_32BIT_COUNTER(counter, value) do {\ | ||
69 | if ((value) > U32_MAX) \ | ||
70 | counter = cpu_to_be32(U32_MAX); \ | ||
71 | else \ | ||
72 | counter = cpu_to_be32(value); \ | ||
73 | } while (0) | ||
74 | |||
67 | struct mlx4_mad_rcv_buf { | 75 | struct mlx4_mad_rcv_buf { |
68 | struct ib_grh grh; | 76 | struct ib_grh grh; |
69 | u8 payload[256]; | 77 | u8 payload[256]; |
@@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | |||
806 | static void edit_counter(struct mlx4_counter *cnt, | 814 | static void edit_counter(struct mlx4_counter *cnt, |
807 | struct ib_pma_portcounters *pma_cnt) | 815 | struct ib_pma_portcounters *pma_cnt) |
808 | { | 816 | { |
809 | pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); | 817 | ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, |
810 | pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); | 818 | (be64_to_cpu(cnt->tx_bytes) >> 2)); |
811 | pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); | 819 | ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, |
812 | pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); | 820 | (be64_to_cpu(cnt->rx_bytes) >> 2)); |
821 | ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, | ||
822 | be64_to_cpu(cnt->tx_frames)); | ||
823 | ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, | ||
824 | be64_to_cpu(cnt->rx_frames)); | ||
813 | } | 825 | } |
814 | 826 | ||
815 | static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | 827 | static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, |
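ASSIGN_32BIT_COUNTER() clamps a 64-bit hardware counter into a 32-bit PMA field instead of letting it wrap. A standalone sketch of the same idea with userspace types and no byte-swapping (helper name is illustrative):

#include <stdint.h>

static inline uint32_t saturate_u32(uint64_t value)
{
    /* clamp to the 32-bit maximum instead of silently wrapping */
    return (value > UINT32_MAX) ? UINT32_MAX : (uint32_t)value;
}

/* usage sketch: pma->port_xmit_data = htonl(saturate_u32(tx_bytes >> 2)); */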
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ac6e2b710ea6..b972c0b41799 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -2697,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work) | |||
2697 | spin_lock_bh(&ibdev->iboe.lock); | 2697 | spin_lock_bh(&ibdev->iboe.lock); |
2698 | for (i = 0; i < MLX4_MAX_PORTS; ++i) { | 2698 | for (i = 0; i < MLX4_MAX_PORTS; ++i) { |
2699 | struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; | 2699 | struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; |
2700 | enum ib_port_state curr_port_state; | ||
2700 | 2701 | ||
2701 | enum ib_port_state curr_port_state = | 2702 | if (!curr_netdev) |
2703 | continue; | ||
2704 | |||
2705 | curr_port_state = | ||
2702 | (netif_running(curr_netdev) && | 2706 | (netif_running(curr_netdev) && |
2703 | netif_carrier_ok(curr_netdev)) ? | 2707 | netif_carrier_ok(curr_netdev)) ? |
2704 | IB_PORT_ACTIVE : IB_PORT_DOWN; | 2708 | IB_PORT_ACTIVE : IB_PORT_DOWN; |
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c index 8ff612d160b0..563932500ff1 100644 --- a/drivers/input/keyboard/tc3589x-keypad.c +++ b/drivers/input/keyboard/tc3589x-keypad.c | |||
@@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev) | |||
411 | 411 | ||
412 | input_set_drvdata(input, keypad); | 412 | input_set_drvdata(input, keypad); |
413 | 413 | ||
414 | error = request_threaded_irq(irq, NULL, | 414 | error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq, |
415 | tc3589x_keypad_irq, plat->irqtype, | 415 | plat->irqtype | IRQF_ONESHOT, |
416 | "tc3589x-keypad", keypad); | 416 | "tc3589x-keypad", keypad); |
417 | if (error < 0) { | 417 | if (error < 0) { |
418 | dev_err(&pdev->dev, | 418 | dev_err(&pdev->dev, |
419 | "Could not allocate irq %d,error %d\n", | 419 | "Could not allocate irq %d,error %d\n", |
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 59d4dcddf6de..98228773a111 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c | |||
@@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c, | |||
187 | idev->private = m; | 187 | idev->private = m; |
188 | idev->input->name = MMA8450_DRV_NAME; | 188 | idev->input->name = MMA8450_DRV_NAME; |
189 | idev->input->id.bustype = BUS_I2C; | 189 | idev->input->id.bustype = BUS_I2C; |
190 | idev->input->dev.parent = &c->dev; | ||
190 | idev->poll = mma8450_poll; | 191 | idev->poll = mma8450_poll; |
191 | idev->poll_interval = POLL_INTERVAL; | 192 | idev->poll_interval = POLL_INTERVAL; |
192 | idev->poll_interval_max = POLL_INTERVAL_MAX; | 193 | idev->poll_interval_max = POLL_INTERVAL_MAX; |
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index d28726a0ef85..1bd15ebc01f2 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
@@ -2605,8 +2605,10 @@ int alps_detect(struct psmouse *psmouse, bool set_properties) | |||
2605 | return -ENOMEM; | 2605 | return -ENOMEM; |
2606 | 2606 | ||
2607 | error = alps_identify(psmouse, priv); | 2607 | error = alps_identify(psmouse, priv); |
2608 | if (error) | 2608 | if (error) { |
2609 | kfree(priv); | ||
2609 | return error; | 2610 | return error; |
2611 | } | ||
2610 | 2612 | ||
2611 | if (set_properties) { | 2613 | if (set_properties) { |
2612 | psmouse->vendor = "ALPS"; | 2614 | psmouse->vendor = "ALPS"; |
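The alps hunk plugs a leak: priv is allocated before alps_identify() and was never freed on its error path. The generic shape of such a fix, as a self-contained plain-C sketch (identify() is a stand-in for the failing probe step):

#include <stdlib.h>

struct priv { int id; };

/* stand-in for the probe step that can fail */
static int identify(struct priv *p) { p->id = 1; return 0; }

static int detect(void)
{
    struct priv *p = calloc(1, sizeof(*p));
    int err;

    if (!p)
        return -1;

    err = identify(p);
    if (err) {
        free(p);    /* the bug was leaking p on this path */
        return err;
    }

    /* ... continue using p ... */
    free(p);
    return 0;
}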
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c index 77e9d70a986b..1e2291c378fe 100644 --- a/drivers/input/mouse/cyapa_gen3.c +++ b/drivers/input/mouse/cyapa_gen3.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/input/mt.h> | 20 | #include <linux/input/mt.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/unaligned/access_ok.h> | 23 | #include <asm/unaligned.h> |
24 | #include "cyapa.h" | 24 | #include "cyapa.h" |
25 | 25 | ||
26 | 26 | ||
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c index ddf5393a1180..5b611dd71e79 100644 --- a/drivers/input/mouse/cyapa_gen5.c +++ b/drivers/input/mouse/cyapa_gen5.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/unaligned/access_ok.h> | 20 | #include <asm/unaligned.h> |
21 | #include <linux/crc-itu-t.h> | 21 | #include <linux/crc-itu-t.h> |
22 | #include "cyapa.h" | 22 | #include "cyapa.h" |
23 | 23 | ||
@@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa, | |||
1926 | electrodes_tx = cyapa->electrodes_x; | 1926 | electrodes_tx = cyapa->electrodes_x; |
1927 | max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & | 1927 | max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & |
1928 | ~7u) * electrodes_tx; | 1928 | ~7u) * electrodes_tx; |
1929 | } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) { | 1929 | } else { |
1930 | offset = 2; | 1930 | offset = 2; |
1931 | max_element_cnt = cyapa->electrodes_x + | 1931 | max_element_cnt = cyapa->electrodes_x + |
1932 | cyapa->electrodes_y; | 1932 | cyapa->electrodes_y; |
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c index 757f78a94aec..23d259416f2f 100644 --- a/drivers/input/mouse/focaltech.c +++ b/drivers/input/mouse/focaltech.c | |||
@@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse) | |||
67 | 67 | ||
68 | #define FOC_MAX_FINGERS 5 | 68 | #define FOC_MAX_FINGERS 5 |
69 | 69 | ||
70 | #define FOC_MAX_X 2431 | ||
71 | #define FOC_MAX_Y 1663 | ||
72 | |||
73 | /* | 70 | /* |
74 | * Current state of a single finger on the touchpad. | 71 | * Current state of a single finger on the touchpad. |
75 | */ | 72 | */ |
@@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse) | |||
129 | input_mt_slot(dev, i); | 126 | input_mt_slot(dev, i); |
130 | input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); | 127 | input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); |
131 | if (active) { | 128 | if (active) { |
132 | input_report_abs(dev, ABS_MT_POSITION_X, finger->x); | 129 | unsigned int clamped_x, clamped_y; |
130 | /* | ||
131 | * The touchpad might report invalid data, so we clamp | ||
132 | * the resulting values so that we do not confuse | ||
133 | * userspace. | ||
134 | */ | ||
135 | clamped_x = clamp(finger->x, 0U, priv->x_max); | ||
136 | clamped_y = clamp(finger->y, 0U, priv->y_max); | ||
137 | input_report_abs(dev, ABS_MT_POSITION_X, clamped_x); | ||
133 | input_report_abs(dev, ABS_MT_POSITION_Y, | 138 | input_report_abs(dev, ABS_MT_POSITION_Y, |
134 | FOC_MAX_Y - finger->y); | 139 | priv->y_max - clamped_y); |
135 | } | 140 | } |
136 | } | 141 | } |
137 | input_mt_report_pointer_emulation(dev, true); | 142 | input_mt_report_pointer_emulation(dev, true); |
@@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse, | |||
180 | 185 | ||
181 | state->pressed = (packet[0] >> 4) & 1; | 186 | state->pressed = (packet[0] >> 4) & 1; |
182 | 187 | ||
183 | /* | ||
184 | * packet[5] contains some kind of tool size in the most | ||
185 | * significant nibble. 0xff is a special value (latching) that | ||
186 | * signals a large contact area. | ||
187 | */ | ||
188 | if (packet[5] == 0xff) { | ||
189 | state->fingers[finger].valid = false; | ||
190 | return; | ||
191 | } | ||
192 | |||
193 | state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; | 188 | state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; |
194 | state->fingers[finger].y = (packet[3] << 8) | packet[4]; | 189 | state->fingers[finger].y = (packet[3] << 8) | packet[4]; |
195 | state->fingers[finger].valid = true; | 190 | state->fingers[finger].valid = true; |
@@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse) | |||
381 | 376 | ||
382 | return 0; | 377 | return 0; |
383 | } | 378 | } |
379 | |||
380 | static void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution) | ||
381 | { | ||
382 | /* not supported yet */ | ||
383 | } | ||
384 | |||
385 | static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate) | ||
386 | { | ||
387 | /* not supported yet */ | ||
388 | } | ||
389 | |||
390 | static void focaltech_set_scale(struct psmouse *psmouse, | ||
391 | enum psmouse_scale scale) | ||
392 | { | ||
393 | /* not supported yet */ | ||
394 | } | ||
395 | |||
384 | int focaltech_init(struct psmouse *psmouse) | 396 | int focaltech_init(struct psmouse *psmouse) |
385 | { | 397 | { |
386 | struct focaltech_data *priv; | 398 | struct focaltech_data *priv; |
@@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse) | |||
415 | psmouse->cleanup = focaltech_reset; | 427 | psmouse->cleanup = focaltech_reset; |
416 | /* resync is not supported yet */ | 428 | /* resync is not supported yet */ |
417 | psmouse->resync_time = 0; | 429 | psmouse->resync_time = 0; |
430 | /* | ||
431 | * rate/resolution/scale changes are not supported yet, and | ||
432 | * the generic implementations of these functions seem to | ||
433 | * confuse some touchpads | ||
434 | */ | ||
435 | psmouse->set_resolution = focaltech_set_resolution; | ||
436 | psmouse->set_rate = focaltech_set_rate; | ||
437 | psmouse->set_scale = focaltech_set_scale; | ||
418 | 438 | ||
419 | return 0; | 439 | return 0; |
420 | 440 | ||
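The focaltech changes drop the hard-coded FOC_MAX_X/Y constants and instead clamp each reported coordinate to the per-device maxima before inverting Y. A plain-C sketch of that reporting step (function names are illustrative):

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static void report_contact(unsigned int x, unsigned int y,
                           unsigned int x_max, unsigned int y_max,
                           unsigned int *out_x, unsigned int *out_y)
{
    /* pin out-of-range hardware values to [0, max] before reporting */
    *out_x = clamp_u(x, 0, x_max);
    /* Y axis is inverted against the per-device maximum */
    *out_y = y_max - clamp_u(y, 0, y_max);
}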
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 4ccd01d7a48d..8bc61237bc1b 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
@@ -454,6 +454,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate) | |||
454 | } | 454 | } |
455 | 455 | ||
456 | /* | 456 | /* |
457 | * Here we set the mouse scaling. | ||
458 | */ | ||
459 | |||
460 | static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale) | ||
461 | { | ||
462 | ps2_command(&psmouse->ps2dev, NULL, | ||
463 | scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 : | ||
464 | PSMOUSE_CMD_SETSCALE11); | ||
465 | } | ||
466 | |||
467 | /* | ||
457 | * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. | 468 | * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. |
458 | */ | 469 | */ |
459 | 470 | ||
@@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse) | |||
689 | 700 | ||
690 | psmouse->set_rate = psmouse_set_rate; | 701 | psmouse->set_rate = psmouse_set_rate; |
691 | psmouse->set_resolution = psmouse_set_resolution; | 702 | psmouse->set_resolution = psmouse_set_resolution; |
703 | psmouse->set_scale = psmouse_set_scale; | ||
692 | psmouse->poll = psmouse_poll; | 704 | psmouse->poll = psmouse_poll; |
693 | psmouse->protocol_handler = psmouse_process_byte; | 705 | psmouse->protocol_handler = psmouse_process_byte; |
694 | psmouse->pktsize = 3; | 706 | psmouse->pktsize = 3; |
@@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse) | |||
1160 | if (psmouse_max_proto != PSMOUSE_PS2) { | 1172 | if (psmouse_max_proto != PSMOUSE_PS2) { |
1161 | psmouse->set_rate(psmouse, psmouse->rate); | 1173 | psmouse->set_rate(psmouse, psmouse->rate); |
1162 | psmouse->set_resolution(psmouse, psmouse->resolution); | 1174 | psmouse->set_resolution(psmouse, psmouse->resolution); |
1163 | ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); | 1175 | psmouse->set_scale(psmouse, PSMOUSE_SCALE11); |
1164 | } | 1176 | } |
1165 | } | 1177 | } |
1166 | 1178 | ||
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h index c2ff137ecbdb..d02e1bdc9ae4 100644 --- a/drivers/input/mouse/psmouse.h +++ b/drivers/input/mouse/psmouse.h | |||
@@ -36,6 +36,11 @@ typedef enum { | |||
36 | PSMOUSE_FULL_PACKET | 36 | PSMOUSE_FULL_PACKET |
37 | } psmouse_ret_t; | 37 | } psmouse_ret_t; |
38 | 38 | ||
39 | enum psmouse_scale { | ||
40 | PSMOUSE_SCALE11, | ||
41 | PSMOUSE_SCALE21 | ||
42 | }; | ||
43 | |||
39 | struct psmouse { | 44 | struct psmouse { |
40 | void *private; | 45 | void *private; |
41 | struct input_dev *dev; | 46 | struct input_dev *dev; |
@@ -67,6 +72,7 @@ struct psmouse { | |||
67 | psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); | 72 | psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); |
68 | void (*set_rate)(struct psmouse *psmouse, unsigned int rate); | 73 | void (*set_rate)(struct psmouse *psmouse, unsigned int rate); |
69 | void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); | 74 | void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); |
75 | void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale); | ||
70 | 76 | ||
71 | int (*reconnect)(struct psmouse *psmouse); | 77 | int (*reconnect)(struct psmouse *psmouse); |
72 | void (*disconnect)(struct psmouse *psmouse); | 78 | void (*disconnect)(struct psmouse *psmouse); |
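Together, the psmouse-base.c and psmouse.h hunks turn the scale command into an overridable method alongside set_rate/set_resolution, so protocol drivers such as focaltech can install a no-op. A simplified sketch of the hook shape (all names here are illustrative, not the driver's):

enum scale { SCALE11, SCALE21 };

struct mouse {
    void (*set_scale)(struct mouse *m, enum scale s);
};

static void default_set_scale(struct mouse *m, enum scale s)
{
    /* would issue the SETSCALE11/SETSCALE21 PS/2 command here */
    (void)m; (void)s;
}

static void quirky_set_scale(struct mouse *m, enum scale s)
{
    /* firmware is confused by the command: do nothing */
    (void)m; (void)s;
}

static void init_mouse(struct mouse *m, int quirky)
{
    m->set_scale = quirky ? quirky_set_scale : default_set_scale;
    m->set_scale(m, SCALE11);   /* replaces the open-coded command */
}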
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index f2cceb6493a0..dda605836546 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -67,9 +67,6 @@ | |||
67 | #define X_MAX_POSITIVE 8176 | 67 | #define X_MAX_POSITIVE 8176 |
68 | #define Y_MAX_POSITIVE 8176 | 68 | #define Y_MAX_POSITIVE 8176 |
69 | 69 | ||
70 | /* maximum ABS_MT_POSITION displacement (in mm) */ | ||
71 | #define DMAX 10 | ||
72 | |||
73 | /***************************************************************************** | 70 | /***************************************************************************** |
74 | * Stuff we need even when we do not want native Synaptics support | 71 | * Stuff we need even when we do not want native Synaptics support |
75 | ****************************************************************************/ | 72 | ****************************************************************************/ |
@@ -123,32 +120,41 @@ void synaptics_reset(struct psmouse *psmouse) | |||
123 | 120 | ||
124 | static bool cr48_profile_sensor; | 121 | static bool cr48_profile_sensor; |
125 | 122 | ||
123 | #define ANY_BOARD_ID 0 | ||
126 | struct min_max_quirk { | 124 | struct min_max_quirk { |
127 | const char * const *pnp_ids; | 125 | const char * const *pnp_ids; |
126 | struct { | ||
127 | unsigned long int min, max; | ||
128 | } board_id; | ||
128 | int x_min, x_max, y_min, y_max; | 129 | int x_min, x_max, y_min, y_max; |
129 | }; | 130 | }; |
130 | 131 | ||
131 | static const struct min_max_quirk min_max_pnpid_table[] = { | 132 | static const struct min_max_quirk min_max_pnpid_table[] = { |
132 | { | 133 | { |
133 | (const char * const []){"LEN0033", NULL}, | 134 | (const char * const []){"LEN0033", NULL}, |
135 | {ANY_BOARD_ID, ANY_BOARD_ID}, | ||
134 | 1024, 5052, 2258, 4832 | 136 | 1024, 5052, 2258, 4832 |
135 | }, | 137 | }, |
136 | { | 138 | { |
137 | (const char * const []){"LEN0035", "LEN0042", NULL}, | 139 | (const char * const []){"LEN0042", NULL}, |
140 | {ANY_BOARD_ID, ANY_BOARD_ID}, | ||
138 | 1232, 5710, 1156, 4696 | 141 | 1232, 5710, 1156, 4696 |
139 | }, | 142 | }, |
140 | { | 143 | { |
141 | (const char * const []){"LEN0034", "LEN0036", "LEN0037", | 144 | (const char * const []){"LEN0034", "LEN0036", "LEN0037", |
142 | "LEN0039", "LEN2002", "LEN2004", | 145 | "LEN0039", "LEN2002", "LEN2004", |
143 | NULL}, | 146 | NULL}, |
147 | {ANY_BOARD_ID, 2961}, | ||
144 | 1024, 5112, 2024, 4832 | 148 | 1024, 5112, 2024, 4832 |
145 | }, | 149 | }, |
146 | { | 150 | { |
147 | (const char * const []){"LEN2001", NULL}, | 151 | (const char * const []){"LEN2001", NULL}, |
152 | {ANY_BOARD_ID, ANY_BOARD_ID}, | ||
148 | 1024, 5022, 2508, 4832 | 153 | 1024, 5022, 2508, 4832 |
149 | }, | 154 | }, |
150 | { | 155 | { |
151 | (const char * const []){"LEN2006", NULL}, | 156 | (const char * const []){"LEN2006", NULL}, |
157 | {ANY_BOARD_ID, ANY_BOARD_ID}, | ||
152 | 1264, 5675, 1171, 4688 | 158 | 1264, 5675, 1171, 4688 |
153 | }, | 159 | }, |
154 | { } | 160 | { } |
@@ -175,9 +181,7 @@ static const char * const topbuttonpad_pnp_ids[] = { | |||
175 | "LEN0041", | 181 | "LEN0041", |
176 | "LEN0042", /* Yoga */ | 182 | "LEN0042", /* Yoga */ |
177 | "LEN0045", | 183 | "LEN0045", |
178 | "LEN0046", | ||
179 | "LEN0047", | 184 | "LEN0047", |
180 | "LEN0048", | ||
181 | "LEN0049", | 185 | "LEN0049", |
182 | "LEN2000", | 186 | "LEN2000", |
183 | "LEN2001", /* Edge E431 */ | 187 | "LEN2001", /* Edge E431 */ |
@@ -235,18 +239,39 @@ static int synaptics_model_id(struct psmouse *psmouse) | |||
235 | return 0; | 239 | return 0; |
236 | } | 240 | } |
237 | 241 | ||
242 | static int synaptics_more_extended_queries(struct psmouse *psmouse) | ||
243 | { | ||
244 | struct synaptics_data *priv = psmouse->private; | ||
245 | unsigned char buf[3]; | ||
246 | |||
247 | if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf)) | ||
248 | return -1; | ||
249 | |||
250 | priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2]; | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
238 | /* | 255 | /* |
239 | * Read the board id from the touchpad | 256 | * Read the board id and the "More Extended Queries" from the touchpad |
240 | * The board id is encoded in the "QUERY MODES" response | 257 | * The board id is encoded in the "QUERY MODES" response |
241 | */ | 258 | */ |
242 | static int synaptics_board_id(struct psmouse *psmouse) | 259 | static int synaptics_query_modes(struct psmouse *psmouse) |
243 | { | 260 | { |
244 | struct synaptics_data *priv = psmouse->private; | 261 | struct synaptics_data *priv = psmouse->private; |
245 | unsigned char bid[3]; | 262 | unsigned char bid[3]; |
246 | 263 | ||
264 | /* firmware versions prior to 7.5 have no board_id encoded */ | ||
265 | if (SYN_ID_FULL(priv->identity) < 0x705) | ||
266 | return 0; | ||
267 | |||
247 | if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) | 268 | if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) |
248 | return -1; | 269 | return -1; |
249 | priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; | 270 | priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; |
271 | |||
272 | if (SYN_MEXT_CAP_BIT(bid[0])) | ||
273 | return synaptics_more_extended_queries(psmouse); | ||
274 | |||
250 | return 0; | 275 | return 0; |
251 | } | 276 | } |
252 | 277 | ||
@@ -346,7 +371,6 @@ static int synaptics_resolution(struct psmouse *psmouse) | |||
346 | { | 371 | { |
347 | struct synaptics_data *priv = psmouse->private; | 372 | struct synaptics_data *priv = psmouse->private; |
348 | unsigned char resp[3]; | 373 | unsigned char resp[3]; |
349 | int i; | ||
350 | 374 | ||
351 | if (SYN_ID_MAJOR(priv->identity) < 4) | 375 | if (SYN_ID_MAJOR(priv->identity) < 4) |
352 | return 0; | 376 | return 0; |
@@ -358,17 +382,6 @@ static int synaptics_resolution(struct psmouse *psmouse) | |||
358 | } | 382 | } |
359 | } | 383 | } |
360 | 384 | ||
361 | for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) { | ||
362 | if (psmouse_matches_pnp_id(psmouse, | ||
363 | min_max_pnpid_table[i].pnp_ids)) { | ||
364 | priv->x_min = min_max_pnpid_table[i].x_min; | ||
365 | priv->x_max = min_max_pnpid_table[i].x_max; | ||
366 | priv->y_min = min_max_pnpid_table[i].y_min; | ||
367 | priv->y_max = min_max_pnpid_table[i].y_max; | ||
368 | return 0; | ||
369 | } | ||
370 | } | ||
371 | |||
372 | if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && | 385 | if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && |
373 | SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { | 386 | SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { |
374 | if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) { | 387 | if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) { |
@@ -377,23 +390,69 @@ static int synaptics_resolution(struct psmouse *psmouse) | |||
377 | } else { | 390 | } else { |
378 | priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); | 391 | priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); |
379 | priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); | 392 | priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); |
393 | psmouse_info(psmouse, | ||
394 | "queried max coordinates: x [..%d], y [..%d]\n", | ||
395 | priv->x_max, priv->y_max); | ||
380 | } | 396 | } |
381 | } | 397 | } |
382 | 398 | ||
383 | if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 && | 399 | if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) && |
384 | SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) { | 400 | (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 || |
401 | /* | ||
402 | * Firmware v8.1 does not report proper number of extended | ||
403 | * capabilities, but has been proven to report correct min | ||
404 | * coordinates. | ||
405 | */ | ||
406 | SYN_ID_FULL(priv->identity) == 0x801)) { | ||
385 | if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { | 407 | if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { |
386 | psmouse_warn(psmouse, | 408 | psmouse_warn(psmouse, |
387 | "device claims to have min coordinates query, but I'm not able to read it.\n"); | 409 | "device claims to have min coordinates query, but I'm not able to read it.\n"); |
388 | } else { | 410 | } else { |
389 | priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); | 411 | priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); |
390 | priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); | 412 | priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); |
413 | psmouse_info(psmouse, | ||
414 | "queried min coordinates: x [%d..], y [%d..]\n", | ||
415 | priv->x_min, priv->y_min); | ||
391 | } | 416 | } |
392 | } | 417 | } |
393 | 418 | ||
394 | return 0; | 419 | return 0; |
395 | } | 420 | } |
396 | 421 | ||
422 | /* | ||
423 | * Apply quirk(s) if the hardware matches | ||
424 | */ | ||
425 | |||
426 | static void synaptics_apply_quirks(struct psmouse *psmouse) | ||
427 | { | ||
428 | struct synaptics_data *priv = psmouse->private; | ||
429 | int i; | ||
430 | |||
431 | for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) { | ||
432 | if (!psmouse_matches_pnp_id(psmouse, | ||
433 | min_max_pnpid_table[i].pnp_ids)) | ||
434 | continue; | ||
435 | |||
436 | if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID && | ||
437 | priv->board_id < min_max_pnpid_table[i].board_id.min) | ||
438 | continue; | ||
439 | |||
440 | if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID && | ||
441 | priv->board_id > min_max_pnpid_table[i].board_id.max) | ||
442 | continue; | ||
443 | |||
444 | priv->x_min = min_max_pnpid_table[i].x_min; | ||
445 | priv->x_max = min_max_pnpid_table[i].x_max; | ||
446 | priv->y_min = min_max_pnpid_table[i].y_min; | ||
447 | priv->y_max = min_max_pnpid_table[i].y_max; | ||
448 | psmouse_info(psmouse, | ||
449 | "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n", | ||
450 | priv->x_min, priv->x_max, | ||
451 | priv->y_min, priv->y_max); | ||
452 | break; | ||
453 | } | ||
454 | } | ||
455 | |||
397 | static int synaptics_query_hardware(struct psmouse *psmouse) | 456 | static int synaptics_query_hardware(struct psmouse *psmouse) |
398 | { | 457 | { |
399 | if (synaptics_identify(psmouse)) | 458 | if (synaptics_identify(psmouse)) |
@@ -402,13 +461,15 @@ static int synaptics_query_hardware(struct psmouse *psmouse) | |||
402 | return -1; | 461 | return -1; |
403 | if (synaptics_firmware_id(psmouse)) | 462 | if (synaptics_firmware_id(psmouse)) |
404 | return -1; | 463 | return -1; |
405 | if (synaptics_board_id(psmouse)) | 464 | if (synaptics_query_modes(psmouse)) |
406 | return -1; | 465 | return -1; |
407 | if (synaptics_capability(psmouse)) | 466 | if (synaptics_capability(psmouse)) |
408 | return -1; | 467 | return -1; |
409 | if (synaptics_resolution(psmouse)) | 468 | if (synaptics_resolution(psmouse)) |
410 | return -1; | 469 | return -1; |
411 | 470 | ||
471 | synaptics_apply_quirks(psmouse); | ||
472 | |||
412 | return 0; | 473 | return 0; |
413 | } | 474 | } |
414 | 475 | ||
@@ -516,18 +577,22 @@ static int synaptics_is_pt_packet(unsigned char *buf) | |||
516 | return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; | 577 | return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; |
517 | } | 578 | } |
518 | 579 | ||
519 | static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet) | 580 | static void synaptics_pass_pt_packet(struct psmouse *psmouse, |
581 | struct serio *ptport, | ||
582 | unsigned char *packet) | ||
520 | { | 583 | { |
584 | struct synaptics_data *priv = psmouse->private; | ||
521 | struct psmouse *child = serio_get_drvdata(ptport); | 585 | struct psmouse *child = serio_get_drvdata(ptport); |
522 | 586 | ||
523 | if (child && child->state == PSMOUSE_ACTIVATED) { | 587 | if (child && child->state == PSMOUSE_ACTIVATED) { |
524 | serio_interrupt(ptport, packet[1], 0); | 588 | serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0); |
525 | serio_interrupt(ptport, packet[4], 0); | 589 | serio_interrupt(ptport, packet[4], 0); |
526 | serio_interrupt(ptport, packet[5], 0); | 590 | serio_interrupt(ptport, packet[5], 0); |
527 | if (child->pktsize == 4) | 591 | if (child->pktsize == 4) |
528 | serio_interrupt(ptport, packet[2], 0); | 592 | serio_interrupt(ptport, packet[2], 0); |
529 | } else | 593 | } else { |
530 | serio_interrupt(ptport, packet[1], 0); | 594 | serio_interrupt(ptport, packet[1], 0); |
595 | } | ||
531 | } | 596 | } |
532 | 597 | ||
533 | static void synaptics_pt_activate(struct psmouse *psmouse) | 598 | static void synaptics_pt_activate(struct psmouse *psmouse) |
@@ -605,6 +670,18 @@ static void synaptics_parse_agm(const unsigned char buf[], | |||
605 | } | 670 | } |
606 | } | 671 | } |
607 | 672 | ||
673 | static void synaptics_parse_ext_buttons(const unsigned char buf[], | ||
674 | struct synaptics_data *priv, | ||
675 | struct synaptics_hw_state *hw) | ||
676 | { | ||
677 | unsigned int ext_bits = | ||
678 | (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1; | ||
679 | unsigned int ext_mask = GENMASK(ext_bits - 1, 0); | ||
680 | |||
681 | hw->ext_buttons = buf[4] & ext_mask; | ||
682 | hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits; | ||
683 | } | ||
684 | |||
608 | static bool is_forcepad; | 685 | static bool is_forcepad; |
609 | 686 | ||
610 | static int synaptics_parse_hw_state(const unsigned char buf[], | 687 | static int synaptics_parse_hw_state(const unsigned char buf[], |
@@ -691,28 +768,9 @@ static int synaptics_parse_hw_state(const unsigned char buf[], | |||
691 | hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; | 768 | hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; |
692 | } | 769 | } |
693 | 770 | ||
694 | if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && | 771 | if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 && |
695 | ((buf[0] ^ buf[3]) & 0x02)) { | 772 | ((buf[0] ^ buf[3]) & 0x02)) { |
696 | switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { | 773 | synaptics_parse_ext_buttons(buf, priv, hw); |
697 | default: | ||
698 | /* | ||
699 | * if nExtBtn is greater than 8 it should be | ||
700 | * considered invalid and treated as 0 | ||
701 | */ | ||
702 | break; | ||
703 | case 8: | ||
704 | hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0; | ||
705 | hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0; | ||
706 | case 6: | ||
707 | hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0; | ||
708 | hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0; | ||
709 | case 4: | ||
710 | hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0; | ||
711 | hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0; | ||
712 | case 2: | ||
713 | hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0; | ||
714 | hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0; | ||
715 | } | ||
716 | } | 774 | } |
717 | } else { | 775 | } else { |
718 | hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); | 776 | hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); |
@@ -774,12 +832,54 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev, | |||
774 | } | 832 | } |
775 | } | 833 | } |
776 | 834 | ||
835 | static void synaptics_report_ext_buttons(struct psmouse *psmouse, | ||
836 | const struct synaptics_hw_state *hw) | ||
837 | { | ||
838 | struct input_dev *dev = psmouse->dev; | ||
839 | struct synaptics_data *priv = psmouse->private; | ||
840 | int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1; | ||
841 | char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; | ||
842 | int i; | ||
843 | |||
844 | if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap)) | ||
845 | return; | ||
846 | |||
847 | /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */ | ||
848 | if (SYN_ID_FULL(priv->identity) == 0x801 && | ||
849 | !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02)) | ||
850 | return; | ||
851 | |||
852 | if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) { | ||
853 | for (i = 0; i < ext_bits; i++) { | ||
854 | input_report_key(dev, BTN_0 + 2 * i, | ||
855 | hw->ext_buttons & (1 << i)); | ||
856 | input_report_key(dev, BTN_1 + 2 * i, | ||
857 | hw->ext_buttons & (1 << (i + ext_bits))); | ||
858 | } | ||
859 | return; | ||
860 | } | ||
861 | |||
862 | /* | ||
863 | * This generation of touchpads has the trackstick buttons | ||
864 | * physically wired to the touchpad. Re-route them through | ||
865 | * the pass-through interface. | ||
866 | */ | ||
867 | if (!priv->pt_port) | ||
868 | return; | ||
869 | |||
870 | /* The trackstick expects at most 3 buttons */ | ||
871 | priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons) | | ||
872 | SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 | | ||
873 | SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2; | ||
874 | |||
875 | synaptics_pass_pt_packet(psmouse, priv->pt_port, buf); | ||
876 | } | ||
877 | |||
777 | static void synaptics_report_buttons(struct psmouse *psmouse, | 878 | static void synaptics_report_buttons(struct psmouse *psmouse, |
778 | const struct synaptics_hw_state *hw) | 879 | const struct synaptics_hw_state *hw) |
779 | { | 880 | { |
780 | struct input_dev *dev = psmouse->dev; | 881 | struct input_dev *dev = psmouse->dev; |
781 | struct synaptics_data *priv = psmouse->private; | 882 | struct synaptics_data *priv = psmouse->private; |
782 | int i; | ||
783 | 883 | ||
784 | input_report_key(dev, BTN_LEFT, hw->left); | 884 | input_report_key(dev, BTN_LEFT, hw->left); |
785 | input_report_key(dev, BTN_RIGHT, hw->right); | 885 | input_report_key(dev, BTN_RIGHT, hw->right); |
@@ -792,8 +892,7 @@ static void synaptics_report_buttons(struct psmouse *psmouse, | |||
792 | input_report_key(dev, BTN_BACK, hw->down); | 892 | input_report_key(dev, BTN_BACK, hw->down); |
793 | } | 893 | } |
794 | 894 | ||
795 | for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) | 895 | synaptics_report_ext_buttons(psmouse, hw); |
796 | input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i)); | ||
797 | } | 896 | } |
798 | 897 | ||
799 | static void synaptics_report_mt_data(struct psmouse *psmouse, | 898 | static void synaptics_report_mt_data(struct psmouse *psmouse, |
@@ -813,7 +912,7 @@ static void synaptics_report_mt_data(struct psmouse *psmouse, | |||
813 | pos[i].y = synaptics_invert_y(hw[i]->y); | 912 | pos[i].y = synaptics_invert_y(hw[i]->y); |
814 | } | 913 | } |
815 | 914 | ||
816 | input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res); | 915 | input_mt_assign_slots(dev, slot, pos, nsemi, 0); |
817 | 916 | ||
818 | for (i = 0; i < nsemi; i++) { | 917 | for (i = 0; i < nsemi; i++) { |
819 | input_mt_slot(dev, slot[i]); | 918 | input_mt_slot(dev, slot[i]); |
@@ -1014,7 +1113,8 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse) | |||
1014 | if (SYN_CAP_PASS_THROUGH(priv->capabilities) && | 1113 | if (SYN_CAP_PASS_THROUGH(priv->capabilities) && |
1015 | synaptics_is_pt_packet(psmouse->packet)) { | 1114 | synaptics_is_pt_packet(psmouse->packet)) { |
1016 | if (priv->pt_port) | 1115 | if (priv->pt_port) |
1017 | synaptics_pass_pt_packet(priv->pt_port, psmouse->packet); | 1116 | synaptics_pass_pt_packet(psmouse, priv->pt_port, |
1117 | psmouse->packet); | ||
1018 | } else | 1118 | } else |
1019 | synaptics_process_packet(psmouse); | 1119 | synaptics_process_packet(psmouse); |
1020 | 1120 | ||
@@ -1116,8 +1216,9 @@ static void set_input_params(struct psmouse *psmouse, | |||
1116 | __set_bit(BTN_BACK, dev->keybit); | 1216 | __set_bit(BTN_BACK, dev->keybit); |
1117 | } | 1217 | } |
1118 | 1218 | ||
1119 | for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) | 1219 | if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) |
1120 | __set_bit(BTN_0 + i, dev->keybit); | 1220 | for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) |
1221 | __set_bit(BTN_0 + i, dev->keybit); | ||
1121 | 1222 | ||
1122 | __clear_bit(EV_REL, dev->evbit); | 1223 | __clear_bit(EV_REL, dev->evbit); |
1123 | __clear_bit(REL_X, dev->relbit); | 1224 | __clear_bit(REL_X, dev->relbit); |
@@ -1125,7 +1226,8 @@ static void set_input_params(struct psmouse *psmouse, | |||
1125 | 1226 | ||
1126 | if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { | 1227 | if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { |
1127 | __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); | 1228 | __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); |
1128 | if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids)) | 1229 | if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && |
1230 | !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) | ||
1129 | __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); | 1231 | __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); |
1130 | /* Clickpads report only left button */ | 1232 | /* Clickpads report only left button */ |
1131 | __clear_bit(BTN_RIGHT, dev->keybit); | 1233 | __clear_bit(BTN_RIGHT, dev->keybit); |
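The fall-through switch for extended buttons is replaced by a mask-based decode: the low half of the button bits sits in buf[4], the high half in buf[5], each masked to ext_bits = (button_count + 1) / 2. A standalone sketch of that decode, assuming the same packet layout:

static unsigned int parse_ext_buttons(const unsigned char buf[6],
                                      unsigned int button_count)
{
    unsigned int ext_bits = (button_count + 1) >> 1;
    unsigned int ext_mask = (1u << ext_bits) - 1;   /* GENMASK(ext_bits-1, 0) */

    /* buf[4] carries the even-numbered buttons, buf[5] the odd ones */
    return (buf[4] & ext_mask) | ((buf[5] & ext_mask) << ext_bits);
}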
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index aedc3299b14e..ee4bd0d12b26 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #define SYN_QUE_EXT_CAPAB_0C 0x0c | 22 | #define SYN_QUE_EXT_CAPAB_0C 0x0c |
23 | #define SYN_QUE_EXT_MAX_COORDS 0x0d | 23 | #define SYN_QUE_EXT_MAX_COORDS 0x0d |
24 | #define SYN_QUE_EXT_MIN_COORDS 0x0f | 24 | #define SYN_QUE_EXT_MIN_COORDS 0x0f |
25 | #define SYN_QUE_MEXT_CAPAB_10 0x10 | ||
25 | 26 | ||
26 | /* synaptics modes */ | 27 | /* synaptics modes */ |
27 | #define SYN_BIT_ABSOLUTE_MODE (1 << 7) | 28 | #define SYN_BIT_ABSOLUTE_MODE (1 << 7) |
@@ -53,6 +54,7 @@ | |||
53 | #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) | 54 | #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) |
54 | #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) | 55 | #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) |
55 | #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) | 56 | #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) |
57 | #define SYN_MEXT_CAP_BIT(m) ((m) & (1 << 1)) | ||
56 | 58 | ||
57 | /* | 59 | /* |
58 | * The following describes response for the 0x0c query. | 60 | * The following describes response for the 0x0c query. |
@@ -89,6 +91,30 @@ | |||
89 | #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) | 91 | #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) |
90 | #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) | 92 | #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) |
91 | 93 | ||
94 | /* | ||
95 | * The following describes the response for the 0x10 query. | ||
96 | * | ||
97 | * byte mask name meaning | ||
98 | * ---- ---- ------- ------------ | ||
99 | * 1 0x01 ext buttons are stick buttons exported in the extended | ||
100 | * capability are actually meant to be used | ||
101 | * by the tracktick (pass-through). | ||
102 | * 1 0x02 SecurePad the touchpad is a SecurePad, so it | ||
103 | * contains a built-in fingerprint reader. | ||
104 | * 1 0xe0 more ext count how many more extended queries are | ||
105 | * available after this one. | ||
106 | * 2 0xff SecurePad width the width of the SecurePad fingerprint | ||
107 | * reader. | ||
108 | * 3 0xff SecurePad height the height of the SecurePad fingerprint | ||
109 | * reader. | ||
110 | */ | ||
111 | #define SYN_CAP_EXT_BUTTONS_STICK(ex10) ((ex10) & 0x010000) | ||
112 | #define SYN_CAP_SECUREPAD(ex10) ((ex10) & 0x020000) | ||
113 | |||
114 | #define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01)) | ||
115 | #define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02)) | ||
116 | #define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04)) | ||
117 | |||
92 | /* synaptics modes query bits */ | 118 | /* synaptics modes query bits */ |
93 | #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) | 119 | #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) |
94 | #define SYN_MODE_RATE(m) ((m) & (1 << 6)) | 120 | #define SYN_MODE_RATE(m) ((m) & (1 << 6)) |
@@ -143,6 +169,7 @@ struct synaptics_data { | |||
143 | unsigned long int capabilities; /* Capabilities */ | 169 | unsigned long int capabilities; /* Capabilities */ |
144 | unsigned long int ext_cap; /* Extended Capabilities */ | 170 | unsigned long int ext_cap; /* Extended Capabilities */ |
145 | unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ | 171 | unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ |
172 | unsigned long int ext_cap_10; /* Ext Caps from 0x10 query */ | ||
146 | unsigned long int identity; /* Identification */ | 173 | unsigned long int identity; /* Identification */ |
147 | unsigned int x_res, y_res; /* X/Y resolution in units/mm */ | 174 | unsigned int x_res, y_res; /* X/Y resolution in units/mm */ |
148 | unsigned int x_max, y_max; /* Max coordinates (from FW) */ | 175 | unsigned int x_max, y_max; /* Max coordinates (from FW) */ |
@@ -156,6 +183,7 @@ struct synaptics_data { | |||
156 | bool disable_gesture; /* disable gestures */ | 183 | bool disable_gesture; /* disable gestures */ |
157 | 184 | ||
158 | struct serio *pt_port; /* Pass-through serio port */ | 185 | struct serio *pt_port; /* Pass-through serio port */ |
186 | unsigned char pt_buttons; /* Pass-through buttons */ | ||
159 | 187 | ||
160 | /* | 188 | /* |
161 | * Last received Advanced Gesture Mode (AGM) packet. An AGM packet | 189 | * Last received Advanced Gesture Mode (AGM) packet. An AGM packet |
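A short sketch of how the new 0x10 ("More Extended Queries") bits might be consumed once the modes query has stored them in ext_cap_10 (the macro names below are local to the example):

#define CAP_EXT_BUTTONS_STICK(ex10)  ((ex10) & 0x010000)
#define CAP_SECUREPAD(ex10)          ((ex10) & 0x020000)

static int buttons_belong_to_trackstick(unsigned long ext_cap_10)
{
    /* extended buttons are routed to the pass-through trackstick */
    return CAP_EXT_BUTTONS_STICK(ex10_cast(ext_cap_10)) != 0;
}

/* helper only so the example compiles without the kernel headers */
static unsigned long ex10_cast(unsigned long v) { return v; }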
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 58917525126e..6261fd6d7c3c 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig | |||
@@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I | |||
943 | tristate "Allwinner sun4i resistive touchscreen controller support" | 943 | tristate "Allwinner sun4i resistive touchscreen controller support" |
944 | depends on ARCH_SUNXI || COMPILE_TEST | 944 | depends on ARCH_SUNXI || COMPILE_TEST |
945 | depends on HWMON | 945 | depends on HWMON |
946 | depends on THERMAL || !THERMAL_OF | ||
946 | help | 947 | help |
947 | This selects support for the resistive touchscreen controller | 948 | This selects support for the resistive touchscreen controller |
948 | found on Allwinner sunxi SoCs. | 949 | found on Allwinner sunxi SoCs. |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index baa0d9786f50..1ae4e547b419 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE | |||
23 | config IOMMU_IO_PGTABLE_LPAE | 23 | config IOMMU_IO_PGTABLE_LPAE |
24 | bool "ARMv7/v8 Long Descriptor Format" | 24 | bool "ARMv7/v8 Long Descriptor Format" |
25 | select IOMMU_IO_PGTABLE | 25 | select IOMMU_IO_PGTABLE |
26 | depends on ARM || ARM64 || COMPILE_TEST | ||
26 | help | 27 | help |
27 | Enable support for the ARM long descriptor pagetable format. | 28 | Enable support for the ARM long descriptor pagetable format. |
28 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page | 29 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page |
@@ -63,6 +64,7 @@ config MSM_IOMMU | |||
63 | bool "MSM IOMMU Support" | 64 | bool "MSM IOMMU Support" |
64 | depends on ARM | 65 | depends on ARM |
65 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST | 66 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST |
67 | depends on BROKEN | ||
66 | select IOMMU_API | 68 | select IOMMU_API |
67 | help | 69 | help |
68 | Support for the IOMMUs found on certain Qualcomm SOCs. | 70 | Support for the IOMMUs found on certain Qualcomm SOCs. |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 7ce52737c7a1..dc14fec4ede1 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
@@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = { | |||
1186 | 1186 | ||
1187 | static int __init exynos_iommu_init(void) | 1187 | static int __init exynos_iommu_init(void) |
1188 | { | 1188 | { |
1189 | struct device_node *np; | ||
1189 | int ret; | 1190 | int ret; |
1190 | 1191 | ||
1192 | np = of_find_matching_node(NULL, sysmmu_of_match); | ||
1193 | if (!np) | ||
1194 | return 0; | ||
1195 | |||
1196 | of_node_put(np); | ||
1197 | |||
1191 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", | 1198 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", |
1192 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); | 1199 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); |
1193 | if (!lv2table_kmem_cache) { | 1200 | if (!lv2table_kmem_cache) { |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 5a500edf00cc..b610a8dee238 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
@@ -56,7 +56,8 @@ | |||
56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ | 56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ |
57 | * (d)->bits_per_level) + (d)->pg_shift) | 57 | * (d)->bits_per_level) + (d)->pg_shift) |
58 | 58 | ||
59 | #define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) | 59 | #define ARM_LPAE_PAGES_PER_PGD(d) \ |
60 | DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) | ||
60 | 61 | ||
61 | /* | 62 | /* |
62 | * Calculate the index at level l used to map virtual address a using the | 63 | * Calculate the index at level l used to map virtual address a using the |
@@ -66,7 +67,7 @@ | |||
66 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) | 67 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) |
67 | 68 | ||
68 | #define ARM_LPAE_LVL_IDX(a,l,d) \ | 69 | #define ARM_LPAE_LVL_IDX(a,l,d) \ |
69 | (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ | 70 | (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ |
70 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) | 71 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) |
71 | 72 | ||
72 | /* Calculate the block/page mapping size at level l for pagetable in d. */ | 73 | /* Calculate the block/page mapping size at level l for pagetable in d. */ |
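Two small but easy-to-miss fixes above: a plain right shift truncates, so a PGD smaller than a page would compute as zero pages, and indexing a 64-bit IOVA with narrower arithmetic can drop the high bits. A plain-C sketch of both, assuming 4K pages and bits < 32:

#include <stdint.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static unsigned long pages_per_pgd(unsigned long pgd_size, unsigned int pg_shift)
{
    /* pgd_size >> pg_shift would round down to 0 for a sub-page PGD */
    return DIV_ROUND_UP(pgd_size, 1UL << pg_shift);
}

static unsigned int lvl_idx(uint64_t iova, unsigned int shift, unsigned int bits)
{
    /* the cast keeps the shift in 64-bit space before masking */
    return (unsigned int)((iova >> shift) & ((1u << bits) - 1));
}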
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index f59f857b702e..a4ba851825c2 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
@@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void) | |||
1376 | struct kmem_cache *p; | 1376 | struct kmem_cache *p; |
1377 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | 1377 | const unsigned long flags = SLAB_HWCACHE_ALIGN; |
1378 | size_t align = 1 << 10; /* L2 pagetable alignment */ | 1378 | size_t align = 1 << 10; /* L2 pagetable alignment */ |
1379 | struct device_node *np; | ||
1380 | |||
1381 | np = of_find_matching_node(NULL, omap_iommu_of_match); | ||
1382 | if (!np) | ||
1383 | return 0; | ||
1384 | |||
1385 | of_node_put(np); | ||
1379 | 1386 | ||
1380 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, | 1387 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, |
1381 | iopte_cachep_ctor); | 1388 | iopte_cachep_ctor); |
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 6a8b1ec4a48a..9f74fddcd304 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
@@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = { | |||
1015 | 1015 | ||
1016 | static int __init rk_iommu_init(void) | 1016 | static int __init rk_iommu_init(void) |
1017 | { | 1017 | { |
1018 | struct device_node *np; | ||
1018 | int ret; | 1019 | int ret; |
1019 | 1020 | ||
1021 | np = of_find_matching_node(NULL, rk_iommu_dt_ids); | ||
1022 | if (!np) | ||
1023 | return 0; | ||
1024 | |||
1025 | of_node_put(np); | ||
1026 | |||
1020 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); | 1027 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); |
1021 | if (ret) | 1028 | if (ret) |
1022 | return ret; | 1029 | return ret; |
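The exynos, omap and rockchip initcalls now all share the same early-exit shape: probe the device tree first and return before allocating caches or registering anything when the hardware is absent. A hedged sketch of that shape (the match table and compatible string are hypothetical):

#include <linux/init.h>
#include <linux/of.h>

/* hypothetical match table, only for the example */
static const struct of_device_id my_iommu_of_match[] = {
    { .compatible = "vendor,my-iommu" },
    { /* sentinel */ }
};

static int __init my_iommu_init(void)
{
    struct device_node *np;

    np = of_find_matching_node(NULL, my_iommu_of_match);
    if (!np)
        return 0;   /* hardware absent: skip all allocations */
    of_node_put(np);

    /* ... kmem caches, bus_set_iommu(), platform_driver_register() ... */
    return 0;
}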
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 463c235acbdc..4387dae14e45 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c | |||
@@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base; | |||
69 | static void __iomem *main_int_base; | 69 | static void __iomem *main_int_base; |
70 | static struct irq_domain *armada_370_xp_mpic_domain; | 70 | static struct irq_domain *armada_370_xp_mpic_domain; |
71 | static u32 doorbell_mask_reg; | 71 | static u32 doorbell_mask_reg; |
72 | static int parent_irq; | ||
72 | #ifdef CONFIG_PCI_MSI | 73 | #ifdef CONFIG_PCI_MSI |
73 | static struct irq_domain *armada_370_xp_msi_domain; | 74 | static struct irq_domain *armada_370_xp_msi_domain; |
74 | static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); | 75 | static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); |
@@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, | |||
356 | { | 357 | { |
357 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 358 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
358 | armada_xp_mpic_smp_cpu_init(); | 359 | armada_xp_mpic_smp_cpu_init(); |
360 | |||
359 | return NOTIFY_OK; | 361 | return NOTIFY_OK; |
360 | } | 362 | } |
361 | 363 | ||
@@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = { | |||
364 | .priority = 100, | 366 | .priority = 100, |
365 | }; | 367 | }; |
366 | 368 | ||
369 | static int mpic_cascaded_secondary_init(struct notifier_block *nfb, | ||
370 | unsigned long action, void *hcpu) | ||
371 | { | ||
372 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | ||
373 | enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); | ||
374 | |||
375 | return NOTIFY_OK; | ||
376 | } | ||
377 | |||
378 | static struct notifier_block mpic_cascaded_cpu_notifier = { | ||
379 | .notifier_call = mpic_cascaded_secondary_init, | ||
380 | .priority = 100, | ||
381 | }; | ||
382 | |||
367 | #endif /* CONFIG_SMP */ | 383 | #endif /* CONFIG_SMP */ |
368 | 384 | ||
369 | static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { | 385 | static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { |
@@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
539 | struct device_node *parent) | 555 | struct device_node *parent) |
540 | { | 556 | { |
541 | struct resource main_int_res, per_cpu_int_res; | 557 | struct resource main_int_res, per_cpu_int_res; |
542 | int parent_irq, nr_irqs, i; | 558 | int nr_irqs, i; |
543 | u32 control; | 559 | u32 control; |
544 | 560 | ||
545 | BUG_ON(of_address_to_resource(node, 0, &main_int_res)); | 561 | BUG_ON(of_address_to_resource(node, 0, &main_int_res)); |
@@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
587 | register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); | 603 | register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); |
588 | #endif | 604 | #endif |
589 | } else { | 605 | } else { |
606 | #ifdef CONFIG_SMP | ||
607 | register_cpu_notifier(&mpic_cascaded_cpu_notifier); | ||
608 | #endif | ||
590 | irq_set_chained_handler(parent_irq, | 609 | irq_set_chained_handler(parent_irq, |
591 | armada_370_xp_mpic_handle_cascade_irq); | 610 | armada_370_xp_mpic_handle_cascade_irq); |
592 | } | 611 | } |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d8996bdf0f61..596b0a9eee99 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its, | |||
416 | { | 416 | { |
417 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; | 417 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; |
418 | struct its_collection *sync_col; | 418 | struct its_collection *sync_col; |
419 | unsigned long flags; | ||
419 | 420 | ||
420 | raw_spin_lock(&its->lock); | 421 | raw_spin_lock_irqsave(&its->lock, flags); |
421 | 422 | ||
422 | cmd = its_allocate_entry(its); | 423 | cmd = its_allocate_entry(its); |
423 | if (!cmd) { /* We're soooooo screewed... */ | 424 | if (!cmd) { /* We're soooooo screewed... */ |
424 | pr_err_ratelimited("ITS can't allocate, dropping command\n"); | 425 | pr_err_ratelimited("ITS can't allocate, dropping command\n"); |
425 | raw_spin_unlock(&its->lock); | 426 | raw_spin_unlock_irqrestore(&its->lock, flags); |
426 | return; | 427 | return; |
427 | } | 428 | } |
428 | sync_col = builder(cmd, desc); | 429 | sync_col = builder(cmd, desc); |
@@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its, | |||
442 | 443 | ||
443 | post: | 444 | post: |
444 | next_cmd = its_post_commands(its); | 445 | next_cmd = its_post_commands(its); |
445 | raw_spin_unlock(&its->lock); | 446 | raw_spin_unlock_irqrestore(&its->lock, flags); |
446 | 447 | ||
447 | its_wait_for_range_completion(its, cmd, next_cmd); | 448 | its_wait_for_range_completion(its, cmd, next_cmd); |
448 | } | 449 | } |
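The ITS command-queue lock can now be taken from contexts that run with interrupts disabled, so the plain raw_spin_lock/unlock pair becomes the irqsave/irqrestore pair throughout this file. The general shape, as a short sketch:

#include <linux/spinlock.h>

struct cmd_queue {
    raw_spinlock_t lock;
    /* ... command ring state ... */
};

static void queue_command(struct cmd_queue *q)
{
    unsigned long flags;

    /* irqsave variant: safe even if the caller already runs with
     * interrupts off or the lock is also taken from IRQ context */
    raw_spin_lock_irqsave(&q->lock, flags);
    /* ... allocate an entry and post it ... */
    raw_spin_unlock_irqrestore(&q->lock, flags);
}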
@@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its) | |||
799 | { | 800 | { |
800 | int err; | 801 | int err; |
801 | int i; | 802 | int i; |
802 | int psz = PAGE_SIZE; | 803 | int psz = SZ_64K; |
803 | u64 shr = GITS_BASER_InnerShareable; | 804 | u64 shr = GITS_BASER_InnerShareable; |
804 | 805 | ||
805 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | 806 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
806 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); | 807 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); |
807 | u64 type = GITS_BASER_TYPE(val); | 808 | u64 type = GITS_BASER_TYPE(val); |
808 | u64 entry_size = GITS_BASER_ENTRY_SIZE(val); | 809 | u64 entry_size = GITS_BASER_ENTRY_SIZE(val); |
810 | int order = get_order(psz); | ||
811 | int alloc_size; | ||
809 | u64 tmp; | 812 | u64 tmp; |
810 | void *base; | 813 | void *base; |
811 | 814 | ||
812 | if (type == GITS_BASER_TYPE_NONE) | 815 | if (type == GITS_BASER_TYPE_NONE) |
813 | continue; | 816 | continue; |
814 | 817 | ||
815 | /* We're lazy and only allocate a single page for now */ | 818 | /* |
816 | base = (void *)get_zeroed_page(GFP_KERNEL); | 819 | * Allocate as many entries as required to fit the |
820 | * range of device IDs that the ITS can grok... The ID | ||
821 | * space being incredibly sparse, this results in a | ||
822 | * massive waste of memory. | ||
823 | * | ||
824 | * For other tables, only allocate a single page. | ||
825 | */ | ||
826 | if (type == GITS_BASER_TYPE_DEVICE) { | ||
827 | u64 typer = readq_relaxed(its->base + GITS_TYPER); | ||
828 | u32 ids = GITS_TYPER_DEVBITS(typer); | ||
829 | |||
830 | order = get_order((1UL << ids) * entry_size); | ||
831 | if (order >= MAX_ORDER) { | ||
832 | order = MAX_ORDER - 1; | ||
833 | pr_warn("%s: Device Table too large, reduce its page order to %u\n", | ||
834 | its->msi_chip.of_node->full_name, order); | ||
835 | } | ||
836 | } | ||
837 | |||
838 | alloc_size = (1 << order) * PAGE_SIZE; | ||
839 | base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | ||
817 | if (!base) { | 840 | if (!base) { |
818 | err = -ENOMEM; | 841 | err = -ENOMEM; |
819 | goto out_free; | 842 | goto out_free; |
@@ -841,7 +864,7 @@ retry_baser: | |||
841 | break; | 864 | break; |
842 | } | 865 | } |
843 | 866 | ||
844 | val |= (PAGE_SIZE / psz) - 1; | 867 | val |= (alloc_size / psz) - 1; |
845 | 868 | ||
846 | writeq_relaxed(val, its->base + GITS_BASER + i * 8); | 869 | writeq_relaxed(val, its->base + GITS_BASER + i * 8); |
847 | tmp = readq_relaxed(its->base + GITS_BASER + i * 8); | 870 | tmp = readq_relaxed(its->base + GITS_BASER + i * 8); |
@@ -882,7 +905,7 @@ retry_baser: | |||
882 | } | 905 | } |
883 | 906 | ||
884 | pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", | 907 | pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", |
885 | (int)(PAGE_SIZE / entry_size), | 908 | (int)(alloc_size / entry_size), |
886 | its_base_type_string[type], | 909 | its_base_type_string[type], |
887 | (unsigned long)virt_to_phys(base), | 910 | (unsigned long)virt_to_phys(base), |
888 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); | 911 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); |
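For the device table the allocation is now sized to cover the whole DeviceID space ((1 << ids) * entry_size), rounded up to a page order and capped below MAX_ORDER. A standalone sketch of that sizing calculation, assuming 4K pages (the real code uses get_order()):

static int device_table_order(unsigned int ids, unsigned int entry_size,
                              int max_order)
{
    unsigned long long bytes = (1ULL << ids) * entry_size;
    int order = 0;

    /* smallest order such that (PAGE_SIZE << order) >= bytes */
    while ((4096ULL << order) < bytes)
        order++;

    /* cap oversized requests, mirroring the MAX_ORDER clamp above */
    return order >= max_order ? max_order - 1 : order;
}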
@@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void) | |||
1020 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | 1043 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) |
1021 | { | 1044 | { |
1022 | struct its_device *its_dev = NULL, *tmp; | 1045 | struct its_device *its_dev = NULL, *tmp; |
1046 | unsigned long flags; | ||
1023 | 1047 | ||
1024 | raw_spin_lock(&its->lock); | 1048 | raw_spin_lock_irqsave(&its->lock, flags); |
1025 | 1049 | ||
1026 | list_for_each_entry(tmp, &its->its_device_list, entry) { | 1050 | list_for_each_entry(tmp, &its->its_device_list, entry) { |
1027 | if (tmp->device_id == dev_id) { | 1051 | if (tmp->device_id == dev_id) { |
@@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | |||
1030 | } | 1054 | } |
1031 | } | 1055 | } |
1032 | 1056 | ||
1033 | raw_spin_unlock(&its->lock); | 1057 | raw_spin_unlock_irqrestore(&its->lock, flags); |
1034 | 1058 | ||
1035 | return its_dev; | 1059 | return its_dev; |
1036 | } | 1060 | } |
@@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1040 | { | 1064 | { |
1041 | struct its_device *dev; | 1065 | struct its_device *dev; |
1042 | unsigned long *lpi_map; | 1066 | unsigned long *lpi_map; |
1067 | unsigned long flags; | ||
1043 | void *itt; | 1068 | void *itt; |
1044 | int lpi_base; | 1069 | int lpi_base; |
1045 | int nr_lpis; | 1070 | int nr_lpis; |
@@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1056 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); | 1081 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); |
1057 | sz = nr_ites * its->ite_size; | 1082 | sz = nr_ites * its->ite_size; |
1058 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; | 1083 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
1059 | itt = kmalloc(sz, GFP_KERNEL); | 1084 | itt = kzalloc(sz, GFP_KERNEL); |
1060 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); | 1085 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); |
1061 | 1086 | ||
1062 | if (!dev || !itt || !lpi_map) { | 1087 | if (!dev || !itt || !lpi_map) { |
@@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1075 | dev->device_id = dev_id; | 1100 | dev->device_id = dev_id; |
1076 | INIT_LIST_HEAD(&dev->entry); | 1101 | INIT_LIST_HEAD(&dev->entry); |
1077 | 1102 | ||
1078 | raw_spin_lock(&its->lock); | 1103 | raw_spin_lock_irqsave(&its->lock, flags); |
1079 | list_add(&dev->entry, &its->its_device_list); | 1104 | list_add(&dev->entry, &its->its_device_list); |
1080 | raw_spin_unlock(&its->lock); | 1105 | raw_spin_unlock_irqrestore(&its->lock, flags); |
1081 | 1106 | ||
1082 | /* Bind the device to the first possible CPU */ | 1107 | /* Bind the device to the first possible CPU */ |
1083 | cpu = cpumask_first(cpu_online_mask); | 1108 | cpu = cpumask_first(cpu_online_mask); |
@@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1091 | 1116 | ||
1092 | static void its_free_device(struct its_device *its_dev) | 1117 | static void its_free_device(struct its_device *its_dev) |
1093 | { | 1118 | { |
1094 | raw_spin_lock(&its_dev->its->lock); | 1119 | unsigned long flags; |
1120 | |||
1121 | raw_spin_lock_irqsave(&its_dev->its->lock, flags); | ||
1095 | list_del(&its_dev->entry); | 1122 | list_del(&its_dev->entry); |
1096 | raw_spin_unlock(&its_dev->its->lock); | 1123 | raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); |
1097 | kfree(its_dev->itt); | 1124 | kfree(its_dev->itt); |
1098 | kfree(its_dev); | 1125 | kfree(its_dev); |
1099 | } | 1126 | } |
@@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) | |||
1112 | return 0; | 1139 | return 0; |
1113 | } | 1140 | } |
1114 | 1141 | ||
1142 | struct its_pci_alias { | ||
1143 | struct pci_dev *pdev; | ||
1144 | u32 dev_id; | ||
1145 | u32 count; | ||
1146 | }; | ||
1147 | |||
1148 | static int its_pci_msi_vec_count(struct pci_dev *pdev) | ||
1149 | { | ||
1150 | int msi, msix; | ||
1151 | |||
1152 | msi = max(pci_msi_vec_count(pdev), 0); | ||
1153 | msix = max(pci_msix_vec_count(pdev), 0); | ||
1154 | |||
1155 | return max(msi, msix); | ||
1156 | } | ||
1157 | |||
1158 | static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) | ||
1159 | { | ||
1160 | struct its_pci_alias *dev_alias = data; | ||
1161 | |||
1162 | dev_alias->dev_id = alias; | ||
1163 | if (pdev != dev_alias->pdev) | ||
1164 | dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); | ||
1165 | |||
1166 | return 0; | ||
1167 | } | ||
1168 | |||
1115 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | 1169 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, |
1116 | int nvec, msi_alloc_info_t *info) | 1170 | int nvec, msi_alloc_info_t *info) |
1117 | { | 1171 | { |
1118 | struct pci_dev *pdev; | 1172 | struct pci_dev *pdev; |
1119 | struct its_node *its; | 1173 | struct its_node *its; |
1120 | u32 dev_id; | ||
1121 | struct its_device *its_dev; | 1174 | struct its_device *its_dev; |
1175 | struct its_pci_alias dev_alias; | ||
1122 | 1176 | ||
1123 | if (!dev_is_pci(dev)) | 1177 | if (!dev_is_pci(dev)) |
1124 | return -EINVAL; | 1178 | return -EINVAL; |
1125 | 1179 | ||
1126 | pdev = to_pci_dev(dev); | 1180 | pdev = to_pci_dev(dev); |
1127 | dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); | 1181 | dev_alias.pdev = pdev; |
1182 | dev_alias.count = nvec; | ||
1183 | |||
1184 | pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); | ||
1128 | its = domain->parent->host_data; | 1185 | its = domain->parent->host_data; |
1129 | 1186 | ||
1130 | its_dev = its_find_device(its, dev_id); | 1187 | its_dev = its_find_device(its, dev_alias.dev_id); |
1131 | if (WARN_ON(its_dev)) | 1188 | if (its_dev) { |
1132 | return -EINVAL; | 1189 | /* |
1190 | * We have already seen this ID, probably through | ||
1191 | * another alias (PCI bridge of some sort). No need to | ||
1192 | * create the device. | ||
1193 | */ | ||
1194 | dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id); | ||
1195 | goto out; | ||
1196 | } | ||
1133 | 1197 | ||
1134 | its_dev = its_create_device(its, dev_id, nvec); | 1198 | its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count); |
1135 | if (!its_dev) | 1199 | if (!its_dev) |
1136 | return -ENOMEM; | 1200 | return -ENOMEM; |
1137 | 1201 | ||
1138 | dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); | 1202 | dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", |
1139 | 1203 | dev_alias.count, ilog2(dev_alias.count)); | |
1204 | out: | ||
1140 | info->scratchpad[0].ptr = its_dev; | 1205 | info->scratchpad[0].ptr = its_dev; |
1141 | info->scratchpad[1].ptr = dev; | 1206 | info->scratchpad[1].ptr = dev; |
1142 | return 0; | 1207 | return 0; |
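
The its_get_pci_alias() callback above runs once per node in the device's DMA-alias chain: it records the last alias visited as the requester ID and, for every node other than the device itself, adds the device's own MSI/MSI-X capacity to the vector count, so one ITT is sized for everything sharing that ID. A rough userspace sketch of that accumulation over an invented two-entry chain (fake_dev, the devid values and vector counts are all made up):

#include <stdio.h>

struct fake_dev {
	unsigned int devid;	/* requester ID seen at this point in the chain */
	int nvec;		/* MSI/MSI-X vectors the device can use */
};

struct alias_acc {
	const struct fake_dev *self;
	unsigned int dev_id;
	int count;
};

/* mirrors the shape of the pci_for_each_dma_alias() callback above */
static int count_alias(const struct fake_dev *visited, void *data)
{
	struct alias_acc *acc = data;

	acc->dev_id = visited->devid;		/* last alias visited wins */
	if (visited != acc->self)
		acc->count += acc->self->nvec;	/* as in its_get_pci_alias() */
	return 0;
}

int main(void)
{
	struct fake_dev chain[] = {
		{ .devid = 0x0010, .nvec = 4 },	/* the endpoint itself */
		{ .devid = 0x0008, .nvec = 0 },	/* e.g. a bridge it aliases to */
	};
	struct alias_acc acc = { .self = &chain[0], .count = chain[0].nvec };
	unsigned int i;

	for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
		count_alias(&chain[i], &acc);

	printf("use devid %#x with room for %d vectors\n", acc.dev_id, acc.count);
	return 0;
}
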
@@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = { | |||
1255 | .deactivate = its_irq_domain_deactivate, | 1320 | .deactivate = its_irq_domain_deactivate, |
1256 | }; | 1321 | }; |
1257 | 1322 | ||
1323 | static int its_force_quiescent(void __iomem *base) | ||
1324 | { | ||
1325 | u32 count = 1000000; /* 1s */ | ||
1326 | u32 val; | ||
1327 | |||
1328 | val = readl_relaxed(base + GITS_CTLR); | ||
1329 | if (val & GITS_CTLR_QUIESCENT) | ||
1330 | return 0; | ||
1331 | |||
1332 | /* Disable the generation of all interrupts to this ITS */ | ||
1333 | val &= ~GITS_CTLR_ENABLE; | ||
1334 | writel_relaxed(val, base + GITS_CTLR); | ||
1335 | |||
1336 | /* Poll GITS_CTLR and wait until ITS becomes quiescent */ | ||
1337 | while (1) { | ||
1338 | val = readl_relaxed(base + GITS_CTLR); | ||
1339 | if (val & GITS_CTLR_QUIESCENT) | ||
1340 | return 0; | ||
1341 | |||
1342 | count--; | ||
1343 | if (!count) | ||
1344 | return -EBUSY; | ||
1345 | |||
1346 | cpu_relax(); | ||
1347 | udelay(1); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1258 | static int its_probe(struct device_node *node, struct irq_domain *parent) | 1351 | static int its_probe(struct device_node *node, struct irq_domain *parent) |
1259 | { | 1352 | { |
1260 | struct resource res; | 1353 | struct resource res; |
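
its_force_quiescent() above disables the ITS and then polls GITS_CTLR for the quiescent bit with a bounded retry count, waiting 1us per iteration so the whole wait is capped at roughly one second before giving up with -EBUSY. A self-contained sketch of that bounded-poll idiom, with the hardware register replaced by an arbitrary predicate and the counts chosen arbitrarily:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Poll cond() every delay_us microseconds, giving up after max_iter tries. */
static int poll_until(bool (*cond)(void *), void *arg,
		      unsigned int max_iter, unsigned int delay_us)
{
	while (max_iter--) {
		if (cond(arg))
			return 0;
		usleep(delay_us);
	}
	return -1;		/* stand-in for -EBUSY */
}

static bool counter_done(void *arg)
{
	int *n = arg;

	return --(*n) <= 0;	/* pretend hardware that settles after a few polls */
}

int main(void)
{
	int settle_after = 5;

	if (poll_until(counter_done, &settle_after, 1000000, 1) == 0)
		printf("quiescent\n");
	else
		printf("timed out\n");
	return 0;
}
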
@@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) | |||
1283 | goto out_unmap; | 1376 | goto out_unmap; |
1284 | } | 1377 | } |
1285 | 1378 | ||
1379 | err = its_force_quiescent(its_base); | ||
1380 | if (err) { | ||
1381 | pr_warn("%s: failed to quiesce, giving up\n", | ||
1382 | node->full_name); | ||
1383 | goto out_unmap; | ||
1384 | } | ||
1385 | |||
1286 | pr_info("ITS: %s\n", node->full_name); | 1386 | pr_info("ITS: %s\n", node->full_name); |
1287 | 1387 | ||
1288 | its = kzalloc(sizeof(*its), GFP_KERNEL); | 1388 | its = kzalloc(sizeof(*its), GFP_KERNEL); |
@@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) | |||
1323 | writeq_relaxed(baser, its->base + GITS_CBASER); | 1423 | writeq_relaxed(baser, its->base + GITS_CBASER); |
1324 | tmp = readq_relaxed(its->base + GITS_CBASER); | 1424 | tmp = readq_relaxed(its->base + GITS_CBASER); |
1325 | writeq_relaxed(0, its->base + GITS_CWRITER); | 1425 | writeq_relaxed(0, its->base + GITS_CWRITER); |
1326 | writel_relaxed(1, its->base + GITS_CTLR); | 1426 | writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); |
1327 | 1427 | ||
1328 | if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { | 1428 | if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { |
1329 | pr_info("ITS: using cache flushing for cmd queue\n"); | 1429 | pr_info("ITS: using cache flushing for cmd queue\n"); |
@@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void) | |||
1382 | 1482 | ||
1383 | int its_cpu_init(void) | 1483 | int its_cpu_init(void) |
1384 | { | 1484 | { |
1385 | if (!gic_rdists_supports_plpis()) { | ||
1386 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | ||
1387 | return -ENXIO; | ||
1388 | } | ||
1389 | |||
1390 | if (!list_empty(&its_nodes)) { | 1485 | if (!list_empty(&its_nodes)) { |
1486 | if (!gic_rdists_supports_plpis()) { | ||
1487 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | ||
1488 | return -ENXIO; | ||
1489 | } | ||
1391 | its_cpu_init_lpis(); | 1490 | its_cpu_init_lpis(); |
1392 | its_cpu_init_collection(); | 1491 | its_cpu_init_collection(); |
1393 | } | 1492 | } |
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1c6dea2fbc34..fd8850def1b8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, | |||
466 | tlist |= 1 << (mpidr & 0xf); | 466 | tlist |= 1 << (mpidr & 0xf); |
467 | 467 | ||
468 | cpu = cpumask_next(cpu, mask); | 468 | cpu = cpumask_next(cpu, mask); |
469 | if (cpu == nr_cpu_ids) | 469 | if (cpu >= nr_cpu_ids) |
470 | goto out; | 470 | goto out; |
471 | 471 | ||
472 | mpidr = cpu_logical_map(cpu); | 472 | mpidr = cpu_logical_map(cpu); |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 4634cf7d0ec3..471e1cdc1933 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d) | |||
154 | static void gic_mask_irq(struct irq_data *d) | 154 | static void gic_mask_irq(struct irq_data *d) |
155 | { | 155 | { |
156 | u32 mask = 1 << (gic_irq(d) % 32); | 156 | u32 mask = 1 << (gic_irq(d) % 32); |
157 | unsigned long flags; | ||
157 | 158 | ||
158 | raw_spin_lock(&irq_controller_lock); | 159 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
159 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); | 160 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); |
160 | if (gic_arch_extn.irq_mask) | 161 | if (gic_arch_extn.irq_mask) |
161 | gic_arch_extn.irq_mask(d); | 162 | gic_arch_extn.irq_mask(d); |
162 | raw_spin_unlock(&irq_controller_lock); | 163 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
163 | } | 164 | } |
164 | 165 | ||
165 | static void gic_unmask_irq(struct irq_data *d) | 166 | static void gic_unmask_irq(struct irq_data *d) |
166 | { | 167 | { |
167 | u32 mask = 1 << (gic_irq(d) % 32); | 168 | u32 mask = 1 << (gic_irq(d) % 32); |
169 | unsigned long flags; | ||
168 | 170 | ||
169 | raw_spin_lock(&irq_controller_lock); | 171 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
170 | if (gic_arch_extn.irq_unmask) | 172 | if (gic_arch_extn.irq_unmask) |
171 | gic_arch_extn.irq_unmask(d); | 173 | gic_arch_extn.irq_unmask(d); |
172 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); | 174 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); |
173 | raw_spin_unlock(&irq_controller_lock); | 175 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
174 | } | 176 | } |
175 | 177 | ||
176 | static void gic_eoi_irq(struct irq_data *d) | 178 | static void gic_eoi_irq(struct irq_data *d) |
@@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
188 | { | 190 | { |
189 | void __iomem *base = gic_dist_base(d); | 191 | void __iomem *base = gic_dist_base(d); |
190 | unsigned int gicirq = gic_irq(d); | 192 | unsigned int gicirq = gic_irq(d); |
193 | unsigned long flags; | ||
191 | int ret; | 194 | int ret; |
192 | 195 | ||
193 | /* Interrupt configuration for SGIs can't be changed */ | 196 | /* Interrupt configuration for SGIs can't be changed */ |
@@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
199 | type != IRQ_TYPE_EDGE_RISING) | 202 | type != IRQ_TYPE_EDGE_RISING) |
200 | return -EINVAL; | 203 | return -EINVAL; |
201 | 204 | ||
202 | raw_spin_lock(&irq_controller_lock); | 205 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
203 | 206 | ||
204 | if (gic_arch_extn.irq_set_type) | 207 | if (gic_arch_extn.irq_set_type) |
205 | gic_arch_extn.irq_set_type(d, type); | 208 | gic_arch_extn.irq_set_type(d, type); |
206 | 209 | ||
207 | ret = gic_configure_irq(gicirq, type, base, NULL); | 210 | ret = gic_configure_irq(gicirq, type, base, NULL); |
208 | 211 | ||
209 | raw_spin_unlock(&irq_controller_lock); | 212 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
210 | 213 | ||
211 | return ret; | 214 | return ret; |
212 | } | 215 | } |
@@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
227 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | 230 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
228 | unsigned int cpu, shift = (gic_irq(d) % 4) * 8; | 231 | unsigned int cpu, shift = (gic_irq(d) % 4) * 8; |
229 | u32 val, mask, bit; | 232 | u32 val, mask, bit; |
233 | unsigned long flags; | ||
230 | 234 | ||
231 | if (!force) | 235 | if (!force) |
232 | cpu = cpumask_any_and(mask_val, cpu_online_mask); | 236 | cpu = cpumask_any_and(mask_val, cpu_online_mask); |
@@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
236 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) | 240 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) |
237 | return -EINVAL; | 241 | return -EINVAL; |
238 | 242 | ||
239 | raw_spin_lock(&irq_controller_lock); | 243 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
240 | mask = 0xff << shift; | 244 | mask = 0xff << shift; |
241 | bit = gic_cpu_map[cpu] << shift; | 245 | bit = gic_cpu_map[cpu] << shift; |
242 | val = readl_relaxed(reg) & ~mask; | 246 | val = readl_relaxed(reg) & ~mask; |
243 | writel_relaxed(val | bit, reg); | 247 | writel_relaxed(val | bit, reg); |
244 | raw_spin_unlock(&irq_controller_lock); | 248 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
245 | 249 | ||
246 | return IRQ_SET_MASK_OK; | 250 | return IRQ_SET_MASK_OK; |
247 | } | 251 | } |
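
All of the irq-gic.c hunks follow the same conversion: raw_spin_lock/raw_spin_unlock on irq_controller_lock become the irqsave/irqrestore variants, which save the caller's interrupt state and put it back on unlock, so the lock can be taken from contexts that already run with interrupts disabled without re-enabling them behind the caller's back. The general shape of that pattern, sketched on a stand-alone lock rather than the driver's (kernel-style code, so it only builds in a kernel tree):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned int example_counter;

static void example_update(void)
{
	unsigned long flags;

	/* save the caller's interrupt state, restore it on unlock */
	raw_spin_lock_irqsave(&example_lock, flags);
	example_counter++;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}
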
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c index 6a7447c304ac..358a574d9e8b 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/isdn/icn/icn.c | |||
@@ -1609,7 +1609,7 @@ icn_setup(char *line) | |||
1609 | if (ints[0] > 1) | 1609 | if (ints[0] > 1) |
1610 | membase = (unsigned long)ints[2]; | 1610 | membase = (unsigned long)ints[2]; |
1611 | if (str && *str) { | 1611 | if (str && *str) { |
1612 | strcpy(sid, str); | 1612 | strlcpy(sid, str, sizeof(sid)); |
1613 | icn_id = sid; | 1613 | icn_id = sid; |
1614 | if ((p = strchr(sid, ','))) { | 1614 | if ((p = strchr(sid, ','))) { |
1615 | *p++ = 0; | 1615 | *p++ = 0; |
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 37de0173b6d2..74adcd2c967e 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
289 | struct request_queue *q = bdev_get_queue(where->bdev); | 289 | struct request_queue *q = bdev_get_queue(where->bdev); |
290 | unsigned short logical_block_size = queue_logical_block_size(q); | 290 | unsigned short logical_block_size = queue_logical_block_size(q); |
291 | sector_t num_sectors; | 291 | sector_t num_sectors; |
292 | unsigned int uninitialized_var(special_cmd_max_sectors); | ||
292 | 293 | ||
293 | /* Reject unsupported discard requests */ | 294 | /* |
294 | if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { | 295 | * Reject unsupported discard and write same requests. |
296 | */ | ||
297 | if (rw & REQ_DISCARD) | ||
298 | special_cmd_max_sectors = q->limits.max_discard_sectors; | ||
299 | else if (rw & REQ_WRITE_SAME) | ||
300 | special_cmd_max_sectors = q->limits.max_write_same_sectors; | ||
301 | if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { | ||
295 | dec_count(io, region, -EOPNOTSUPP); | 302 | dec_count(io, region, -EOPNOTSUPP); |
296 | return; | 303 | return; |
297 | } | 304 | } |
@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
317 | store_io_and_region_in_bio(bio, io, region); | 324 | store_io_and_region_in_bio(bio, io, region); |
318 | 325 | ||
319 | if (rw & REQ_DISCARD) { | 326 | if (rw & REQ_DISCARD) { |
320 | num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); | 327 | num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); |
321 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; | 328 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
322 | remaining -= num_sectors; | 329 | remaining -= num_sectors; |
323 | } else if (rw & REQ_WRITE_SAME) { | 330 | } else if (rw & REQ_WRITE_SAME) { |
@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
326 | */ | 333 | */ |
327 | dp->get_page(dp, &page, &len, &offset); | 334 | dp->get_page(dp, &page, &len, &offset); |
328 | bio_add_page(bio, page, logical_block_size, offset); | 335 | bio_add_page(bio, page, logical_block_size, offset); |
329 | num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); | 336 | num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); |
330 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; | 337 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
331 | 338 | ||
332 | offset = 0; | 339 | offset = 0; |
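
do_region() now picks a single limit, special_cmd_max_sectors, from whichever discard/write-same queue limit applies, rejects the request when that limit is zero, and clamps each bio to it further down. A small userspace sketch of that select-then-clamp flow; the flag values and limits below are invented for illustration:

#include <stdio.h>

#define REQ_DISCARD	(1u << 0)	/* flag values here are illustrative only */
#define REQ_WRITE_SAME	(1u << 1)

struct fake_limits {
	unsigned long max_discard_sectors;
	unsigned long max_write_same_sectors;
};

/* How many sectors the next bio may carry, or -1 for an unsupported request. */
static long next_bio_sectors(unsigned int rw, unsigned long remaining,
			     const struct fake_limits *l)
{
	unsigned long max;

	if (rw & REQ_DISCARD)
		max = l->max_discard_sectors;
	else if (rw & REQ_WRITE_SAME)
		max = l->max_write_same_sectors;
	else
		return remaining;		/* ordinary I/O: no special clamp */

	if (max == 0)
		return -1;			/* stand-in for -EOPNOTSUPP */

	return remaining < max ? remaining : max;	/* min_t() equivalent */
}

int main(void)
{
	struct fake_limits l = {
		.max_discard_sectors	= 2048,
		.max_write_same_sectors	= 0,	/* device lacks WRITE SAME */
	};

	printf("discard: %ld sectors\n", next_bio_sectors(REQ_DISCARD, 8192, &l));
	printf("write same: %ld\n", next_bio_sectors(REQ_WRITE_SAME, 8192, &l));
	return 0;
}
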
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 8b204ae216ab..f83a0f3fc365 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/log2.h> | 20 | #include <linux/log2.h> |
21 | #include <linux/dm-kcopyd.h> | 21 | #include <linux/dm-kcopyd.h> |
22 | 22 | ||
23 | #include "dm.h" | ||
24 | |||
23 | #include "dm-exception-store.h" | 25 | #include "dm-exception-store.h" |
24 | 26 | ||
25 | #define DM_MSG_PREFIX "snapshots" | 27 | #define DM_MSG_PREFIX "snapshots" |
@@ -291,12 +293,23 @@ struct origin { | |||
291 | }; | 293 | }; |
292 | 294 | ||
293 | /* | 295 | /* |
296 | * This structure is allocated for each origin target | ||
297 | */ | ||
298 | struct dm_origin { | ||
299 | struct dm_dev *dev; | ||
300 | struct dm_target *ti; | ||
301 | unsigned split_boundary; | ||
302 | struct list_head hash_list; | ||
303 | }; | ||
304 | |||
305 | /* | ||
294 | * Size of the hash table for origin volumes. If we make this | 306 | * Size of the hash table for origin volumes. If we make this |
295 | * the size of the minors list then it should be nearly perfect | 307 | * the size of the minors list then it should be nearly perfect |
296 | */ | 308 | */ |
297 | #define ORIGIN_HASH_SIZE 256 | 309 | #define ORIGIN_HASH_SIZE 256 |
298 | #define ORIGIN_MASK 0xFF | 310 | #define ORIGIN_MASK 0xFF |
299 | static struct list_head *_origins; | 311 | static struct list_head *_origins; |
312 | static struct list_head *_dm_origins; | ||
300 | static struct rw_semaphore _origins_lock; | 313 | static struct rw_semaphore _origins_lock; |
301 | 314 | ||
302 | static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); | 315 | static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); |
@@ -310,12 +323,22 @@ static int init_origin_hash(void) | |||
310 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | 323 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), |
311 | GFP_KERNEL); | 324 | GFP_KERNEL); |
312 | if (!_origins) { | 325 | if (!_origins) { |
313 | DMERR("unable to allocate memory"); | 326 | DMERR("unable to allocate memory for _origins"); |
314 | return -ENOMEM; | 327 | return -ENOMEM; |
315 | } | 328 | } |
316 | |||
317 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | 329 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) |
318 | INIT_LIST_HEAD(_origins + i); | 330 | INIT_LIST_HEAD(_origins + i); |
331 | |||
332 | _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | ||
333 | GFP_KERNEL); | ||
334 | if (!_dm_origins) { | ||
335 | DMERR("unable to allocate memory for _dm_origins"); | ||
336 | kfree(_origins); | ||
337 | return -ENOMEM; | ||
338 | } | ||
339 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | ||
340 | INIT_LIST_HEAD(_dm_origins + i); | ||
341 | |||
319 | init_rwsem(&_origins_lock); | 342 | init_rwsem(&_origins_lock); |
320 | 343 | ||
321 | return 0; | 344 | return 0; |
@@ -324,6 +347,7 @@ static int init_origin_hash(void) | |||
324 | static void exit_origin_hash(void) | 347 | static void exit_origin_hash(void) |
325 | { | 348 | { |
326 | kfree(_origins); | 349 | kfree(_origins); |
350 | kfree(_dm_origins); | ||
327 | } | 351 | } |
328 | 352 | ||
329 | static unsigned origin_hash(struct block_device *bdev) | 353 | static unsigned origin_hash(struct block_device *bdev) |
@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o) | |||
350 | list_add_tail(&o->hash_list, sl); | 374 | list_add_tail(&o->hash_list, sl); |
351 | } | 375 | } |
352 | 376 | ||
377 | static struct dm_origin *__lookup_dm_origin(struct block_device *origin) | ||
378 | { | ||
379 | struct list_head *ol; | ||
380 | struct dm_origin *o; | ||
381 | |||
382 | ol = &_dm_origins[origin_hash(origin)]; | ||
383 | list_for_each_entry (o, ol, hash_list) | ||
384 | if (bdev_equal(o->dev->bdev, origin)) | ||
385 | return o; | ||
386 | |||
387 | return NULL; | ||
388 | } | ||
389 | |||
390 | static void __insert_dm_origin(struct dm_origin *o) | ||
391 | { | ||
392 | struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)]; | ||
393 | list_add_tail(&o->hash_list, sl); | ||
394 | } | ||
395 | |||
396 | static void __remove_dm_origin(struct dm_origin *o) | ||
397 | { | ||
398 | list_del(&o->hash_list); | ||
399 | } | ||
400 | |||
353 | /* | 401 | /* |
354 | * _origins_lock must be held when calling this function. | 402 | * _origins_lock must be held when calling this function. |
355 | * Returns number of snapshots registered using the supplied cow device, plus: | 403 | * Returns number of snapshots registered using the supplied cow device, plus: |
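
The new _dm_origins table mirrors _origins: ORIGIN_HASH_SIZE list_head buckets indexed by a hash of the origin block device, with __lookup_dm_origin() walking one bucket and __insert_dm_origin() appending to it. A compact userspace sketch of the same bucket-list idea, using a plain singly-linked list and an integer key in place of the block_device (insertion order differs from the driver's list_add_tail, which does not matter for lookup):

#include <stdio.h>

#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK	 0xFF

struct origin_entry {
	unsigned long key;			/* stands in for the block_device */
	struct origin_entry *next;
};

static struct origin_entry *buckets[ORIGIN_HASH_SIZE];

static unsigned int origin_hash(unsigned long key)
{
	return key & ORIGIN_MASK;		/* low bits of the key pick the bucket */
}

static void insert_origin(struct origin_entry *o)
{
	unsigned int h = origin_hash(o->key);

	o->next = buckets[h];
	buckets[h] = o;
}

static struct origin_entry *lookup_origin(unsigned long key)
{
	struct origin_entry *o;

	for (o = buckets[origin_hash(key)]; o; o = o->next)
		if (o->key == key)
			return o;
	return NULL;
}

int main(void)
{
	struct origin_entry a = { .key = 0x103 };

	insert_origin(&a);
	printf("0x103 found: %d\n", lookup_origin(0x103) != NULL);
	printf("0x104 found: %d\n", lookup_origin(0x104) != NULL);
	return 0;
}
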
@@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti) | |||
1840 | static void snapshot_resume(struct dm_target *ti) | 1888 | static void snapshot_resume(struct dm_target *ti) |
1841 | { | 1889 | { |
1842 | struct dm_snapshot *s = ti->private; | 1890 | struct dm_snapshot *s = ti->private; |
1843 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; | 1891 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL; |
1892 | struct dm_origin *o; | ||
1893 | struct mapped_device *origin_md = NULL; | ||
1894 | bool must_restart_merging = false; | ||
1844 | 1895 | ||
1845 | down_read(&_origins_lock); | 1896 | down_read(&_origins_lock); |
1897 | |||
1898 | o = __lookup_dm_origin(s->origin->bdev); | ||
1899 | if (o) | ||
1900 | origin_md = dm_table_get_md(o->ti->table); | ||
1901 | if (!origin_md) { | ||
1902 | (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging); | ||
1903 | if (snap_merging) | ||
1904 | origin_md = dm_table_get_md(snap_merging->ti->table); | ||
1905 | } | ||
1906 | if (origin_md == dm_table_get_md(ti->table)) | ||
1907 | origin_md = NULL; | ||
1908 | if (origin_md) { | ||
1909 | if (dm_hold(origin_md)) | ||
1910 | origin_md = NULL; | ||
1911 | } | ||
1912 | |||
1913 | up_read(&_origins_lock); | ||
1914 | |||
1915 | if (origin_md) { | ||
1916 | dm_internal_suspend_fast(origin_md); | ||
1917 | if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) { | ||
1918 | must_restart_merging = true; | ||
1919 | stop_merge(snap_merging); | ||
1920 | } | ||
1921 | } | ||
1922 | |||
1923 | down_read(&_origins_lock); | ||
1924 | |||
1846 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); | 1925 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); |
1847 | if (snap_src && snap_dest) { | 1926 | if (snap_src && snap_dest) { |
1848 | down_write(&snap_src->lock); | 1927 | down_write(&snap_src->lock); |
@@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti) | |||
1851 | up_write(&snap_dest->lock); | 1930 | up_write(&snap_dest->lock); |
1852 | up_write(&snap_src->lock); | 1931 | up_write(&snap_src->lock); |
1853 | } | 1932 | } |
1933 | |||
1854 | up_read(&_origins_lock); | 1934 | up_read(&_origins_lock); |
1855 | 1935 | ||
1936 | if (origin_md) { | ||
1937 | if (must_restart_merging) | ||
1938 | start_merge(snap_merging); | ||
1939 | dm_internal_resume_fast(origin_md); | ||
1940 | dm_put(origin_md); | ||
1941 | } | ||
1942 | |||
1856 | /* Now we have correct chunk size, reregister */ | 1943 | /* Now we have correct chunk size, reregister */ |
1857 | reregister_snapshot(s); | 1944 | reregister_snapshot(s); |
1858 | 1945 | ||
@@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap, | |||
2133 | * Origin: maps a linear range of a device, with hooks for snapshotting. | 2220 | * Origin: maps a linear range of a device, with hooks for snapshotting. |
2134 | */ | 2221 | */ |
2135 | 2222 | ||
2136 | struct dm_origin { | ||
2137 | struct dm_dev *dev; | ||
2138 | unsigned split_boundary; | ||
2139 | }; | ||
2140 | |||
2141 | /* | 2223 | /* |
2142 | * Construct an origin mapping: <dev_path> | 2224 | * Construct an origin mapping: <dev_path> |
2143 | * The context for an origin is merely a 'struct dm_dev *' | 2225 | * The context for an origin is merely a 'struct dm_dev *' |
@@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
2166 | goto bad_open; | 2248 | goto bad_open; |
2167 | } | 2249 | } |
2168 | 2250 | ||
2251 | o->ti = ti; | ||
2169 | ti->private = o; | 2252 | ti->private = o; |
2170 | ti->num_flush_bios = 1; | 2253 | ti->num_flush_bios = 1; |
2171 | 2254 | ||
@@ -2180,6 +2263,7 @@ bad_alloc: | |||
2180 | static void origin_dtr(struct dm_target *ti) | 2263 | static void origin_dtr(struct dm_target *ti) |
2181 | { | 2264 | { |
2182 | struct dm_origin *o = ti->private; | 2265 | struct dm_origin *o = ti->private; |
2266 | |||
2183 | dm_put_device(ti, o->dev); | 2267 | dm_put_device(ti, o->dev); |
2184 | kfree(o); | 2268 | kfree(o); |
2185 | } | 2269 | } |
@@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti) | |||
2216 | struct dm_origin *o = ti->private; | 2300 | struct dm_origin *o = ti->private; |
2217 | 2301 | ||
2218 | o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); | 2302 | o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); |
2303 | |||
2304 | down_write(&_origins_lock); | ||
2305 | __insert_dm_origin(o); | ||
2306 | up_write(&_origins_lock); | ||
2307 | } | ||
2308 | |||
2309 | static void origin_postsuspend(struct dm_target *ti) | ||
2310 | { | ||
2311 | struct dm_origin *o = ti->private; | ||
2312 | |||
2313 | down_write(&_origins_lock); | ||
2314 | __remove_dm_origin(o); | ||
2315 | up_write(&_origins_lock); | ||
2219 | } | 2316 | } |
2220 | 2317 | ||
2221 | static void origin_status(struct dm_target *ti, status_type_t type, | 2318 | static void origin_status(struct dm_target *ti, status_type_t type, |
@@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti, | |||
2258 | 2355 | ||
2259 | static struct target_type origin_target = { | 2356 | static struct target_type origin_target = { |
2260 | .name = "snapshot-origin", | 2357 | .name = "snapshot-origin", |
2261 | .version = {1, 8, 1}, | 2358 | .version = {1, 9, 0}, |
2262 | .module = THIS_MODULE, | 2359 | .module = THIS_MODULE, |
2263 | .ctr = origin_ctr, | 2360 | .ctr = origin_ctr, |
2264 | .dtr = origin_dtr, | 2361 | .dtr = origin_dtr, |
2265 | .map = origin_map, | 2362 | .map = origin_map, |
2266 | .resume = origin_resume, | 2363 | .resume = origin_resume, |
2364 | .postsuspend = origin_postsuspend, | ||
2267 | .status = origin_status, | 2365 | .status = origin_status, |
2268 | .merge = origin_merge, | 2366 | .merge = origin_merge, |
2269 | .iterate_devices = origin_iterate_devices, | 2367 | .iterate_devices = origin_iterate_devices, |
@@ -2271,7 +2369,7 @@ static struct target_type origin_target = { | |||
2271 | 2369 | ||
2272 | static struct target_type snapshot_target = { | 2370 | static struct target_type snapshot_target = { |
2273 | .name = "snapshot", | 2371 | .name = "snapshot", |
2274 | .version = {1, 12, 0}, | 2372 | .version = {1, 13, 0}, |
2275 | .module = THIS_MODULE, | 2373 | .module = THIS_MODULE, |
2276 | .ctr = snapshot_ctr, | 2374 | .ctr = snapshot_ctr, |
2277 | .dtr = snapshot_dtr, | 2375 | .dtr = snapshot_dtr, |
@@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = { | |||
2285 | 2383 | ||
2286 | static struct target_type merge_target = { | 2384 | static struct target_type merge_target = { |
2287 | .name = dm_snapshot_merge_target_name, | 2385 | .name = dm_snapshot_merge_target_name, |
2288 | .version = {1, 2, 0}, | 2386 | .version = {1, 3, 0}, |
2289 | .module = THIS_MODULE, | 2387 | .module = THIS_MODULE, |
2290 | .ctr = snapshot_ctr, | 2388 | .ctr = snapshot_ctr, |
2291 | .dtr = snapshot_dtr, | 2389 | .dtr = snapshot_dtr, |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 654773cb1eee..921aafd12aee 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) | |||
2358 | return DM_MAPIO_REMAPPED; | 2358 | return DM_MAPIO_REMAPPED; |
2359 | 2359 | ||
2360 | case -ENODATA: | 2360 | case -ENODATA: |
2361 | if (get_pool_mode(tc->pool) == PM_READ_ONLY) { | ||
2362 | /* | ||
2363 | * This block isn't provisioned, and we have no way | ||
2364 | * of doing so. | ||
2365 | */ | ||
2366 | handle_unserviceable_bio(tc->pool, bio); | ||
2367 | cell_defer_no_holder(tc, virt_cell); | ||
2368 | return DM_MAPIO_SUBMITTED; | ||
2369 | } | ||
2370 | /* fall through */ | ||
2371 | |||
2372 | case -EWOULDBLOCK: | 2361 | case -EWOULDBLOCK: |
2373 | thin_defer_cell(tc, virt_cell); | 2362 | thin_defer_cell(tc, virt_cell); |
2374 | return DM_MAPIO_SUBMITTED; | 2363 | return DM_MAPIO_SUBMITTED; |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 73f28802dc7a..8001fe9e3434 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) | |||
433 | 433 | ||
434 | dm_get(md); | 434 | dm_get(md); |
435 | atomic_inc(&md->open_count); | 435 | atomic_inc(&md->open_count); |
436 | |||
437 | out: | 436 | out: |
438 | spin_unlock(&_minor_lock); | 437 | spin_unlock(&_minor_lock); |
439 | 438 | ||
@@ -442,16 +441,20 @@ out: | |||
442 | 441 | ||
443 | static void dm_blk_close(struct gendisk *disk, fmode_t mode) | 442 | static void dm_blk_close(struct gendisk *disk, fmode_t mode) |
444 | { | 443 | { |
445 | struct mapped_device *md = disk->private_data; | 444 | struct mapped_device *md; |
446 | 445 | ||
447 | spin_lock(&_minor_lock); | 446 | spin_lock(&_minor_lock); |
448 | 447 | ||
448 | md = disk->private_data; | ||
449 | if (WARN_ON(!md)) | ||
450 | goto out; | ||
451 | |||
449 | if (atomic_dec_and_test(&md->open_count) && | 452 | if (atomic_dec_and_test(&md->open_count) && |
450 | (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) | 453 | (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) |
451 | queue_work(deferred_remove_workqueue, &deferred_remove_work); | 454 | queue_work(deferred_remove_workqueue, &deferred_remove_work); |
452 | 455 | ||
453 | dm_put(md); | 456 | dm_put(md); |
454 | 457 | out: | |
455 | spin_unlock(&_minor_lock); | 458 | spin_unlock(&_minor_lock); |
456 | } | 459 | } |
457 | 460 | ||
@@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md) | |||
2241 | int minor = MINOR(disk_devt(md->disk)); | 2244 | int minor = MINOR(disk_devt(md->disk)); |
2242 | 2245 | ||
2243 | unlock_fs(md); | 2246 | unlock_fs(md); |
2244 | bdput(md->bdev); | ||
2245 | destroy_workqueue(md->wq); | 2247 | destroy_workqueue(md->wq); |
2246 | 2248 | ||
2247 | if (md->kworker_task) | 2249 | if (md->kworker_task) |
@@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md) | |||
2252 | mempool_destroy(md->rq_pool); | 2254 | mempool_destroy(md->rq_pool); |
2253 | if (md->bs) | 2255 | if (md->bs) |
2254 | bioset_free(md->bs); | 2256 | bioset_free(md->bs); |
2255 | blk_integrity_unregister(md->disk); | 2257 | |
2256 | del_gendisk(md->disk); | ||
2257 | cleanup_srcu_struct(&md->io_barrier); | 2258 | cleanup_srcu_struct(&md->io_barrier); |
2258 | free_table_devices(&md->table_devices); | 2259 | free_table_devices(&md->table_devices); |
2259 | free_minor(minor); | 2260 | dm_stats_cleanup(&md->stats); |
2260 | 2261 | ||
2261 | spin_lock(&_minor_lock); | 2262 | spin_lock(&_minor_lock); |
2262 | md->disk->private_data = NULL; | 2263 | md->disk->private_data = NULL; |
2263 | spin_unlock(&_minor_lock); | 2264 | spin_unlock(&_minor_lock); |
2264 | 2265 | if (blk_get_integrity(md->disk)) | |
2266 | blk_integrity_unregister(md->disk); | ||
2267 | del_gendisk(md->disk); | ||
2265 | put_disk(md->disk); | 2268 | put_disk(md->disk); |
2266 | blk_cleanup_queue(md->queue); | 2269 | blk_cleanup_queue(md->queue); |
2267 | dm_stats_cleanup(&md->stats); | 2270 | bdput(md->bdev); |
2271 | free_minor(minor); | ||
2272 | |||
2268 | module_put(THIS_MODULE); | 2273 | module_put(THIS_MODULE); |
2269 | kfree(md); | 2274 | kfree(md); |
2270 | } | 2275 | } |
@@ -2616,6 +2621,19 @@ void dm_get(struct mapped_device *md) | |||
2616 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); | 2621 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); |
2617 | } | 2622 | } |
2618 | 2623 | ||
2624 | int dm_hold(struct mapped_device *md) | ||
2625 | { | ||
2626 | spin_lock(&_minor_lock); | ||
2627 | if (test_bit(DMF_FREEING, &md->flags)) { | ||
2628 | spin_unlock(&_minor_lock); | ||
2629 | return -EBUSY; | ||
2630 | } | ||
2631 | dm_get(md); | ||
2632 | spin_unlock(&_minor_lock); | ||
2633 | return 0; | ||
2634 | } | ||
2635 | EXPORT_SYMBOL_GPL(dm_hold); | ||
2636 | |||
2619 | const char *dm_device_name(struct mapped_device *md) | 2637 | const char *dm_device_name(struct mapped_device *md) |
2620 | { | 2638 | { |
2621 | return md->name; | 2639 | return md->name; |
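
dm_hold() only succeeds while DMF_FREEING is clear, and both the flag test and the reference bump happen under _minor_lock, so a mapped_device that __dm_destroy() has already marked for teardown can never gain a new user. A minimal userspace analogue of that check-and-take-under-lock pattern, using a pthread mutex and invented names:

#include <pthread.h>
#include <stdio.h>

struct object {
	pthread_mutex_t lock;
	int refcount;
	int freeing;		/* set once teardown has started */
};

/* Succeeds only while the object is not being torn down. */
static int object_hold(struct object *o)
{
	int ret = 0;

	pthread_mutex_lock(&o->lock);
	if (o->freeing)
		ret = -1;	/* stand-in for -EBUSY */
	else
		o->refcount++;	/* dm_get() equivalent */
	pthread_mutex_unlock(&o->lock);
	return ret;
}

int main(void)
{
	struct object o = { .lock = PTHREAD_MUTEX_INITIALIZER, .refcount = 1 };

	printf("hold: %d (refs %d)\n", object_hold(&o), o.refcount);
	o.freeing = 1;
	printf("hold after freeing: %d (refs %d)\n", object_hold(&o), o.refcount);
	return 0;
}
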
@@ -2629,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait) | |||
2629 | 2647 | ||
2630 | might_sleep(); | 2648 | might_sleep(); |
2631 | 2649 | ||
2632 | spin_lock(&_minor_lock); | ||
2633 | map = dm_get_live_table(md, &srcu_idx); | 2650 | map = dm_get_live_table(md, &srcu_idx); |
2651 | |||
2652 | spin_lock(&_minor_lock); | ||
2634 | idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); | 2653 | idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); |
2635 | set_bit(DMF_FREEING, &md->flags); | 2654 | set_bit(DMF_FREEING, &md->flags); |
2636 | spin_unlock(&_minor_lock); | 2655 | spin_unlock(&_minor_lock); |
@@ -2638,10 +2657,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait) | |||
2638 | if (dm_request_based(md)) | 2657 | if (dm_request_based(md)) |
2639 | flush_kthread_worker(&md->kworker); | 2658 | flush_kthread_worker(&md->kworker); |
2640 | 2659 | ||
2660 | /* | ||
2661 | * Take suspend_lock so that presuspend and postsuspend methods | ||
2662 | * do not race with internal suspend. | ||
2663 | */ | ||
2664 | mutex_lock(&md->suspend_lock); | ||
2641 | if (!dm_suspended_md(md)) { | 2665 | if (!dm_suspended_md(md)) { |
2642 | dm_table_presuspend_targets(map); | 2666 | dm_table_presuspend_targets(map); |
2643 | dm_table_postsuspend_targets(map); | 2667 | dm_table_postsuspend_targets(map); |
2644 | } | 2668 | } |
2669 | mutex_unlock(&md->suspend_lock); | ||
2645 | 2670 | ||
2646 | /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ | 2671 | /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ |
2647 | dm_put_live_table(md, srcu_idx); | 2672 | dm_put_live_table(md, srcu_idx); |
@@ -3115,6 +3140,7 @@ void dm_internal_suspend_fast(struct mapped_device *md) | |||
3115 | flush_workqueue(md->wq); | 3140 | flush_workqueue(md->wq); |
3116 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); | 3141 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); |
3117 | } | 3142 | } |
3143 | EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); | ||
3118 | 3144 | ||
3119 | void dm_internal_resume_fast(struct mapped_device *md) | 3145 | void dm_internal_resume_fast(struct mapped_device *md) |
3120 | { | 3146 | { |
@@ -3126,6 +3152,7 @@ void dm_internal_resume_fast(struct mapped_device *md) | |||
3126 | done: | 3152 | done: |
3127 | mutex_unlock(&md->suspend_lock); | 3153 | mutex_unlock(&md->suspend_lock); |
3128 | } | 3154 | } |
3155 | EXPORT_SYMBOL_GPL(dm_internal_resume_fast); | ||
3129 | 3156 | ||
3130 | /*----------------------------------------------------------------- | 3157 | /*----------------------------------------------------------------- |
3131 | * Event notification. | 3158 | * Event notification. |
diff --git a/drivers/md/md.c b/drivers/md/md.c index cadf9cc02b25..717daad71fb1 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -5080,7 +5080,8 @@ int md_run(struct mddev *mddev) | |||
5080 | } | 5080 | } |
5081 | if (err) { | 5081 | if (err) { |
5082 | mddev_detach(mddev); | 5082 | mddev_detach(mddev); |
5083 | pers->free(mddev, mddev->private); | 5083 | if (mddev->private) |
5084 | pers->free(mddev, mddev->private); | ||
5084 | module_put(pers->owner); | 5085 | module_put(pers->owner); |
5085 | bitmap_destroy(mddev); | 5086 | bitmap_destroy(mddev); |
5086 | return err; | 5087 | return err; |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a13f738a7b39..3ed9f42ddca6 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -467,8 +467,6 @@ static int raid0_run(struct mddev *mddev) | |||
467 | dump_zones(mddev); | 467 | dump_zones(mddev); |
468 | 468 | ||
469 | ret = md_integrity_register(mddev); | 469 | ret = md_integrity_register(mddev); |
470 | if (ret) | ||
471 | raid0_free(mddev, conf); | ||
472 | 470 | ||
473 | return ret; | 471 | return ret; |
474 | } | 472 | } |
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index f38ec424872e..5615522f8d62 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c | |||
@@ -739,7 +739,7 @@ static int __init kempld_init(void) | |||
739 | for (id = kempld_dmi_table; | 739 | for (id = kempld_dmi_table; |
740 | id->matches[0].slot != DMI_NONE; id++) | 740 | id->matches[0].slot != DMI_NONE; id++) |
741 | if (strstr(id->ident, force_device_id)) | 741 | if (strstr(id->ident, force_device_id)) |
742 | if (id->callback && id->callback(id)) | 742 | if (id->callback && !id->callback(id)) |
743 | break; | 743 | break; |
744 | if (id->matches[0].slot == DMI_NONE) | 744 | if (id->matches[0].slot == DMI_NONE) |
745 | return -ENODEV; | 745 | return -ENODEV; |
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c index ede50244f265..dbd907d7170e 100644 --- a/drivers/mfd/rtsx_usb.c +++ b/drivers/mfd/rtsx_usb.c | |||
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register); | |||
196 | int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) | 196 | int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) |
197 | { | 197 | { |
198 | u16 value; | 198 | u16 value; |
199 | u8 *buf; | ||
200 | int ret; | ||
199 | 201 | ||
200 | if (!data) | 202 | if (!data) |
201 | return -EINVAL; | 203 | return -EINVAL; |
202 | *data = 0; | 204 | |
205 | buf = kzalloc(sizeof(u8), GFP_KERNEL); | ||
206 | if (!buf) | ||
207 | return -ENOMEM; | ||
203 | 208 | ||
204 | addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; | 209 | addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; |
205 | value = swab16(addr); | 210 | value = swab16(addr); |
206 | 211 | ||
207 | return usb_control_msg(ucr->pusb_dev, | 212 | ret = usb_control_msg(ucr->pusb_dev, |
208 | usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, | 213 | usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, |
209 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 214 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
210 | value, 0, data, 1, 100); | 215 | value, 0, buf, 1, 100); |
216 | *data = *buf; | ||
217 | |||
218 | kfree(buf); | ||
219 | return ret; | ||
211 | } | 220 | } |
212 | EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); | 221 | EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); |
213 | 222 | ||
@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status) | |||
288 | int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) | 297 | int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) |
289 | { | 298 | { |
290 | int ret; | 299 | int ret; |
300 | u16 *buf; | ||
291 | 301 | ||
292 | if (!status) | 302 | if (!status) |
293 | return -EINVAL; | 303 | return -EINVAL; |
294 | 304 | ||
295 | if (polling_pipe == 0) | 305 | if (polling_pipe == 0) { |
306 | buf = kzalloc(sizeof(u16), GFP_KERNEL); | ||
307 | if (!buf) | ||
308 | return -ENOMEM; | ||
309 | |||
296 | ret = usb_control_msg(ucr->pusb_dev, | 310 | ret = usb_control_msg(ucr->pusb_dev, |
297 | usb_rcvctrlpipe(ucr->pusb_dev, 0), | 311 | usb_rcvctrlpipe(ucr->pusb_dev, 0), |
298 | RTSX_USB_REQ_POLL, | 312 | RTSX_USB_REQ_POLL, |
299 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 313 | USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
300 | 0, 0, status, 2, 100); | 314 | 0, 0, buf, 2, 100); |
301 | else | 315 | *status = *buf; |
316 | |||
317 | kfree(buf); | ||
318 | } else { | ||
302 | ret = rtsx_usb_get_status_with_bulk(ucr, status); | 319 | ret = rtsx_usb_get_status_with_bulk(ucr, status); |
320 | } | ||
303 | 321 | ||
304 | /* usb_control_msg may return positive when success */ | 322 | /* usb_control_msg may return positive when success */ |
305 | if (ret < 0) | 323 | if (ret < 0) |
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c index e9f1d8d84613..c53f14a7ce54 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c | |||
@@ -124,7 +124,7 @@ int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev) | |||
124 | PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) { | 124 | PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) { |
125 | ret = PTR_ERR(pwrseq->reset_gpios[i]); | 125 | ret = PTR_ERR(pwrseq->reset_gpios[i]); |
126 | 126 | ||
127 | while (--i) | 127 | while (i--) |
128 | gpiod_put(pwrseq->reset_gpios[i]); | 128 | gpiod_put(pwrseq->reset_gpios[i]); |
129 | 129 | ||
130 | goto clk_put; | 130 | goto clk_put; |
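
The one-character change above fixes the error-path unwind: with while (--i) the first GPIO (index 0) is never released, and a failure at index 0 decrements straight past zero, while while (i--) releases exactly the descriptors acquired before the failure. A tiny sketch of the corrected unwind with the GPIO calls faked:

#include <stdio.h>

#define NGPIOS 4

int main(void)
{
	int acquired[NGPIOS] = { 0 };
	int i;

	/* pretend acquisition fails at index 3 */
	for (i = 0; i < 3; i++)
		acquired[i] = 1;

	/* error path: release everything acquired so far */
	while (i--) {			/* i-- visits 2, 1, 0; --i would skip 0 */
		acquired[i] = 0;
		printf("released %d\n", i);
	}
	return 0;
}
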
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 5b76a173cd95..5897d8d8fa5a 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -526,6 +526,7 @@ config MTD_NAND_SUNXI | |||
526 | 526 | ||
527 | config MTD_NAND_HISI504 | 527 | config MTD_NAND_HISI504 |
528 | tristate "Support for NAND controller on Hisilicon SoC Hip04" | 528 | tristate "Support for NAND controller on Hisilicon SoC Hip04" |
529 | depends on HAS_DMA | ||
529 | help | 530 | help |
530 | Enables support for NAND controller on Hisilicon SoC Hip04. | 531 | Enables support for NAND controller on Hisilicon SoC Hip04. |
531 | 532 | ||
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 96b0b1d27df1..10b1f7a4fe50 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | |||
480 | nand_writel(info, NDCR, ndcr | int_mask); | 480 | nand_writel(info, NDCR, ndcr | int_mask); |
481 | } | 481 | } |
482 | 482 | ||
483 | static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len) | ||
484 | { | ||
485 | if (info->ecc_bch) { | ||
486 | int timeout; | ||
487 | |||
488 | /* | ||
489 | * According to the datasheet, when reading from NDDB | ||
490 | * with BCH enabled, after each 32-byte read, we | ||
491 | * have to make sure that the NDSR.RDDREQ bit is set. | ||
492 | * | ||
493 | * Drain the FIFO eight 32-bit reads at a time, and skip | ||
494 | * the polling on the last read. | ||
495 | */ | ||
496 | while (len > 8) { | ||
497 | __raw_readsl(info->mmio_base + NDDB, data, 8); | ||
498 | |||
499 | for (timeout = 0; | ||
500 | !(nand_readl(info, NDSR) & NDSR_RDDREQ); | ||
501 | timeout++) { | ||
502 | if (timeout >= 5) { | ||
503 | dev_err(&info->pdev->dev, | ||
504 | "Timeout on RDDREQ while draining the FIFO\n"); | ||
505 | return; | ||
506 | } | ||
507 | |||
508 | mdelay(1); | ||
509 | } | ||
510 | |||
511 | data += 32; | ||
512 | len -= 8; | ||
513 | } | ||
514 | } | ||
515 | |||
516 | __raw_readsl(info->mmio_base + NDDB, data, len); | ||
517 | } | ||
518 | |||
483 | static void handle_data_pio(struct pxa3xx_nand_info *info) | 519 | static void handle_data_pio(struct pxa3xx_nand_info *info) |
484 | { | 520 | { |
485 | unsigned int do_bytes = min(info->data_size, info->chunk_size); | 521 | unsigned int do_bytes = min(info->data_size, info->chunk_size); |
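
drain_fifo() honours the documented BCH restriction that NDSR.RDDREQ must be re-checked after every 32 bytes read from NDDB: it pulls eight 32-bit words per chunk, polls the status bit with a bounded timeout between chunks, and reads the final partial chunk without polling. A userspace sketch of that chunked-copy structure, with the FIFO and the status poll replaced by a plain buffer and a stub predicate:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_WORDS 8	/* 8 x 32-bit reads == 32 bytes, as in the driver */

/* Stub: in the driver this polls NDSR.RDDREQ with a bounded mdelay() loop. */
static bool fifo_ready(void)
{
	return true;
}

static int drain(const unsigned int *fifo, unsigned int *dst, int len)
{
	while (len > CHUNK_WORDS) {
		memcpy(dst, fifo, CHUNK_WORDS * sizeof(*dst));
		fifo += CHUNK_WORDS;
		dst += CHUNK_WORDS;
		len -= CHUNK_WORDS;

		if (!fifo_ready())
			return -1;	/* timed out waiting for RDDREQ */
	}
	memcpy(dst, fifo, len * sizeof(*dst));	/* last chunk, no poll */
	return 0;
}

int main(void)
{
	unsigned int src[20], dst[20];
	int i;

	for (i = 0; i < 20; i++)
		src[i] = i;
	if (drain(src, dst, 20) == 0)
		printf("last word %u\n", dst[19]);
	return 0;
}
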
@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) | |||
496 | DIV_ROUND_UP(info->oob_size, 4)); | 532 | DIV_ROUND_UP(info->oob_size, 4)); |
497 | break; | 533 | break; |
498 | case STATE_PIO_READING: | 534 | case STATE_PIO_READING: |
499 | __raw_readsl(info->mmio_base + NDDB, | 535 | drain_fifo(info, |
500 | info->data_buff + info->data_buff_pos, | 536 | info->data_buff + info->data_buff_pos, |
501 | DIV_ROUND_UP(do_bytes, 4)); | 537 | DIV_ROUND_UP(do_bytes, 4)); |
502 | 538 | ||
503 | if (info->oob_size > 0) | 539 | if (info->oob_size > 0) |
504 | __raw_readsl(info->mmio_base + NDDB, | 540 | drain_fifo(info, |
505 | info->oob_buff + info->oob_buff_pos, | 541 | info->oob_buff + info->oob_buff_pos, |
506 | DIV_ROUND_UP(info->oob_size, 4)); | 542 | DIV_ROUND_UP(info->oob_size, 4)); |
507 | break; | 543 | break; |
508 | default: | 544 | default: |
509 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, | 545 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, |
@@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev) | |||
1572 | int ret, irq, cs; | 1608 | int ret, irq, cs; |
1573 | 1609 | ||
1574 | pdata = dev_get_platdata(&pdev->dev); | 1610 | pdata = dev_get_platdata(&pdev->dev); |
1611 | if (pdata->num_cs <= 0) | ||
1612 | return -ENODEV; | ||
1575 | info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + | 1613 | info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + |
1576 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); | 1614 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); |
1577 | if (!info) | 1615 | if (!info) |
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index da4c79259f67..16e34b37d134 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c | |||
@@ -425,9 +425,10 @@ retry: | |||
425 | ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", | 425 | ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d", |
426 | pnum, vol_id, lnum); | 426 | pnum, vol_id, lnum); |
427 | err = -EBADMSG; | 427 | err = -EBADMSG; |
428 | } else | 428 | } else { |
429 | err = -EINVAL; | 429 | err = -EINVAL; |
430 | ubi_ro_mode(ubi); | 430 | ubi_ro_mode(ubi); |
431 | } | ||
431 | } | 432 | } |
432 | goto out_free; | 433 | goto out_free; |
433 | } else if (err == UBI_IO_BITFLIPS) | 434 | } else if (err == UBI_IO_BITFLIPS) |
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 98d73aab52fe..58808f651452 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig | |||
@@ -131,7 +131,7 @@ config CAN_RCAR | |||
131 | 131 | ||
132 | config CAN_XILINXCAN | 132 | config CAN_XILINXCAN |
133 | tristate "Xilinx CAN" | 133 | tristate "Xilinx CAN" |
134 | depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST | 134 | depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST |
135 | depends on COMMON_CLK && HAS_IOMEM | 135 | depends on COMMON_CLK && HAS_IOMEM |
136 | ---help--- | 136 | ---help--- |
137 | Xilinx CAN driver. This driver supports both soft AXI CAN IP and | 137 | Xilinx CAN driver. This driver supports both soft AXI CAN IP and |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3c82e02e3dae..b0f69248cb71 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
@@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) | |||
579 | skb->pkt_type = PACKET_BROADCAST; | 579 | skb->pkt_type = PACKET_BROADCAST; |
580 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 580 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
581 | 581 | ||
582 | skb_reset_mac_header(skb); | ||
583 | skb_reset_network_header(skb); | ||
584 | skb_reset_transport_header(skb); | ||
585 | |||
582 | can_skb_reserve(skb); | 586 | can_skb_reserve(skb); |
583 | can_skb_prv(skb)->ifindex = dev->ifindex; | 587 | can_skb_prv(skb)->ifindex = dev->ifindex; |
584 | 588 | ||
@@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, | |||
603 | skb->pkt_type = PACKET_BROADCAST; | 607 | skb->pkt_type = PACKET_BROADCAST; |
604 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 608 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
605 | 609 | ||
610 | skb_reset_mac_header(skb); | ||
611 | skb_reset_network_header(skb); | ||
612 | skb_reset_transport_header(skb); | ||
613 | |||
606 | can_skb_reserve(skb); | 614 | can_skb_reserve(skb); |
607 | can_skb_prv(skb)->ifindex = dev->ifindex; | 615 | can_skb_prv(skb)->ifindex = dev->ifindex; |
608 | 616 | ||
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 2928f7003041..e97a08ce0b90 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -14,6 +14,8 @@ | |||
14 | * Copyright (C) 2015 Valeo S.A. | 14 | * Copyright (C) 2015 Valeo S.A. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/kernel.h> | ||
17 | #include <linux/completion.h> | 19 | #include <linux/completion.h> |
18 | #include <linux/module.h> | 20 | #include <linux/module.h> |
19 | #include <linux/netdevice.h> | 21 | #include <linux/netdevice.h> |
@@ -466,10 +468,11 @@ struct kvaser_usb { | |||
466 | struct kvaser_usb_net_priv { | 468 | struct kvaser_usb_net_priv { |
467 | struct can_priv can; | 469 | struct can_priv can; |
468 | 470 | ||
469 | atomic_t active_tx_urbs; | 471 | spinlock_t tx_contexts_lock; |
470 | struct usb_anchor tx_submitted; | 472 | int active_tx_contexts; |
471 | struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; | 473 | struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; |
472 | 474 | ||
475 | struct usb_anchor tx_submitted; | ||
473 | struct completion start_comp, stop_comp; | 476 | struct completion start_comp, stop_comp; |
474 | 477 | ||
475 | struct kvaser_usb *dev; | 478 | struct kvaser_usb *dev; |
@@ -584,8 +587,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, | |||
584 | while (pos <= actual_len - MSG_HEADER_LEN) { | 587 | while (pos <= actual_len - MSG_HEADER_LEN) { |
585 | tmp = buf + pos; | 588 | tmp = buf + pos; |
586 | 589 | ||
587 | if (!tmp->len) | 590 | /* Handle messages crossing the USB endpoint max packet |
588 | break; | 591 | * size boundary. Check kvaser_usb_read_bulk_callback() |
592 | * for further details. | ||
593 | */ | ||
594 | if (tmp->len == 0) { | ||
595 | pos = round_up(pos, | ||
596 | dev->bulk_in->wMaxPacketSize); | ||
597 | continue; | ||
598 | } | ||
589 | 599 | ||
590 | if (pos + tmp->len > actual_len) { | 600 | if (pos + tmp->len > actual_len) { |
591 | dev_err(dev->udev->dev.parent, | 601 | dev_err(dev->udev->dev.parent, |
@@ -686,6 +696,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, | |||
686 | struct kvaser_usb_net_priv *priv; | 696 | struct kvaser_usb_net_priv *priv; |
687 | struct sk_buff *skb; | 697 | struct sk_buff *skb; |
688 | struct can_frame *cf; | 698 | struct can_frame *cf; |
699 | unsigned long flags; | ||
689 | u8 channel, tid; | 700 | u8 channel, tid; |
690 | 701 | ||
691 | channel = msg->u.tx_acknowledge_header.channel; | 702 | channel = msg->u.tx_acknowledge_header.channel; |
@@ -729,12 +740,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, | |||
729 | 740 | ||
730 | stats->tx_packets++; | 741 | stats->tx_packets++; |
731 | stats->tx_bytes += context->dlc; | 742 | stats->tx_bytes += context->dlc; |
732 | can_get_echo_skb(priv->netdev, context->echo_index); | ||
733 | 743 | ||
734 | context->echo_index = MAX_TX_URBS; | 744 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); |
735 | atomic_dec(&priv->active_tx_urbs); | ||
736 | 745 | ||
746 | can_get_echo_skb(priv->netdev, context->echo_index); | ||
747 | context->echo_index = MAX_TX_URBS; | ||
748 | --priv->active_tx_contexts; | ||
737 | netif_wake_queue(priv->netdev); | 749 | netif_wake_queue(priv->netdev); |
750 | |||
751 | spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); | ||
738 | } | 752 | } |
739 | 753 | ||
740 | static void kvaser_usb_simple_msg_callback(struct urb *urb) | 754 | static void kvaser_usb_simple_msg_callback(struct urb *urb) |
@@ -787,7 +801,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, | |||
787 | netdev_err(netdev, "Error transmitting URB\n"); | 801 | netdev_err(netdev, "Error transmitting URB\n"); |
788 | usb_unanchor_urb(urb); | 802 | usb_unanchor_urb(urb); |
789 | usb_free_urb(urb); | 803 | usb_free_urb(urb); |
790 | kfree(buf); | ||
791 | return err; | 804 | return err; |
792 | } | 805 | } |
793 | 806 | ||
@@ -796,17 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, | |||
796 | return 0; | 809 | return 0; |
797 | } | 810 | } |
798 | 811 | ||
799 | static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) | ||
800 | { | ||
801 | int i; | ||
802 | |||
803 | usb_kill_anchored_urbs(&priv->tx_submitted); | ||
804 | atomic_set(&priv->active_tx_urbs, 0); | ||
805 | |||
806 | for (i = 0; i < MAX_TX_URBS; i++) | ||
807 | priv->tx_contexts[i].echo_index = MAX_TX_URBS; | ||
808 | } | ||
809 | |||
810 | static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, | 812 | static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, |
811 | const struct kvaser_usb_error_summary *es, | 813 | const struct kvaser_usb_error_summary *es, |
812 | struct can_frame *cf) | 814 | struct can_frame *cf) |
@@ -1317,8 +1319,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
1317 | while (pos <= urb->actual_length - MSG_HEADER_LEN) { | 1319 | while (pos <= urb->actual_length - MSG_HEADER_LEN) { |
1318 | msg = urb->transfer_buffer + pos; | 1320 | msg = urb->transfer_buffer + pos; |
1319 | 1321 | ||
1320 | if (!msg->len) | 1322 | /* The Kvaser firmware can only read and write messages that |
1321 | break; | 1323 | * do not cross the USB endpoint's wMaxPacketSize boundary. |
1324 | * If a follow-up command crosses such a boundary, the firmware puts | ||
1325 | * a placeholder zero-length command in its place and aligns | ||
1326 | * the real command to the next max packet size boundary. | ||
1327 | * | ||
1328 | * Handle such cases or we're going to miss a significant | ||
1329 | * number of events in case of a heavy rx load on the bus. | ||
1330 | */ | ||
1331 | if (msg->len == 0) { | ||
1332 | pos = round_up(pos, dev->bulk_in->wMaxPacketSize); | ||
1333 | continue; | ||
1334 | } | ||
1322 | 1335 | ||
1323 | if (pos + msg->len > urb->actual_length) { | 1336 | if (pos + msg->len > urb->actual_length) { |
1324 | dev_err(dev->udev->dev.parent, "Format error\n"); | 1337 | dev_err(dev->udev->dev.parent, "Format error\n"); |
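
Both kvaser_usb_wait_msg() and the bulk-in completion handler now treat a zero-length header as the firmware's padding marker and skip to the next wMaxPacketSize boundary instead of stopping, so commands packed after the boundary are no longer lost. A small sketch of walking such a buffer; the record layout and the 64-byte packet size are assumptions for illustration, not the Kvaser wire format:

#include <stdio.h>

#define MAX_PACKET 64	/* wMaxPacketSize of the bulk-in endpoint, assumed */

/* round x up to the next multiple of the power-of-two boundary y */
static unsigned int round_up_to(unsigned int x, unsigned int y)
{
	return (x + y - 1) & ~(y - 1);
}

struct msg_header {
	unsigned char len;	/* total length of this message, 0 == padding */
	unsigned char id;
};

static void walk(const unsigned char *buf, unsigned int actual_len)
{
	unsigned int pos = 0;

	while (pos + sizeof(struct msg_header) <= actual_len) {
		const struct msg_header *msg = (const void *)(buf + pos);

		if (msg->len == 0) {
			/* placeholder: real data resumes at the next packet
			 * (placeholders only appear mid-packet, so pos is
			 * never already aligned here)
			 */
			pos = round_up_to(pos, MAX_PACKET);
			continue;
		}
		printf("msg id %d, len %d at byte %u\n", msg->id, msg->len, pos);
		pos += msg->len;
	}
}

int main(void)
{
	unsigned char buf[128] = { 0 };

	buf[0] = 60; buf[1] = 1;	/* 60-byte message */
	/* bytes 60..63 left zero: padding before the packet boundary */
	buf[64] = 8; buf[65] = 2;	/* next message starts on the boundary */

	walk(buf, 72);
	return 0;
}
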
@@ -1326,7 +1339,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
1326 | } | 1339 | } |
1327 | 1340 | ||
1328 | kvaser_usb_handle_message(dev, msg); | 1341 | kvaser_usb_handle_message(dev, msg); |
1329 | |||
1330 | pos += msg->len; | 1342 | pos += msg->len; |
1331 | } | 1343 | } |
1332 | 1344 | ||
@@ -1498,6 +1510,24 @@ error: | |||
1498 | return err; | 1510 | return err; |
1499 | } | 1511 | } |
1500 | 1512 | ||
1513 | static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) | ||
1514 | { | ||
1515 | int i; | ||
1516 | |||
1517 | priv->active_tx_contexts = 0; | ||
1518 | for (i = 0; i < MAX_TX_URBS; i++) | ||
1519 | priv->tx_contexts[i].echo_index = MAX_TX_URBS; | ||
1520 | } | ||
1521 | |||
1522 | /* This method might sleep. Do not call it in the atomic context | ||
1523 | * of URB completions. | ||
1524 | */ | ||
1525 | static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) | ||
1526 | { | ||
1527 | usb_kill_anchored_urbs(&priv->tx_submitted); | ||
1528 | kvaser_usb_reset_tx_urb_contexts(priv); | ||
1529 | } | ||
1530 | |||
1501 | static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) | 1531 | static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) |
1502 | { | 1532 | { |
1503 | int i; | 1533 | int i; |
@@ -1615,9 +1645,9 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1615 | struct urb *urb; | 1645 | struct urb *urb; |
1616 | void *buf; | 1646 | void *buf; |
1617 | struct kvaser_msg *msg; | 1647 | struct kvaser_msg *msg; |
1618 | int i, err; | 1648 | int i, err, ret = NETDEV_TX_OK; |
1619 | int ret = NETDEV_TX_OK; | ||
1620 | u8 *msg_tx_can_flags = NULL; /* GCC */ | 1649 | u8 *msg_tx_can_flags = NULL; /* GCC */ |
1650 | unsigned long flags; | ||
1621 | 1651 | ||
1622 | if (can_dropped_invalid_skb(netdev, skb)) | 1652 | if (can_dropped_invalid_skb(netdev, skb)) |
1623 | return NETDEV_TX_OK; | 1653 | return NETDEV_TX_OK; |
@@ -1634,7 +1664,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1634 | if (!buf) { | 1664 | if (!buf) { |
1635 | stats->tx_dropped++; | 1665 | stats->tx_dropped++; |
1636 | dev_kfree_skb(skb); | 1666 | dev_kfree_skb(skb); |
1637 | goto nobufmem; | 1667 | goto freeurb; |
1638 | } | 1668 | } |
1639 | 1669 | ||
1640 | msg = buf; | 1670 | msg = buf; |
@@ -1671,22 +1701,32 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1671 | if (cf->can_id & CAN_RTR_FLAG) | 1701 | if (cf->can_id & CAN_RTR_FLAG) |
1672 | *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; | 1702 | *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; |
1673 | 1703 | ||
1704 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); | ||
1674 | for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { | 1705 | for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { |
1675 | if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { | 1706 | if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { |
1676 | context = &priv->tx_contexts[i]; | 1707 | context = &priv->tx_contexts[i]; |
1708 | |||
1709 | context->echo_index = i; | ||
1710 | can_put_echo_skb(skb, netdev, context->echo_index); | ||
1711 | ++priv->active_tx_contexts; | ||
1712 | if (priv->active_tx_contexts >= MAX_TX_URBS) | ||
1713 | netif_stop_queue(netdev); | ||
1714 | |||
1677 | break; | 1715 | break; |
1678 | } | 1716 | } |
1679 | } | 1717 | } |
1718 | spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); | ||
1680 | 1719 | ||
1681 | /* This should never happen; it implies a flow control bug */ | 1720 | /* This should never happen; it implies a flow control bug */ |
1682 | if (!context) { | 1721 | if (!context) { |
1683 | netdev_warn(netdev, "cannot find free context\n"); | 1722 | netdev_warn(netdev, "cannot find free context\n"); |
1723 | |||
1724 | kfree(buf); | ||
1684 | ret = NETDEV_TX_BUSY; | 1725 | ret = NETDEV_TX_BUSY; |
1685 | goto releasebuf; | 1726 | goto freeurb; |
1686 | } | 1727 | } |
1687 | 1728 | ||
1688 | context->priv = priv; | 1729 | context->priv = priv; |
1689 | context->echo_index = i; | ||
1690 | context->dlc = cf->can_dlc; | 1730 | context->dlc = cf->can_dlc; |
1691 | 1731 | ||
1692 | msg->u.tx_can.tid = context->echo_index; | 1732 | msg->u.tx_can.tid = context->echo_index; |
@@ -1698,18 +1738,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1698 | kvaser_usb_write_bulk_callback, context); | 1738 | kvaser_usb_write_bulk_callback, context); |
1699 | usb_anchor_urb(urb, &priv->tx_submitted); | 1739 | usb_anchor_urb(urb, &priv->tx_submitted); |
1700 | 1740 | ||
1701 | can_put_echo_skb(skb, netdev, context->echo_index); | ||
1702 | |||
1703 | atomic_inc(&priv->active_tx_urbs); | ||
1704 | |||
1705 | if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) | ||
1706 | netif_stop_queue(netdev); | ||
1707 | |||
1708 | err = usb_submit_urb(urb, GFP_ATOMIC); | 1741 | err = usb_submit_urb(urb, GFP_ATOMIC); |
1709 | if (unlikely(err)) { | 1742 | if (unlikely(err)) { |
1743 | spin_lock_irqsave(&priv->tx_contexts_lock, flags); | ||
1744 | |||
1710 | can_free_echo_skb(netdev, context->echo_index); | 1745 | can_free_echo_skb(netdev, context->echo_index); |
1746 | context->echo_index = MAX_TX_URBS; | ||
1747 | --priv->active_tx_contexts; | ||
1748 | netif_wake_queue(netdev); | ||
1749 | |||
1750 | spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); | ||
1711 | 1751 | ||
1712 | atomic_dec(&priv->active_tx_urbs); | ||
1713 | usb_unanchor_urb(urb); | 1752 | usb_unanchor_urb(urb); |
1714 | 1753 | ||
1715 | stats->tx_dropped++; | 1754 | stats->tx_dropped++; |
@@ -1719,16 +1758,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1719 | else | 1758 | else |
1720 | netdev_warn(netdev, "Failed tx_urb %d\n", err); | 1759 | netdev_warn(netdev, "Failed tx_urb %d\n", err); |
1721 | 1760 | ||
1722 | goto releasebuf; | 1761 | goto freeurb; |
1723 | } | 1762 | } |
1724 | 1763 | ||
1725 | usb_free_urb(urb); | 1764 | ret = NETDEV_TX_OK; |
1726 | |||
1727 | return NETDEV_TX_OK; | ||
1728 | 1765 | ||
1729 | releasebuf: | 1766 | freeurb: |
1730 | kfree(buf); | ||
1731 | nobufmem: | ||
1732 | usb_free_urb(urb); | 1767 | usb_free_urb(urb); |
1733 | return ret; | 1768 | return ret; |
1734 | } | 1769 | } |
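
Taken together, the start_xmit changes above move the echo-skb bookkeeping and the queue flow control under tx_contexts_lock, so a context is reserved, accounted, and (when it is the last free slot) the queue stopped in one atomic step. Condensed, the resulting allocation path looks roughly like this (a sketch of the hunk's end state, with URB setup and error handling elided):

    spin_lock_irqsave(&priv->tx_contexts_lock, flags);
    for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
        if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
            context = &priv->tx_contexts[i];

            context->echo_index = i;
            can_put_echo_skb(skb, netdev, context->echo_index);
            ++priv->active_tx_contexts;
            if (priv->active_tx_contexts >= MAX_TX_URBS)
                netif_stop_queue(netdev);   /* last free slot taken */
            break;
        }
    }
    spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);

    /* If usb_submit_urb() later fails, the same lock is taken again to
     * free the echo skb, release the context and wake the queue. */
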
@@ -1840,7 +1875,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf, | |||
1840 | struct kvaser_usb *dev = usb_get_intfdata(intf); | 1875 | struct kvaser_usb *dev = usb_get_intfdata(intf); |
1841 | struct net_device *netdev; | 1876 | struct net_device *netdev; |
1842 | struct kvaser_usb_net_priv *priv; | 1877 | struct kvaser_usb_net_priv *priv; |
1843 | int i, err; | 1878 | int err; |
1844 | 1879 | ||
1845 | err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); | 1880 | err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); |
1846 | if (err) | 1881 | if (err) |
@@ -1854,19 +1889,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf, | |||
1854 | 1889 | ||
1855 | priv = netdev_priv(netdev); | 1890 | priv = netdev_priv(netdev); |
1856 | 1891 | ||
1892 | init_usb_anchor(&priv->tx_submitted); | ||
1857 | init_completion(&priv->start_comp); | 1893 | init_completion(&priv->start_comp); |
1858 | init_completion(&priv->stop_comp); | 1894 | init_completion(&priv->stop_comp); |
1859 | 1895 | ||
1860 | init_usb_anchor(&priv->tx_submitted); | ||
1861 | atomic_set(&priv->active_tx_urbs, 0); | ||
1862 | |||
1863 | for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) | ||
1864 | priv->tx_contexts[i].echo_index = MAX_TX_URBS; | ||
1865 | |||
1866 | priv->dev = dev; | 1896 | priv->dev = dev; |
1867 | priv->netdev = netdev; | 1897 | priv->netdev = netdev; |
1868 | priv->channel = channel; | 1898 | priv->channel = channel; |
1869 | 1899 | ||
1900 | spin_lock_init(&priv->tx_contexts_lock); | ||
1901 | kvaser_usb_reset_tx_urb_contexts(priv); | ||
1902 | |||
1870 | priv->can.state = CAN_STATE_STOPPED; | 1903 | priv->can.state = CAN_STATE_STOPPED; |
1871 | priv->can.clock.freq = CAN_USB_CLOCK; | 1904 | priv->can.clock.freq = CAN_USB_CLOCK; |
1872 | priv->can.bittiming_const = &kvaser_usb_bittiming_const; | 1905 | priv->can.bittiming_const = &kvaser_usb_bittiming_const; |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 962c3f027383..0bac0f14edc3 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |||
@@ -879,6 +879,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) | |||
879 | 879 | ||
880 | pdev->usb_if = ppdev->usb_if; | 880 | pdev->usb_if = ppdev->usb_if; |
881 | pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; | 881 | pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; |
882 | |||
883 | /* do a copy of the ctrlmode[_supported] too */ | ||
884 | dev->can.ctrlmode = ppdev->dev.can.ctrlmode; | ||
885 | dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; | ||
882 | } | 886 | } |
883 | 887 | ||
884 | pdev->usb_if->dev[dev->ctrl_idx] = dev; | 888 | pdev->usb_if->dev[dev->ctrl_idx] = dev; |
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 11d6e6561df1..15a8190a6f75 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c | |||
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1543 | { | 1543 | { |
1544 | struct pcnet32_private *lp; | 1544 | struct pcnet32_private *lp; |
1545 | int i, media; | 1545 | int i, media; |
1546 | int fdx, mii, fset, dxsuflo; | 1546 | int fdx, mii, fset, dxsuflo, sram; |
1547 | int chip_version; | 1547 | int chip_version; |
1548 | char *chipname; | 1548 | char *chipname; |
1549 | struct net_device *dev; | 1549 | struct net_device *dev; |
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | /* initialize variables */ | 1582 | /* initialize variables */ |
1583 | fdx = mii = fset = dxsuflo = 0; | 1583 | fdx = mii = fset = dxsuflo = sram = 0; |
1584 | chip_version = (chip_version >> 12) & 0xffff; | 1584 | chip_version = (chip_version >> 12) & 0xffff; |
1585 | 1585 | ||
1586 | switch (chip_version) { | 1586 | switch (chip_version) { |
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1613 | chipname = "PCnet/FAST III 79C973"; /* PCI */ | 1613 | chipname = "PCnet/FAST III 79C973"; /* PCI */ |
1614 | fdx = 1; | 1614 | fdx = 1; |
1615 | mii = 1; | 1615 | mii = 1; |
1616 | sram = 1; | ||
1616 | break; | 1617 | break; |
1617 | case 0x2626: | 1618 | case 0x2626: |
1618 | chipname = "PCnet/Home 79C978"; /* PCI */ | 1619 | chipname = "PCnet/Home 79C978"; /* PCI */ |
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1636 | chipname = "PCnet/FAST III 79C975"; /* PCI */ | 1637 | chipname = "PCnet/FAST III 79C975"; /* PCI */ |
1637 | fdx = 1; | 1638 | fdx = 1; |
1638 | mii = 1; | 1639 | mii = 1; |
1640 | sram = 1; | ||
1639 | break; | 1641 | break; |
1640 | case 0x2628: | 1642 | case 0x2628: |
1641 | chipname = "PCnet/PRO 79C976"; | 1643 | chipname = "PCnet/PRO 79C976"; |
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1664 | dxsuflo = 1; | 1666 | dxsuflo = 1; |
1665 | } | 1667 | } |
1666 | 1668 | ||
1669 | /* | ||
1670 | * The Am79C973/Am79C975 controllers come with 12K of SRAM | ||
1671 | * which we can use for the Tx/Rx buffers but most importantly, | ||
1672 | * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid | ||
1673 | * Tx fifo underflows. | ||
1674 | */ | ||
1675 | if (sram) { | ||
1676 | /* | ||
1677 | * The SRAM is being configured in two steps. First we | ||
1678 | * set the SRAM size in the BCR25:SRAM_SIZE bits. According | ||
1679 | * to the datasheet, each bit corresponds to a 512-byte | ||
1680 | * page so we can have at most 24 pages. The SRAM_SIZE | ||
1681 | * holds the value of the upper 8 bits of the 16-bit SRAM size. | ||
1682 | * The low 8 bits start at 0x00 and end at 0xff. So the | ||
1683 | * address range is from 0x0000 up to 0x17ff. Therefore, | ||
1684 | * the SRAM_SIZE is set to 0x17. The next step is to set | ||
1685 | * the BCR26:SRAM_BND midway through so the Tx and Rx | ||
1686 | * buffers can share the SRAM equally. | ||
1687 | */ | ||
1688 | a->write_bcr(ioaddr, 25, 0x17); | ||
1689 | a->write_bcr(ioaddr, 26, 0xc); | ||
1690 | /* And finally enable the NOUFLO bit */ | ||
1691 | a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11)); | ||
1692 | } | ||
1693 | |||
1667 | dev = alloc_etherdev(sizeof(*lp)); | 1694 | dev = alloc_etherdev(sizeof(*lp)); |
1668 | if (!dev) { | 1695 | if (!dev) { |
1669 | ret = -ENOMEM; | 1696 | ret = -ENOMEM; |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 869d97fcf781..b927021c6c40 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) | |||
593 | if (!xgene_ring_mgr_init(pdata)) | 593 | if (!xgene_ring_mgr_init(pdata)) |
594 | return -ENODEV; | 594 | return -ENODEV; |
595 | 595 | ||
596 | if (!efi_enabled(EFI_BOOT)) { | 596 | if (pdata->clk) { |
597 | clk_prepare_enable(pdata->clk); | 597 | clk_prepare_enable(pdata->clk); |
598 | clk_disable_unprepare(pdata->clk); | 598 | clk_disable_unprepare(pdata->clk); |
599 | clk_prepare_enable(pdata->clk); | 599 | clk_prepare_enable(pdata->clk); |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 4de62b210c85..635a83be7e5e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev) | |||
1025 | #ifdef CONFIG_ACPI | 1025 | #ifdef CONFIG_ACPI |
1026 | static const struct acpi_device_id xgene_enet_acpi_match[] = { | 1026 | static const struct acpi_device_id xgene_enet_acpi_match[] = { |
1027 | { "APMC0D05", }, | 1027 | { "APMC0D05", }, |
1028 | { "APMC0D30", }, | ||
1029 | { "APMC0D31", }, | ||
1028 | { } | 1030 | { } |
1029 | }; | 1031 | }; |
1030 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); | 1032 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); |
@@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); | |||
1033 | #ifdef CONFIG_OF | 1035 | #ifdef CONFIG_OF |
1034 | static struct of_device_id xgene_enet_of_match[] = { | 1036 | static struct of_device_id xgene_enet_of_match[] = { |
1035 | {.compatible = "apm,xgene-enet",}, | 1037 | {.compatible = "apm,xgene-enet",}, |
1038 | {.compatible = "apm,xgene1-sgenet",}, | ||
1039 | {.compatible = "apm,xgene1-xgenet",}, | ||
1036 | {}, | 1040 | {}, |
1037 | }; | 1041 | }; |
1038 | 1042 | ||
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 21206d33b638..a7f2cc3e485e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c | |||
@@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) | |||
486 | { | 486 | { |
487 | struct bcm_enet_priv *priv; | 487 | struct bcm_enet_priv *priv; |
488 | struct net_device *dev; | 488 | struct net_device *dev; |
489 | int tx_work_done, rx_work_done; | 489 | int rx_work_done; |
490 | 490 | ||
491 | priv = container_of(napi, struct bcm_enet_priv, napi); | 491 | priv = container_of(napi, struct bcm_enet_priv, napi); |
492 | dev = priv->net_dev; | 492 | dev = priv->net_dev; |
@@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) | |||
498 | ENETDMAC_IR, priv->tx_chan); | 498 | ENETDMAC_IR, priv->tx_chan); |
499 | 499 | ||
500 | /* reclaim sent skb */ | 500 | /* reclaim sent skb */ |
501 | tx_work_done = bcm_enet_tx_reclaim(dev, 0); | 501 | bcm_enet_tx_reclaim(dev, 0); |
502 | 502 | ||
503 | spin_lock(&priv->rx_lock); | 503 | spin_lock(&priv->rx_lock); |
504 | rx_work_done = bcm_enet_receive_queue(dev, budget); | 504 | rx_work_done = bcm_enet_receive_queue(dev, budget); |
505 | spin_unlock(&priv->rx_lock); | 505 | spin_unlock(&priv->rx_lock); |
506 | 506 | ||
507 | if (rx_work_done >= budget || tx_work_done > 0) { | 507 | if (rx_work_done >= budget) { |
508 | /* rx/tx queue is not yet empty/clean */ | 508 | /* rx queue is not yet empty/clean */ |
509 | return rx_work_done; | 509 | return rx_work_done; |
510 | } | 510 | } |
511 | 511 | ||
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 676ffe093180..0469f72c6e7e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
@@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, | |||
302 | slot->skb = skb; | 302 | slot->skb = skb; |
303 | slot->dma_addr = dma_addr; | 303 | slot->dma_addr = dma_addr; |
304 | 304 | ||
305 | if (slot->dma_addr & 0xC0000000) | ||
306 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
307 | |||
308 | return 0; | 305 | return 0; |
309 | } | 306 | } |
310 | 307 | ||
@@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
505 | ring->mmio_base); | 502 | ring->mmio_base); |
506 | goto err_dma_free; | 503 | goto err_dma_free; |
507 | } | 504 | } |
508 | if (ring->dma_base & 0xC0000000) | ||
509 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
510 | 505 | ||
511 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, | 506 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, |
512 | BGMAC_DMA_RING_TX); | 507 | BGMAC_DMA_RING_TX); |
@@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
536 | err = -ENOMEM; | 531 | err = -ENOMEM; |
537 | goto err_dma_free; | 532 | goto err_dma_free; |
538 | } | 533 | } |
539 | if (ring->dma_base & 0xC0000000) | ||
540 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
541 | 534 | ||
542 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, | 535 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, |
543 | BGMAC_DMA_RING_RX); | 536 | BGMAC_DMA_RING_RX); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..996e215fc324 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, | |||
12722 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | 12722 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, |
12723 | PCICFG_VENDOR_ID_OFFSET); | 12723 | PCICFG_VENDOR_ID_OFFSET); |
12724 | 12724 | ||
12725 | /* Set PCIe reset type to fundamental for EEH recovery */ | ||
12726 | pdev->needs_freset = 1; | ||
12727 | |||
12725 | /* AER (Advanced Error reporting) configuration */ | 12728 | /* AER (Advanced Error reporting) configuration */ |
12726 | rc = pci_enable_pcie_error_reporting(pdev); | 12729 | rc = pci_enable_pcie_error_reporting(pdev); |
12727 | if (!rc) | 12730 | if (!rc) |
@@ -12766,7 +12769,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, | |||
12766 | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | | 12769 | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | |
12767 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | | 12770 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | |
12768 | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; | 12771 | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; |
12769 | if (!CHIP_IS_E1x(bp)) { | 12772 | if (!chip_is_e1x) { |
12770 | dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | | 12773 | dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | |
12771 | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; | 12774 | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; |
12772 | dev->hw_enc_features = | 12775 | dev->hw_enc_features = |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 149a0d70c108..b97122926d3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | |||
@@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) | 73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) |
74 | return -EINVAL; | 74 | return -EINVAL; |
75 | 75 | ||
76 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
76 | if (wol->wolopts & WAKE_MAGICSECURE) { | 77 | if (wol->wolopts & WAKE_MAGICSECURE) { |
77 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), | 78 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), |
78 | UMAC_MPD_PW_MS); | 79 | UMAC_MPD_PW_MS); |
79 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), | 80 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), |
80 | UMAC_MPD_PW_LS); | 81 | UMAC_MPD_PW_LS); |
81 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
82 | reg |= MPD_PW_EN; | 82 | reg |= MPD_PW_EN; |
83 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | 83 | } else { |
84 | reg &= ~MPD_PW_EN; | ||
84 | } | 85 | } |
86 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
85 | 87 | ||
86 | /* Flag the device and relevant IRQ as wakeup capable */ | 88 | /* Flag the device and relevant IRQ as wakeup capable */ |
87 | if (wol->wolopts) { | 89 | if (wol->wolopts) { |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..81d41539fcba 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = { | |||
2113 | }; | 2113 | }; |
2114 | 2114 | ||
2115 | #if defined(CONFIG_OF) | 2115 | #if defined(CONFIG_OF) |
2116 | static struct macb_config pc302gem_config = { | 2116 | static const struct macb_config pc302gem_config = { |
2117 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 2117 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
2118 | .dma_burst_length = 16, | 2118 | .dma_burst_length = 16, |
2119 | }; | 2119 | }; |
2120 | 2120 | ||
2121 | static struct macb_config sama5d3_config = { | 2121 | static const struct macb_config sama5d3_config = { |
2122 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 2122 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
2123 | .dma_burst_length = 16, | 2123 | .dma_burst_length = 16, |
2124 | }; | 2124 | }; |
2125 | 2125 | ||
2126 | static struct macb_config sama5d4_config = { | 2126 | static const struct macb_config sama5d4_config = { |
2127 | .caps = 0, | 2127 | .caps = 0, |
2128 | .dma_burst_length = 4, | 2128 | .dma_burst_length = 4, |
2129 | }; | 2129 | }; |
@@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp) | |||
2154 | if (bp->pdev->dev.of_node) { | 2154 | if (bp->pdev->dev.of_node) { |
2155 | match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); | 2155 | match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); |
2156 | if (match && match->data) { | 2156 | if (match && match->data) { |
2157 | config = (const struct macb_config *)match->data; | 2157 | config = match->data; |
2158 | 2158 | ||
2159 | bp->caps = config->caps; | 2159 | bp->caps = config->caps; |
2160 | /* | 2160 | /* |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..ff85619a9732 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -351,7 +351,7 @@ | |||
351 | 351 | ||
352 | /* Bitfields in MID */ | 352 | /* Bitfields in MID */ |
353 | #define MACB_IDNUM_OFFSET 16 | 353 | #define MACB_IDNUM_OFFSET 16 |
354 | #define MACB_IDNUM_SIZE 16 | 354 | #define MACB_IDNUM_SIZE 12 |
355 | #define MACB_REV_OFFSET 0 | 355 | #define MACB_REV_OFFSET 0 |
356 | #define MACB_REV_SIZE 16 | 356 | #define MACB_REV_SIZE 16 |
357 | 357 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 853c38997c82..1abdfa123c6c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -1120,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, | |||
1120 | } | 1120 | } |
1121 | 1121 | ||
1122 | /* Installed successfully, update the cached header too. */ | 1122 | /* Installed successfully, update the cached header too. */ |
1123 | memcpy(card_fw, fs_fw, sizeof(*card_fw)); | 1123 | *card_fw = *fs_fw; |
1124 | card_fw_usable = 1; | 1124 | card_fw_usable = 1; |
1125 | *reset = 0; /* already reset as part of load_fw */ | 1125 | *reset = 0; /* already reset as part of load_fw */ |
1126 | } | 1126 | } |
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 3b42556f7f8d..ed41559bae77 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c | |||
@@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev) | |||
589 | (unsigned int)tp->rx_ring[i].buffer1, | 589 | (unsigned int)tp->rx_ring[i].buffer1, |
590 | (unsigned int)tp->rx_ring[i].buffer2, | 590 | (unsigned int)tp->rx_ring[i].buffer2, |
591 | buf[0], buf[1], buf[2]); | 591 | buf[0], buf[1], buf[2]); |
592 | for (j = 0; buf[j] != 0xee && j < 1600; j++) | 592 | for (j = 0; ((j < 1600) && buf[j] != 0xee); j++) |
593 | if (j < 100) | 593 | if (j < 100) |
594 | pr_cont(" %02x", buf[j]); | 594 | pr_cont(" %02x", buf[j]); |
595 | pr_cont(" j=%d\n", j); | 595 | pr_cont(" j=%d\n", j); |
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 27de37aa90af..27b9fe99a9bd 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
@@ -354,6 +354,7 @@ struct be_vf_cfg { | |||
354 | u16 vlan_tag; | 354 | u16 vlan_tag; |
355 | u32 tx_rate; | 355 | u32 tx_rate; |
356 | u32 plink_tracking; | 356 | u32 plink_tracking; |
357 | u32 privileges; | ||
357 | }; | 358 | }; |
358 | 359 | ||
359 | enum vf_state { | 360 | enum vf_state { |
@@ -423,6 +424,7 @@ struct be_adapter { | |||
423 | 424 | ||
424 | u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ | 425 | u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ |
425 | u8 __iomem *db; /* Door Bell */ | 426 | u8 __iomem *db; /* Door Bell */ |
427 | u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */ | ||
426 | 428 | ||
427 | struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ | 429 | struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ |
428 | struct be_dma_mem mbox_mem; | 430 | struct be_dma_mem mbox_mem; |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 36916cfa70f9..7f05f309e935 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, | |||
1902 | { | 1902 | { |
1903 | int num_eqs, i = 0; | 1903 | int num_eqs, i = 0; |
1904 | 1904 | ||
1905 | if (lancer_chip(adapter) && num > 8) { | 1905 | while (num) { |
1906 | while (num) { | 1906 | num_eqs = min(num, 8); |
1907 | num_eqs = min(num, 8); | 1907 | __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); |
1908 | __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); | 1908 | i += num_eqs; |
1909 | i += num_eqs; | 1909 | num -= num_eqs; |
1910 | num -= num_eqs; | ||
1911 | } | ||
1912 | } else { | ||
1913 | __be_cmd_modify_eqd(adapter, set_eqd, num); | ||
1914 | } | 1910 | } |
1915 | 1911 | ||
1916 | return 0; | 1912 | return 0; |
@@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, | |||
1918 | 1914 | ||
1919 | /* Uses synchronous mcc */ | 1915 | /* Uses synchronous mcc */ |
1920 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | 1916 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, |
1921 | u32 num) | 1917 | u32 num, u32 domain) |
1922 | { | 1918 | { |
1923 | struct be_mcc_wrb *wrb; | 1919 | struct be_mcc_wrb *wrb; |
1924 | struct be_cmd_req_vlan_config *req; | 1920 | struct be_cmd_req_vlan_config *req; |
@@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | |||
1936 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | 1932 | be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, |
1937 | OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), | 1933 | OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), |
1938 | wrb, NULL); | 1934 | wrb, NULL); |
1935 | req->hdr.domain = domain; | ||
1939 | 1936 | ||
1940 | req->interface_id = if_id; | 1937 | req->interface_id = if_id; |
1941 | req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; | 1938 | req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index db761e8e42a3..a7634a3f052a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
@@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, | |||
2256 | int be_cmd_get_fw_ver(struct be_adapter *adapter); | 2256 | int be_cmd_get_fw_ver(struct be_adapter *adapter); |
2257 | int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); | 2257 | int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); |
2258 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, | 2258 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, |
2259 | u32 num); | 2259 | u32 num, u32 domain); |
2260 | int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); | 2260 | int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); |
2261 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); | 2261 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); |
2262 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); | 2262 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0a816859aca5..e6b790f0d9dc 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter) | |||
1171 | for_each_set_bit(i, adapter->vids, VLAN_N_VID) | 1171 | for_each_set_bit(i, adapter->vids, VLAN_N_VID) |
1172 | vids[num++] = cpu_to_le16(i); | 1172 | vids[num++] = cpu_to_le16(i); |
1173 | 1173 | ||
1174 | status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); | 1174 | status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0); |
1175 | if (status) { | 1175 | if (status) { |
1176 | dev_err(dev, "Setting HW VLAN filtering failed\n"); | 1176 | dev_err(dev, "Setting HW VLAN filtering failed\n"); |
1177 | /* Set to VLAN promisc mode as setting VLAN filter failed */ | 1177 | /* Set to VLAN promisc mode as setting VLAN filter failed */ |
@@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf, | |||
1380 | return 0; | 1380 | return 0; |
1381 | } | 1381 | } |
1382 | 1382 | ||
1383 | static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan) | ||
1384 | { | ||
1385 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | ||
1386 | u16 vids[BE_NUM_VLANS_SUPPORTED]; | ||
1387 | int vf_if_id = vf_cfg->if_handle; | ||
1388 | int status; | ||
1389 | |||
1390 | /* Enable Transparent VLAN Tagging */ | ||
1391 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0); | ||
1392 | if (status) | ||
1393 | return status; | ||
1394 | |||
1395 | /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */ | ||
1396 | vids[0] = 0; | ||
1397 | status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1); | ||
1398 | if (!status) | ||
1399 | dev_info(&adapter->pdev->dev, | ||
1400 | "Cleared guest VLANs on VF%d", vf); | ||
1401 | |||
1402 | /* After TVT is enabled, disallow VFs to program VLAN filters */ | ||
1403 | if (vf_cfg->privileges & BE_PRIV_FILTMGMT) { | ||
1404 | status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges & | ||
1405 | ~BE_PRIV_FILTMGMT, vf + 1); | ||
1406 | if (!status) | ||
1407 | vf_cfg->privileges &= ~BE_PRIV_FILTMGMT; | ||
1408 | } | ||
1409 | return 0; | ||
1410 | } | ||
1411 | |||
1412 | static int be_clear_vf_tvt(struct be_adapter *adapter, int vf) | ||
1413 | { | ||
1414 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | ||
1415 | struct device *dev = &adapter->pdev->dev; | ||
1416 | int status; | ||
1417 | |||
1418 | /* Reset Transparent VLAN Tagging. */ | ||
1419 | status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1, | ||
1420 | vf_cfg->if_handle, 0); | ||
1421 | if (status) | ||
1422 | return status; | ||
1423 | |||
1424 | /* Allow VFs to program VLAN filtering */ | ||
1425 | if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { | ||
1426 | status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges | | ||
1427 | BE_PRIV_FILTMGMT, vf + 1); | ||
1428 | if (!status) { | ||
1429 | vf_cfg->privileges |= BE_PRIV_FILTMGMT; | ||
1430 | dev_info(dev, "VF%d: FILTMGMT priv enabled", vf); | ||
1431 | } | ||
1432 | } | ||
1433 | |||
1434 | dev_info(dev, | ||
1435 | "Disable/re-enable i/f in VM to clear Transparent VLAN tag"); | ||
1436 | return 0; | ||
1437 | } | ||
1438 | |||
1383 | static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) | 1439 | static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) |
1384 | { | 1440 | { |
1385 | struct be_adapter *adapter = netdev_priv(netdev); | 1441 | struct be_adapter *adapter = netdev_priv(netdev); |
1386 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | 1442 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; |
1387 | int status = 0; | 1443 | int status; |
1388 | 1444 | ||
1389 | if (!sriov_enabled(adapter)) | 1445 | if (!sriov_enabled(adapter)) |
1390 | return -EPERM; | 1446 | return -EPERM; |
@@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) | |||
1394 | 1450 | ||
1395 | if (vlan || qos) { | 1451 | if (vlan || qos) { |
1396 | vlan |= qos << VLAN_PRIO_SHIFT; | 1452 | vlan |= qos << VLAN_PRIO_SHIFT; |
1397 | if (vf_cfg->vlan_tag != vlan) | 1453 | status = be_set_vf_tvt(adapter, vf, vlan); |
1398 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | ||
1399 | vf_cfg->if_handle, 0); | ||
1400 | } else { | 1454 | } else { |
1401 | /* Reset Transparent Vlan Tagging. */ | 1455 | status = be_clear_vf_tvt(adapter, vf); |
1402 | status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, | ||
1403 | vf + 1, vf_cfg->if_handle, 0); | ||
1404 | } | 1456 | } |
1405 | 1457 | ||
1406 | if (status) { | 1458 | if (status) { |
1407 | dev_err(&adapter->pdev->dev, | 1459 | dev_err(&adapter->pdev->dev, |
1408 | "VLAN %d config on VF %d failed : %#x\n", vlan, | 1460 | "VLAN %d config on VF %d failed : %#x\n", vlan, vf, |
1409 | vf, status); | 1461 | status); |
1410 | return be_cmd_status(status); | 1462 | return be_cmd_status(status); |
1411 | } | 1463 | } |
1412 | 1464 | ||
1413 | vf_cfg->vlan_tag = vlan; | 1465 | vf_cfg->vlan_tag = vlan; |
1414 | |||
1415 | return 0; | 1466 | return 0; |
1416 | } | 1467 | } |
1417 | 1468 | ||
@@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter) | |||
2772 | } | 2823 | } |
2773 | } | 2824 | } |
2774 | } else { | 2825 | } else { |
2775 | pci_read_config_dword(adapter->pdev, | 2826 | ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW); |
2776 | PCICFG_UE_STATUS_LOW, &ue_lo); | 2827 | ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH); |
2777 | pci_read_config_dword(adapter->pdev, | 2828 | ue_lo_mask = ioread32(adapter->pcicfg + |
2778 | PCICFG_UE_STATUS_HIGH, &ue_hi); | 2829 | PCICFG_UE_STATUS_LOW_MASK); |
2779 | pci_read_config_dword(adapter->pdev, | 2830 | ue_hi_mask = ioread32(adapter->pcicfg + |
2780 | PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); | 2831 | PCICFG_UE_STATUS_HI_MASK); |
2781 | pci_read_config_dword(adapter->pdev, | ||
2782 | PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); | ||
2783 | 2832 | ||
2784 | ue_lo = (ue_lo & ~ue_lo_mask); | 2833 | ue_lo = (ue_lo & ~ue_lo_mask); |
2785 | ue_hi = (ue_hi & ~ue_hi_mask); | 2834 | ue_hi = (ue_hi & ~ue_hi_mask); |
@@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, | |||
3339 | u32 cap_flags, u32 vf) | 3388 | u32 cap_flags, u32 vf) |
3340 | { | 3389 | { |
3341 | u32 en_flags; | 3390 | u32 en_flags; |
3342 | int status; | ||
3343 | 3391 | ||
3344 | en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | | 3392 | en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | |
3345 | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | | 3393 | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | |
@@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, | |||
3347 | 3395 | ||
3348 | en_flags &= cap_flags; | 3396 | en_flags &= cap_flags; |
3349 | 3397 | ||
3350 | status = be_cmd_if_create(adapter, cap_flags, en_flags, | 3398 | return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); |
3351 | if_handle, vf); | ||
3352 | |||
3353 | return status; | ||
3354 | } | 3399 | } |
3355 | 3400 | ||
3356 | static int be_vfs_if_create(struct be_adapter *adapter) | 3401 | static int be_vfs_if_create(struct be_adapter *adapter) |
@@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter) | |||
3368 | if (!BE3_chip(adapter)) { | 3413 | if (!BE3_chip(adapter)) { |
3369 | status = be_cmd_get_profile_config(adapter, &res, | 3414 | status = be_cmd_get_profile_config(adapter, &res, |
3370 | vf + 1); | 3415 | vf + 1); |
3371 | if (!status) | 3416 | if (!status) { |
3372 | cap_flags = res.if_cap_flags; | 3417 | cap_flags = res.if_cap_flags; |
3418 | /* Prevent VFs from enabling VLAN promiscuous | ||
3419 | * mode | ||
3420 | */ | ||
3421 | cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; | ||
3422 | } | ||
3373 | } | 3423 | } |
3374 | 3424 | ||
3375 | status = be_if_create(adapter, &vf_cfg->if_handle, | 3425 | status = be_if_create(adapter, &vf_cfg->if_handle, |
@@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter) | |||
3403 | struct device *dev = &adapter->pdev->dev; | 3453 | struct device *dev = &adapter->pdev->dev; |
3404 | struct be_vf_cfg *vf_cfg; | 3454 | struct be_vf_cfg *vf_cfg; |
3405 | int status, old_vfs, vf; | 3455 | int status, old_vfs, vf; |
3406 | u32 privileges; | ||
3407 | 3456 | ||
3408 | old_vfs = pci_num_vf(adapter->pdev); | 3457 | old_vfs = pci_num_vf(adapter->pdev); |
3409 | 3458 | ||
@@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter) | |||
3433 | 3482 | ||
3434 | for_all_vfs(adapter, vf_cfg, vf) { | 3483 | for_all_vfs(adapter, vf_cfg, vf) { |
3435 | /* Allow VFs to program MAC/VLAN filters */ | 3484 | /* Allow VFs to program MAC/VLAN filters */ |
3436 | status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); | 3485 | status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges, |
3437 | if (!status && !(privileges & BE_PRIV_FILTMGMT)) { | 3486 | vf + 1); |
3487 | if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { | ||
3438 | status = be_cmd_set_fn_privileges(adapter, | 3488 | status = be_cmd_set_fn_privileges(adapter, |
3439 | privileges | | 3489 | vf_cfg->privileges | |
3440 | BE_PRIV_FILTMGMT, | 3490 | BE_PRIV_FILTMGMT, |
3441 | vf + 1); | 3491 | vf + 1); |
3442 | if (!status) | 3492 | if (!status) { |
3493 | vf_cfg->privileges |= BE_PRIV_FILTMGMT; | ||
3443 | dev_info(dev, "VF%d has FILTMGMT privilege\n", | 3494 | dev_info(dev, "VF%d has FILTMGMT privilege\n", |
3444 | vf); | 3495 | vf); |
3496 | } | ||
3445 | } | 3497 | } |
3446 | 3498 | ||
3447 | /* Allow full available bandwidth */ | 3499 | /* Allow full available bandwidth */ |
@@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) | |||
4820 | 4872 | ||
4821 | static int be_map_pci_bars(struct be_adapter *adapter) | 4873 | static int be_map_pci_bars(struct be_adapter *adapter) |
4822 | { | 4874 | { |
4875 | struct pci_dev *pdev = adapter->pdev; | ||
4823 | u8 __iomem *addr; | 4876 | u8 __iomem *addr; |
4824 | 4877 | ||
4825 | if (BEx_chip(adapter) && be_physfn(adapter)) { | 4878 | if (BEx_chip(adapter) && be_physfn(adapter)) { |
4826 | adapter->csr = pci_iomap(adapter->pdev, 2, 0); | 4879 | adapter->csr = pci_iomap(pdev, 2, 0); |
4827 | if (!adapter->csr) | 4880 | if (!adapter->csr) |
4828 | return -ENOMEM; | 4881 | return -ENOMEM; |
4829 | } | 4882 | } |
4830 | 4883 | ||
4831 | addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); | 4884 | addr = pci_iomap(pdev, db_bar(adapter), 0); |
4832 | if (!addr) | 4885 | if (!addr) |
4833 | goto pci_map_err; | 4886 | goto pci_map_err; |
4834 | adapter->db = addr; | 4887 | adapter->db = addr; |
4835 | 4888 | ||
4889 | if (skyhawk_chip(adapter) || BEx_chip(adapter)) { | ||
4890 | if (be_physfn(adapter)) { | ||
4891 | /* PCICFG is the 2nd BAR in BE2 */ | ||
4892 | addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0); | ||
4893 | if (!addr) | ||
4894 | goto pci_map_err; | ||
4895 | adapter->pcicfg = addr; | ||
4896 | } else { | ||
4897 | adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; | ||
4898 | } | ||
4899 | } | ||
4900 | |||
4836 | be_roce_map_pci_bars(adapter); | 4901 | be_roce_map_pci_bars(adapter); |
4837 | return 0; | 4902 | return 0; |
4838 | 4903 | ||
4839 | pci_map_err: | 4904 | pci_map_err: |
4840 | dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); | 4905 | dev_err(&pdev->dev, "Error in mapping PCI BARs\n"); |
4841 | be_unmap_pci_bars(adapter); | 4906 | be_unmap_pci_bars(adapter); |
4842 | return -ENOMEM; | 4907 | return -ENOMEM; |
4843 | } | 4908 | } |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9bb6220663b2..78e1ce09b1ab 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -1189,13 +1189,12 @@ static void | |||
1189 | fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) | 1189 | fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) |
1190 | { | 1190 | { |
1191 | struct fec_enet_private *fep; | 1191 | struct fec_enet_private *fep; |
1192 | struct bufdesc *bdp, *bdp_t; | 1192 | struct bufdesc *bdp; |
1193 | unsigned short status; | 1193 | unsigned short status; |
1194 | struct sk_buff *skb; | 1194 | struct sk_buff *skb; |
1195 | struct fec_enet_priv_tx_q *txq; | 1195 | struct fec_enet_priv_tx_q *txq; |
1196 | struct netdev_queue *nq; | 1196 | struct netdev_queue *nq; |
1197 | int index = 0; | 1197 | int index = 0; |
1198 | int i, bdnum; | ||
1199 | int entries_free; | 1198 | int entries_free; |
1200 | 1199 | ||
1201 | fep = netdev_priv(ndev); | 1200 | fep = netdev_priv(ndev); |
@@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) | |||
1216 | if (bdp == txq->cur_tx) | 1215 | if (bdp == txq->cur_tx) |
1217 | break; | 1216 | break; |
1218 | 1217 | ||
1219 | bdp_t = bdp; | 1218 | index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); |
1220 | bdnum = 1; | ||
1221 | index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); | ||
1222 | skb = txq->tx_skbuff[index]; | ||
1223 | while (!skb) { | ||
1224 | bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id); | ||
1225 | index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); | ||
1226 | skb = txq->tx_skbuff[index]; | ||
1227 | bdnum++; | ||
1228 | } | ||
1229 | if (skb_shinfo(skb)->nr_frags && | ||
1230 | (status = bdp_t->cbd_sc) & BD_ENET_TX_READY) | ||
1231 | break; | ||
1232 | 1219 | ||
1233 | for (i = 0; i < bdnum; i++) { | 1220 | skb = txq->tx_skbuff[index]; |
1234 | if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) | ||
1235 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, | ||
1236 | bdp->cbd_datlen, DMA_TO_DEVICE); | ||
1237 | bdp->cbd_bufaddr = 0; | ||
1238 | if (i < bdnum - 1) | ||
1239 | bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); | ||
1240 | } | ||
1241 | txq->tx_skbuff[index] = NULL; | 1221 | txq->tx_skbuff[index] = NULL; |
1222 | if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) | ||
1223 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, | ||
1224 | bdp->cbd_datlen, DMA_TO_DEVICE); | ||
1225 | bdp->cbd_bufaddr = 0; | ||
1226 | if (!skb) { | ||
1227 | bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); | ||
1228 | continue; | ||
1229 | } | ||
1242 | 1230 | ||
1243 | /* Check for errors. */ | 1231 | /* Check for errors. */ |
1244 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | | 1232 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
@@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) | |||
1479 | 1467 | ||
1480 | vlan_packet_rcvd = true; | 1468 | vlan_packet_rcvd = true; |
1481 | 1469 | ||
1482 | skb_copy_to_linear_data_offset(skb, VLAN_HLEN, | 1470 | memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); |
1483 | data, (2 * ETH_ALEN)); | ||
1484 | skb_pull(skb, VLAN_HLEN); | 1471 | skb_pull(skb, VLAN_HLEN); |
1485 | } | 1472 | } |
1486 | 1473 | ||
@@ -1597,7 +1584,7 @@ fec_enet_interrupt(int irq, void *dev_id) | |||
1597 | writel(int_events, fep->hwp + FEC_IEVENT); | 1584 | writel(int_events, fep->hwp + FEC_IEVENT); |
1598 | fec_enet_collect_events(fep, int_events); | 1585 | fec_enet_collect_events(fep, int_events); |
1599 | 1586 | ||
1600 | if (fep->work_tx || fep->work_rx) { | 1587 | if ((fep->work_tx || fep->work_rx) && fep->link) { |
1601 | ret = IRQ_HANDLED; | 1588 | ret = IRQ_HANDLED; |
1602 | 1589 | ||
1603 | if (napi_schedule_prep(&fep->napi)) { | 1590 | if (napi_schedule_prep(&fep->napi)) { |
@@ -3383,7 +3370,6 @@ fec_drv_remove(struct platform_device *pdev) | |||
3383 | regulator_disable(fep->reg_phy); | 3370 | regulator_disable(fep->reg_phy); |
3384 | if (fep->ptp_clock) | 3371 | if (fep->ptp_clock) |
3385 | ptp_clock_unregister(fep->ptp_clock); | 3372 | ptp_clock_unregister(fep->ptp_clock); |
3386 | fec_enet_clk_enable(ndev, false); | ||
3387 | of_node_put(fep->phy_node); | 3373 | of_node_put(fep->phy_node); |
3388 | free_netdev(ndev); | 3374 | free_netdev(ndev); |
3389 | 3375 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 178e54028d10..7bf3682cdf47 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np, | |||
747 | return 0; | 747 | return 0; |
748 | } | 748 | } |
749 | 749 | ||
750 | static int gfar_of_group_count(struct device_node *np) | ||
751 | { | ||
752 | struct device_node *child; | ||
753 | int num = 0; | ||
754 | |||
755 | for_each_available_child_of_node(np, child) | ||
756 | if (!of_node_cmp(child->name, "queue-group")) | ||
757 | num++; | ||
758 | |||
759 | return num; | ||
760 | } | ||
761 | |||
750 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | 762 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) |
751 | { | 763 | { |
752 | const char *model; | 764 | const char *model; |
@@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
784 | num_rx_qs = 1; | 796 | num_rx_qs = 1; |
785 | } else { /* MQ_MG_MODE */ | 797 | } else { /* MQ_MG_MODE */ |
786 | /* get the actual number of supported groups */ | 798 | /* get the actual number of supported groups */ |
787 | unsigned int num_grps = of_get_available_child_count(np); | 799 | unsigned int num_grps = gfar_of_group_count(np); |
788 | 800 | ||
789 | if (num_grps == 0 || num_grps > MAXGROUPS) { | 801 | if (num_grps == 0 || num_grps > MAXGROUPS) { |
790 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", | 802 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", |
@@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
851 | 863 | ||
852 | /* Parse and initialize group specific information */ | 864 | /* Parse and initialize group specific information */ |
853 | if (priv->mode == MQ_MG_MODE) { | 865 | if (priv->mode == MQ_MG_MODE) { |
854 | for_each_child_of_node(np, child) { | 866 | for_each_available_child_of_node(np, child) { |
867 | if (of_node_cmp(child->name, "queue-group")) | ||
868 | continue; | ||
869 | |||
855 | err = gfar_parse_group(child, priv, model); | 870 | err = gfar_parse_group(child, priv, model); |
856 | if (err) | 871 | if (err) |
857 | goto err_grp_init; | 872 | goto err_grp_init; |
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 072426a72745..cd7675ac5bf9 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
@@ -1136,6 +1136,8 @@ restart_poll: | |||
1136 | ibmveth_replenish_task(adapter); | 1136 | ibmveth_replenish_task(adapter); |
1137 | 1137 | ||
1138 | if (frames_processed < budget) { | 1138 | if (frames_processed < budget) { |
1139 | napi_complete(napi); | ||
1140 | |||
1139 | /* We think we are done - reenable interrupts, | 1141 | /* We think we are done - reenable interrupts, |
1140 | * then check once more to make sure we are done. | 1142 | * then check once more to make sure we are done. |
1141 | */ | 1143 | */ |
@@ -1144,8 +1146,6 @@ restart_poll: | |||
1144 | 1146 | ||
1145 | BUG_ON(lpar_rc != H_SUCCESS); | 1147 | BUG_ON(lpar_rc != H_SUCCESS); |
1146 | 1148 | ||
1147 | napi_complete(napi); | ||
1148 | |||
1149 | if (ibmveth_rxq_pending_buffer(adapter) && | 1149 | if (ibmveth_rxq_pending_buffer(adapter) && |
1150 | napi_reschedule(napi)) { | 1150 | napi_reschedule(napi)) { |
1151 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, | 1151 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 2a210c4efb89..ebce5bb24df9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev) | |||
1698 | /* Schedule multicast task to populate multicast list */ | 1698 | /* Schedule multicast task to populate multicast list */ |
1699 | queue_work(mdev->workqueue, &priv->rx_mode_task); | 1699 | queue_work(mdev->workqueue, &priv->rx_mode_task); |
1700 | 1700 | ||
1701 | mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); | ||
1702 | |||
1703 | #ifdef CONFIG_MLX4_EN_VXLAN | 1701 | #ifdef CONFIG_MLX4_EN_VXLAN |
1704 | if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) | 1702 | if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) |
1705 | vxlan_get_rx_port(dev); | 1703 | vxlan_get_rx_port(dev); |
@@ -2853,6 +2851,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2853 | queue_delayed_work(mdev->workqueue, &priv->service_task, | 2851 | queue_delayed_work(mdev->workqueue, &priv->service_task, |
2854 | SERVICE_TASK_DELAY); | 2852 | SERVICE_TASK_DELAY); |
2855 | 2853 | ||
2854 | mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); | ||
2855 | |||
2856 | return 0; | 2856 | return 0; |
2857 | 2857 | ||
2858 | out: | 2858 | out: |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2a8268e6be15..ebbe244e80dd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats { | |||
453 | unsigned long rx_chksum_none; | 453 | unsigned long rx_chksum_none; |
454 | unsigned long rx_chksum_complete; | 454 | unsigned long rx_chksum_complete; |
455 | unsigned long tx_chksum_offload; | 455 | unsigned long tx_chksum_offload; |
456 | #define NUM_PORT_STATS 9 | 456 | #define NUM_PORT_STATS 10 |
457 | }; | 457 | }; |
458 | 458 | ||
459 | struct mlx4_en_perf_stats { | 459 | struct mlx4_en_perf_stats { |
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 209ee1b27f8d..8678e39aba08 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c | |||
@@ -92,6 +92,7 @@ static const char version[] = | |||
92 | #include "smc91x.h" | 92 | #include "smc91x.h" |
93 | 93 | ||
94 | #if defined(CONFIG_ASSABET_NEPONSET) | 94 | #if defined(CONFIG_ASSABET_NEPONSET) |
95 | #include <mach/assabet.h> | ||
95 | #include <mach/neponset.h> | 96 | #include <mach/neponset.h> |
96 | #endif | 97 | #endif |
97 | 98 | ||
@@ -2247,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2247 | const struct of_device_id *match = NULL; | 2248 | const struct of_device_id *match = NULL; |
2248 | struct smc_local *lp; | 2249 | struct smc_local *lp; |
2249 | struct net_device *ndev; | 2250 | struct net_device *ndev; |
2250 | struct resource *res; | 2251 | struct resource *res, *ires; |
2251 | unsigned int __iomem *addr; | 2252 | unsigned int __iomem *addr; |
2252 | unsigned long irq_flags = SMC_IRQ_FLAGS; | 2253 | unsigned long irq_flags = SMC_IRQ_FLAGS; |
2253 | unsigned long irq_resflags; | ||
2254 | int ret; | 2254 | int ret; |
2255 | 2255 | ||
2256 | ndev = alloc_etherdev(sizeof(struct smc_local)); | 2256 | ndev = alloc_etherdev(sizeof(struct smc_local)); |
@@ -2342,19 +2342,16 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2342 | goto out_free_netdev; | 2342 | goto out_free_netdev; |
2343 | } | 2343 | } |
2344 | 2344 | ||
2345 | ndev->irq = platform_get_irq(pdev, 0); | 2345 | ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
2346 | if (ndev->irq <= 0) { | 2346 | if (!ires) { |
2347 | ret = -ENODEV; | 2347 | ret = -ENODEV; |
2348 | goto out_release_io; | 2348 | goto out_release_io; |
2349 | } | 2349 | } |
2350 | /* | 2350 | |
2351 | * If this platform does not specify any special irqflags, or if | 2351 | ndev->irq = ires->start; |
2352 | * the resource supplies a trigger, override the irqflags with | 2352 | |
2353 | * the trigger flags from the resource. | 2353 | if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) |
2354 | */ | 2354 | irq_flags = ires->flags & IRQF_TRIGGER_MASK; |
2355 | irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); | ||
2356 | if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) | ||
2357 | irq_flags = irq_resflags & IRQF_TRIGGER_MASK; | ||
2358 | 2355 | ||
2359 | ret = smc_request_attrib(pdev, ndev); | 2356 | ret = smc_request_attrib(pdev, ndev); |
2360 | if (ret) | 2357 | if (ret) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index fb846ebba1d9..f9b42f11950f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
272 | struct stmmac_priv *priv = NULL; | 272 | struct stmmac_priv *priv = NULL; |
273 | struct plat_stmmacenet_data *plat_dat = NULL; | 273 | struct plat_stmmacenet_data *plat_dat = NULL; |
274 | const char *mac = NULL; | 274 | const char *mac = NULL; |
275 | int irq, wol_irq, lpi_irq; | ||
276 | |||
277 | /* Get IRQ information early so that deferred probe can be requested | ||
278 | * if needed, before we go too far with resource allocation. | ||
279 | */ | ||
280 | irq = platform_get_irq_byname(pdev, "macirq"); | ||
281 | if (irq < 0) { | ||
282 | if (irq != -EPROBE_DEFER) { | ||
283 | dev_err(dev, | ||
284 | "MAC IRQ configuration information not found\n"); | ||
285 | } | ||
286 | return irq; | ||
287 | } | ||
288 | |||
289 | /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq | ||
290 | * The external wake up irq can be passed through the platform code | ||
291 | * named as "eth_wake_irq" | ||
292 | * | ||
293 | * In case the wake up interrupt is not passed from the platform | ||
294 | * so the driver will continue to use the mac irq (ndev->irq) | ||
295 | */ | ||
296 | wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); | ||
297 | if (wol_irq < 0) { | ||
298 | if (wol_irq == -EPROBE_DEFER) | ||
299 | return -EPROBE_DEFER; | ||
300 | wol_irq = irq; | ||
301 | } | ||
302 | |||
303 | lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); | ||
304 | if (lpi_irq == -EPROBE_DEFER) | ||
305 | return -EPROBE_DEFER; | ||
275 | 306 | ||
276 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 307 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
277 | addr = devm_ioremap_resource(dev, res); | 308 | addr = devm_ioremap_resource(dev, res); |
@@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
323 | return PTR_ERR(priv); | 354 | return PTR_ERR(priv); |
324 | } | 355 | } |
325 | 356 | ||
357 | /* Copy IRQ values to priv structure which is now available */ | ||
358 | priv->dev->irq = irq; | ||
359 | priv->wol_irq = wol_irq; | ||
360 | priv->lpi_irq = lpi_irq; | ||
361 | |||
326 | /* Get MAC address if available (DT) */ | 362 | /* Get MAC address if available (DT) */ |
327 | if (mac) | 363 | if (mac) |
328 | memcpy(priv->dev->dev_addr, mac, ETH_ALEN); | 364 | memcpy(priv->dev->dev_addr, mac, ETH_ALEN); |
329 | 365 | ||
330 | /* Get the MAC information */ | ||
331 | priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); | ||
332 | if (priv->dev->irq < 0) { | ||
333 | if (priv->dev->irq != -EPROBE_DEFER) { | ||
334 | netdev_err(priv->dev, | ||
335 | "MAC IRQ configuration information not found\n"); | ||
336 | } | ||
337 | return priv->dev->irq; | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * On some platforms e.g. SPEAr the wake up irq differs from the mac irq | ||
342 | * The external wake up irq can be passed through the platform code | ||
343 | * named as "eth_wake_irq" | ||
344 | * | ||
345 | * In case the wake up interrupt is not passed from the platform | ||
346 | * so the driver will continue to use the mac irq (ndev->irq) | ||
347 | */ | ||
348 | priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); | ||
349 | if (priv->wol_irq < 0) { | ||
350 | if (priv->wol_irq == -EPROBE_DEFER) | ||
351 | return -EPROBE_DEFER; | ||
352 | priv->wol_irq = priv->dev->irq; | ||
353 | } | ||
354 | |||
355 | priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); | ||
356 | if (priv->lpi_irq == -EPROBE_DEFER) | ||
357 | return -EPROBE_DEFER; | ||
358 | |||
359 | platform_set_drvdata(pdev, priv->dev); | 366 | platform_set_drvdata(pdev, priv->dev); |
360 | 367 | ||
361 | pr_debug("STMMAC platform driver registration completed"); | 368 | pr_debug("STMMAC platform driver registration completed"); |
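The stmmac hunk above front-loads the three IRQ lookups so that a missing interrupt provider can return -EPROBE_DEFER before any registers are remapped or driver state is allocated. A minimal sketch of that ordering, with error handling trimmed; only the "macirq" name is taken from the hunk, everything else is illustrative:

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	int irq;

	/* Ask for the IRQ first: a negative value (possibly
	 * -EPROBE_DEFER) comes back before any resource is claimed.
	 */
	irq = platform_get_irq_byname(pdev, "macirq");
	if (irq < 0)
		return irq;

	/* Only now map registers and set up the private state. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}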
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index a495931a66a1..0e0fbb5842b3 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c | |||
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) | |||
498 | } | 498 | } |
499 | 499 | ||
500 | if (rx_count < budget) { | 500 | if (rx_count < budget) { |
501 | napi_complete(napi); | ||
501 | w5100_write(priv, W5100_IMR, IR_S0); | 502 | w5100_write(priv, W5100_IMR, IR_S0); |
502 | mmiowb(); | 503 | mmiowb(); |
503 | napi_complete(napi); | ||
504 | } | 504 | } |
505 | 505 | ||
506 | return rx_count; | 506 | return rx_count; |
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 09322d9db578..4b310002258d 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c | |||
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget) | |||
418 | } | 418 | } |
419 | 419 | ||
420 | if (rx_count < budget) { | 420 | if (rx_count < budget) { |
421 | napi_complete(napi); | ||
421 | w5300_write(priv, W5300_IMR, IR_S0); | 422 | w5300_write(priv, W5300_IMR, IR_S0); |
422 | mmiowb(); | 423 | mmiowb(); |
423 | napi_complete(napi); | ||
424 | } | 424 | } |
425 | 425 | ||
426 | return rx_count; | 426 | return rx_count; |
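Both Wiznet polls now report completion to NAPI before unmasking the chip interrupt; in the old order an interrupt arriving right after the IMR write could try to reschedule a poll that was still marked as scheduled, and the event could be lost. A hedged sketch of the resulting poll shape, with the driver-specific pieces replaced by hypothetical helpers:

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	/* device state ... */
};

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int rx_count = example_rx(priv, budget);	/* hypothetical RX loop */

	if (rx_count < budget) {
		napi_complete(napi);		/* leave the polling state first */
		example_unmask_rx_irq(priv);	/* then re-enable the device IRQ */
	}
	return rx_count;
}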
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index f1ee71e22241..7d394846afc2 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1730,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p) | |||
1730 | if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) | 1730 | if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) |
1731 | return -EADDRNOTAVAIL; | 1731 | return -EADDRNOTAVAIL; |
1732 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 1732 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
1733 | rcu_read_lock(); | 1733 | mutex_lock(&team->lock); |
1734 | list_for_each_entry_rcu(port, &team->port_list, list) | 1734 | list_for_each_entry(port, &team->port_list, list) |
1735 | if (team->ops.port_change_dev_addr) | 1735 | if (team->ops.port_change_dev_addr) |
1736 | team->ops.port_change_dev_addr(team, port); | 1736 | team->ops.port_change_dev_addr(team, port); |
1737 | rcu_read_unlock(); | 1737 | mutex_unlock(&team->lock); |
1738 | return 0; | 1738 | return 0; |
1739 | } | 1739 | } |
1740 | 1740 | ||
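The team change trades rcu_read_lock() for team->lock because the per-port callback can reach code that may sleep, which is not allowed inside an RCU read-side section; the mutex permits sleeping and also excludes concurrent list modification. A generic sketch of the choice, with illustrative names only:

#include <linux/mutex.h>
#include <linux/list.h>

struct example_port {
	struct list_head node;
};

struct example {
	struct mutex lock;		/* also guards list modification */
	struct list_head ports;
};

static void example_update_all(struct example *ex)
{
	struct example_port *port;

	/* The body may sleep, so hold the mutex rather than
	 * rcu_read_lock() for the traversal.
	 */
	mutex_lock(&ex->lock);
	list_for_each_entry(port, &ex->ports, node)
		example_sleeping_callback(ex, port);	/* hypothetical */
	mutex_unlock(&ex->lock);
}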
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 3eed708a6182..1762ad3910b2 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c | |||
@@ -46,8 +46,7 @@ enum cx82310_status { | |||
46 | }; | 46 | }; |
47 | 47 | ||
48 | #define CMD_PACKET_SIZE 64 | 48 | #define CMD_PACKET_SIZE 64 |
49 | /* first command after power on can take around 8 seconds */ | 49 | #define CMD_TIMEOUT 100 |
50 | #define CMD_TIMEOUT 15000 | ||
51 | #define CMD_REPLY_RETRY 5 | 50 | #define CMD_REPLY_RETRY 5 |
52 | 51 | ||
53 | #define CX82310_MTU 1514 | 52 | #define CX82310_MTU 1514 |
@@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, | |||
78 | ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, | 77 | ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, |
79 | CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); | 78 | CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); |
80 | if (ret < 0) { | 79 | if (ret < 0) { |
81 | dev_err(&dev->udev->dev, "send command %#x: error %d\n", | 80 | if (cmd != CMD_GET_LINK_STATUS) |
82 | cmd, ret); | 81 | dev_err(&dev->udev->dev, "send command %#x: error %d\n", |
82 | cmd, ret); | ||
83 | goto end; | 83 | goto end; |
84 | } | 84 | } |
85 | 85 | ||
@@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, | |||
90 | buf, CMD_PACKET_SIZE, &actual_len, | 90 | buf, CMD_PACKET_SIZE, &actual_len, |
91 | CMD_TIMEOUT); | 91 | CMD_TIMEOUT); |
92 | if (ret < 0) { | 92 | if (ret < 0) { |
93 | dev_err(&dev->udev->dev, | 93 | if (cmd != CMD_GET_LINK_STATUS) |
94 | "reply receive error %d\n", ret); | 94 | dev_err(&dev->udev->dev, |
95 | "reply receive error %d\n", | ||
96 | ret); | ||
95 | goto end; | 97 | goto end; |
96 | } | 98 | } |
97 | if (actual_len > 0) | 99 | if (actual_len > 0) |
@@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) | |||
134 | int ret; | 136 | int ret; |
135 | char buf[15]; | 137 | char buf[15]; |
136 | struct usb_device *udev = dev->udev; | 138 | struct usb_device *udev = dev->udev; |
139 | u8 link[3]; | ||
140 | int timeout = 50; | ||
137 | 141 | ||
138 | /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ | 142 | /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ |
139 | if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 | 143 | if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 |
@@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) | |||
160 | if (!dev->partial_data) | 164 | if (!dev->partial_data) |
161 | return -ENOMEM; | 165 | return -ENOMEM; |
162 | 166 | ||
167 | /* wait for firmware to become ready (indicated by the link being up) */ | ||
168 | while (--timeout) { | ||
169 | ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0, | ||
170 | link, sizeof(link)); | ||
171 | /* the command can time out during boot - it's not an error */ | ||
172 | if (!ret && link[0] == 1 && link[2] == 1) | ||
173 | break; | ||
174 | msleep(500); | ||
175 | } | ||
176 | if (!timeout) { | ||
177 | dev_err(&udev->dev, "firmware not ready in time\n"); | ||
178 | return -ETIMEDOUT; | ||
179 | } | ||
180 | |||
163 | /* enable ethernet mode (?) */ | 181 | /* enable ethernet mode (?) */ |
164 | ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); | 182 | ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); |
165 | if (ret) { | 183 | if (ret) { |
@@ -300,9 +318,18 @@ static const struct driver_info cx82310_info = { | |||
300 | .tx_fixup = cx82310_tx_fixup, | 318 | .tx_fixup = cx82310_tx_fixup, |
301 | }; | 319 | }; |
302 | 320 | ||
321 | #define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ | ||
322 | .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ | ||
323 | USB_DEVICE_ID_MATCH_DEV_INFO, \ | ||
324 | .idVendor = (vend), \ | ||
325 | .idProduct = (prod), \ | ||
326 | .bDeviceClass = (cl), \ | ||
327 | .bDeviceSubClass = (sc), \ | ||
328 | .bDeviceProtocol = (pr) | ||
329 | |||
303 | static const struct usb_device_id products[] = { | 330 | static const struct usb_device_id products[] = { |
304 | { | 331 | { |
305 | USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), | 332 | USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), |
306 | .driver_info = (unsigned long) &cx82310_info | 333 | .driver_info = (unsigned long) &cx82310_info |
307 | }, | 334 | }, |
308 | { }, | 335 | { }, |
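The cx82310 hunks replace one very long per-command timeout with a short one plus an explicit boot-time wait: bind now polls the link-status command, treating its timeouts as expected, for up to 50 attempts with 500 ms sleeps in between. A sketch of that bounded-retry shape, with the status query reduced to a hypothetical helper:

#include <linux/delay.h>
#include <linux/errno.h>

struct example_dev;

static int example_wait_firmware_ready(struct example_dev *dev)
{
	int timeout = 50;

	while (--timeout) {
		/* Failures here are expected while the firmware boots. */
		if (example_firmware_ready(dev))	/* hypothetical query */
			return 0;
		msleep(500);
	}
	return -ETIMEDOUT;
}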
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f1ff3666f090..59b0e9754ae3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi) | |||
1448 | { | 1448 | { |
1449 | int i; | 1449 | int i; |
1450 | 1450 | ||
1451 | for (i = 0; i < vi->max_queue_pairs; i++) | 1451 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1452 | napi_hash_del(&vi->rq[i].napi); | ||
1452 | netif_napi_del(&vi->rq[i].napi); | 1453 | netif_napi_del(&vi->rq[i].napi); |
1454 | } | ||
1453 | 1455 | ||
1454 | kfree(vi->rq); | 1456 | kfree(vi->rq); |
1455 | kfree(vi->sq); | 1457 | kfree(vi->sq); |
@@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev) | |||
1948 | cancel_delayed_work_sync(&vi->refill); | 1950 | cancel_delayed_work_sync(&vi->refill); |
1949 | 1951 | ||
1950 | if (netif_running(vi->dev)) { | 1952 | if (netif_running(vi->dev)) { |
1951 | for (i = 0; i < vi->max_queue_pairs; i++) { | 1953 | for (i = 0; i < vi->max_queue_pairs; i++) |
1952 | napi_disable(&vi->rq[i].napi); | 1954 | napi_disable(&vi->rq[i].napi); |
1953 | napi_hash_del(&vi->rq[i].napi); | ||
1954 | netif_napi_del(&vi->rq[i].napi); | ||
1955 | } | ||
1956 | } | 1955 | } |
1957 | 1956 | ||
1958 | remove_vq_common(vi); | 1957 | remove_vq_common(vi); |
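The virtio_net change keeps napi_hash_del() and netif_napi_del() together in the one routine that frees the queues, so the busy-poll hash entry is always removed before the NAPI context is deleted, instead of only in the freeze path when the device happened to be running. A sketch of the consolidated per-queue teardown, with the surrounding structures reduced to stand-ins:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct example_rq {
	struct napi_struct napi;
};

struct example_info {
	int max_queue_pairs;
	struct example_rq *rq;
};

static void example_free_queues(struct example_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		napi_hash_del(&vi->rq[i].napi);		/* drop the hash entry */
		netif_napi_del(&vi->rq[i].napi);	/* then the NAPI context */
	}
	kfree(vi->rq);
}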
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1e0a775ea882..f8528a4cf54f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
1218 | goto drop; | 1218 | goto drop; |
1219 | 1219 | ||
1220 | flags &= ~VXLAN_HF_RCO; | 1220 | flags &= ~VXLAN_HF_RCO; |
1221 | vni &= VXLAN_VID_MASK; | 1221 | vni &= VXLAN_VNI_MASK; |
1222 | } | 1222 | } |
1223 | 1223 | ||
1224 | /* For backwards compatibility, only allow reserved fields to be | 1224 | /* For backwards compatibility, only allow reserved fields to be |
@@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
1239 | flags &= ~VXLAN_GBP_USED_BITS; | 1239 | flags &= ~VXLAN_GBP_USED_BITS; |
1240 | } | 1240 | } |
1241 | 1241 | ||
1242 | if (flags || (vni & ~VXLAN_VID_MASK)) { | 1242 | if (flags || vni & ~VXLAN_VNI_MASK) { |
1243 | /* If there are any unprocessed flags remaining treat | 1243 | /* If there are any unprocessed flags remaining treat |
1244 | * this as a malformed packet. This behavior diverges from | 1244 | * this as a malformed packet. This behavior diverges from |
1245 | * VXLAN RFC (RFC7348) which stipulates that bits in reserved | 1245 | * VXLAN RFC (RFC7348) which stipulates that bits in reserved |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index ccbdb05b28cd..75345c1e8c34 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy, | |||
5370 | case 0x432a: /* BCM4321 */ | 5370 | case 0x432a: /* BCM4321 */ |
5371 | case 0x432d: /* BCM4322 */ | 5371 | case 0x432d: /* BCM4322 */ |
5372 | case 0x4352: /* BCM43222 */ | 5372 | case 0x4352: /* BCM43222 */ |
5373 | case 0x435a: /* BCM43228 */ | ||
5373 | case 0x4333: /* BCM4331 */ | 5374 | case 0x4333: /* BCM4331 */ |
5374 | case 0x43a2: /* BCM4360 */ | 5375 | case 0x43a2: /* BCM4360 */ |
5375 | case 0x43b3: /* BCM4352 */ | 5376 | case 0x43b3: /* BCM4352 */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c index 50cdf7090198..8eff2753abad 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c | |||
@@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, | |||
39 | void *dcmd_buf = NULL, *wr_pointer; | 39 | void *dcmd_buf = NULL, *wr_pointer; |
40 | u16 msglen, maxmsglen = PAGE_SIZE - 0x100; | 40 | u16 msglen, maxmsglen = PAGE_SIZE - 0x100; |
41 | 41 | ||
42 | brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set, | 42 | if (len < sizeof(*cmdhdr)) { |
43 | cmdhdr->len); | 43 | brcmf_err("vendor command too short: %d\n", len); |
44 | return -EINVAL; | ||
45 | } | ||
44 | 46 | ||
45 | vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); | 47 | vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); |
46 | ifp = vif->ifp; | 48 | ifp = vif->ifp; |
47 | 49 | ||
48 | len -= sizeof(struct brcmf_vndr_dcmd_hdr); | 50 | brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd); |
51 | |||
52 | if (cmdhdr->offset > len) { | ||
53 | brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len); | ||
54 | return -EINVAL; | ||
55 | } | ||
56 | |||
57 | len -= cmdhdr->offset; | ||
49 | ret_len = cmdhdr->len; | 58 | ret_len = cmdhdr->len; |
50 | if (ret_len > 0 || len > 0) { | 59 | if (ret_len > 0 || len > 0) { |
51 | if (len > BRCMF_DCMD_MAXLEN) { | 60 | if (len > BRCMF_DCMD_MAXLEN) { |
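The brcmfmac hunk adds the two checks any handler of a user-supplied buffer needs before trusting a length or offset embedded in that buffer: the buffer must at least hold the fixed header, and the claimed payload offset must not point past the end of what was actually received. A hedged, generic sketch; the struct here is hypothetical, not the driver's header layout:

#include <linux/types.h>
#include <linux/errno.h>

struct example_hdr {
	u32 cmd;
	u16 len;	/* payload length claimed by userspace */
	u16 offset;	/* payload offset claimed by userspace */
};

static int example_validate(const void *buf, int len)
{
	const struct example_hdr *hdr = buf;

	if (len < sizeof(*hdr))
		return -EINVAL;		/* header itself is truncated */
	if (hdr->offset > len)
		return -EINVAL;		/* offset points outside the buffer */
	return 0;
}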
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index c3817fae16c0..06f6cc08f451 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c | |||
@@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { | |||
95 | .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ | 95 | .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ |
96 | .base_params = &iwl1000_base_params, \ | 96 | .base_params = &iwl1000_base_params, \ |
97 | .eeprom_params = &iwl1000_eeprom_params, \ | 97 | .eeprom_params = &iwl1000_eeprom_params, \ |
98 | .led_mode = IWL_LED_BLINK | 98 | .led_mode = IWL_LED_BLINK, \ |
99 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
99 | 100 | ||
100 | const struct iwl_cfg iwl1000_bgn_cfg = { | 101 | const struct iwl_cfg iwl1000_bgn_cfg = { |
101 | .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", | 102 | .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", |
@@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = { | |||
121 | .base_params = &iwl1000_base_params, \ | 122 | .base_params = &iwl1000_base_params, \ |
122 | .eeprom_params = &iwl1000_eeprom_params, \ | 123 | .eeprom_params = &iwl1000_eeprom_params, \ |
123 | .led_mode = IWL_LED_RF_STATE, \ | 124 | .led_mode = IWL_LED_RF_STATE, \ |
124 | .rx_with_siso_diversity = true | 125 | .rx_with_siso_diversity = true, \ |
126 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
125 | 127 | ||
126 | const struct iwl_cfg iwl100_bgn_cfg = { | 128 | const struct iwl_cfg iwl100_bgn_cfg = { |
127 | .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", | 129 | .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", |
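The iwlwifi config edits here and in the following files all follow one pattern: these device macros expand to a fragment of a designated initializer, so adding .max_ht_ampdu_exponent means the previous last field now needs a trailing comma and every interior line a continuation backslash, otherwise the new field would not become part of the macro. A small self-contained illustration with made-up names:

struct example_cfg {
	const char *name;
	int led_mode;
	int max_ht_ampdu_exponent;
};

/* Each interior line ends in '\'; the final field carries no comma
 * because the expansion site supplies it.
 */
#define EXAMPLE_DEVICE_COMMON			\
	.led_mode = 1,				\
	.max_ht_ampdu_exponent = 3

static const struct example_cfg example_cfg = {
	.name = "example",
	EXAMPLE_DEVICE_COMMON,
};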
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index 21e5d0843a62..890b95f497d6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c | |||
@@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { | |||
123 | .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ | 123 | .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ |
124 | .base_params = &iwl2000_base_params, \ | 124 | .base_params = &iwl2000_base_params, \ |
125 | .eeprom_params = &iwl20x0_eeprom_params, \ | 125 | .eeprom_params = &iwl20x0_eeprom_params, \ |
126 | .led_mode = IWL_LED_RF_STATE | 126 | .led_mode = IWL_LED_RF_STATE, \ |
127 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
128 | |||
127 | 129 | ||
128 | const struct iwl_cfg iwl2000_2bgn_cfg = { | 130 | const struct iwl_cfg iwl2000_2bgn_cfg = { |
129 | .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", | 131 | .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", |
@@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { | |||
149 | .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ | 151 | .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ |
150 | .base_params = &iwl2030_base_params, \ | 152 | .base_params = &iwl2030_base_params, \ |
151 | .eeprom_params = &iwl20x0_eeprom_params, \ | 153 | .eeprom_params = &iwl20x0_eeprom_params, \ |
152 | .led_mode = IWL_LED_RF_STATE | 154 | .led_mode = IWL_LED_RF_STATE, \ |
155 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
153 | 156 | ||
154 | const struct iwl_cfg iwl2030_2bgn_cfg = { | 157 | const struct iwl_cfg iwl2030_2bgn_cfg = { |
155 | .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", | 158 | .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", |
@@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { | |||
170 | .base_params = &iwl2000_base_params, \ | 173 | .base_params = &iwl2000_base_params, \ |
171 | .eeprom_params = &iwl20x0_eeprom_params, \ | 174 | .eeprom_params = &iwl20x0_eeprom_params, \ |
172 | .led_mode = IWL_LED_RF_STATE, \ | 175 | .led_mode = IWL_LED_RF_STATE, \ |
173 | .rx_with_siso_diversity = true | 176 | .rx_with_siso_diversity = true, \ |
177 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
174 | 178 | ||
175 | const struct iwl_cfg iwl105_bgn_cfg = { | 179 | const struct iwl_cfg iwl105_bgn_cfg = { |
176 | .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", | 180 | .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", |
@@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { | |||
197 | .base_params = &iwl2030_base_params, \ | 201 | .base_params = &iwl2030_base_params, \ |
198 | .eeprom_params = &iwl20x0_eeprom_params, \ | 202 | .eeprom_params = &iwl20x0_eeprom_params, \ |
199 | .led_mode = IWL_LED_RF_STATE, \ | 203 | .led_mode = IWL_LED_RF_STATE, \ |
200 | .rx_with_siso_diversity = true | 204 | .rx_with_siso_diversity = true, \ |
205 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
201 | 206 | ||
202 | const struct iwl_cfg iwl135_bgn_cfg = { | 207 | const struct iwl_cfg iwl135_bgn_cfg = { |
203 | .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", | 208 | .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 332bbede39e5..724194e23414 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { | |||
93 | .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ | 93 | .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ |
94 | .base_params = &iwl5000_base_params, \ | 94 | .base_params = &iwl5000_base_params, \ |
95 | .eeprom_params = &iwl5000_eeprom_params, \ | 95 | .eeprom_params = &iwl5000_eeprom_params, \ |
96 | .led_mode = IWL_LED_BLINK | 96 | .led_mode = IWL_LED_BLINK, \ |
97 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
97 | 98 | ||
98 | const struct iwl_cfg iwl5300_agn_cfg = { | 99 | const struct iwl_cfg iwl5300_agn_cfg = { |
99 | .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", | 100 | .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", |
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = { | |||
158 | .base_params = &iwl5000_base_params, \ | 159 | .base_params = &iwl5000_base_params, \ |
159 | .eeprom_params = &iwl5000_eeprom_params, \ | 160 | .eeprom_params = &iwl5000_eeprom_params, \ |
160 | .led_mode = IWL_LED_BLINK, \ | 161 | .led_mode = IWL_LED_BLINK, \ |
161 | .internal_wimax_coex = true | 162 | .internal_wimax_coex = true, \ |
163 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
162 | 164 | ||
163 | const struct iwl_cfg iwl5150_agn_cfg = { | 165 | const struct iwl_cfg iwl5150_agn_cfg = { |
164 | .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", | 166 | .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", |
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 8f2c3c8c6b84..21b2630763dc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
@@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { | |||
145 | .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ | 145 | .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ |
146 | .base_params = &iwl6000_g2_base_params, \ | 146 | .base_params = &iwl6000_g2_base_params, \ |
147 | .eeprom_params = &iwl6000_eeprom_params, \ | 147 | .eeprom_params = &iwl6000_eeprom_params, \ |
148 | .led_mode = IWL_LED_RF_STATE | 148 | .led_mode = IWL_LED_RF_STATE, \ |
149 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
149 | 150 | ||
150 | const struct iwl_cfg iwl6005_2agn_cfg = { | 151 | const struct iwl_cfg iwl6005_2agn_cfg = { |
151 | .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", | 152 | .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", |
@@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { | |||
199 | .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ | 200 | .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ |
200 | .base_params = &iwl6000_g2_base_params, \ | 201 | .base_params = &iwl6000_g2_base_params, \ |
201 | .eeprom_params = &iwl6000_eeprom_params, \ | 202 | .eeprom_params = &iwl6000_eeprom_params, \ |
202 | .led_mode = IWL_LED_RF_STATE | 203 | .led_mode = IWL_LED_RF_STATE, \ |
204 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
203 | 205 | ||
204 | const struct iwl_cfg iwl6030_2agn_cfg = { | 206 | const struct iwl_cfg iwl6030_2agn_cfg = { |
205 | .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", | 207 | .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", |
@@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = { | |||
235 | .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ | 237 | .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ |
236 | .base_params = &iwl6000_g2_base_params, \ | 238 | .base_params = &iwl6000_g2_base_params, \ |
237 | .eeprom_params = &iwl6000_eeprom_params, \ | 239 | .eeprom_params = &iwl6000_eeprom_params, \ |
238 | .led_mode = IWL_LED_RF_STATE | 240 | .led_mode = IWL_LED_RF_STATE, \ |
241 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
239 | 242 | ||
240 | const struct iwl_cfg iwl6035_2agn_cfg = { | 243 | const struct iwl_cfg iwl6035_2agn_cfg = { |
241 | .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", | 244 | .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", |
@@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = { | |||
290 | .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ | 293 | .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ |
291 | .base_params = &iwl6000_base_params, \ | 294 | .base_params = &iwl6000_base_params, \ |
292 | .eeprom_params = &iwl6000_eeprom_params, \ | 295 | .eeprom_params = &iwl6000_eeprom_params, \ |
293 | .led_mode = IWL_LED_BLINK | 296 | .led_mode = IWL_LED_BLINK, \ |
297 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
294 | 298 | ||
295 | const struct iwl_cfg iwl6000i_2agn_cfg = { | 299 | const struct iwl_cfg iwl6000i_2agn_cfg = { |
296 | .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", | 300 | .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", |
@@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = { | |||
322 | .base_params = &iwl6050_base_params, \ | 326 | .base_params = &iwl6050_base_params, \ |
323 | .eeprom_params = &iwl6000_eeprom_params, \ | 327 | .eeprom_params = &iwl6000_eeprom_params, \ |
324 | .led_mode = IWL_LED_BLINK, \ | 328 | .led_mode = IWL_LED_BLINK, \ |
325 | .internal_wimax_coex = true | 329 | .internal_wimax_coex = true, \ |
330 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
326 | 331 | ||
327 | const struct iwl_cfg iwl6050_2agn_cfg = { | 332 | const struct iwl_cfg iwl6050_2agn_cfg = { |
328 | .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", | 333 | .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", |
@@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = { | |||
347 | .base_params = &iwl6050_base_params, \ | 352 | .base_params = &iwl6050_base_params, \ |
348 | .eeprom_params = &iwl6000_eeprom_params, \ | 353 | .eeprom_params = &iwl6000_eeprom_params, \ |
349 | .led_mode = IWL_LED_BLINK, \ | 354 | .led_mode = IWL_LED_BLINK, \ |
350 | .internal_wimax_coex = true | 355 | .internal_wimax_coex = true, \ |
356 | .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K | ||
351 | 357 | ||
352 | const struct iwl_cfg iwl6150_bgn_cfg = { | 358 | const struct iwl_cfg iwl6150_bgn_cfg = { |
353 | .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", | 359 | .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", |
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 1ec4d55155f7..7810c41cf9a7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c | |||
@@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
793 | if (!vif->bss_conf.assoc) | 793 | if (!vif->bss_conf.assoc) |
794 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 794 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
795 | 795 | ||
796 | if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, | 796 | if (mvmvif->phy_ctxt && |
797 | IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, | ||
797 | mvmvif->phy_ctxt->id)) | 798 | mvmvif->phy_ctxt->id)) |
798 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 799 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
799 | 800 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index d530ef3da107..542ee74f290a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | |||
@@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, | |||
832 | if (!vif->bss_conf.assoc) | 832 | if (!vif->bss_conf.assoc) |
833 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 833 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
834 | 834 | ||
835 | if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) | 835 | if (mvmvif->phy_ctxt && |
836 | data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) | ||
836 | smps_mode = IEEE80211_SMPS_AUTOMATIC; | 837 | smps_mode = IEEE80211_SMPS_AUTOMATIC; |
837 | 838 | ||
838 | IWL_DEBUG_COEX(data->mvm, | 839 | IWL_DEBUG_COEX(data->mvm, |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 1ff7ec08532d..09654e73a533 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) | |||
405 | hw->wiphy->bands[IEEE80211_BAND_5GHZ] = | 405 | hw->wiphy->bands[IEEE80211_BAND_5GHZ] = |
406 | &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; | 406 | &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; |
407 | 407 | ||
408 | if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) | 408 | if ((mvm->fw->ucode_capa.capa[0] & |
409 | IWL_UCODE_TLV_CAPA_BEAMFORMER) && | ||
410 | (mvm->fw->ucode_capa.api[0] & | ||
411 | IWL_UCODE_TLV_API_LQ_SS_PARAMS)) | ||
409 | hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= | 412 | hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= |
410 | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; | 413 | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; |
411 | } | 414 | } |
@@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, | |||
2215 | 2218 | ||
2216 | mutex_lock(&mvm->mutex); | 2219 | mutex_lock(&mvm->mutex); |
2217 | 2220 | ||
2218 | iwl_mvm_cancel_scan(mvm); | 2221 | /* Due to a race condition, it's possible that mac80211 asks |
2222 | * us to stop a hw_scan when it's already stopped. This can | ||
2223 | * happen, for instance, if we stopped the scan ourselves, | ||
2224 | * called ieee80211_scan_completed() and the userspace called | ||
2225 | * cancel scan before ieee80211_scan_work() could run. | ||
2226 | * To handle that, simply return if the scan is not running. | ||
2227 | */ | ||
2228 | /* FIXME: for now, we ignore this race for UMAC scans, since | ||
2229 | * they don't set the scan_status. | ||
2230 | */ | ||
2231 | if ((mvm->scan_status == IWL_MVM_SCAN_OS) || | ||
2232 | (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) | ||
2233 | iwl_mvm_cancel_scan(mvm); | ||
2219 | 2234 | ||
2220 | mutex_unlock(&mvm->mutex); | 2235 | mutex_unlock(&mvm->mutex); |
2221 | } | 2236 | } |
@@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, | |||
2559 | int ret; | 2574 | int ret; |
2560 | 2575 | ||
2561 | mutex_lock(&mvm->mutex); | 2576 | mutex_lock(&mvm->mutex); |
2577 | |||
2578 | /* Due to a race condition, it's possible that mac80211 asks | ||
2579 | * us to stop a sched_scan when it's already stopped. This | ||
2580 | * can happen, for instance, if we stopped the scan ourselves, | ||
2581 | * called ieee80211_sched_scan_stopped() and the userspace called | ||
2582 | * stop sched scan before ieee80211_sched_scan_stopped_work() | ||
2583 | * could run. To handle this, simply return if the scan is | ||
2584 | * not running. | ||
2585 | */ | ||
2586 | /* FIXME: for now, we ignore this race for UMAC scans, since | ||
2587 | * they don't set the scan_status. | ||
2588 | */ | ||
2589 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && | ||
2590 | !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { | ||
2591 | mutex_unlock(&mvm->mutex); | ||
2592 | return 0; | ||
2593 | } | ||
2594 | |||
2562 | ret = iwl_mvm_scan_offload_stop(mvm, false); | 2595 | ret = iwl_mvm_scan_offload_stop(mvm, false); |
2563 | mutex_unlock(&mvm->mutex); | 2596 | mutex_unlock(&mvm->mutex); |
2564 | iwl_mvm_wait_for_async_handlers(mvm); | 2597 | iwl_mvm_wait_for_async_handlers(mvm); |
2565 | 2598 | ||
2566 | return ret; | 2599 | return ret; |
2567 | |||
2568 | } | 2600 | } |
2569 | 2601 | ||
2570 | static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | 2602 | static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, |
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 194bd1f939ca..efa9688a4cf1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c | |||
@@ -134,9 +134,12 @@ enum rs_column_mode { | |||
134 | #define MAX_NEXT_COLUMNS 7 | 134 | #define MAX_NEXT_COLUMNS 7 |
135 | #define MAX_COLUMN_CHECKS 3 | 135 | #define MAX_COLUMN_CHECKS 3 |
136 | 136 | ||
137 | struct rs_tx_column; | ||
138 | |||
137 | typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, | 139 | typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, |
138 | struct ieee80211_sta *sta, | 140 | struct ieee80211_sta *sta, |
139 | struct iwl_scale_tbl_info *tbl); | 141 | struct iwl_scale_tbl_info *tbl, |
142 | const struct rs_tx_column *next_col); | ||
140 | 143 | ||
141 | struct rs_tx_column { | 144 | struct rs_tx_column { |
142 | enum rs_column_mode mode; | 145 | enum rs_column_mode mode; |
@@ -147,13 +150,15 @@ struct rs_tx_column { | |||
147 | }; | 150 | }; |
148 | 151 | ||
149 | static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 152 | static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
150 | struct iwl_scale_tbl_info *tbl) | 153 | struct iwl_scale_tbl_info *tbl, |
154 | const struct rs_tx_column *next_col) | ||
151 | { | 155 | { |
152 | return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant); | 156 | return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant); |
153 | } | 157 | } |
154 | 158 | ||
155 | static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 159 | static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
156 | struct iwl_scale_tbl_info *tbl) | 160 | struct iwl_scale_tbl_info *tbl, |
161 | const struct rs_tx_column *next_col) | ||
157 | { | 162 | { |
158 | if (!sta->ht_cap.ht_supported) | 163 | if (!sta->ht_cap.ht_supported) |
159 | return false; | 164 | return false; |
@@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
171 | } | 176 | } |
172 | 177 | ||
173 | static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 178 | static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
174 | struct iwl_scale_tbl_info *tbl) | 179 | struct iwl_scale_tbl_info *tbl, |
180 | const struct rs_tx_column *next_col) | ||
175 | { | 181 | { |
176 | if (!sta->ht_cap.ht_supported) | 182 | if (!sta->ht_cap.ht_supported) |
177 | return false; | 183 | return false; |
@@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
180 | } | 186 | } |
181 | 187 | ||
182 | static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 188 | static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
183 | struct iwl_scale_tbl_info *tbl) | 189 | struct iwl_scale_tbl_info *tbl, |
190 | const struct rs_tx_column *next_col) | ||
184 | { | 191 | { |
185 | struct rs_rate *rate = &tbl->rate; | 192 | struct rs_rate *rate = &tbl->rate; |
186 | struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; | 193 | struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; |
@@ -1590,7 +1597,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, | |||
1590 | 1597 | ||
1591 | for (j = 0; j < MAX_COLUMN_CHECKS; j++) { | 1598 | for (j = 0; j < MAX_COLUMN_CHECKS; j++) { |
1592 | allow_func = next_col->checks[j]; | 1599 | allow_func = next_col->checks[j]; |
1593 | if (allow_func && !allow_func(mvm, sta, tbl)) | 1600 | if (allow_func && !allow_func(mvm, sta, tbl, next_col)) |
1594 | break; | 1601 | break; |
1595 | } | 1602 | } |
1596 | 1603 | ||
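The rs.c change is the usual C idiom for letting a callback see the object it is attached to: forward-declare the struct so the function-pointer typedef can name it, give the struct members of that pointer type, and pass the candidate column to each check so rs_ant_allow() can test the antenna of the column being considered rather than the current rate table. A stripped-down sketch with illustrative names:

#include <linux/types.h>

struct tx_column;	/* forward declaration breaks the cycle */

typedef bool (*allow_func_t)(const struct tx_column *next_col);

struct tx_column {
	int ant;
	allow_func_t checks[3];
};

static bool example_ant_allow(const struct tx_column *next_col)
{
	/* The check inspects the candidate column itself, mirroring
	 * the rs_ant_allow() change above.
	 */
	return next_col->ant != 0;
}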
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 7e9aa3cb3254..c47c8051da77 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c | |||
@@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
1128 | if (mvm->scan_status == IWL_MVM_SCAN_NONE) | 1128 | if (mvm->scan_status == IWL_MVM_SCAN_NONE) |
1129 | return 0; | 1129 | return 0; |
1130 | 1130 | ||
1131 | if (iwl_mvm_is_radio_killed(mvm)) | 1131 | if (iwl_mvm_is_radio_killed(mvm)) { |
1132 | ret = 0; | ||
1132 | goto out; | 1133 | goto out; |
1134 | } | ||
1133 | 1135 | ||
1134 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && | 1136 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && |
1135 | (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || | 1137 | (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || |
@@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
1148 | IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", | 1150 | IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", |
1149 | sched ? "offloaded " : "", ret); | 1151 | sched ? "offloaded " : "", ret); |
1150 | iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); | 1152 | iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); |
1151 | return ret; | 1153 | goto out; |
1152 | } | 1154 | } |
1153 | 1155 | ||
1154 | IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", | 1156 | IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", |
1155 | sched ? "offloaded " : ""); | 1157 | sched ? "offloaded " : ""); |
1156 | 1158 | ||
1157 | ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); | 1159 | ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); |
1158 | if (ret) | 1160 | out: |
1159 | return ret; | ||
1160 | |||
1161 | /* | 1161 | /* |
1162 | * Clear the scan status so the next scan requests will succeed. This | 1162 | * Clear the scan status so the next scan requests will succeed. This |
1163 | * also ensures the Rx handler doesn't do anything, as the scan was | 1163 | * also ensures the Rx handler doesn't do anything, as the scan was |
@@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
1167 | if (mvm->scan_status == IWL_MVM_SCAN_OS) | 1167 | if (mvm->scan_status == IWL_MVM_SCAN_OS) |
1168 | iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); | 1168 | iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); |
1169 | 1169 | ||
1170 | out: | ||
1171 | mvm->scan_status = IWL_MVM_SCAN_NONE; | 1170 | mvm->scan_status = IWL_MVM_SCAN_NONE; |
1172 | 1171 | ||
1173 | if (notify) { | 1172 | if (notify) { |
@@ -1177,7 +1176,7 @@ out: | |||
1177 | ieee80211_scan_completed(mvm->hw, true); | 1176 | ieee80211_scan_completed(mvm->hw, true); |
1178 | } | 1177 | } |
1179 | 1178 | ||
1180 | return 0; | 1179 | return ret; |
1181 | } | 1180 | } |
1182 | 1181 | ||
1183 | static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm, | 1182 | static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm, |
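The scan.c rework converges every exit of iwl_mvm_scan_offload_stop() on one label so the status-clearing code runs exactly once: rfkill is treated as a successful stop (ret forced to 0) and command failures now fall through to the same cleanup instead of returning early. A generic sketch of that single-exit shape, built from hypothetical helpers:

enum example_status { EXAMPLE_IDLE, EXAMPLE_RUNNING };

struct example {
	enum example_status status;
};

static int example_stop(struct example *e)
{
	int ret = 0;

	if (example_radio_killed(e))
		goto out;		/* not an error: ret stays 0 */

	ret = example_send_stop(e);
	if (ret)
		goto out;		/* report failure, but still clean up */

	ret = example_wait_done(e);
out:
	e->status = EXAMPLE_IDLE;	/* always clear the state */
	return ret;
}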
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 54fafbf9a711..f8d6f306dd76 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c | |||
@@ -750,8 +750,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) | |||
750 | * request | 750 | * request |
751 | */ | 751 | */ |
752 | list_for_each_entry(te_data, &mvm->time_event_list, list) { | 752 | list_for_each_entry(te_data, &mvm->time_event_list, list) { |
753 | if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE && | 753 | if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { |
754 | te_data->running) { | ||
755 | mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); | 754 | mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); |
756 | is_p2p = true; | 755 | is_p2p = true; |
757 | goto remove_te; | 756 | goto remove_te; |
@@ -766,10 +765,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) | |||
766 | * request | 765 | * request |
767 | */ | 766 | */ |
768 | list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { | 767 | list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { |
769 | if (te_data->running) { | 768 | mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); |
770 | mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); | 769 | goto remove_te; |
771 | goto remove_te; | ||
772 | } | ||
773 | } | 770 | } |
774 | 771 | ||
775 | remove_te: | 772 | remove_te: |
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 1d4677460711..074f716020aa 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c | |||
@@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) | |||
1386 | } | 1386 | } |
1387 | 1387 | ||
1388 | return true; | 1388 | return true; |
1389 | } else if (0x86DD == ether_type) { | 1389 | } else if (ETH_P_IPV6 == ether_type) { |
1390 | return true; | 1390 | /* TODO: Handle any IPv6 cases that need special handling. |
1391 | * For now, always return false | ||
1392 | */ | ||
1393 | goto end; | ||
1391 | } | 1394 | } |
1392 | 1395 | ||
1393 | end: | 1396 | end: |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f38227afe099..3aa8648080c8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev, | |||
340 | unsigned int num_queues = vif->num_queues; | 340 | unsigned int num_queues = vif->num_queues; |
341 | int i; | 341 | int i; |
342 | unsigned int queue_index; | 342 | unsigned int queue_index; |
343 | struct xenvif_stats *vif_stats; | ||
344 | 343 | ||
345 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { | 344 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { |
346 | unsigned long accum = 0; | 345 | unsigned long accum = 0; |
347 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | 346 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
348 | vif_stats = &vif->queues[queue_index].stats; | 347 | void *vif_stats = &vif->queues[queue_index].stats; |
349 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); | 348 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); |
350 | } | 349 | } |
351 | data[i] = accum; | 350 | data[i] = accum; |
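The xen-netback ethtool fix is about pointer arithmetic: the stats offsets are byte counts, and adding one to a struct xenvif_stats pointer scales it by the structure size, landing on the wrong field. Declaring the base as void * (byte-sized arithmetic as a GNU extension) or going through char * makes the offset mean what it was meant to. A small illustration:

#include <stddef.h>

struct example_stats {
	unsigned long rx_packets;
	unsigned long tx_packets;
};

/* byte_off must be added to a char * (or void * as a GNU extension),
 * never to a struct example_stats *, which would multiply it by
 * sizeof(struct example_stats).
 */
static unsigned long example_read_stat(const struct example_stats *s,
				       size_t byte_off)
{
	return *(const unsigned long *)((const char *)s + byte_off);
}

/* usage: example_read_stat(s, offsetof(struct example_stats, tx_packets)) */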
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index c4d68d768408..997cf0901ac2 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, | |||
96 | static void make_tx_response(struct xenvif_queue *queue, | 96 | static void make_tx_response(struct xenvif_queue *queue, |
97 | struct xen_netif_tx_request *txp, | 97 | struct xen_netif_tx_request *txp, |
98 | s8 st); | 98 | s8 st); |
99 | static void push_tx_responses(struct xenvif_queue *queue); | ||
99 | 100 | ||
100 | static inline int tx_work_todo(struct xenvif_queue *queue); | 101 | static inline int tx_work_todo(struct xenvif_queue *queue); |
101 | 102 | ||
@@ -655,15 +656,10 @@ static void xenvif_tx_err(struct xenvif_queue *queue, | |||
655 | unsigned long flags; | 656 | unsigned long flags; |
656 | 657 | ||
657 | do { | 658 | do { |
658 | int notify; | ||
659 | |||
660 | spin_lock_irqsave(&queue->response_lock, flags); | 659 | spin_lock_irqsave(&queue->response_lock, flags); |
661 | make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); | 660 | make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); |
662 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); | 661 | push_tx_responses(queue); |
663 | spin_unlock_irqrestore(&queue->response_lock, flags); | 662 | spin_unlock_irqrestore(&queue->response_lock, flags); |
664 | if (notify) | ||
665 | notify_remote_via_irq(queue->tx_irq); | ||
666 | |||
667 | if (cons == end) | 663 | if (cons == end) |
668 | break; | 664 | break; |
669 | txp = RING_GET_REQUEST(&queue->tx, cons++); | 665 | txp = RING_GET_REQUEST(&queue->tx, cons++); |
@@ -1349,7 +1345,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
1349 | { | 1345 | { |
1350 | unsigned int offset = skb_headlen(skb); | 1346 | unsigned int offset = skb_headlen(skb); |
1351 | skb_frag_t frags[MAX_SKB_FRAGS]; | 1347 | skb_frag_t frags[MAX_SKB_FRAGS]; |
1352 | int i; | 1348 | int i, f; |
1353 | struct ubuf_info *uarg; | 1349 | struct ubuf_info *uarg; |
1354 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; | 1350 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; |
1355 | 1351 | ||
@@ -1389,23 +1385,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
1389 | frags[i].page_offset = 0; | 1385 | frags[i].page_offset = 0; |
1390 | skb_frag_size_set(&frags[i], len); | 1386 | skb_frag_size_set(&frags[i], len); |
1391 | } | 1387 | } |
1392 | /* swap out with old one */ | ||
1393 | memcpy(skb_shinfo(skb)->frags, | ||
1394 | frags, | ||
1395 | i * sizeof(skb_frag_t)); | ||
1396 | skb_shinfo(skb)->nr_frags = i; | ||
1397 | skb->truesize += i * PAGE_SIZE; | ||
1398 | 1388 | ||
1399 | /* remove traces of mapped pages and frag_list */ | 1389 | /* Copied all the bits from the frag list -- free it. */ |
1400 | skb_frag_list_init(skb); | 1390 | skb_frag_list_init(skb); |
1391 | xenvif_skb_zerocopy_prepare(queue, nskb); | ||
1392 | kfree_skb(nskb); | ||
1393 | |||
1394 | /* Release all the original (foreign) frags. */ | ||
1395 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | ||
1396 | skb_frag_unref(skb, f); | ||
1401 | uarg = skb_shinfo(skb)->destructor_arg; | 1397 | uarg = skb_shinfo(skb)->destructor_arg; |
1402 | /* increase inflight counter to offset decrement in callback */ | 1398 | /* increase inflight counter to offset decrement in callback */ |
1403 | atomic_inc(&queue->inflight_packets); | 1399 | atomic_inc(&queue->inflight_packets); |
1404 | uarg->callback(uarg, true); | 1400 | uarg->callback(uarg, true); |
1405 | skb_shinfo(skb)->destructor_arg = NULL; | 1401 | skb_shinfo(skb)->destructor_arg = NULL; |
1406 | 1402 | ||
1407 | xenvif_skb_zerocopy_prepare(queue, nskb); | 1403 | /* Fill the skb with the new (local) frags. */ |
1408 | kfree_skb(nskb); | 1404 | memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); |
1405 | skb_shinfo(skb)->nr_frags = i; | ||
1406 | skb->truesize += i * PAGE_SIZE; | ||
1409 | 1407 | ||
1410 | return 0; | 1408 | return 0; |
1411 | } | 1409 | } |
@@ -1655,7 +1653,6 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, | |||
1655 | { | 1653 | { |
1656 | struct pending_tx_info *pending_tx_info; | 1654 | struct pending_tx_info *pending_tx_info; |
1657 | pending_ring_idx_t index; | 1655 | pending_ring_idx_t index; |
1658 | int notify; | ||
1659 | unsigned long flags; | 1656 | unsigned long flags; |
1660 | 1657 | ||
1661 | pending_tx_info = &queue->pending_tx_info[pending_idx]; | 1658 | pending_tx_info = &queue->pending_tx_info[pending_idx]; |
@@ -1671,12 +1668,9 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, | |||
1671 | index = pending_index(queue->pending_prod++); | 1668 | index = pending_index(queue->pending_prod++); |
1672 | queue->pending_ring[index] = pending_idx; | 1669 | queue->pending_ring[index] = pending_idx; |
1673 | 1670 | ||
1674 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); | 1671 | push_tx_responses(queue); |
1675 | 1672 | ||
1676 | spin_unlock_irqrestore(&queue->response_lock, flags); | 1673 | spin_unlock_irqrestore(&queue->response_lock, flags); |
1677 | |||
1678 | if (notify) | ||
1679 | notify_remote_via_irq(queue->tx_irq); | ||
1680 | } | 1674 | } |
1681 | 1675 | ||
1682 | 1676 | ||
@@ -1697,6 +1691,15 @@ static void make_tx_response(struct xenvif_queue *queue, | |||
1697 | queue->tx.rsp_prod_pvt = ++i; | 1691 | queue->tx.rsp_prod_pvt = ++i; |
1698 | } | 1692 | } |
1699 | 1693 | ||
1694 | static void push_tx_responses(struct xenvif_queue *queue) | ||
1695 | { | ||
1696 | int notify; | ||
1697 | |||
1698 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); | ||
1699 | if (notify) | ||
1700 | notify_remote_via_irq(queue->tx_irq); | ||
1701 | } | ||
1702 | |||
1700 | static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, | 1703 | static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, |
1701 | u16 id, | 1704 | u16 id, |
1702 | s8 st, | 1705 | s8 st, |
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 38d1c51f58b1..7bcaeec876c0 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig | |||
@@ -84,8 +84,7 @@ config OF_RESOLVE | |||
84 | bool | 84 | bool |
85 | 85 | ||
86 | config OF_OVERLAY | 86 | config OF_OVERLAY |
87 | bool | 87 | bool "Device Tree overlays" |
88 | depends on OF | ||
89 | select OF_DYNAMIC | 88 | select OF_DYNAMIC |
90 | select OF_RESOLVE | 89 | select OF_RESOLVE |
91 | 90 | ||
diff --git a/drivers/of/base.c b/drivers/of/base.c index 0a8aeb8523fe..8f165b112e03 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -714,16 +714,12 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent, | |||
714 | const char *path) | 714 | const char *path) |
715 | { | 715 | { |
716 | struct device_node *child; | 716 | struct device_node *child; |
717 | int len = strchrnul(path, '/') - path; | 717 | int len; |
718 | int term; | ||
719 | 718 | ||
719 | len = strcspn(path, "/:"); | ||
720 | if (!len) | 720 | if (!len) |
721 | return NULL; | 721 | return NULL; |
722 | 722 | ||
723 | term = strchrnul(path, ':') - path; | ||
724 | if (term < len) | ||
725 | len = term; | ||
726 | |||
727 | __for_each_child_of_node(parent, child) { | 723 | __for_each_child_of_node(parent, child) { |
728 | const char *name = strrchr(child->full_name, '/'); | 724 | const char *name = strrchr(child->full_name, '/'); |
729 | if (WARN(!name, "malformed device_node %s\n", child->full_name)) | 725 | if (WARN(!name, "malformed device_node %s\n", child->full_name)) |
@@ -768,8 +764,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt | |||
768 | 764 | ||
769 | /* The path could begin with an alias */ | 765 | /* The path could begin with an alias */ |
770 | if (*path != '/') { | 766 | if (*path != '/') { |
771 | char *p = strchrnul(path, '/'); | 767 | int len; |
772 | int len = separator ? separator - path : p - path; | 768 | const char *p = separator; |
769 | |||
770 | if (!p) | ||
771 | p = strchrnul(path, '/'); | ||
772 | len = p - path; | ||
773 | 773 | ||
774 | /* of_aliases must not be NULL */ | 774 | /* of_aliases must not be NULL */ |
775 | if (!of_aliases) | 775 | if (!of_aliases) |
@@ -794,6 +794,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt | |||
794 | path++; /* Increment past '/' delimiter */ | 794 | path++; /* Increment past '/' delimiter */ |
795 | np = __of_find_node_by_path(np, path); | 795 | np = __of_find_node_by_path(np, path); |
796 | path = strchrnul(path, '/'); | 796 | path = strchrnul(path, '/'); |
797 | if (separator && separator < path) | ||
798 | break; | ||
797 | } | 799 | } |
798 | raw_spin_unlock_irqrestore(&devtree_lock, flags); | 800 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
799 | return np; | 801 | return np; |
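The of/base.c simplification leans on strcspn(), which returns the length of the initial span containing none of the listed separator characters, so one call replaces the old strchrnul()-twice-and-take-the-minimum dance for a path segment that may end at either '/' or ':'. A tiny standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "serial0:115200n8/child";
	size_t len = strcspn(path, "/:");	/* stops at ':' here */

	printf("segment: %.*s\n", (int)len, path);	/* prints "serial0" */
	return 0;
}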
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 0d7765807f49..1a7980692f25 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar | |||
290 | struct device_node *p; | 290 | struct device_node *p; |
291 | const __be32 *intspec, *tmp, *addr; | 291 | const __be32 *intspec, *tmp, *addr; |
292 | u32 intsize, intlen; | 292 | u32 intsize, intlen; |
293 | int i, res = -EINVAL; | 293 | int i, res; |
294 | 294 | ||
295 | pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); | 295 | pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); |
296 | 296 | ||
@@ -323,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar | |||
323 | 323 | ||
324 | /* Get size of interrupt specifier */ | 324 | /* Get size of interrupt specifier */ |
325 | tmp = of_get_property(p, "#interrupt-cells", NULL); | 325 | tmp = of_get_property(p, "#interrupt-cells", NULL); |
326 | if (tmp == NULL) | 326 | if (tmp == NULL) { |
327 | res = -EINVAL; | ||
327 | goto out; | 328 | goto out; |
329 | } | ||
328 | intsize = be32_to_cpu(*tmp); | 330 | intsize = be32_to_cpu(*tmp); |
329 | 331 | ||
330 | pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); | 332 | pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); |
331 | 333 | ||
332 | /* Check index */ | 334 | /* Check index */ |
333 | if ((index + 1) * intsize > intlen) | 335 | if ((index + 1) * intsize > intlen) { |
336 | res = -EINVAL; | ||
334 | goto out; | 337 | goto out; |
338 | } | ||
335 | 339 | ||
336 | /* Copy intspec into irq structure */ | 340 | /* Copy intspec into irq structure */ |
337 | intspec += index * intsize; | 341 | intspec += index * intsize; |
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 352b4f28f82c..dee9270ba547 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/idr.h> | ||
22 | 23 | ||
23 | #include "of_private.h" | 24 | #include "of_private.h" |
24 | 25 | ||
@@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, | |||
85 | struct device_node *target, struct device_node *child) | 86 | struct device_node *target, struct device_node *child) |
86 | { | 87 | { |
87 | const char *cname; | 88 | const char *cname; |
88 | struct device_node *tchild, *grandchild; | 89 | struct device_node *tchild; |
89 | int ret = 0; | 90 | int ret = 0; |
90 | 91 | ||
91 | cname = kbasename(child->full_name); | 92 | cname = kbasename(child->full_name); |
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 0cf9a236d438..52c45c7df07f 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c | |||
@@ -92,6 +92,16 @@ static void __init of_selftest_find_node_by_name(void) | |||
92 | "option path test failed\n"); | 92 | "option path test failed\n"); |
93 | of_node_put(np); | 93 | of_node_put(np); |
94 | 94 | ||
95 | np = of_find_node_opts_by_path("/testcase-data:test/option", &options); | ||
96 | selftest(np && !strcmp("test/option", options), | ||
97 | "option path test, subcase #1 failed\n"); | ||
98 | of_node_put(np); | ||
99 | |||
100 | np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options); | ||
101 | selftest(np && !strcmp("test/option", options), | ||
102 | "option path test, subcase #2 failed\n"); | ||
103 | of_node_put(np); | ||
104 | |||
95 | np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); | 105 | np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); |
96 | selftest(np, "NULL option path test failed\n"); | 106 | selftest(np, "NULL option path test failed\n"); |
97 | of_node_put(np); | 107 | of_node_put(np); |
@@ -102,6 +112,12 @@ static void __init of_selftest_find_node_by_name(void) | |||
102 | "option alias path test failed\n"); | 112 | "option alias path test failed\n"); |
103 | of_node_put(np); | 113 | of_node_put(np); |
104 | 114 | ||
115 | np = of_find_node_opts_by_path("testcase-alias:test/alias/option", | ||
116 | &options); | ||
117 | selftest(np && !strcmp("test/alias/option", options), | ||
118 | "option alias path test, subcase #1 failed\n"); | ||
119 | of_node_put(np); | ||
120 | |||
105 | np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); | 121 | np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); |
106 | selftest(np, "NULL option alias path test failed\n"); | 122 | selftest(np, "NULL option alias path test failed\n"); |
107 | of_node_put(np); | 123 | of_node_put(np); |
@@ -378,9 +394,9 @@ static void __init of_selftest_property_string(void) | |||
378 | rc = of_property_match_string(np, "phandle-list-names", "first"); | 394 | rc = of_property_match_string(np, "phandle-list-names", "first"); |
379 | selftest(rc == 0, "first expected:0 got:%i\n", rc); | 395 | selftest(rc == 0, "first expected:0 got:%i\n", rc); |
380 | rc = of_property_match_string(np, "phandle-list-names", "second"); | 396 | rc = of_property_match_string(np, "phandle-list-names", "second"); |
381 | selftest(rc == 1, "second expected:0 got:%i\n", rc); | 397 | selftest(rc == 1, "second expected:1 got:%i\n", rc); |
382 | rc = of_property_match_string(np, "phandle-list-names", "third"); | 398 | rc = of_property_match_string(np, "phandle-list-names", "third"); |
383 | selftest(rc == 2, "third expected:0 got:%i\n", rc); | 399 | selftest(rc == 2, "third expected:2 got:%i\n", rc); |
384 | rc = of_property_match_string(np, "phandle-list-names", "fourth"); | 400 | rc = of_property_match_string(np, "phandle-list-names", "fourth"); |
385 | selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); | 401 | selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); |
386 | rc = of_property_match_string(np, "missing-property", "blah"); | 402 | rc = of_property_match_string(np, "missing-property", "blah"); |
@@ -478,7 +494,6 @@ static void __init of_selftest_changeset(void) | |||
478 | struct device_node *n1, *n2, *n21, *nremove, *parent, *np; | 494 | struct device_node *n1, *n2, *n21, *nremove, *parent, *np; |
479 | struct of_changeset chgset; | 495 | struct of_changeset chgset; |
480 | 496 | ||
481 | of_changeset_init(&chgset); | ||
482 | n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); | 497 | n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); |
483 | selftest(n1, "testcase setup failure\n"); | 498 | selftest(n1, "testcase setup failure\n"); |
484 | n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); | 499 | n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); |
@@ -979,7 +994,7 @@ static int of_path_platform_device_exists(const char *path) | |||
979 | return pdev != NULL; | 994 | return pdev != NULL; |
980 | } | 995 | } |
981 | 996 | ||
982 | #if IS_ENABLED(CONFIG_I2C) | 997 | #if IS_BUILTIN(CONFIG_I2C) |
983 | 998 | ||
984 | /* get the i2c client device instantiated at the path */ | 999 | /* get the i2c client device instantiated at the path */ |
985 | static struct i2c_client *of_path_to_i2c_client(const char *path) | 1000 | static struct i2c_client *of_path_to_i2c_client(const char *path) |
@@ -1445,7 +1460,7 @@ static void of_selftest_overlay_11(void) | |||
1445 | return; | 1460 | return; |
1446 | } | 1461 | } |
1447 | 1462 | ||
1448 | #if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) | 1463 | #if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) |
1449 | 1464 | ||
1450 | struct selftest_i2c_bus_data { | 1465 | struct selftest_i2c_bus_data { |
1451 | struct platform_device *pdev; | 1466 | struct platform_device *pdev; |
@@ -1584,7 +1599,7 @@ static struct i2c_driver selftest_i2c_dev_driver = { | |||
1584 | .id_table = selftest_i2c_dev_id, | 1599 | .id_table = selftest_i2c_dev_id, |
1585 | }; | 1600 | }; |
1586 | 1601 | ||
1587 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1602 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
1588 | 1603 | ||
1589 | struct selftest_i2c_mux_data { | 1604 | struct selftest_i2c_mux_data { |
1590 | int nchans; | 1605 | int nchans; |
@@ -1695,7 +1710,7 @@ static int of_selftest_overlay_i2c_init(void) | |||
1695 | "could not register selftest i2c bus driver\n")) | 1710 | "could not register selftest i2c bus driver\n")) |
1696 | return ret; | 1711 | return ret; |
1697 | 1712 | ||
1698 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1713 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
1699 | ret = i2c_add_driver(&selftest_i2c_mux_driver); | 1714 | ret = i2c_add_driver(&selftest_i2c_mux_driver); |
1700 | if (selftest(ret == 0, | 1715 | if (selftest(ret == 0, |
1701 | "could not register selftest i2c mux driver\n")) | 1716 | "could not register selftest i2c mux driver\n")) |
@@ -1707,7 +1722,7 @@ static int of_selftest_overlay_i2c_init(void) | |||
1707 | 1722 | ||
1708 | static void of_selftest_overlay_i2c_cleanup(void) | 1723 | static void of_selftest_overlay_i2c_cleanup(void) |
1709 | { | 1724 | { |
1710 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1725 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
1711 | i2c_del_driver(&selftest_i2c_mux_driver); | 1726 | i2c_del_driver(&selftest_i2c_mux_driver); |
1712 | #endif | 1727 | #endif |
1713 | platform_driver_unregister(&selftest_i2c_bus_driver); | 1728 | platform_driver_unregister(&selftest_i2c_bus_driver); |
@@ -1814,7 +1829,7 @@ static void __init of_selftest_overlay(void) | |||
1814 | of_selftest_overlay_10(); | 1829 | of_selftest_overlay_10(); |
1815 | of_selftest_overlay_11(); | 1830 | of_selftest_overlay_11(); |
1816 | 1831 | ||
1817 | #if IS_ENABLED(CONFIG_I2C) | 1832 | #if IS_BUILTIN(CONFIG_I2C) |
1818 | if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) | 1833 | if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) |
1819 | goto out; | 1834 | goto out; |
1820 | 1835 | ||
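Editor's note: the IS_ENABLED() to IS_BUILTIN() conversions in the OF selftest hunks above matter because the selftests are always linked into the kernel image. IS_ENABLED() is also true when I2C is built as a module, whose symbols built-in code cannot reference; IS_BUILTIN() is true only for =y. A minimal sketch of the distinction (not part of the patch, names invented):

	#include <linux/kconfig.h>
	#include <linux/init.h>
	#include <linux/printk.h>

	static int __init example_selftest_init(void)
	{
	#if IS_BUILTIN(CONFIG_I2C)
		/* true only for CONFIG_I2C=y: built-in code may call the I2C core */
		pr_info("I2C core is built in, running I2C overlay tests\n");
	#elif IS_MODULE(CONFIG_I2C)
		/* IS_ENABLED() would also be true here, but the symbols live in i2c-core.ko */
		pr_info("I2C core is a module, skipping I2C overlay tests\n");
	#endif
		return 0;
	}
	late_initcall(example_selftest_init);
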
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index aab55474dd0d..ee082c0366ec 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c | |||
@@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) | |||
127 | return false; | 127 | return false; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | 130 | static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, |
131 | int offset) | 131 | int offset) |
132 | { | 132 | { |
133 | struct xgene_pcie_port *port = bus->sysdata; | 133 | struct xgene_pcie_port *port = bus->sysdata; |
@@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | |||
137 | return NULL; | 137 | return NULL; |
138 | 138 | ||
139 | xgene_pcie_set_rtdid_reg(bus, devfn); | 139 | xgene_pcie_set_rtdid_reg(bus, devfn); |
140 | return xgene_pcie_get_cfg_base(bus); | 140 | return xgene_pcie_get_cfg_base(bus) + offset; |
141 | } | 141 | } |
142 | 142 | ||
143 | static struct pci_ops xgene_pcie_ops = { | 143 | static struct pci_ops xgene_pcie_ops = { |
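Editor's note: the X-Gene hunk above adapts the driver to the ->map_bus() convention used by the generic PCI config accessors: the callback returns the ioremapped address of the requested register (base plus offset), or NULL when the device is unreachable, and the core performs the access itself. A hedged sketch under invented names (example_pcie_port, example_pcie_ops), paired with the generic 32-bit accessors:

	#include <linux/pci.h>

	struct example_pcie_port {
		void __iomem *cfg_base;		/* ioremapped configuration space */
	};

	static void __iomem *example_pcie_map_bus(struct pci_bus *bus,
						  unsigned int devfn, int offset)
	{
		struct example_pcie_port *port = bus->sysdata;

		if (!port->cfg_base)
			return NULL;		/* NULL tells the core the access must fail */

		/* return the address of the requested register, not just the base */
		return port->cfg_base + offset;
	}

	static struct pci_ops example_pcie_ops = {
		.map_bus	= example_pcie_map_bus,
		.read		= pci_generic_config_read32,
		.write		= pci_generic_config_write32,
	};
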
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index aa012fb3834b..312f23a8429c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev, | |||
521 | struct pci_dev *pdev = to_pci_dev(dev); | 521 | struct pci_dev *pdev = to_pci_dev(dev); |
522 | char *driver_override, *old = pdev->driver_override, *cp; | 522 | char *driver_override, *old = pdev->driver_override, *cp; |
523 | 523 | ||
524 | if (count > PATH_MAX) | 524 | /* We need to keep extra room for a newline */ |
525 | if (count >= (PAGE_SIZE - 1)) | ||
525 | return -EINVAL; | 526 | return -EINVAL; |
526 | 527 | ||
527 | driver_override = kstrndup(buf, count, GFP_KERNEL); | 528 | driver_override = kstrndup(buf, count, GFP_KERNEL); |
@@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev, | |||
549 | { | 550 | { |
550 | struct pci_dev *pdev = to_pci_dev(dev); | 551 | struct pci_dev *pdev = to_pci_dev(dev); |
551 | 552 | ||
552 | return sprintf(buf, "%s\n", pdev->driver_override); | 553 | return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); |
553 | } | 554 | } |
554 | static DEVICE_ATTR_RW(driver_override); | 555 | static DEVICE_ATTR_RW(driver_override); |
555 | 556 | ||
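Editor's note: the driver_override fix above enforces the sysfs buffer contract: ->show() is handed exactly one page, and the stored string comes back with a trailing newline, so ->store() must refuse anything that would not fit. A minimal sketch of the same pair of checks, with invented names and the usual includes assumed:

	#include <linux/device.h>
	#include <linux/slab.h>

	static char *example_override;

	static ssize_t example_override_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
	{
		char *new, *old = example_override;

		/* keep room for the newline that ->show() appends */
		if (count >= (PAGE_SIZE - 1))
			return -EINVAL;

		new = kstrndup(buf, count, GFP_KERNEL);
		if (!new)
			return -ENOMEM;

		example_override = new;
		kfree(old);
		return count;
	}

	static ssize_t example_override_show(struct device *dev,
					     struct device_attribute *attr, char *buf)
	{
		/* never write past the single page sysfs provides */
		return snprintf(buf, PAGE_SIZE, "%s\n", example_override);
	}
	static DEVICE_ATTR_RW(example_override);
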
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index 3bb49252a098..45f67c63d385 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig | |||
@@ -69,8 +69,7 @@ config YENTA | |||
69 | tristate "CardBus yenta-compatible bridge support" | 69 | tristate "CardBus yenta-compatible bridge support" |
70 | depends on PCI | 70 | depends on PCI |
71 | select CARDBUS if !EXPERT | 71 | select CARDBUS if !EXPERT |
72 | select PCCARD_NONSTATIC if PCMCIA != n && ISA | 72 | select PCCARD_NONSTATIC if PCMCIA != n |
73 | select PCCARD_PCI if PCMCIA !=n && !ISA | ||
74 | ---help--- | 73 | ---help--- |
75 | This option enables support for CardBus host bridges. Virtually | 74 | This option enables support for CardBus host bridges. Virtually |
76 | all modern PCMCIA bridges are CardBus compatible. A "bridge" is | 75 | all modern PCMCIA bridges are CardBus compatible. A "bridge" is |
@@ -110,8 +109,7 @@ config YENTA_TOSHIBA | |||
110 | config PD6729 | 109 | config PD6729 |
111 | tristate "Cirrus PD6729 compatible bridge support" | 110 | tristate "Cirrus PD6729 compatible bridge support" |
112 | depends on PCMCIA && PCI | 111 | depends on PCMCIA && PCI |
113 | select PCCARD_NONSTATIC if PCMCIA != n && ISA | 112 | select PCCARD_NONSTATIC |
114 | select PCCARD_PCI if PCMCIA !=n && !ISA | ||
115 | help | 113 | help |
116 | This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge | 114 | This provides support for the Cirrus PD6729 PCI-to-PCMCIA bridge |
117 | device, found in some older laptops and PCMCIA card readers. | 115 | device, found in some older laptops and PCMCIA card readers. |
@@ -119,8 +117,7 @@ config PD6729 | |||
119 | config I82092 | 117 | config I82092 |
120 | tristate "i82092 compatible bridge support" | 118 | tristate "i82092 compatible bridge support" |
121 | depends on PCMCIA && PCI | 119 | depends on PCMCIA && PCI |
122 | select PCCARD_NONSTATIC if PCMCIA != n && ISA | 120 | select PCCARD_NONSTATIC |
123 | select PCCARD_PCI if PCMCIA !=n && !ISA | ||
124 | help | 121 | help |
125 | This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, | 122 | This provides support for the Intel I82092AA PCI-to-PCMCIA bridge device, |
126 | found in some older laptops and more commonly in evaluation boards for the | 123 | found in some older laptops and more commonly in evaluation boards for the |
@@ -291,9 +288,6 @@ config ELECTRA_CF | |||
291 | Say Y here to support the CompactFlash controller on the | 288 | Say Y here to support the CompactFlash controller on the |
292 | PA Semi Electra eval board. | 289 | PA Semi Electra eval board. |
293 | 290 | ||
294 | config PCCARD_PCI | ||
295 | bool | ||
296 | |||
297 | config PCCARD_NONSTATIC | 291 | config PCCARD_NONSTATIC |
298 | bool | 292 | bool |
299 | 293 | ||
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index f1a7ca04d89e..27e94b30cf96 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile | |||
@@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o | |||
12 | pcmcia_rsrc-y += rsrc_mgr.o | 12 | pcmcia_rsrc-y += rsrc_mgr.o |
13 | pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o | 13 | pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o |
14 | pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o | 14 | pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o |
15 | pcmcia_rsrc-$(CONFIG_PCCARD_PCI) += rsrc_pci.o | ||
16 | obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o | 15 | obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o |
17 | 16 | ||
18 | 17 | ||
diff --git a/drivers/pcmcia/rsrc_pci.c b/drivers/pcmcia/rsrc_pci.c deleted file mode 100644 index 1f67b3ba70fb..000000000000 --- a/drivers/pcmcia/rsrc_pci.c +++ /dev/null | |||
@@ -1,173 +0,0 @@ | |||
1 | #include <linux/slab.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/pci.h> | ||
5 | |||
6 | #include <pcmcia/ss.h> | ||
7 | #include <pcmcia/cistpl.h> | ||
8 | #include "cs_internal.h" | ||
9 | |||
10 | |||
11 | struct pcmcia_align_data { | ||
12 | unsigned long mask; | ||
13 | unsigned long offset; | ||
14 | }; | ||
15 | |||
16 | static resource_size_t pcmcia_align(void *align_data, | ||
17 | const struct resource *res, | ||
18 | resource_size_t size, resource_size_t align) | ||
19 | { | ||
20 | struct pcmcia_align_data *data = align_data; | ||
21 | resource_size_t start; | ||
22 | |||
23 | start = (res->start & ~data->mask) + data->offset; | ||
24 | if (start < res->start) | ||
25 | start += data->mask + 1; | ||
26 | return start; | ||
27 | } | ||
28 | |||
29 | static struct resource *find_io_region(struct pcmcia_socket *s, | ||
30 | unsigned long base, int num, | ||
31 | unsigned long align) | ||
32 | { | ||
33 | struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO, | ||
34 | dev_name(&s->dev)); | ||
35 | struct pcmcia_align_data data; | ||
36 | int ret; | ||
37 | |||
38 | data.mask = align - 1; | ||
39 | data.offset = base & data.mask; | ||
40 | |||
41 | ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, | ||
42 | base, 0, pcmcia_align, &data); | ||
43 | if (ret != 0) { | ||
44 | kfree(res); | ||
45 | res = NULL; | ||
46 | } | ||
47 | return res; | ||
48 | } | ||
49 | |||
50 | static int res_pci_find_io(struct pcmcia_socket *s, unsigned int attr, | ||
51 | unsigned int *base, unsigned int num, | ||
52 | unsigned int align, struct resource **parent) | ||
53 | { | ||
54 | int i, ret = 0; | ||
55 | |||
56 | /* Check for an already-allocated window that must conflict with | ||
57 | * what was asked for. It is a hack because it does not catch all | ||
58 | * potential conflicts, just the most obvious ones. | ||
59 | */ | ||
60 | for (i = 0; i < MAX_IO_WIN; i++) { | ||
61 | if (!s->io[i].res) | ||
62 | continue; | ||
63 | |||
64 | if (!*base) | ||
65 | continue; | ||
66 | |||
67 | if ((s->io[i].res->start & (align-1)) == *base) | ||
68 | return -EBUSY; | ||
69 | } | ||
70 | |||
71 | for (i = 0; i < MAX_IO_WIN; i++) { | ||
72 | struct resource *res = s->io[i].res; | ||
73 | unsigned int try; | ||
74 | |||
75 | if (res && (res->flags & IORESOURCE_BITS) != | ||
76 | (attr & IORESOURCE_BITS)) | ||
77 | continue; | ||
78 | |||
79 | if (!res) { | ||
80 | if (align == 0) | ||
81 | align = 0x10000; | ||
82 | |||
83 | res = s->io[i].res = find_io_region(s, *base, num, | ||
84 | align); | ||
85 | if (!res) | ||
86 | return -EINVAL; | ||
87 | |||
88 | *base = res->start; | ||
89 | s->io[i].res->flags = | ||
90 | ((res->flags & ~IORESOURCE_BITS) | | ||
91 | (attr & IORESOURCE_BITS)); | ||
92 | s->io[i].InUse = num; | ||
93 | *parent = res; | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | /* Try to extend top of window */ | ||
98 | try = res->end + 1; | ||
99 | if ((*base == 0) || (*base == try)) { | ||
100 | ret = adjust_resource(s->io[i].res, res->start, | ||
101 | resource_size(res) + num); | ||
102 | if (ret) | ||
103 | continue; | ||
104 | *base = try; | ||
105 | s->io[i].InUse += num; | ||
106 | *parent = res; | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | /* Try to extend bottom of window */ | ||
111 | try = res->start - num; | ||
112 | if ((*base == 0) || (*base == try)) { | ||
113 | ret = adjust_resource(s->io[i].res, | ||
114 | res->start - num, | ||
115 | resource_size(res) + num); | ||
116 | if (ret) | ||
117 | continue; | ||
118 | *base = try; | ||
119 | s->io[i].InUse += num; | ||
120 | *parent = res; | ||
121 | return 0; | ||
122 | } | ||
123 | } | ||
124 | return -EINVAL; | ||
125 | } | ||
126 | |||
127 | static struct resource *res_pci_find_mem(u_long base, u_long num, | ||
128 | u_long align, int low, struct pcmcia_socket *s) | ||
129 | { | ||
130 | struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM, | ||
131 | dev_name(&s->dev)); | ||
132 | struct pcmcia_align_data data; | ||
133 | unsigned long min; | ||
134 | int ret; | ||
135 | |||
136 | if (align < 0x20000) | ||
137 | align = 0x20000; | ||
138 | data.mask = align - 1; | ||
139 | data.offset = base & data.mask; | ||
140 | |||
141 | min = 0; | ||
142 | if (!low) | ||
143 | min = 0x100000UL; | ||
144 | |||
145 | ret = pci_bus_alloc_resource(s->cb_dev->bus, | ||
146 | res, num, 1, min, 0, | ||
147 | pcmcia_align, &data); | ||
148 | |||
149 | if (ret != 0) { | ||
150 | kfree(res); | ||
151 | res = NULL; | ||
152 | } | ||
153 | return res; | ||
154 | } | ||
155 | |||
156 | |||
157 | static int res_pci_init(struct pcmcia_socket *s) | ||
158 | { | ||
159 | if (!s->cb_dev || !(s->features & SS_CAP_PAGE_REGS)) { | ||
160 | dev_err(&s->dev, "not supported by res_pci\n"); | ||
161 | return -EOPNOTSUPP; | ||
162 | } | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | struct pccard_resource_ops pccard_nonstatic_ops = { | ||
167 | .validate_mem = NULL, | ||
168 | .find_io = res_pci_find_io, | ||
169 | .find_mem = res_pci_find_mem, | ||
170 | .init = res_pci_init, | ||
171 | .exit = NULL, | ||
172 | }; | ||
173 | EXPORT_SYMBOL(pccard_nonstatic_ops); | ||
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c index 7c99ca256f05..8ccc3952c13d 100644 --- a/drivers/phy/phy-armada375-usb2.c +++ b/drivers/phy/phy-armada375-usb2.c | |||
@@ -37,7 +37,7 @@ static int armada375_usb_phy_init(struct phy *phy) | |||
37 | struct armada375_cluster_phy *cluster_phy; | 37 | struct armada375_cluster_phy *cluster_phy; |
38 | u32 reg; | 38 | u32 reg; |
39 | 39 | ||
40 | cluster_phy = dev_get_drvdata(phy->dev.parent); | 40 | cluster_phy = phy_get_drvdata(phy); |
41 | if (!cluster_phy) | 41 | if (!cluster_phy) |
42 | return -ENODEV; | 42 | return -ENODEV; |
43 | 43 | ||
@@ -131,6 +131,7 @@ static int armada375_usb_phy_probe(struct platform_device *pdev) | |||
131 | cluster_phy->reg = usb_cluster_base; | 131 | cluster_phy->reg = usb_cluster_base; |
132 | 132 | ||
133 | dev_set_drvdata(dev, cluster_phy); | 133 | dev_set_drvdata(dev, cluster_phy); |
134 | phy_set_drvdata(phy, cluster_phy); | ||
134 | 135 | ||
135 | phy_provider = devm_of_phy_provider_register(&pdev->dev, | 136 | phy_provider = devm_of_phy_provider_register(&pdev->dev, |
136 | armada375_usb_phy_xlate); | 137 | armada375_usb_phy_xlate); |
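Editor's note: the Armada 375 change pairs phy_set_drvdata() in probe with phy_get_drvdata() in the ops, instead of reaching for the parent device's drvdata. A sketch of that pairing, with example_* names standing in for the real driver:

	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/phy/phy.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct example_phy_priv {
		void __iomem *reg;
	};

	static int example_phy_init(struct phy *phy)
	{
		/* returns exactly what probe stored with phy_set_drvdata() */
		struct example_phy_priv *priv = phy_get_drvdata(phy);

		return priv ? 0 : -ENODEV;
	}

	static const struct phy_ops example_phy_ops = {
		.init	= example_phy_init,
		.owner	= THIS_MODULE,
	};

	static int example_phy_probe(struct platform_device *pdev)
	{
		struct phy_provider *provider;
		struct example_phy_priv *priv;
		struct phy *phy;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		phy = devm_phy_create(&pdev->dev, NULL, &example_phy_ops);
		if (IS_ERR(phy))
			return PTR_ERR(phy);

		dev_set_drvdata(&pdev->dev, priv);	/* provider-level drvdata */
		phy_set_drvdata(phy, priv);		/* per-PHY drvdata used by the ops */

		provider = devm_of_phy_provider_register(&pdev->dev,
							 of_phy_simple_xlate);
		return PTR_ERR_OR_ZERO(provider);
	}
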
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index a12d35338313..3791838f4bd4 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c | |||
@@ -52,7 +52,9 @@ static void devm_phy_consume(struct device *dev, void *res) | |||
52 | 52 | ||
53 | static int devm_phy_match(struct device *dev, void *res, void *match_data) | 53 | static int devm_phy_match(struct device *dev, void *res, void *match_data) |
54 | { | 54 | { |
55 | return res == match_data; | 55 | struct phy **phy = res; |
56 | |||
57 | return *phy == match_data; | ||
56 | } | 58 | } |
57 | 59 | ||
58 | /** | 60 | /** |
@@ -223,6 +225,7 @@ int phy_init(struct phy *phy) | |||
223 | ret = phy_pm_runtime_get_sync(phy); | 225 | ret = phy_pm_runtime_get_sync(phy); |
224 | if (ret < 0 && ret != -ENOTSUPP) | 226 | if (ret < 0 && ret != -ENOTSUPP) |
225 | return ret; | 227 | return ret; |
228 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
226 | 229 | ||
227 | mutex_lock(&phy->mutex); | 230 | mutex_lock(&phy->mutex); |
228 | if (phy->init_count == 0 && phy->ops->init) { | 231 | if (phy->init_count == 0 && phy->ops->init) { |
@@ -231,8 +234,6 @@ int phy_init(struct phy *phy) | |||
231 | dev_err(&phy->dev, "phy init failed --> %d\n", ret); | 234 | dev_err(&phy->dev, "phy init failed --> %d\n", ret); |
232 | goto out; | 235 | goto out; |
233 | } | 236 | } |
234 | } else { | ||
235 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
236 | } | 237 | } |
237 | ++phy->init_count; | 238 | ++phy->init_count; |
238 | 239 | ||
@@ -253,6 +254,7 @@ int phy_exit(struct phy *phy) | |||
253 | ret = phy_pm_runtime_get_sync(phy); | 254 | ret = phy_pm_runtime_get_sync(phy); |
254 | if (ret < 0 && ret != -ENOTSUPP) | 255 | if (ret < 0 && ret != -ENOTSUPP) |
255 | return ret; | 256 | return ret; |
257 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
256 | 258 | ||
257 | mutex_lock(&phy->mutex); | 259 | mutex_lock(&phy->mutex); |
258 | if (phy->init_count == 1 && phy->ops->exit) { | 260 | if (phy->init_count == 1 && phy->ops->exit) { |
@@ -287,6 +289,7 @@ int phy_power_on(struct phy *phy) | |||
287 | ret = phy_pm_runtime_get_sync(phy); | 289 | ret = phy_pm_runtime_get_sync(phy); |
288 | if (ret < 0 && ret != -ENOTSUPP) | 290 | if (ret < 0 && ret != -ENOTSUPP) |
289 | return ret; | 291 | return ret; |
292 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
290 | 293 | ||
291 | mutex_lock(&phy->mutex); | 294 | mutex_lock(&phy->mutex); |
292 | if (phy->power_count == 0 && phy->ops->power_on) { | 295 | if (phy->power_count == 0 && phy->ops->power_on) { |
@@ -295,8 +298,6 @@ int phy_power_on(struct phy *phy) | |||
295 | dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); | 298 | dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); |
296 | goto out; | 299 | goto out; |
297 | } | 300 | } |
298 | } else { | ||
299 | ret = 0; /* Override possible ret == -ENOTSUPP */ | ||
300 | } | 301 | } |
301 | ++phy->power_count; | 302 | ++phy->power_count; |
302 | mutex_unlock(&phy->mutex); | 303 | mutex_unlock(&phy->mutex); |
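Editor's note: the devm_phy_match() fix reflects how devres stores a managed pointer: devres_alloc() hands out a slot that contains the pointer, so a match callback receives the slot and must dereference it before comparing. A generic sketch of the pattern around an invented struct example_res:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct example_res {
		int id;
	};

	static void example_res_put(struct example_res *er)
	{
		/* hypothetical refcount drop for the managed resource */
	}

	static void devm_example_release(struct device *dev, void *res)
	{
		example_res_put(*(struct example_res **)res);
	}

	static int devm_example_match(struct device *dev, void *res, void *match_data)
	{
		struct example_res **ptr = res;

		/* compare the pointer stored in the slot, not the slot itself */
		return *ptr == match_data;
	}

	static int devm_example_get(struct device *dev, struct example_res *er)
	{
		struct example_res **ptr;

		ptr = devres_alloc(devm_example_release, sizeof(*ptr), GFP_KERNEL);
		if (!ptr)
			return -ENOMEM;

		*ptr = er;			/* the devres payload is a pointer slot */
		devres_add(dev, ptr);
		return 0;
	}

	static void devm_example_put(struct device *dev, struct example_res *er)
	{
		WARN_ON(devres_release(dev, devm_example_release,
				       devm_example_match, er));
	}
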
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c index f86cbe68ddaf..179cbf9451aa 100644 --- a/drivers/phy/phy-exynos-dp-video.c +++ b/drivers/phy/phy-exynos-dp-video.c | |||
@@ -30,28 +30,13 @@ struct exynos_dp_video_phy { | |||
30 | const struct exynos_dp_video_phy_drvdata *drvdata; | 30 | const struct exynos_dp_video_phy_drvdata *drvdata; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static void exynos_dp_video_phy_pwr_isol(struct exynos_dp_video_phy *state, | ||
34 | unsigned int on) | ||
35 | { | ||
36 | unsigned int val; | ||
37 | |||
38 | if (IS_ERR(state->regs)) | ||
39 | return; | ||
40 | |||
41 | val = on ? 0 : EXYNOS5_PHY_ENABLE; | ||
42 | |||
43 | regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, | ||
44 | EXYNOS5_PHY_ENABLE, val); | ||
45 | } | ||
46 | |||
47 | static int exynos_dp_video_phy_power_on(struct phy *phy) | 33 | static int exynos_dp_video_phy_power_on(struct phy *phy) |
48 | { | 34 | { |
49 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); | 35 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); |
50 | 36 | ||
51 | /* Disable power isolation on DP-PHY */ | 37 | /* Disable power isolation on DP-PHY */ |
52 | exynos_dp_video_phy_pwr_isol(state, 0); | 38 | return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, |
53 | 39 | EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE); | |
54 | return 0; | ||
55 | } | 40 | } |
56 | 41 | ||
57 | static int exynos_dp_video_phy_power_off(struct phy *phy) | 42 | static int exynos_dp_video_phy_power_off(struct phy *phy) |
@@ -59,9 +44,8 @@ static int exynos_dp_video_phy_power_off(struct phy *phy) | |||
59 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); | 44 | struct exynos_dp_video_phy *state = phy_get_drvdata(phy); |
60 | 45 | ||
61 | /* Enable power isolation on DP-PHY */ | 46 | /* Enable power isolation on DP-PHY */ |
62 | exynos_dp_video_phy_pwr_isol(state, 1); | 47 | return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset, |
63 | 48 | EXYNOS5_PHY_ENABLE, 0); | |
64 | return 0; | ||
65 | } | 49 | } |
66 | 50 | ||
67 | static struct phy_ops exynos_dp_video_phy_ops = { | 51 | static struct phy_ops exynos_dp_video_phy_ops = { |
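Editor's note: the Exynos DP PHY hunk collapses the power on/off paths to a single regmap_update_bits() call each and propagates its return value instead of dropping it. A sketch of that pattern with placeholder register names:

	#include <linux/bitops.h>
	#include <linux/phy/phy.h>
	#include <linux/regmap.h>

	#define EXAMPLE_PHY_CTRL	0x0720		/* placeholder PMU register offset */
	#define EXAMPLE_PHY_ENABLE	BIT(0)		/* placeholder isolation/enable bit */

	struct example_dp_phy {
		struct regmap *regs;
	};

	static int example_dp_phy_power_on(struct phy *phy)
	{
		struct example_dp_phy *state = phy_get_drvdata(phy);

		/* clear power isolation: set the enable bit, propagate any error */
		return regmap_update_bits(state->regs, EXAMPLE_PHY_CTRL,
					  EXAMPLE_PHY_ENABLE, EXAMPLE_PHY_ENABLE);
	}

	static int example_dp_phy_power_off(struct phy *phy)
	{
		struct example_dp_phy *state = phy_get_drvdata(phy);

		/* re-assert power isolation: clear the enable bit */
		return regmap_update_bits(state->regs, EXAMPLE_PHY_CTRL,
					  EXAMPLE_PHY_ENABLE, 0);
	}
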
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c index f017b2f2a54e..df7519a39ba0 100644 --- a/drivers/phy/phy-exynos-mipi-video.c +++ b/drivers/phy/phy-exynos-mipi-video.c | |||
@@ -43,7 +43,6 @@ struct exynos_mipi_video_phy { | |||
43 | } phys[EXYNOS_MIPI_PHYS_NUM]; | 43 | } phys[EXYNOS_MIPI_PHYS_NUM]; |
44 | spinlock_t slock; | 44 | spinlock_t slock; |
45 | void __iomem *regs; | 45 | void __iomem *regs; |
46 | struct mutex mutex; | ||
47 | struct regmap *regmap; | 46 | struct regmap *regmap; |
48 | }; | 47 | }; |
49 | 48 | ||
@@ -59,8 +58,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, | |||
59 | else | 58 | else |
60 | reset = EXYNOS4_MIPI_PHY_SRESETN; | 59 | reset = EXYNOS4_MIPI_PHY_SRESETN; |
61 | 60 | ||
62 | if (state->regmap) { | 61 | spin_lock(&state->slock); |
63 | mutex_lock(&state->mutex); | 62 | |
63 | if (!IS_ERR(state->regmap)) { | ||
64 | regmap_read(state->regmap, offset, &val); | 64 | regmap_read(state->regmap, offset, &val); |
65 | if (on) | 65 | if (on) |
66 | val |= reset; | 66 | val |= reset; |
@@ -72,11 +72,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, | |||
72 | else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) | 72 | else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK)) |
73 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; | 73 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; |
74 | regmap_write(state->regmap, offset, val); | 74 | regmap_write(state->regmap, offset, val); |
75 | mutex_unlock(&state->mutex); | ||
76 | } else { | 75 | } else { |
77 | addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); | 76 | addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2); |
78 | 77 | ||
79 | spin_lock(&state->slock); | ||
80 | val = readl(addr); | 78 | val = readl(addr); |
81 | if (on) | 79 | if (on) |
82 | val |= reset; | 80 | val |= reset; |
@@ -90,9 +88,9 @@ static int __set_phy_state(struct exynos_mipi_video_phy *state, | |||
90 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; | 88 | val &= ~EXYNOS4_MIPI_PHY_ENABLE; |
91 | 89 | ||
92 | writel(val, addr); | 90 | writel(val, addr); |
93 | spin_unlock(&state->slock); | ||
94 | } | 91 | } |
95 | 92 | ||
93 | spin_unlock(&state->slock); | ||
96 | return 0; | 94 | return 0; |
97 | } | 95 | } |
98 | 96 | ||
@@ -158,7 +156,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev) | |||
158 | 156 | ||
159 | dev_set_drvdata(dev, state); | 157 | dev_set_drvdata(dev, state); |
160 | spin_lock_init(&state->slock); | 158 | spin_lock_init(&state->slock); |
161 | mutex_init(&state->mutex); | ||
162 | 159 | ||
163 | for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { | 160 | for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { |
164 | struct phy *phy = devm_phy_create(dev, NULL, | 161 | struct phy *phy = devm_phy_create(dev, NULL, |
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c index 236a52ad94eb..f30bbb0fb3b2 100644 --- a/drivers/phy/phy-exynos4210-usb2.c +++ b/drivers/phy/phy-exynos4210-usb2.c | |||
@@ -250,7 +250,6 @@ static const struct samsung_usb2_common_phy exynos4210_phys[] = { | |||
250 | .power_on = exynos4210_power_on, | 250 | .power_on = exynos4210_power_on, |
251 | .power_off = exynos4210_power_off, | 251 | .power_off = exynos4210_power_off, |
252 | }, | 252 | }, |
253 | {}, | ||
254 | }; | 253 | }; |
255 | 254 | ||
256 | const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { | 255 | const struct samsung_usb2_phy_config exynos4210_usb2_phy_config = { |
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c index 0b9de88579b1..765da90a536f 100644 --- a/drivers/phy/phy-exynos4x12-usb2.c +++ b/drivers/phy/phy-exynos4x12-usb2.c | |||
@@ -361,7 +361,6 @@ static const struct samsung_usb2_common_phy exynos4x12_phys[] = { | |||
361 | .power_on = exynos4x12_power_on, | 361 | .power_on = exynos4x12_power_on, |
362 | .power_off = exynos4x12_power_off, | 362 | .power_off = exynos4x12_power_off, |
363 | }, | 363 | }, |
364 | {}, | ||
365 | }; | 364 | }; |
366 | 365 | ||
367 | const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { | 366 | const struct samsung_usb2_phy_config exynos3250_usb2_phy_config = { |
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c index 04374018425f..e2a0be750ad9 100644 --- a/drivers/phy/phy-exynos5-usbdrd.c +++ b/drivers/phy/phy-exynos5-usbdrd.c | |||
@@ -531,7 +531,7 @@ static struct phy *exynos5_usbdrd_phy_xlate(struct device *dev, | |||
531 | { | 531 | { |
532 | struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); | 532 | struct exynos5_usbdrd_phy *phy_drd = dev_get_drvdata(dev); |
533 | 533 | ||
534 | if (WARN_ON(args->args[0] > EXYNOS5_DRDPHYS_NUM)) | 534 | if (WARN_ON(args->args[0] >= EXYNOS5_DRDPHYS_NUM)) |
535 | return ERR_PTR(-ENODEV); | 535 | return ERR_PTR(-ENODEV); |
536 | 536 | ||
537 | return phy_drd->phys[args->args[0]].phy; | 537 | return phy_drd->phys[args->args[0]].phy; |
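Editor's note: the USB DRD xlate fix closes an off-by-one: with an array of N entries, index N is already out of range, so the guard must use ">=". A sketch of the corrected bounds check with invented names:

	#include <linux/err.h>
	#include <linux/of.h>
	#include <linux/phy/phy.h>

	#define EXAMPLE_NUM_PHYS	2	/* placeholder: e.g. UTMI+ and PIPE3 */

	struct example_drd_phy {
		struct {
			struct phy *phy;
		} phys[EXAMPLE_NUM_PHYS];
	};

	static struct phy *example_drd_xlate(struct device *dev,
					     struct of_phandle_args *args)
	{
		struct example_drd_phy *drd = dev_get_drvdata(dev);

		/* valid indices are 0..EXAMPLE_NUM_PHYS-1; the count itself is out of range */
		if (WARN_ON(args->args[0] >= EXAMPLE_NUM_PHYS))
			return ERR_PTR(-ENODEV);

		return drd->phys[args->args[0]].phy;
	}
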
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c index 1c139aa0d074..2ed1735a076a 100644 --- a/drivers/phy/phy-exynos5250-usb2.c +++ b/drivers/phy/phy-exynos5250-usb2.c | |||
@@ -391,7 +391,6 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = { | |||
391 | .power_on = exynos5250_power_on, | 391 | .power_on = exynos5250_power_on, |
392 | .power_off = exynos5250_power_off, | 392 | .power_off = exynos5250_power_off, |
393 | }, | 393 | }, |
394 | {}, | ||
395 | }; | 394 | }; |
396 | 395 | ||
397 | const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { | 396 | const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { |
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c index 34915b4202f1..d6b22659cac1 100644 --- a/drivers/phy/phy-hix5hd2-sata.c +++ b/drivers/phy/phy-hix5hd2-sata.c | |||
@@ -147,6 +147,9 @@ static int hix5hd2_sata_phy_probe(struct platform_device *pdev) | |||
147 | return -ENOMEM; | 147 | return -ENOMEM; |
148 | 148 | ||
149 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 149 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
150 | if (!res) | ||
151 | return -EINVAL; | ||
152 | |||
150 | priv->base = devm_ioremap(dev, res->start, resource_size(res)); | 153 | priv->base = devm_ioremap(dev, res->start, resource_size(res)); |
151 | if (!priv->base) | 154 | if (!priv->base) |
152 | return -ENOMEM; | 155 | return -ENOMEM; |
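Editor's note: the hix5hd2 hunk adds the missing NULL check before devm_ioremap() dereferences the resource. A sketch of that guard with invented names; devm_ioremap_resource() would fold both checks into one call, but the shape below matches the fix as merged:

	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_sata_phy_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)		/* missing "reg": bail out before dereferencing */
			return -EINVAL;

		base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
		if (!base)
			return -ENOMEM;

		/* ... create and register the PHY using "base" ... */
		return 0;
	}
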
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c index 9b2848e6115d..933435214acc 100644 --- a/drivers/phy/phy-miphy28lp.c +++ b/drivers/phy/phy-miphy28lp.c | |||
@@ -228,6 +228,7 @@ struct miphy28lp_dev { | |||
228 | struct regmap *regmap; | 228 | struct regmap *regmap; |
229 | struct mutex miphy_mutex; | 229 | struct mutex miphy_mutex; |
230 | struct miphy28lp_phy **phys; | 230 | struct miphy28lp_phy **phys; |
231 | int nphys; | ||
231 | }; | 232 | }; |
232 | 233 | ||
233 | struct miphy_initval { | 234 | struct miphy_initval { |
@@ -1116,7 +1117,7 @@ static struct phy *miphy28lp_xlate(struct device *dev, | |||
1116 | return ERR_PTR(-EINVAL); | 1117 | return ERR_PTR(-EINVAL); |
1117 | } | 1118 | } |
1118 | 1119 | ||
1119 | for (index = 0; index < of_get_child_count(dev->of_node); index++) | 1120 | for (index = 0; index < miphy_dev->nphys; index++) |
1120 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { | 1121 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { |
1121 | miphy_phy = miphy_dev->phys[index]; | 1122 | miphy_phy = miphy_dev->phys[index]; |
1122 | break; | 1123 | break; |
@@ -1138,6 +1139,7 @@ static struct phy *miphy28lp_xlate(struct device *dev, | |||
1138 | 1139 | ||
1139 | static struct phy_ops miphy28lp_ops = { | 1140 | static struct phy_ops miphy28lp_ops = { |
1140 | .init = miphy28lp_init, | 1141 | .init = miphy28lp_init, |
1142 | .owner = THIS_MODULE, | ||
1141 | }; | 1143 | }; |
1142 | 1144 | ||
1143 | static int miphy28lp_probe_resets(struct device_node *node, | 1145 | static int miphy28lp_probe_resets(struct device_node *node, |
@@ -1200,16 +1202,15 @@ static int miphy28lp_probe(struct platform_device *pdev) | |||
1200 | struct miphy28lp_dev *miphy_dev; | 1202 | struct miphy28lp_dev *miphy_dev; |
1201 | struct phy_provider *provider; | 1203 | struct phy_provider *provider; |
1202 | struct phy *phy; | 1204 | struct phy *phy; |
1203 | int chancount, port = 0; | 1205 | int ret, port = 0; |
1204 | int ret; | ||
1205 | 1206 | ||
1206 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); | 1207 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); |
1207 | if (!miphy_dev) | 1208 | if (!miphy_dev) |
1208 | return -ENOMEM; | 1209 | return -ENOMEM; |
1209 | 1210 | ||
1210 | chancount = of_get_child_count(np); | 1211 | miphy_dev->nphys = of_get_child_count(np); |
1211 | miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, | 1212 | miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys, |
1212 | GFP_KERNEL); | 1213 | sizeof(*miphy_dev->phys), GFP_KERNEL); |
1213 | if (!miphy_dev->phys) | 1214 | if (!miphy_dev->phys) |
1214 | return -ENOMEM; | 1215 | return -ENOMEM; |
1215 | 1216 | ||
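Editor's note: both MiPHY drivers switch to remembering the child count in the driver state, sizing the pointer array with devm_kcalloc() against the element type, and bounding later lookups by that stored count instead of re-walking the device tree. A sketch with invented names:

	#include <linux/of.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct example_child_phy {
		struct device_node *node;
	};

	struct example_phy_dev {
		struct example_child_phy **phys;
		int nphys;
	};

	static int example_alloc_phys(struct platform_device *pdev,
				      struct example_phy_dev *ed)
	{
		ed->nphys = of_get_child_count(pdev->dev.of_node);
		ed->phys = devm_kcalloc(&pdev->dev, ed->nphys,
					sizeof(*ed->phys), GFP_KERNEL);
		if (!ed->phys)
			return -ENOMEM;

		return 0;
	}

	static struct example_child_phy *example_find_phy(struct example_phy_dev *ed,
							  struct device_node *node)
	{
		int i;

		for (i = 0; i < ed->nphys; i++)		/* bounded by the stored count */
			if (ed->phys[i] && ed->phys[i]->node == node)
				return ed->phys[i];

		return NULL;
	}
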
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c index 6c80154e8bff..51b459db9137 100644 --- a/drivers/phy/phy-miphy365x.c +++ b/drivers/phy/phy-miphy365x.c | |||
@@ -150,6 +150,7 @@ struct miphy365x_dev { | |||
150 | struct regmap *regmap; | 150 | struct regmap *regmap; |
151 | struct mutex miphy_mutex; | 151 | struct mutex miphy_mutex; |
152 | struct miphy365x_phy **phys; | 152 | struct miphy365x_phy **phys; |
153 | int nphys; | ||
153 | }; | 154 | }; |
154 | 155 | ||
155 | /* | 156 | /* |
@@ -485,7 +486,7 @@ static struct phy *miphy365x_xlate(struct device *dev, | |||
485 | return ERR_PTR(-EINVAL); | 486 | return ERR_PTR(-EINVAL); |
486 | } | 487 | } |
487 | 488 | ||
488 | for (index = 0; index < of_get_child_count(dev->of_node); index++) | 489 | for (index = 0; index < miphy_dev->nphys; index++) |
489 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { | 490 | if (phynode == miphy_dev->phys[index]->phy->dev.of_node) { |
490 | miphy_phy = miphy_dev->phys[index]; | 491 | miphy_phy = miphy_dev->phys[index]; |
491 | break; | 492 | break; |
@@ -541,16 +542,15 @@ static int miphy365x_probe(struct platform_device *pdev) | |||
541 | struct miphy365x_dev *miphy_dev; | 542 | struct miphy365x_dev *miphy_dev; |
542 | struct phy_provider *provider; | 543 | struct phy_provider *provider; |
543 | struct phy *phy; | 544 | struct phy *phy; |
544 | int chancount, port = 0; | 545 | int ret, port = 0; |
545 | int ret; | ||
546 | 546 | ||
547 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); | 547 | miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL); |
548 | if (!miphy_dev) | 548 | if (!miphy_dev) |
549 | return -ENOMEM; | 549 | return -ENOMEM; |
550 | 550 | ||
551 | chancount = of_get_child_count(np); | 551 | miphy_dev->nphys = of_get_child_count(np); |
552 | miphy_dev->phys = devm_kzalloc(&pdev->dev, sizeof(phy) * chancount, | 552 | miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys, |
553 | GFP_KERNEL); | 553 | sizeof(*miphy_dev->phys), GFP_KERNEL); |
554 | if (!miphy_dev->phys) | 554 | if (!miphy_dev->phys) |
555 | return -ENOMEM; | 555 | return -ENOMEM; |
556 | 556 | ||
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c index efe724f97e02..93252e053a31 100644 --- a/drivers/phy/phy-omap-control.c +++ b/drivers/phy/phy-omap-control.c | |||
@@ -360,7 +360,7 @@ static void __exit omap_control_phy_exit(void) | |||
360 | } | 360 | } |
361 | module_exit(omap_control_phy_exit); | 361 | module_exit(omap_control_phy_exit); |
362 | 362 | ||
363 | MODULE_ALIAS("platform: omap_control_phy"); | 363 | MODULE_ALIAS("platform:omap_control_phy"); |
364 | MODULE_AUTHOR("Texas Instruments Inc."); | 364 | MODULE_AUTHOR("Texas Instruments Inc."); |
365 | MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); | 365 | MODULE_DESCRIPTION("OMAP Control Module PHY Driver"); |
366 | MODULE_LICENSE("GPL v2"); | 366 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c index 6f4aef3db248..4757e765696a 100644 --- a/drivers/phy/phy-omap-usb2.c +++ b/drivers/phy/phy-omap-usb2.c | |||
@@ -296,10 +296,11 @@ static int omap_usb2_probe(struct platform_device *pdev) | |||
296 | dev_warn(&pdev->dev, | 296 | dev_warn(&pdev->dev, |
297 | "found usb_otg_ss_refclk960m, please fix DTS\n"); | 297 | "found usb_otg_ss_refclk960m, please fix DTS\n"); |
298 | } | 298 | } |
299 | } else { | ||
300 | clk_prepare(phy->optclk); | ||
301 | } | 299 | } |
302 | 300 | ||
301 | if (!IS_ERR(phy->optclk)) | ||
302 | clk_prepare(phy->optclk); | ||
303 | |||
303 | usb_add_phy_dev(&phy->phy); | 304 | usb_add_phy_dev(&phy->phy); |
304 | 305 | ||
305 | return 0; | 306 | return 0; |
@@ -383,7 +384,7 @@ static struct platform_driver omap_usb2_driver = { | |||
383 | 384 | ||
384 | module_platform_driver(omap_usb2_driver); | 385 | module_platform_driver(omap_usb2_driver); |
385 | 386 | ||
386 | MODULE_ALIAS("platform: omap_usb2"); | 387 | MODULE_ALIAS("platform:omap_usb2"); |
387 | MODULE_AUTHOR("Texas Instruments Inc."); | 388 | MODULE_AUTHOR("Texas Instruments Inc."); |
388 | MODULE_DESCRIPTION("OMAP USB2 phy driver"); | 389 | MODULE_DESCRIPTION("OMAP USB2 phy driver"); |
389 | MODULE_LICENSE("GPL v2"); | 390 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c index 22011c3b6a4b..7d4c33643768 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c | |||
@@ -61,8 +61,6 @@ static int rockchip_usb_phy_power_off(struct phy *_phy) | |||
61 | return ret; | 61 | return ret; |
62 | 62 | ||
63 | clk_disable_unprepare(phy->clk); | 63 | clk_disable_unprepare(phy->clk); |
64 | if (ret) | ||
65 | return ret; | ||
66 | 64 | ||
67 | return 0; | 65 | return 0; |
68 | } | 66 | } |
@@ -78,8 +76,10 @@ static int rockchip_usb_phy_power_on(struct phy *_phy) | |||
78 | 76 | ||
79 | /* Power up usb phy analog blocks by set siddq 0 */ | 77 | /* Power up usb phy analog blocks by set siddq 0 */ |
80 | ret = rockchip_usb_phy_power(phy, 0); | 78 | ret = rockchip_usb_phy_power(phy, 0); |
81 | if (ret) | 79 | if (ret) { |
80 | clk_disable_unprepare(phy->clk); | ||
82 | return ret; | 81 | return ret; |
82 | } | ||
83 | 83 | ||
84 | return 0; | 84 | return 0; |
85 | } | 85 | } |
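Editor's note: the Rockchip change is a plain error-path unwind: the clock enabled at the top of power_on must be disabled again when the later register write fails, and the dead check after clk_disable_unprepare() goes away. A sketch, where example_phy_power() is a stand-in for the real register helper:

	#include <linux/clk.h>

	struct example_usb_phy {
		struct clk *clk;
	};

	static int example_phy_power(struct example_usb_phy *phy, bool siddq)
	{
		/* hypothetical: write the SIDDQ bit that gates the analog blocks */
		return 0;
	}

	static int example_phy_power_on(struct example_usb_phy *phy)
	{
		int ret;

		ret = clk_prepare_enable(phy->clk);
		if (ret)
			return ret;

		ret = example_phy_power(phy, false);	/* power up the analog blocks */
		if (ret) {
			clk_disable_unprepare(phy->clk);	/* undo the enable above */
			return ret;
		}

		return 0;
	}
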
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 95c88f929f27..2ba610b72ca2 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c | |||
@@ -165,15 +165,11 @@ static int ti_pipe3_dpll_wait_lock(struct ti_pipe3 *phy) | |||
165 | cpu_relax(); | 165 | cpu_relax(); |
166 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); | 166 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); |
167 | if (val & PLL_LOCK) | 167 | if (val & PLL_LOCK) |
168 | break; | 168 | return 0; |
169 | } while (!time_after(jiffies, timeout)); | 169 | } while (!time_after(jiffies, timeout)); |
170 | 170 | ||
171 | if (!(val & PLL_LOCK)) { | 171 | dev_err(phy->dev, "DPLL failed to lock\n"); |
172 | dev_err(phy->dev, "DPLL failed to lock\n"); | 172 | return -EBUSY; |
173 | return -EBUSY; | ||
174 | } | ||
175 | |||
176 | return 0; | ||
177 | } | 173 | } |
178 | 174 | ||
179 | static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) | 175 | static int ti_pipe3_dpll_program(struct ti_pipe3 *phy) |
@@ -608,7 +604,7 @@ static struct platform_driver ti_pipe3_driver = { | |||
608 | 604 | ||
609 | module_platform_driver(ti_pipe3_driver); | 605 | module_platform_driver(ti_pipe3_driver); |
610 | 606 | ||
611 | MODULE_ALIAS("platform: ti_pipe3"); | 607 | MODULE_ALIAS("platform:ti_pipe3"); |
612 | MODULE_AUTHOR("Texas Instruments Inc."); | 608 | MODULE_AUTHOR("Texas Instruments Inc."); |
613 | MODULE_DESCRIPTION("TI PIPE3 phy driver"); | 609 | MODULE_DESCRIPTION("TI PIPE3 phy driver"); |
614 | MODULE_LICENSE("GPL v2"); | 610 | MODULE_LICENSE("GPL v2"); |
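Editor's note: the PIPE3 hunk flattens the lock-wait loop: success is returned from inside the loop as soon as the bit is set, and the fall-through becomes the single timeout error path. A sketch of the same shape with placeholder register names:

	#include <linux/device.h>
	#include <linux/io.h>
	#include <linux/jiffies.h>

	#define EXAMPLE_PLL_STATUS	0x0004		/* placeholder register offset */
	#define EXAMPLE_PLL_LOCK	BIT(1)		/* placeholder lock bit */

	struct example_pipe3 {
		struct device *dev;
		void __iomem *pll_ctrl_base;
	};

	static int example_dpll_wait_lock(struct example_pipe3 *phy)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(20);
		u32 val;

		do {
			cpu_relax();
			val = readl(phy->pll_ctrl_base + EXAMPLE_PLL_STATUS);
			if (val & EXAMPLE_PLL_LOCK)
				return 0;	/* locked: succeed from inside the loop */
		} while (!time_after(jiffies, timeout));

		dev_err(phy->dev, "DPLL failed to lock\n");	/* single timeout path */
		return -EBUSY;
	}
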
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 8e87f54671f3..bc42d6a8939f 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c | |||
@@ -666,7 +666,6 @@ static int twl4030_usb_probe(struct platform_device *pdev) | |||
666 | twl->dev = &pdev->dev; | 666 | twl->dev = &pdev->dev; |
667 | twl->irq = platform_get_irq(pdev, 0); | 667 | twl->irq = platform_get_irq(pdev, 0); |
668 | twl->vbus_supplied = false; | 668 | twl->vbus_supplied = false; |
669 | twl->linkstat = -EINVAL; | ||
670 | twl->linkstat = OMAP_MUSB_UNKNOWN; | 669 | twl->linkstat = OMAP_MUSB_UNKNOWN; |
671 | 670 | ||
672 | twl->phy.dev = twl->dev; | 671 | twl->phy.dev = twl->dev; |
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c index 29214a36ea28..2263cd010032 100644 --- a/drivers/phy/phy-xgene.c +++ b/drivers/phy/phy-xgene.c | |||
@@ -1704,7 +1704,6 @@ static int xgene_phy_probe(struct platform_device *pdev) | |||
1704 | for (i = 0; i < MAX_LANE; i++) | 1704 | for (i = 0; i < MAX_LANE; i++) |
1705 | ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ | 1705 | ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ |
1706 | 1706 | ||
1707 | ctx->dev = &pdev->dev; | ||
1708 | platform_set_drvdata(pdev, ctx); | 1707 | platform_set_drvdata(pdev, ctx); |
1709 | 1708 | ||
1710 | ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); | 1709 | ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); |
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 5afe03e28b91..2062c224e32f 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c | |||
@@ -66,6 +66,10 @@ | |||
66 | #define BYT_DIR_MASK (BIT(1) | BIT(2)) | 66 | #define BYT_DIR_MASK (BIT(1) | BIT(2)) |
67 | #define BYT_TRIG_MASK (BIT(26) | BIT(25) | BIT(24)) | 67 | #define BYT_TRIG_MASK (BIT(26) | BIT(25) | BIT(24)) |
68 | 68 | ||
69 | #define BYT_CONF0_RESTORE_MASK (BYT_DIRECT_IRQ_EN | BYT_TRIG_MASK | \ | ||
70 | BYT_PIN_MUX) | ||
71 | #define BYT_VAL_RESTORE_MASK (BYT_DIR_MASK | BYT_LEVEL) | ||
72 | |||
69 | #define BYT_NGPIO_SCORE 102 | 73 | #define BYT_NGPIO_SCORE 102 |
70 | #define BYT_NGPIO_NCORE 28 | 74 | #define BYT_NGPIO_NCORE 28 |
71 | #define BYT_NGPIO_SUS 44 | 75 | #define BYT_NGPIO_SUS 44 |
@@ -134,12 +138,18 @@ static struct pinctrl_gpio_range byt_ranges[] = { | |||
134 | }, | 138 | }, |
135 | }; | 139 | }; |
136 | 140 | ||
141 | struct byt_gpio_pin_context { | ||
142 | u32 conf0; | ||
143 | u32 val; | ||
144 | }; | ||
145 | |||
137 | struct byt_gpio { | 146 | struct byt_gpio { |
138 | struct gpio_chip chip; | 147 | struct gpio_chip chip; |
139 | struct platform_device *pdev; | 148 | struct platform_device *pdev; |
140 | spinlock_t lock; | 149 | spinlock_t lock; |
141 | void __iomem *reg_base; | 150 | void __iomem *reg_base; |
142 | struct pinctrl_gpio_range *range; | 151 | struct pinctrl_gpio_range *range; |
152 | struct byt_gpio_pin_context *saved_context; | ||
143 | }; | 153 | }; |
144 | 154 | ||
145 | #define to_byt_gpio(c) container_of(c, struct byt_gpio, chip) | 155 | #define to_byt_gpio(c) container_of(c, struct byt_gpio, chip) |
@@ -158,40 +168,62 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset, | |||
158 | return vg->reg_base + reg_offset + reg; | 168 | return vg->reg_base + reg_offset + reg; |
159 | } | 169 | } |
160 | 170 | ||
161 | static bool is_special_pin(struct byt_gpio *vg, unsigned offset) | 171 | static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset) |
172 | { | ||
173 | void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); | ||
174 | unsigned long flags; | ||
175 | u32 value; | ||
176 | |||
177 | spin_lock_irqsave(&vg->lock, flags); | ||
178 | value = readl(reg); | ||
179 | value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); | ||
180 | writel(value, reg); | ||
181 | spin_unlock_irqrestore(&vg->lock, flags); | ||
182 | } | ||
183 | |||
184 | static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset) | ||
162 | { | 185 | { |
163 | /* SCORE pin 92-93 */ | 186 | /* SCORE pin 92-93 */ |
164 | if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) && | 187 | if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) && |
165 | offset >= 92 && offset <= 93) | 188 | offset >= 92 && offset <= 93) |
166 | return true; | 189 | return 1; |
167 | 190 | ||
168 | /* SUS pin 11-21 */ | 191 | /* SUS pin 11-21 */ |
169 | if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) && | 192 | if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) && |
170 | offset >= 11 && offset <= 21) | 193 | offset >= 11 && offset <= 21) |
171 | return true; | 194 | return 1; |
172 | 195 | ||
173 | return false; | 196 | return 0; |
174 | } | 197 | } |
175 | 198 | ||
176 | static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) | 199 | static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) |
177 | { | 200 | { |
178 | struct byt_gpio *vg = to_byt_gpio(chip); | 201 | struct byt_gpio *vg = to_byt_gpio(chip); |
179 | void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG); | 202 | void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG); |
180 | u32 value; | 203 | u32 value, gpio_mux; |
181 | bool special; | ||
182 | 204 | ||
183 | /* | 205 | /* |
184 | * In most cases, func pin mux 000 means GPIO function. | 206 | * In most cases, func pin mux 000 means GPIO function. |
185 | * But, some pins may have func pin mux 001 represents | 207 | * But, some pins may have func pin mux 001 represents |
186 | * GPIO function. Only allow user to export pin with | 208 | * GPIO function. |
187 | * func pin mux preset as GPIO function by BIOS/FW. | 209 | * |
210 | * Because there are devices out there where some pins were not | ||
211 | * configured correctly we allow changing the mux value from | ||
212 | * request (but print out warning about that). | ||
188 | */ | 213 | */ |
189 | value = readl(reg) & BYT_PIN_MUX; | 214 | value = readl(reg) & BYT_PIN_MUX; |
190 | special = is_special_pin(vg, offset); | 215 | gpio_mux = byt_get_gpio_mux(vg, offset); |
191 | if ((special && value != 1) || (!special && value)) { | 216 | if (WARN_ON(gpio_mux != value)) { |
192 | dev_err(&vg->pdev->dev, | 217 | unsigned long flags; |
193 | "pin %u cannot be used as GPIO.\n", offset); | 218 | |
194 | return -EINVAL; | 219 | spin_lock_irqsave(&vg->lock, flags); |
220 | value = readl(reg) & ~BYT_PIN_MUX; | ||
221 | value |= gpio_mux; | ||
222 | writel(value, reg); | ||
223 | spin_unlock_irqrestore(&vg->lock, flags); | ||
224 | |||
225 | dev_warn(&vg->pdev->dev, | ||
226 | "pin %u forcibly re-configured as GPIO\n", offset); | ||
195 | } | 227 | } |
196 | 228 | ||
197 | pm_runtime_get(&vg->pdev->dev); | 229 | pm_runtime_get(&vg->pdev->dev); |
@@ -202,14 +234,8 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) | |||
202 | static void byt_gpio_free(struct gpio_chip *chip, unsigned offset) | 234 | static void byt_gpio_free(struct gpio_chip *chip, unsigned offset) |
203 | { | 235 | { |
204 | struct byt_gpio *vg = to_byt_gpio(chip); | 236 | struct byt_gpio *vg = to_byt_gpio(chip); |
205 | void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); | ||
206 | u32 value; | ||
207 | |||
208 | /* clear interrupt triggering */ | ||
209 | value = readl(reg); | ||
210 | value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); | ||
211 | writel(value, reg); | ||
212 | 237 | ||
238 | byt_gpio_clear_triggering(vg, offset); | ||
213 | pm_runtime_put(&vg->pdev->dev); | 239 | pm_runtime_put(&vg->pdev->dev); |
214 | } | 240 | } |
215 | 241 | ||
@@ -236,23 +262,13 @@ static int byt_irq_type(struct irq_data *d, unsigned type) | |||
236 | value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | | 262 | value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | |
237 | BYT_TRIG_LVL); | 263 | BYT_TRIG_LVL); |
238 | 264 | ||
239 | switch (type) { | ||
240 | case IRQ_TYPE_LEVEL_HIGH: | ||
241 | value |= BYT_TRIG_LVL; | ||
242 | case IRQ_TYPE_EDGE_RISING: | ||
243 | value |= BYT_TRIG_POS; | ||
244 | break; | ||
245 | case IRQ_TYPE_LEVEL_LOW: | ||
246 | value |= BYT_TRIG_LVL; | ||
247 | case IRQ_TYPE_EDGE_FALLING: | ||
248 | value |= BYT_TRIG_NEG; | ||
249 | break; | ||
250 | case IRQ_TYPE_EDGE_BOTH: | ||
251 | value |= (BYT_TRIG_NEG | BYT_TRIG_POS); | ||
252 | break; | ||
253 | } | ||
254 | writel(value, reg); | 265 | writel(value, reg); |
255 | 266 | ||
267 | if (type & IRQ_TYPE_EDGE_BOTH) | ||
268 | __irq_set_handler_locked(d->irq, handle_edge_irq); | ||
269 | else if (type & IRQ_TYPE_LEVEL_MASK) | ||
270 | __irq_set_handler_locked(d->irq, handle_level_irq); | ||
271 | |||
256 | spin_unlock_irqrestore(&vg->lock, flags); | 272 | spin_unlock_irqrestore(&vg->lock, flags); |
257 | 273 | ||
258 | return 0; | 274 | return 0; |
@@ -410,58 +426,80 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
410 | struct irq_data *data = irq_desc_get_irq_data(desc); | 426 | struct irq_data *data = irq_desc_get_irq_data(desc); |
411 | struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); | 427 | struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); |
412 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 428 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
413 | u32 base, pin, mask; | 429 | u32 base, pin; |
414 | void __iomem *reg; | 430 | void __iomem *reg; |
415 | u32 pending; | 431 | unsigned long pending; |
416 | unsigned virq; | 432 | unsigned virq; |
417 | int looplimit = 0; | ||
418 | 433 | ||
419 | /* check from GPIO controller which pin triggered the interrupt */ | 434 | /* check from GPIO controller which pin triggered the interrupt */ |
420 | for (base = 0; base < vg->chip.ngpio; base += 32) { | 435 | for (base = 0; base < vg->chip.ngpio; base += 32) { |
421 | |||
422 | reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG); | 436 | reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG); |
423 | 437 | pending = readl(reg); | |
424 | while ((pending = readl(reg))) { | 438 | for_each_set_bit(pin, &pending, 32) { |
425 | pin = __ffs(pending); | ||
426 | mask = BIT(pin); | ||
427 | /* Clear before handling so we can't lose an edge */ | ||
428 | writel(mask, reg); | ||
429 | |||
430 | virq = irq_find_mapping(vg->chip.irqdomain, base + pin); | 439 | virq = irq_find_mapping(vg->chip.irqdomain, base + pin); |
431 | generic_handle_irq(virq); | 440 | generic_handle_irq(virq); |
432 | |||
433 | /* In case bios or user sets triggering incorretly a pin | ||
434 | * might remain in "interrupt triggered" state. | ||
435 | */ | ||
436 | if (looplimit++ > 32) { | ||
437 | dev_err(&vg->pdev->dev, | ||
438 | "Gpio %d interrupt flood, disabling\n", | ||
439 | base + pin); | ||
440 | |||
441 | reg = byt_gpio_reg(&vg->chip, base + pin, | ||
442 | BYT_CONF0_REG); | ||
443 | mask = readl(reg); | ||
444 | mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS | | ||
445 | BYT_TRIG_LVL); | ||
446 | writel(mask, reg); | ||
447 | mask = readl(reg); /* flush */ | ||
448 | break; | ||
449 | } | ||
450 | } | 441 | } |
451 | } | 442 | } |
452 | chip->irq_eoi(data); | 443 | chip->irq_eoi(data); |
453 | } | 444 | } |
454 | 445 | ||
446 | static void byt_irq_ack(struct irq_data *d) | ||
447 | { | ||
448 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
449 | struct byt_gpio *vg = to_byt_gpio(gc); | ||
450 | unsigned offset = irqd_to_hwirq(d); | ||
451 | void __iomem *reg; | ||
452 | |||
453 | reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG); | ||
454 | writel(BIT(offset % 32), reg); | ||
455 | } | ||
456 | |||
455 | static void byt_irq_unmask(struct irq_data *d) | 457 | static void byt_irq_unmask(struct irq_data *d) |
456 | { | 458 | { |
459 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
460 | struct byt_gpio *vg = to_byt_gpio(gc); | ||
461 | unsigned offset = irqd_to_hwirq(d); | ||
462 | unsigned long flags; | ||
463 | void __iomem *reg; | ||
464 | u32 value; | ||
465 | |||
466 | spin_lock_irqsave(&vg->lock, flags); | ||
467 | |||
468 | reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); | ||
469 | value = readl(reg); | ||
470 | |||
471 | switch (irqd_get_trigger_type(d)) { | ||
472 | case IRQ_TYPE_LEVEL_HIGH: | ||
473 | value |= BYT_TRIG_LVL; | ||
474 | case IRQ_TYPE_EDGE_RISING: | ||
475 | value |= BYT_TRIG_POS; | ||
476 | break; | ||
477 | case IRQ_TYPE_LEVEL_LOW: | ||
478 | value |= BYT_TRIG_LVL; | ||
479 | case IRQ_TYPE_EDGE_FALLING: | ||
480 | value |= BYT_TRIG_NEG; | ||
481 | break; | ||
482 | case IRQ_TYPE_EDGE_BOTH: | ||
483 | value |= (BYT_TRIG_NEG | BYT_TRIG_POS); | ||
484 | break; | ||
485 | } | ||
486 | |||
487 | writel(value, reg); | ||
488 | |||
489 | spin_unlock_irqrestore(&vg->lock, flags); | ||
457 | } | 490 | } |
458 | 491 | ||
459 | static void byt_irq_mask(struct irq_data *d) | 492 | static void byt_irq_mask(struct irq_data *d) |
460 | { | 493 | { |
494 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
495 | struct byt_gpio *vg = to_byt_gpio(gc); | ||
496 | |||
497 | byt_gpio_clear_triggering(vg, irqd_to_hwirq(d)); | ||
461 | } | 498 | } |
462 | 499 | ||
463 | static struct irq_chip byt_irqchip = { | 500 | static struct irq_chip byt_irqchip = { |
464 | .name = "BYT-GPIO", | 501 | .name = "BYT-GPIO", |
502 | .irq_ack = byt_irq_ack, | ||
465 | .irq_mask = byt_irq_mask, | 503 | .irq_mask = byt_irq_mask, |
466 | .irq_unmask = byt_irq_unmask, | 504 | .irq_unmask = byt_irq_unmask, |
467 | .irq_set_type = byt_irq_type, | 505 | .irq_set_type = byt_irq_type, |
@@ -472,6 +510,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) | |||
472 | { | 510 | { |
473 | void __iomem *reg; | 511 | void __iomem *reg; |
474 | u32 base, value; | 512 | u32 base, value; |
513 | int i; | ||
514 | |||
515 | /* | ||
516 | * Clear interrupt triggers for all pins that are GPIOs and | ||
517 | * do not use direct IRQ mode. This will prevent spurious | ||
518 | * interrupts from misconfigured pins. | ||
519 | */ | ||
520 | for (i = 0; i < vg->chip.ngpio; i++) { | ||
521 | value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG)); | ||
522 | if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) && | ||
523 | !(value & BYT_DIRECT_IRQ_EN)) { | ||
524 | byt_gpio_clear_triggering(vg, i); | ||
525 | dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i); | ||
526 | } | ||
527 | } | ||
475 | 528 | ||
476 | /* clear interrupt status trigger registers */ | 529 | /* clear interrupt status trigger registers */ |
477 | for (base = 0; base < vg->chip.ngpio; base += 32) { | 530 | for (base = 0; base < vg->chip.ngpio; base += 32) { |
@@ -541,6 +594,11 @@ static int byt_gpio_probe(struct platform_device *pdev) | |||
541 | gc->can_sleep = false; | 594 | gc->can_sleep = false; |
542 | gc->dev = dev; | 595 | gc->dev = dev; |
543 | 596 | ||
597 | #ifdef CONFIG_PM_SLEEP | ||
598 | vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio, | ||
599 | sizeof(*vg->saved_context), GFP_KERNEL); | ||
600 | #endif | ||
601 | |||
544 | ret = gpiochip_add(gc); | 602 | ret = gpiochip_add(gc); |
545 | if (ret) { | 603 | if (ret) { |
546 | dev_err(&pdev->dev, "failed adding byt-gpio chip\n"); | 604 | dev_err(&pdev->dev, "failed adding byt-gpio chip\n"); |
@@ -569,6 +627,69 @@ static int byt_gpio_probe(struct platform_device *pdev) | |||
569 | return 0; | 627 | return 0; |
570 | } | 628 | } |
571 | 629 | ||
630 | #ifdef CONFIG_PM_SLEEP | ||
631 | static int byt_gpio_suspend(struct device *dev) | ||
632 | { | ||
633 | struct platform_device *pdev = to_platform_device(dev); | ||
634 | struct byt_gpio *vg = platform_get_drvdata(pdev); | ||
635 | int i; | ||
636 | |||
637 | for (i = 0; i < vg->chip.ngpio; i++) { | ||
638 | void __iomem *reg; | ||
639 | u32 value; | ||
640 | |||
641 | reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG); | ||
642 | value = readl(reg) & BYT_CONF0_RESTORE_MASK; | ||
643 | vg->saved_context[i].conf0 = value; | ||
644 | |||
645 | reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG); | ||
646 | value = readl(reg) & BYT_VAL_RESTORE_MASK; | ||
647 | vg->saved_context[i].val = value; | ||
648 | } | ||
649 | |||
650 | return 0; | ||
651 | } | ||
652 | |||
653 | static int byt_gpio_resume(struct device *dev) | ||
654 | { | ||
655 | struct platform_device *pdev = to_platform_device(dev); | ||
656 | struct byt_gpio *vg = platform_get_drvdata(pdev); | ||
657 | int i; | ||
658 | |||
659 | for (i = 0; i < vg->chip.ngpio; i++) { | ||
660 | void __iomem *reg; | ||
661 | u32 value; | ||
662 | |||
663 | reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG); | ||
664 | value = readl(reg); | ||
665 | if ((value & BYT_CONF0_RESTORE_MASK) != | ||
666 | vg->saved_context[i].conf0) { | ||
667 | value &= ~BYT_CONF0_RESTORE_MASK; | ||
668 | value |= vg->saved_context[i].conf0; | ||
669 | writel(value, reg); | ||
670 | dev_info(dev, "restored pin %d conf0 %#08x", i, value); | ||
671 | } | ||
672 | |||
673 | reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG); | ||
674 | value = readl(reg); | ||
675 | if ((value & BYT_VAL_RESTORE_MASK) != | ||
676 | vg->saved_context[i].val) { | ||
677 | u32 v; | ||
678 | |||
679 | v = value & ~BYT_VAL_RESTORE_MASK; | ||
680 | v |= vg->saved_context[i].val; | ||
681 | if (v != value) { | ||
682 | writel(v, reg); | ||
683 | dev_dbg(dev, "restored pin %d val %#08x\n", | ||
684 | i, v); | ||
685 | } | ||
686 | } | ||
687 | } | ||
688 | |||
689 | return 0; | ||
690 | } | ||
691 | #endif | ||
692 | |||
572 | static int byt_gpio_runtime_suspend(struct device *dev) | 693 | static int byt_gpio_runtime_suspend(struct device *dev) |
573 | { | 694 | { |
574 | return 0; | 695 | return 0; |
@@ -580,8 +701,9 @@ static int byt_gpio_runtime_resume(struct device *dev) | |||
580 | } | 701 | } |
581 | 702 | ||
582 | static const struct dev_pm_ops byt_gpio_pm_ops = { | 703 | static const struct dev_pm_ops byt_gpio_pm_ops = { |
583 | .runtime_suspend = byt_gpio_runtime_suspend, | 704 | SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume) |
584 | .runtime_resume = byt_gpio_runtime_resume, | 705 | SET_RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume, |
706 | NULL) | ||
585 | }; | 707 | }; |
586 | 708 | ||
587 | static const struct acpi_device_id byt_gpio_acpi_match[] = { | 709 | static const struct acpi_device_id byt_gpio_acpi_match[] = { |
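Editor's note: the rewritten Baytrail handler snapshots the 32-bit status register once per bank, walks it with for_each_set_bit(), and leaves acknowledging to the new ->irq_ack() callback of the flow handler installed in ->irq_set_type(). A sketch of that demux loop with invented names and a placeholder register layout:

	#include <linux/bitops.h>
	#include <linux/io.h>
	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	#define EXAMPLE_INT_STAT(base)	(0x800 + ((base) / 32) * 4)	/* placeholder */

	struct example_gpio {
		void __iomem *reg_base;
		struct irq_domain *domain;
		unsigned int ngpio;
	};

	static void example_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
	{
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct example_gpio *eg = irq_desc_get_handler_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);
		unsigned long pending;
		unsigned int base, pin;

		for (base = 0; base < eg->ngpio; base += 32) {
			/* snapshot the status register once per bank */
			pending = readl(eg->reg_base + EXAMPLE_INT_STAT(base));
			for_each_set_bit(pin, &pending, 32)
				generic_handle_irq(irq_find_mapping(eg->domain,
								    base + pin));
		}

		chip->irq_eoi(data);	/* per-pin ack is left to ->irq_ack() */
	}
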
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 3034fd03bced..82f691eeeec4 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
@@ -1226,6 +1226,7 @@ static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
1226 | static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | 1226 | static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset, |
1227 | int value) | 1227 | int value) |
1228 | { | 1228 | { |
1229 | chv_gpio_set(chip, offset, value); | ||
1229 | return pinctrl_gpio_direction_output(chip->base + offset); | 1230 | return pinctrl_gpio_direction_output(chip->base + offset); |
1230 | } | 1231 | } |
1231 | 1232 | ||
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index f4cd0b9b2438..a4814066ea08 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c | |||
@@ -1477,28 +1477,25 @@ static void gpio_irq_ack(struct irq_data *d) | |||
1477 | /* the interrupt is already cleared before by reading ISR */ | 1477 | /* the interrupt is already cleared before by reading ISR */ |
1478 | } | 1478 | } |
1479 | 1479 | ||
1480 | static unsigned int gpio_irq_startup(struct irq_data *d) | 1480 | static int gpio_irq_request_res(struct irq_data *d) |
1481 | { | 1481 | { |
1482 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); | 1482 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); |
1483 | unsigned pin = d->hwirq; | 1483 | unsigned pin = d->hwirq; |
1484 | int ret; | 1484 | int ret; |
1485 | 1485 | ||
1486 | ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin); | 1486 | ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin); |
1487 | if (ret) { | 1487 | if (ret) |
1488 | dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n", | 1488 | dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n", |
1489 | d->hwirq); | 1489 | d->hwirq); |
1490 | return ret; | 1490 | |
1491 | } | 1491 | return ret; |
1492 | gpio_irq_unmask(d); | ||
1493 | return 0; | ||
1494 | } | 1492 | } |
1495 | 1493 | ||
1496 | static void gpio_irq_shutdown(struct irq_data *d) | 1494 | static void gpio_irq_release_res(struct irq_data *d) |
1497 | { | 1495 | { |
1498 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); | 1496 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); |
1499 | unsigned pin = d->hwirq; | 1497 | unsigned pin = d->hwirq; |
1500 | 1498 | ||
1501 | gpio_irq_mask(d); | ||
1502 | gpiochip_unlock_as_irq(&at91_gpio->chip, pin); | 1499 | gpiochip_unlock_as_irq(&at91_gpio->chip, pin); |
1503 | } | 1500 | } |
1504 | 1501 | ||
@@ -1577,8 +1574,8 @@ void at91_pinctrl_gpio_resume(void) | |||
1577 | static struct irq_chip gpio_irqchip = { | 1574 | static struct irq_chip gpio_irqchip = { |
1578 | .name = "GPIO", | 1575 | .name = "GPIO", |
1579 | .irq_ack = gpio_irq_ack, | 1576 | .irq_ack = gpio_irq_ack, |
1580 | .irq_startup = gpio_irq_startup, | 1577 | .irq_request_resources = gpio_irq_request_res, |
1581 | .irq_shutdown = gpio_irq_shutdown, | 1578 | .irq_release_resources = gpio_irq_release_res, |
1582 | .irq_disable = gpio_irq_mask, | 1579 | .irq_disable = gpio_irq_mask, |
1583 | .irq_mask = gpio_irq_mask, | 1580 | .irq_mask = gpio_irq_mask, |
1584 | .irq_unmask = gpio_irq_unmask, | 1581 | .irq_unmask = gpio_irq_unmask, |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c index 24c5d88f943f..3c68a8e5e0dd 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c | |||
@@ -1011,6 +1011,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = { | |||
1011 | .pins = sun4i_a10_pins, | 1011 | .pins = sun4i_a10_pins, |
1012 | .npins = ARRAY_SIZE(sun4i_a10_pins), | 1012 | .npins = ARRAY_SIZE(sun4i_a10_pins), |
1013 | .irq_banks = 1, | 1013 | .irq_banks = 1, |
1014 | .irq_read_needs_mux = true, | ||
1014 | }; | 1015 | }; |
1015 | 1016 | ||
1016 | static int sun4i_a10_pinctrl_probe(struct platform_device *pdev) | 1017 | static int sun4i_a10_pinctrl_probe(struct platform_device *pdev) |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 3d0744337736..f8e171b76693 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | 30 | ||
31 | #include "../core.h" | 31 | #include "../core.h" |
32 | #include "../../gpio/gpiolib.h" | ||
32 | #include "pinctrl-sunxi.h" | 33 | #include "pinctrl-sunxi.h" |
33 | 34 | ||
34 | static struct irq_chip sunxi_pinctrl_edge_irq_chip; | 35 | static struct irq_chip sunxi_pinctrl_edge_irq_chip; |
@@ -464,10 +465,19 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip, | |||
464 | static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset) | 465 | static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset) |
465 | { | 466 | { |
466 | struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); | 467 | struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); |
467 | |||
468 | u32 reg = sunxi_data_reg(offset); | 468 | u32 reg = sunxi_data_reg(offset); |
469 | u8 index = sunxi_data_offset(offset); | 469 | u8 index = sunxi_data_offset(offset); |
470 | u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK; | 470 | u32 set_mux = pctl->desc->irq_read_needs_mux && |
471 | test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); | ||
472 | u32 val; | ||
473 | |||
474 | if (set_mux) | ||
475 | sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT); | ||
476 | |||
477 | val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK; | ||
478 | |||
479 | if (set_mux) | ||
480 | sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ); | ||
471 | 481 | ||
472 | return val; | 482 | return val; |
473 | } | 483 | } |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index 5a51523a3459..e248e81a0f9e 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h | |||
@@ -77,6 +77,9 @@ | |||
77 | #define IRQ_LEVEL_LOW 0x03 | 77 | #define IRQ_LEVEL_LOW 0x03 |
78 | #define IRQ_EDGE_BOTH 0x04 | 78 | #define IRQ_EDGE_BOTH 0x04 |
79 | 79 | ||
80 | #define SUN4I_FUNC_INPUT 0 | ||
81 | #define SUN4I_FUNC_IRQ 6 | ||
82 | |||
80 | struct sunxi_desc_function { | 83 | struct sunxi_desc_function { |
81 | const char *name; | 84 | const char *name; |
82 | u8 muxval; | 85 | u8 muxval; |
@@ -94,6 +97,7 @@ struct sunxi_pinctrl_desc { | |||
94 | int npins; | 97 | int npins; |
95 | unsigned pin_base; | 98 | unsigned pin_base; |
96 | unsigned irq_banks; | 99 | unsigned irq_banks; |
100 | bool irq_read_needs_mux; | ||
97 | }; | 101 | }; |
98 | 102 | ||
99 | struct sunxi_pinctrl_function { | 103 | struct sunxi_pinctrl_function { |
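The sunxi change above reads back GPIO state for pins that are currently muxed as interrupts: when irq_read_needs_mux is set, the pin is temporarily switched to the plain input function, the data register is sampled, and the IRQ function is restored. A rough standalone sketch of that save/flip/restore pattern, with an invented mux model standing in for sunxi_pmx_set() and the real registers:

#include <stdio.h>
#include <stdbool.h>

#define FUNC_INPUT 0   /* mirrors SUN4I_FUNC_INPUT */
#define FUNC_IRQ   6   /* mirrors SUN4I_FUNC_IRQ   */

/* Invented model of one pin: a mux selector and a sampled input level. */
struct pin_model {
	unsigned mux;
	bool level;
	bool used_as_irq;
	bool data_readable_while_irq;   /* the quirk the flag works around */
};

static bool pin_read_level(struct pin_model *p)
{
	/* On the quirky controller the data register is only valid in the
	 * input function, so pretend reads while muxed as IRQ return 0. */
	if (p->mux != FUNC_INPUT && !p->data_readable_while_irq)
		return false;
	return p->level;
}

static bool gpio_get(struct pin_model *p, bool irq_read_needs_mux)
{
	bool set_mux = irq_read_needs_mux && p->used_as_irq;
	bool val;

	if (set_mux)
		p->mux = FUNC_INPUT;   /* flip to input so data is valid */

	val = pin_read_level(p);

	if (set_mux)
		p->mux = FUNC_IRQ;     /* restore the interrupt function */

	return val;
}

int main(void)
{
	struct pin_model p = {
		.mux = FUNC_IRQ, .level = true,
		.used_as_irq = true, .data_readable_while_irq = false,
	};

	printf("without workaround: %d\n", gpio_get(&p, false));
	printf("with workaround:    %d\n", gpio_get(&p, true));
	return 0;
}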
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 97b5e4ee1ca4..63d4033eb683 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c | |||
@@ -73,7 +73,7 @@ | |||
73 | 73 | ||
74 | #define TIME_WINDOW_MAX_MSEC 40000 | 74 | #define TIME_WINDOW_MAX_MSEC 40000 |
75 | #define TIME_WINDOW_MIN_MSEC 250 | 75 | #define TIME_WINDOW_MIN_MSEC 250 |
76 | 76 | #define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */ | |
77 | enum unit_type { | 77 | enum unit_type { |
78 | ARBITRARY_UNIT, /* no translation */ | 78 | ARBITRARY_UNIT, /* no translation */ |
79 | POWER_UNIT, | 79 | POWER_UNIT, |
@@ -158,6 +158,7 @@ struct rapl_domain { | |||
158 | struct rapl_power_limit rpl[NR_POWER_LIMITS]; | 158 | struct rapl_power_limit rpl[NR_POWER_LIMITS]; |
159 | u64 attr_map; /* track capabilities */ | 159 | u64 attr_map; /* track capabilities */ |
160 | unsigned int state; | 160 | unsigned int state; |
161 | unsigned int domain_energy_unit; | ||
161 | int package_id; | 162 | int package_id; |
162 | }; | 163 | }; |
163 | #define power_zone_to_rapl_domain(_zone) \ | 164 | #define power_zone_to_rapl_domain(_zone) \ |
@@ -190,6 +191,7 @@ struct rapl_defaults { | |||
190 | void (*set_floor_freq)(struct rapl_domain *rd, bool mode); | 191 | void (*set_floor_freq)(struct rapl_domain *rd, bool mode); |
191 | u64 (*compute_time_window)(struct rapl_package *rp, u64 val, | 192 | u64 (*compute_time_window)(struct rapl_package *rp, u64 val, |
192 | bool to_raw); | 193 | bool to_raw); |
194 | unsigned int dram_domain_energy_unit; | ||
193 | }; | 195 | }; |
194 | static struct rapl_defaults *rapl_defaults; | 196 | static struct rapl_defaults *rapl_defaults; |
195 | 197 | ||
@@ -227,7 +229,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd, | |||
227 | static int rapl_write_data_raw(struct rapl_domain *rd, | 229 | static int rapl_write_data_raw(struct rapl_domain *rd, |
228 | enum rapl_primitives prim, | 230 | enum rapl_primitives prim, |
229 | unsigned long long value); | 231 | unsigned long long value); |
230 | static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | 232 | static u64 rapl_unit_xlate(struct rapl_domain *rd, int package, |
233 | enum unit_type type, u64 value, | ||
231 | int to_raw); | 234 | int to_raw); |
232 | static void package_power_limit_irq_save(int package_id); | 235 | static void package_power_limit_irq_save(int package_id); |
233 | 236 | ||
@@ -305,7 +308,9 @@ static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw) | |||
305 | 308 | ||
306 | static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) | 309 | static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy) |
307 | { | 310 | { |
308 | *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); | 311 | struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev); |
312 | |||
313 | *energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0); | ||
309 | return 0; | 314 | return 0; |
310 | } | 315 | } |
311 | 316 | ||
@@ -639,6 +644,11 @@ static void rapl_init_domains(struct rapl_package *rp) | |||
639 | rd->msrs[4] = MSR_DRAM_POWER_INFO; | 644 | rd->msrs[4] = MSR_DRAM_POWER_INFO; |
640 | rd->rpl[0].prim_id = PL1_ENABLE; | 645 | rd->rpl[0].prim_id = PL1_ENABLE; |
641 | rd->rpl[0].name = pl1_name; | 646 | rd->rpl[0].name = pl1_name; |
647 | rd->domain_energy_unit = | ||
648 | rapl_defaults->dram_domain_energy_unit; | ||
649 | if (rd->domain_energy_unit) | ||
650 | pr_info("DRAM domain energy unit %dpj\n", | ||
651 | rd->domain_energy_unit); | ||
642 | break; | 652 | break; |
643 | } | 653 | } |
644 | if (mask) { | 654 | if (mask) { |
@@ -648,11 +658,13 @@ static void rapl_init_domains(struct rapl_package *rp) | |||
648 | } | 658 | } |
649 | } | 659 | } |
650 | 660 | ||
651 | static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | 661 | static u64 rapl_unit_xlate(struct rapl_domain *rd, int package, |
662 | enum unit_type type, u64 value, | ||
652 | int to_raw) | 663 | int to_raw) |
653 | { | 664 | { |
654 | u64 units = 1; | 665 | u64 units = 1; |
655 | struct rapl_package *rp; | 666 | struct rapl_package *rp; |
667 | u64 scale = 1; | ||
656 | 668 | ||
657 | rp = find_package_by_id(package); | 669 | rp = find_package_by_id(package); |
658 | if (!rp) | 670 | if (!rp) |
@@ -663,7 +675,12 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | |||
663 | units = rp->power_unit; | 675 | units = rp->power_unit; |
664 | break; | 676 | break; |
665 | case ENERGY_UNIT: | 677 | case ENERGY_UNIT: |
666 | units = rp->energy_unit; | 678 | scale = ENERGY_UNIT_SCALE; |
679 | /* per domain unit takes precedence */ | ||
680 | if (rd && rd->domain_energy_unit) | ||
681 | units = rd->domain_energy_unit; | ||
682 | else | ||
683 | units = rp->energy_unit; | ||
667 | break; | 684 | break; |
668 | case TIME_UNIT: | 685 | case TIME_UNIT: |
669 | return rapl_defaults->compute_time_window(rp, value, to_raw); | 686 | return rapl_defaults->compute_time_window(rp, value, to_raw); |
@@ -673,11 +690,11 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value, | |||
673 | }; | 690 | }; |
674 | 691 | ||
675 | if (to_raw) | 692 | if (to_raw) |
676 | return div64_u64(value, units); | 693 | return div64_u64(value, units) * scale; |
677 | 694 | ||
678 | value *= units; | 695 | value *= units; |
679 | 696 | ||
680 | return value; | 697 | return div64_u64(value, scale); |
681 | } | 698 | } |
682 | 699 | ||
683 | /* in the order of enum rapl_primitives */ | 700 | /* in the order of enum rapl_primitives */ |
@@ -773,7 +790,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd, | |||
773 | final = value & rp->mask; | 790 | final = value & rp->mask; |
774 | final = final >> rp->shift; | 791 | final = final >> rp->shift; |
775 | if (xlate) | 792 | if (xlate) |
776 | *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0); | 793 | *data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0); |
777 | else | 794 | else |
778 | *data = final; | 795 | *data = final; |
779 | 796 | ||
@@ -799,7 +816,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd, | |||
799 | "failed to read msr 0x%x on cpu %d\n", msr, cpu); | 816 | "failed to read msr 0x%x on cpu %d\n", msr, cpu); |
800 | return -EIO; | 817 | return -EIO; |
801 | } | 818 | } |
802 | value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1); | 819 | value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1); |
803 | msr_val &= ~rp->mask; | 820 | msr_val &= ~rp->mask; |
804 | msr_val |= value << rp->shift; | 821 | msr_val |= value << rp->shift; |
805 | if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { | 822 | if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) { |
@@ -818,7 +835,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd, | |||
818 | * calculate units differ on different CPUs. | 835 | * calculate units differ on different CPUs. |
819 | * We convert the units to below format based on CPUs. | 836 | * We convert the units to below format based on CPUs. |
820 | * i.e. | 837 | * i.e. |
821 | * energy unit: microJoules : Represented in microJoules by default | 838 | * energy unit: picoJoules : Represented in picoJoules by default |
822 | * power unit : microWatts : Represented in milliWatts by default | 839 | * power unit : microWatts : Represented in milliWatts by default |
823 | * time unit : microseconds: Represented in seconds by default | 840 | * time unit : microseconds: Represented in seconds by default |
824 | */ | 841 | */ |
@@ -834,7 +851,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu) | |||
834 | } | 851 | } |
835 | 852 | ||
836 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; | 853 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; |
837 | rp->energy_unit = 1000000 / (1 << value); | 854 | rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value); |
838 | 855 | ||
839 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; | 856 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; |
840 | rp->power_unit = 1000000 / (1 << value); | 857 | rp->power_unit = 1000000 / (1 << value); |
@@ -842,7 +859,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu) | |||
842 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; | 859 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; |
843 | rp->time_unit = 1000000 / (1 << value); | 860 | rp->time_unit = 1000000 / (1 << value); |
844 | 861 | ||
845 | pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n", | 862 | pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n", |
846 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); | 863 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); |
847 | 864 | ||
848 | return 0; | 865 | return 0; |
@@ -859,7 +876,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu) | |||
859 | return -ENODEV; | 876 | return -ENODEV; |
860 | } | 877 | } |
861 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; | 878 | value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET; |
862 | rp->energy_unit = 1 << value; | 879 | rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value; |
863 | 880 | ||
864 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; | 881 | value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET; |
865 | rp->power_unit = (1 << value) * 1000; | 882 | rp->power_unit = (1 << value) * 1000; |
@@ -867,7 +884,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu) | |||
867 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; | 884 | value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET; |
868 | rp->time_unit = 1000000 / (1 << value); | 885 | rp->time_unit = 1000000 / (1 << value); |
869 | 886 | ||
870 | pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n", | 887 | pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n", |
871 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); | 888 | rp->id, rp->energy_unit, rp->time_unit, rp->power_unit); |
872 | 889 | ||
873 | return 0; | 890 | return 0; |
@@ -1017,6 +1034,13 @@ static const struct rapl_defaults rapl_defaults_core = { | |||
1017 | .compute_time_window = rapl_compute_time_window_core, | 1034 | .compute_time_window = rapl_compute_time_window_core, |
1018 | }; | 1035 | }; |
1019 | 1036 | ||
1037 | static const struct rapl_defaults rapl_defaults_hsw_server = { | ||
1038 | .check_unit = rapl_check_unit_core, | ||
1039 | .set_floor_freq = set_floor_freq_default, | ||
1040 | .compute_time_window = rapl_compute_time_window_core, | ||
1041 | .dram_domain_energy_unit = 15300, | ||
1042 | }; | ||
1043 | |||
1020 | static const struct rapl_defaults rapl_defaults_atom = { | 1044 | static const struct rapl_defaults rapl_defaults_atom = { |
1021 | .check_unit = rapl_check_unit_atom, | 1045 | .check_unit = rapl_check_unit_atom, |
1022 | .set_floor_freq = set_floor_freq_atom, | 1046 | .set_floor_freq = set_floor_freq_atom, |
@@ -1037,7 +1061,7 @@ static const struct x86_cpu_id rapl_ids[] = { | |||
1037 | RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ | 1061 | RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ |
1038 | RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ | 1062 | RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ |
1039 | RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ | 1063 | RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ |
1040 | RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */ | 1064 | RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */ |
1041 | RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ | 1065 | RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ |
1042 | RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ | 1066 | RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ |
1043 | RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ | 1067 | RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ |
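The intel_rapl changes above store the per-count energy unit a factor of ENERGY_UNIT_SCALE (1000) finer than before and divide that scale back out when reporting, which trims the truncation error of 1000000 / 2^N, and they let a domain override the package-wide unit so Haswell-EP DRAM domains can use their fixed unit (15300 in the scaled representation). A small standalone sketch of the translation arithmetic, with plain 64-bit division standing in for div64_u64():

#include <stdio.h>
#include <stdint.h>

#define ENERGY_UNIT_SCALE 1000ULL   /* same factor as the driver */

/* Raw counter -> user value: multiply by the (scaled) per-count unit,
 * then divide the scale back out, as rapl_unit_xlate() does. */
static uint64_t raw_to_user(uint64_t raw, uint64_t unit_scaled)
{
	return raw * unit_scaled / ENERGY_UNIT_SCALE;
}

/* User value -> raw counter: the inverse direction used on writes. */
static uint64_t user_to_raw(uint64_t value, uint64_t unit_scaled)
{
	return value / unit_scaled * ENERGY_UNIT_SCALE;
}

int main(void)
{
	/* Typical package unit: 1/2^14 J per count.  Scaled, it keeps three
	 * more digits than the old 1000000 >> 14 = 61 truncation.          */
	uint64_t pkg_unit  = ENERGY_UNIT_SCALE * 1000000 / (1 << 14);  /* 61035 */
	/* Fixed Haswell-EP DRAM unit from rapl_defaults_hsw_server.        */
	uint64_t dram_unit = 15300;

	printf("pkg:  100000 counts -> %llu\n",
	       (unsigned long long)raw_to_user(100000, pkg_unit));
	printf("dram: 100000 counts -> %llu\n",
	       (unsigned long long)raw_to_user(100000, dram_unit));
	printf("write of 6103500 -> raw %llu\n",
	       (unsigned long long)user_to_raw(6103500, pkg_unit));
	return 0;
}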
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b899947d839d..a4a8a6dc60c4 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -1839,10 +1839,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev) | |||
1839 | } | 1839 | } |
1840 | 1840 | ||
1841 | if (rdev->ena_pin) { | 1841 | if (rdev->ena_pin) { |
1842 | ret = regulator_ena_gpio_ctrl(rdev, true); | 1842 | if (!rdev->ena_gpio_state) { |
1843 | if (ret < 0) | 1843 | ret = regulator_ena_gpio_ctrl(rdev, true); |
1844 | return ret; | 1844 | if (ret < 0) |
1845 | rdev->ena_gpio_state = 1; | 1845 | return ret; |
1846 | rdev->ena_gpio_state = 1; | ||
1847 | } | ||
1846 | } else if (rdev->desc->ops->enable) { | 1848 | } else if (rdev->desc->ops->enable) { |
1847 | ret = rdev->desc->ops->enable(rdev); | 1849 | ret = rdev->desc->ops->enable(rdev); |
1848 | if (ret < 0) | 1850 | if (ret < 0) |
@@ -1939,10 +1941,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev) | |||
1939 | trace_regulator_disable(rdev_get_name(rdev)); | 1941 | trace_regulator_disable(rdev_get_name(rdev)); |
1940 | 1942 | ||
1941 | if (rdev->ena_pin) { | 1943 | if (rdev->ena_pin) { |
1942 | ret = regulator_ena_gpio_ctrl(rdev, false); | 1944 | if (rdev->ena_gpio_state) { |
1943 | if (ret < 0) | 1945 | ret = regulator_ena_gpio_ctrl(rdev, false); |
1944 | return ret; | 1946 | if (ret < 0) |
1945 | rdev->ena_gpio_state = 0; | 1947 | return ret; |
1948 | rdev->ena_gpio_state = 0; | ||
1949 | } | ||
1946 | 1950 | ||
1947 | } else if (rdev->desc->ops->disable) { | 1951 | } else if (rdev->desc->ops->disable) { |
1948 | ret = rdev->desc->ops->disable(rdev); | 1952 | ret = rdev->desc->ops->disable(rdev); |
@@ -3444,13 +3448,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj, | |||
3444 | if (attr == &dev_attr_requested_microamps.attr) | 3448 | if (attr == &dev_attr_requested_microamps.attr) |
3445 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; | 3449 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; |
3446 | 3450 | ||
3447 | /* all the other attributes exist to support constraints; | ||
3448 | * don't show them if there are no constraints, or if the | ||
3449 | * relevant supporting methods are missing. | ||
3450 | */ | ||
3451 | if (!rdev->constraints) | ||
3452 | return 0; | ||
3453 | |||
3454 | /* constraints need specific supporting methods */ | 3451 | /* constraints need specific supporting methods */ |
3455 | if (attr == &dev_attr_min_microvolts.attr || | 3452 | if (attr == &dev_attr_min_microvolts.attr || |
3456 | attr == &dev_attr_max_microvolts.attr) | 3453 | attr == &dev_attr_max_microvolts.attr) |
@@ -3633,12 +3630,6 @@ regulator_register(const struct regulator_desc *regulator_desc, | |||
3633 | config->ena_gpio, ret); | 3630 | config->ena_gpio, ret); |
3634 | goto wash; | 3631 | goto wash; |
3635 | } | 3632 | } |
3636 | |||
3637 | if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH) | ||
3638 | rdev->ena_gpio_state = 1; | ||
3639 | |||
3640 | if (config->ena_gpio_invert) | ||
3641 | rdev->ena_gpio_state = !rdev->ena_gpio_state; | ||
3642 | } | 3633 | } |
3643 | 3634 | ||
3644 | /* set regulator constraints */ | 3635 | /* set regulator constraints */ |
@@ -3807,9 +3798,11 @@ int regulator_suspend_finish(void) | |||
3807 | list_for_each_entry(rdev, ®ulator_list, list) { | 3798 | list_for_each_entry(rdev, ®ulator_list, list) { |
3808 | mutex_lock(&rdev->mutex); | 3799 | mutex_lock(&rdev->mutex); |
3809 | if (rdev->use_count > 0 || rdev->constraints->always_on) { | 3800 | if (rdev->use_count > 0 || rdev->constraints->always_on) { |
3810 | error = _regulator_do_enable(rdev); | 3801 | if (!_regulator_is_enabled(rdev)) { |
3811 | if (error) | 3802 | error = _regulator_do_enable(rdev); |
3812 | ret = error; | 3803 | if (error) |
3804 | ret = error; | ||
3805 | } | ||
3813 | } else { | 3806 | } else { |
3814 | if (!have_full_constraints()) | 3807 | if (!have_full_constraints()) |
3815 | goto unlock; | 3808 | goto unlock; |
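The regulator core changes above turn ena_gpio_state into a cache of what was last programmed: the enable GPIO is only toggled when the cached state actually differs, the stale initialisation from ena_gpio_flags is dropped, and suspend_finish only re-enables supplies that are not already on. A toy sketch of that "cache the last programmed state and skip redundant writes" pattern; gpio_write() is an invented stand-in for regulator_ena_gpio_ctrl():

#include <stdio.h>
#include <stdbool.h>

struct rdev_model {
	bool ena_gpio_state;    /* last state programmed into the GPIO */
};

static int writes;          /* counts how many real GPIO writes happen */

static int gpio_write(bool on)
{
	writes++;
	printf("gpio <- %d\n", on);
	return 0;
}

static int do_enable(struct rdev_model *r)
{
	if (!r->ena_gpio_state) {       /* only touch the pin on a real change */
		int ret = gpio_write(true);
		if (ret < 0)
			return ret;
		r->ena_gpio_state = true;
	}
	return 0;
}

static int do_disable(struct rdev_model *r)
{
	if (r->ena_gpio_state) {
		int ret = gpio_write(false);
		if (ret < 0)
			return ret;
		r->ena_gpio_state = false;
	}
	return 0;
}

int main(void)
{
	struct rdev_model r = { .ena_gpio_state = false };

	do_enable(&r);
	do_enable(&r);    /* second enable is a no-op on the GPIO */
	do_disable(&r);
	do_disable(&r);   /* as is the second disable */
	printf("GPIO writes: %d\n", writes);   /* 2, not 4 */
	return 0;
}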
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index bc6100103f7f..f0489cb9018b 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c | |||
@@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c, | |||
152 | config.regmap = chip->regmap; | 152 | config.regmap = chip->regmap; |
153 | config.of_node = dev->of_node; | 153 | config.of_node = dev->of_node; |
154 | 154 | ||
155 | /* Mask all interrupt sources to deassert interrupt line */ | ||
156 | error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0); | ||
157 | if (!error) | ||
158 | error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0); | ||
159 | if (error) { | ||
160 | dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error); | ||
161 | return error; | ||
162 | } | ||
163 | |||
155 | rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); | 164 | rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); |
156 | if (IS_ERR(rdev)) { | 165 | if (IS_ERR(rdev)) { |
157 | dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); | 166 | dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); |
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index 9205f433573c..18198316b6cf 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c | |||
@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev) | |||
1572 | if (!pmic) | 1572 | if (!pmic) |
1573 | return -ENOMEM; | 1573 | return -ENOMEM; |
1574 | 1574 | ||
1575 | if (of_device_is_compatible(node, "ti,tps659038-pmic")) | ||
1576 | palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr = | ||
1577 | TPS659038_REGEN2_CTRL; | ||
1578 | |||
1575 | pmic->dev = &pdev->dev; | 1579 | pmic->dev = &pdev->dev; |
1576 | pmic->palmas = palmas; | 1580 | pmic->palmas = palmas; |
1577 | palmas->pmic = pmic; | 1581 | palmas->pmic = pmic; |
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 1f93b752a81c..3fd44353cc80 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c | |||
@@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
235 | .vsel_mask = RK808_LDO_VSEL_MASK, | 235 | .vsel_mask = RK808_LDO_VSEL_MASK, |
236 | .enable_reg = RK808_LDO_EN_REG, | 236 | .enable_reg = RK808_LDO_EN_REG, |
237 | .enable_mask = BIT(0), | 237 | .enable_mask = BIT(0), |
238 | .enable_time = 400, | ||
238 | .owner = THIS_MODULE, | 239 | .owner = THIS_MODULE, |
239 | }, { | 240 | }, { |
240 | .name = "LDO_REG2", | 241 | .name = "LDO_REG2", |
@@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
249 | .vsel_mask = RK808_LDO_VSEL_MASK, | 250 | .vsel_mask = RK808_LDO_VSEL_MASK, |
250 | .enable_reg = RK808_LDO_EN_REG, | 251 | .enable_reg = RK808_LDO_EN_REG, |
251 | .enable_mask = BIT(1), | 252 | .enable_mask = BIT(1), |
253 | .enable_time = 400, | ||
252 | .owner = THIS_MODULE, | 254 | .owner = THIS_MODULE, |
253 | }, { | 255 | }, { |
254 | .name = "LDO_REG3", | 256 | .name = "LDO_REG3", |
@@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
263 | .vsel_mask = RK808_BUCK4_VSEL_MASK, | 265 | .vsel_mask = RK808_BUCK4_VSEL_MASK, |
264 | .enable_reg = RK808_LDO_EN_REG, | 266 | .enable_reg = RK808_LDO_EN_REG, |
265 | .enable_mask = BIT(2), | 267 | .enable_mask = BIT(2), |
268 | .enable_time = 400, | ||
266 | .owner = THIS_MODULE, | 269 | .owner = THIS_MODULE, |
267 | }, { | 270 | }, { |
268 | .name = "LDO_REG4", | 271 | .name = "LDO_REG4", |
@@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
277 | .vsel_mask = RK808_LDO_VSEL_MASK, | 280 | .vsel_mask = RK808_LDO_VSEL_MASK, |
278 | .enable_reg = RK808_LDO_EN_REG, | 281 | .enable_reg = RK808_LDO_EN_REG, |
279 | .enable_mask = BIT(3), | 282 | .enable_mask = BIT(3), |
283 | .enable_time = 400, | ||
280 | .owner = THIS_MODULE, | 284 | .owner = THIS_MODULE, |
281 | }, { | 285 | }, { |
282 | .name = "LDO_REG5", | 286 | .name = "LDO_REG5", |
@@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
291 | .vsel_mask = RK808_LDO_VSEL_MASK, | 295 | .vsel_mask = RK808_LDO_VSEL_MASK, |
292 | .enable_reg = RK808_LDO_EN_REG, | 296 | .enable_reg = RK808_LDO_EN_REG, |
293 | .enable_mask = BIT(4), | 297 | .enable_mask = BIT(4), |
298 | .enable_time = 400, | ||
294 | .owner = THIS_MODULE, | 299 | .owner = THIS_MODULE, |
295 | }, { | 300 | }, { |
296 | .name = "LDO_REG6", | 301 | .name = "LDO_REG6", |
@@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
305 | .vsel_mask = RK808_LDO_VSEL_MASK, | 310 | .vsel_mask = RK808_LDO_VSEL_MASK, |
306 | .enable_reg = RK808_LDO_EN_REG, | 311 | .enable_reg = RK808_LDO_EN_REG, |
307 | .enable_mask = BIT(5), | 312 | .enable_mask = BIT(5), |
313 | .enable_time = 400, | ||
308 | .owner = THIS_MODULE, | 314 | .owner = THIS_MODULE, |
309 | }, { | 315 | }, { |
310 | .name = "LDO_REG7", | 316 | .name = "LDO_REG7", |
@@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
319 | .vsel_mask = RK808_LDO_VSEL_MASK, | 325 | .vsel_mask = RK808_LDO_VSEL_MASK, |
320 | .enable_reg = RK808_LDO_EN_REG, | 326 | .enable_reg = RK808_LDO_EN_REG, |
321 | .enable_mask = BIT(6), | 327 | .enable_mask = BIT(6), |
328 | .enable_time = 400, | ||
322 | .owner = THIS_MODULE, | 329 | .owner = THIS_MODULE, |
323 | }, { | 330 | }, { |
324 | .name = "LDO_REG8", | 331 | .name = "LDO_REG8", |
@@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
333 | .vsel_mask = RK808_LDO_VSEL_MASK, | 340 | .vsel_mask = RK808_LDO_VSEL_MASK, |
334 | .enable_reg = RK808_LDO_EN_REG, | 341 | .enable_reg = RK808_LDO_EN_REG, |
335 | .enable_mask = BIT(7), | 342 | .enable_mask = BIT(7), |
343 | .enable_time = 400, | ||
336 | .owner = THIS_MODULE, | 344 | .owner = THIS_MODULE, |
337 | }, { | 345 | }, { |
338 | .name = "SWITCH_REG1", | 346 | .name = "SWITCH_REG1", |
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index e2cffe01b807..fb991ec76423 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/err.h> | 19 | #include <linux/err.h> |
20 | #include <linux/of.h> | ||
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
21 | #include <linux/regulator/driver.h> | 22 | #include <linux/regulator/driver.h> |
22 | #include <linux/regulator/machine.h> | 23 | #include <linux/regulator/machine.h> |
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 92f6af6da699..73354ee27877 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c | |||
@@ -951,6 +951,7 @@ static int rpmsg_probe(struct virtio_device *vdev) | |||
951 | void *bufs_va; | 951 | void *bufs_va; |
952 | int err = 0, i; | 952 | int err = 0, i; |
953 | size_t total_buf_space; | 953 | size_t total_buf_space; |
954 | bool notify; | ||
954 | 955 | ||
955 | vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); | 956 | vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); |
956 | if (!vrp) | 957 | if (!vrp) |
@@ -1030,8 +1031,22 @@ static int rpmsg_probe(struct virtio_device *vdev) | |||
1030 | } | 1031 | } |
1031 | } | 1032 | } |
1032 | 1033 | ||
1034 | /* | ||
1035 | * Prepare to kick but don't notify yet - we can't do this before | ||
1036 | * device is ready. | ||
1037 | */ | ||
1038 | notify = virtqueue_kick_prepare(vrp->rvq); | ||
1039 | |||
1040 | /* From this point on, we can notify and get callbacks. */ | ||
1041 | virtio_device_ready(vdev); | ||
1042 | |||
1033 | /* tell the remote processor it can start sending messages */ | 1043 | /* tell the remote processor it can start sending messages */ |
1034 | virtqueue_kick(vrp->rvq); | 1044 | /* |
1045 | * this might be concurrent with callbacks, but we are only | ||
1046 | * doing notify, not a full kick here, so that's ok. | ||
1047 | */ | ||
1048 | if (notify) | ||
1049 | virtqueue_notify(vrp->rvq); | ||
1035 | 1050 | ||
1036 | dev_info(&vdev->dev, "rpmsg host is online\n"); | 1051 | dev_info(&vdev->dev, "rpmsg host is online\n"); |
1037 | 1052 | ||
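The rpmsg probe reordering above splits the old virtqueue_kick() into virtqueue_kick_prepare() before the device is marked ready and virtqueue_notify() after virtio_device_ready(), so the decision to notify is made under the pre-ready state but the remote side is never poked before callbacks are allowed. A schematic standalone sketch of that prepare/ready/notify split, with stubbed functions standing in for the virtio API:

#include <stdio.h>
#include <stdbool.h>

/* All of these are stand-ins for the virtio core, just to show ordering. */
static bool device_ready;

static bool vq_kick_prepare(void)
{
	/* Decide whether the other side needs a notification; doing this
	 * before the device is ready avoids notifying too early. */
	return true;
}

static void vdev_ready(void)
{
	device_ready = true;           /* from here on callbacks may run */
	puts("device ready, callbacks enabled");
}

static void vq_notify(void)
{
	if (!device_ready) {
		puts("BUG: notified before ready");
		return;
	}
	puts("remote processor notified");
}

int main(void)
{
	bool notify = vq_kick_prepare();   /* prepare, but don't notify yet */

	vdev_ready();                      /* flip the ready state first    */

	if (notify)                        /* now it is safe to notify      */
		vq_notify();
	return 0;
}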
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index b4f7744f6751..b283a1a573b3 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -324,7 +324,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) | |||
324 | 324 | ||
325 | ret = IRQ_HANDLED; | 325 | ret = IRQ_HANDLED; |
326 | } | 326 | } |
327 | spin_lock(&suspended_lock); | 327 | spin_unlock(&suspended_lock); |
328 | 328 | ||
329 | return ret; | 329 | return ret; |
330 | } | 330 | } |
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index e2436d140175..3a6fd3a8a2ec 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c | |||
@@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev) | |||
413 | mrst->dev = NULL; | 413 | mrst->dev = NULL; |
414 | } | 414 | } |
415 | 415 | ||
416 | #ifdef CONFIG_PM | 416 | #ifdef CONFIG_PM_SLEEP |
417 | static int mrst_suspend(struct device *dev, pm_message_t mesg) | 417 | static int mrst_suspend(struct device *dev) |
418 | { | 418 | { |
419 | struct mrst_rtc *mrst = dev_get_drvdata(dev); | 419 | struct mrst_rtc *mrst = dev_get_drvdata(dev); |
420 | unsigned char tmp; | 420 | unsigned char tmp; |
@@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg) | |||
453 | */ | 453 | */ |
454 | static inline int mrst_poweroff(struct device *dev) | 454 | static inline int mrst_poweroff(struct device *dev) |
455 | { | 455 | { |
456 | return mrst_suspend(dev, PMSG_HIBERNATE); | 456 | return mrst_suspend(dev); |
457 | } | 457 | } |
458 | 458 | ||
459 | static int mrst_resume(struct device *dev) | 459 | static int mrst_resume(struct device *dev) |
@@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev) | |||
490 | return 0; | 490 | return 0; |
491 | } | 491 | } |
492 | 492 | ||
493 | static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume); | ||
494 | #define MRST_PM_OPS (&mrst_pm_ops) | ||
495 | |||
493 | #else | 496 | #else |
494 | #define mrst_suspend NULL | 497 | #define MRST_PM_OPS NULL |
495 | #define mrst_resume NULL | ||
496 | 498 | ||
497 | static inline int mrst_poweroff(struct device *dev) | 499 | static inline int mrst_poweroff(struct device *dev) |
498 | { | 500 | { |
@@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = { | |||
529 | .remove = vrtc_mrst_platform_remove, | 531 | .remove = vrtc_mrst_platform_remove, |
530 | .shutdown = vrtc_mrst_platform_shutdown, | 532 | .shutdown = vrtc_mrst_platform_shutdown, |
531 | .driver = { | 533 | .driver = { |
532 | .name = (char *) driver_name, | 534 | .name = driver_name, |
533 | .suspend = mrst_suspend, | 535 | .pm = MRST_PM_OPS, |
534 | .resume = mrst_resume, | ||
535 | } | 536 | } |
536 | }; | 537 | }; |
537 | 538 | ||
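The rtc-mrst conversion above replaces the legacy driver .suspend/.resume pair (which took a pm_message_t) with a dev_pm_ops table built by SIMPLE_DEV_PM_OPS under CONFIG_PM_SLEEP, falling back to a NULL .pm pointer when sleep support is compiled out. A hypothetical userspace model of that "one ops table or NULL" pattern; the names are invented and are not the kernel API:

#include <stdio.h>
#include <stddef.h>

struct dev_pm_ops_model {            /* stand-in for struct dev_pm_ops */
	int (*suspend)(void);
	int (*resume)(void);
};

#ifdef HAVE_PM_SLEEP                 /* stand-in for CONFIG_PM_SLEEP */
static int model_suspend(void) { puts("suspend"); return 0; }
static int model_resume(void)  { puts("resume");  return 0; }

static const struct dev_pm_ops_model model_pm_ops = {
	.suspend = model_suspend,    /* SIMPLE_DEV_PM_OPS fills the two   */
	.resume  = model_resume,     /* system-sleep slots much like this */
};
#define MODEL_PM_OPS (&model_pm_ops)
#else
#define MODEL_PM_OPS NULL            /* no sleep support: core sees NULL */
#endif

int main(void)
{
	const struct dev_pm_ops_model *pm = MODEL_PM_OPS;

	/* The core only dereferences the table when it exists. */
	if (pm && pm->suspend)
		pm->suspend();
	if (pm && pm->resume)
		pm->resume();
	return 0;
}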
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4241eeab3386..f4cf6851fae9 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = { | |||
849 | 849 | ||
850 | static struct s3c_rtc_data const s3c6410_rtc_data = { | 850 | static struct s3c_rtc_data const s3c6410_rtc_data = { |
851 | .max_user_freq = 32768, | 851 | .max_user_freq = 32768, |
852 | .needs_src_clk = true, | ||
852 | .irq_handler = s3c6410_rtc_irq, | 853 | .irq_handler = s3c6410_rtc_irq, |
853 | .set_freq = s3c6410_rtc_setfreq, | 854 | .set_freq = s3c6410_rtc_setfreq, |
854 | .enable_tick = s3c6410_rtc_enable_tick, | 855 | .enable_tick = s3c6410_rtc_enable_tick, |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 96128cb009f3..da212813f2d5 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
547 | * parse input | 547 | * parse input |
548 | */ | 548 | */ |
549 | num_of_segments = 0; | 549 | num_of_segments = 0; |
550 | for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { | 550 | for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) { |
551 | for (j = i; (buf[j] != ':') && | 551 | for (j = i; (buf[j] != ':') && |
552 | (buf[j] != '\0') && | 552 | (buf[j] != '\0') && |
553 | (buf[j] != '\n') && | 553 | (buf[j] != '\n') && |
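The dcssblk loop fix above is a pure evaluation-order change: the bound check i < count must come before buf[i] is dereferenced, otherwise the parser can read one byte past a buffer that is not NUL-terminated. A tiny sketch of the same short-circuit ordering:

#include <stdio.h>
#include <stddef.h>

/* Count characters up to count, '\0' or '\n', checking the bound first so
 * buf[i] is never evaluated for an out-of-range i (&& short-circuits). */
static size_t parse_len(const char *buf, size_t count)
{
	size_t i;

	for (i = 0; i < count && buf[i] != '\0' && buf[i] != '\n'; i++)
		;
	return i;
}

int main(void)
{
	char input[4] = { 's', 'e', 'g', '1' };   /* deliberately not NUL-terminated */

	/* Safe: stops at count even though there is no terminator. */
	printf("parsed %zu of %zu bytes\n",
	       parse_len(input, sizeof(input)), sizeof(input));
	return 0;
}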
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 09db45296eed..7497ddde2dd6 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c | |||
@@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq) | |||
92 | add = 0; | 92 | add = 0; |
93 | continue; | 93 | continue; |
94 | } | 94 | } |
95 | for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { | 95 | for (pos = 0; pos < iter->aob->request.msb_count; pos++) { |
96 | if (clusters_intersect(req, iter->request[pos]) && | 96 | if (clusters_intersect(req, iter->request[pos]) && |
97 | (rq_data_dir(req) == WRITE || | 97 | (rq_data_dir(req) == WRITE || |
98 | rq_data_dir(iter->request[pos]) == WRITE)) { | 98 | rq_data_dir(iter->request[pos]) == WRITE)) { |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 9219953ee949..d9afc51af7d3 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = { | |||
6815 | }; | 6815 | }; |
6816 | 6816 | ||
6817 | static struct ata_port_info sata_port_info = { | 6817 | static struct ata_port_info sata_port_info = { |
6818 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, | 6818 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | |
6819 | ATA_FLAG_SAS_HOST, | ||
6819 | .pio_mask = ATA_PIO4_ONLY, | 6820 | .pio_mask = ATA_PIO4_ONLY, |
6820 | .mwdma_mask = ATA_MWDMA2, | 6821 | .mwdma_mask = ATA_MWDMA2, |
6821 | .udma_mask = ATA_UDMA6, | 6822 | .udma_mask = ATA_UDMA6, |
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 932d9cc98d2f..9c706d8c1441 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = { | |||
547 | }; | 547 | }; |
548 | 548 | ||
549 | static struct ata_port_info sata_port_info = { | 549 | static struct ata_port_info sata_port_info = { |
550 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ, | 550 | .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | |
551 | ATA_FLAG_SAS_HOST, | ||
551 | .pio_mask = ATA_PIO4, | 552 | .pio_mask = ATA_PIO4, |
552 | .mwdma_mask = ATA_MWDMA2, | 553 | .mwdma_mask = ATA_MWDMA2, |
553 | .udma_mask = ATA_UDMA6, | 554 | .udma_mask = ATA_UDMA6, |
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 62b58d38ce2e..60de66252fa2 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c | |||
@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
500 | struct sas_discovery_event *ev = to_sas_discovery_event(work); | 500 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
501 | struct asd_sas_port *port = ev->port; | 501 | struct asd_sas_port *port = ev->port; |
502 | struct sas_ha_struct *ha = port->ha; | 502 | struct sas_ha_struct *ha = port->ha; |
503 | struct domain_device *ddev = port->port_dev; | ||
503 | 504 | ||
504 | /* prevent revalidation from finding sata links in recovery */ | 505 | /* prevent revalidation from finding sata links in recovery */ |
505 | mutex_lock(&ha->disco_mutex); | 506 | mutex_lock(&ha->disco_mutex); |
@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
514 | SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, | 515 | SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, |
515 | task_pid_nr(current)); | 516 | task_pid_nr(current)); |
516 | 517 | ||
517 | if (port->port_dev) | 518 | if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || |
518 | res = sas_ex_revalidate_domain(port->port_dev); | 519 | ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) |
520 | res = sas_ex_revalidate_domain(ddev); | ||
519 | 521 | ||
520 | SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", | 522 | SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", |
521 | port->id, task_pid_nr(current), res); | 523 | port->id, task_pid_nr(current), res); |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 99f43b7fc9ab..ab4879e12ea7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -1596,7 +1596,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( | |||
1596 | /* | 1596 | /* |
1597 | * Finally register the new FC Nexus with TCM | 1597 | * Finally register the new FC Nexus with TCM |
1598 | */ | 1598 | */ |
1599 | __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); | 1599 | transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); |
1600 | 1600 | ||
1601 | return 0; | 1601 | return 0; |
1602 | } | 1602 | } |
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 9af7841f2e8c..06de34001c66 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master, | |||
764 | (unsigned long long)xfer->rx_dma); | 764 | (unsigned long long)xfer->rx_dma); |
765 | } | 765 | } |
766 | 766 | ||
767 | /* REVISIT: We're waiting for ENDRX before we start the next | 767 | /* REVISIT: We're waiting for RXBUFF before we start the next |
768 | * transfer because we need to handle some difficult timing | 768 | * transfer because we need to handle some difficult timing |
769 | * issues otherwise. If we wait for ENDTX in one transfer and | 769 | * issues otherwise. If we wait for TXBUFE in one transfer and |
770 | * then starts waiting for ENDRX in the next, it's difficult | 770 | * then starts waiting for RXBUFF in the next, it's difficult |
771 | * to tell the difference between the ENDRX interrupt we're | 771 | * to tell the difference between the RXBUFF interrupt we're |
772 | * actually waiting for and the ENDRX interrupt of the | 772 | * actually waiting for and the RXBUFF interrupt of the |
773 | * previous transfer. | 773 | * previous transfer. |
774 | * | 774 | * |
775 | * It should be doable, though. Just not now... | 775 | * It should be doable, though. Just not now... |
776 | */ | 776 | */ |
777 | spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); | 777 | spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES)); |
778 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); | 778 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); |
779 | } | 779 | } |
780 | 780 | ||
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index a0197fd4e95c..4f8c798e0633 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg) | |||
108 | { | 108 | { |
109 | struct dw_spi *dws = arg; | 109 | struct dw_spi *dws = arg; |
110 | 110 | ||
111 | if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY)) | 111 | clear_bit(TX_BUSY, &dws->dma_chan_busy); |
112 | if (test_bit(RX_BUSY, &dws->dma_chan_busy)) | ||
112 | return; | 113 | return; |
113 | dw_spi_xfer_done(dws); | 114 | dw_spi_xfer_done(dws); |
114 | } | 115 | } |
@@ -139,6 +140,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws) | |||
139 | 1, | 140 | 1, |
140 | DMA_MEM_TO_DEV, | 141 | DMA_MEM_TO_DEV, |
141 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 142 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
143 | if (!txdesc) | ||
144 | return NULL; | ||
145 | |||
142 | txdesc->callback = dw_spi_dma_tx_done; | 146 | txdesc->callback = dw_spi_dma_tx_done; |
143 | txdesc->callback_param = dws; | 147 | txdesc->callback_param = dws; |
144 | 148 | ||
@@ -153,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg) | |||
153 | { | 157 | { |
154 | struct dw_spi *dws = arg; | 158 | struct dw_spi *dws = arg; |
155 | 159 | ||
156 | if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY)) | 160 | clear_bit(RX_BUSY, &dws->dma_chan_busy); |
161 | if (test_bit(TX_BUSY, &dws->dma_chan_busy)) | ||
157 | return; | 162 | return; |
158 | dw_spi_xfer_done(dws); | 163 | dw_spi_xfer_done(dws); |
159 | } | 164 | } |
@@ -184,6 +189,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws) | |||
184 | 1, | 189 | 1, |
185 | DMA_DEV_TO_MEM, | 190 | DMA_DEV_TO_MEM, |
186 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 191 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
192 | if (!rxdesc) | ||
193 | return NULL; | ||
194 | |||
187 | rxdesc->callback = dw_spi_dma_rx_done; | 195 | rxdesc->callback = dw_spi_dma_rx_done; |
188 | rxdesc->callback_param = dws; | 196 | rxdesc->callback_param = dws; |
189 | 197 | ||
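Two things change in spi-dw-mid above: the prepared DMA descriptors are checked for NULL before their callbacks are filled in, and each completion handler now clears only its own busy bit and finishes the transfer only when the other direction has already completed (the old test_and_clear_bit(...) & BIT(...) expression compared the wrong values). A rough sequential sketch of that "last one out signals completion" bookkeeping, ignoring the atomicity the kernel gets from set_bit/clear_bit:

#include <stdio.h>
#include <stdbool.h>

struct xfer_model {
	bool tx_busy;
	bool rx_busy;
};

static void xfer_done(void)
{
	puts("transfer complete");
}

static void tx_done(struct xfer_model *x)      /* TX DMA callback */
{
	x->tx_busy = false;            /* clear our own flag first        */
	if (x->rx_busy)                /* other side still running: wait  */
		return;
	xfer_done();                   /* we were last, signal completion */
}

static void rx_done(struct xfer_model *x)      /* RX DMA callback */
{
	x->rx_busy = false;
	if (x->tx_busy)
		return;
	xfer_done();
}

int main(void)
{
	struct xfer_model x = { .tx_busy = true, .rx_busy = true };

	tx_done(&x);   /* RX still busy, nothing happens   */
	rx_done(&x);   /* both done, completion fires once */
	return 0;
}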
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index 5ba331047cbe..6d331e0db331 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c | |||
@@ -36,13 +36,13 @@ struct spi_pci_desc { | |||
36 | 36 | ||
37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { | 37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { |
38 | .setup = dw_spi_mid_init, | 38 | .setup = dw_spi_mid_init, |
39 | .num_cs = 32, | 39 | .num_cs = 5, |
40 | .bus_num = 0, | 40 | .bus_num = 0, |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { | 43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { |
44 | .setup = dw_spi_mid_init, | 44 | .setup = dw_spi_mid_init, |
45 | .num_cs = 4, | 45 | .num_cs = 2, |
46 | .bus_num = 1, | 46 | .bus_num = 1, |
47 | }; | 47 | }; |
48 | 48 | ||
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 5a97a62b298a..4847afba89f4 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c | |||
@@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws) | |||
621 | if (!dws->fifo_len) { | 621 | if (!dws->fifo_len) { |
622 | u32 fifo; | 622 | u32 fifo; |
623 | 623 | ||
624 | for (fifo = 2; fifo <= 256; fifo++) { | 624 | for (fifo = 1; fifo < 256; fifo++) { |
625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); | 625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); |
626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) | 626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) |
627 | break; | 627 | break; |
628 | } | 628 | } |
629 | dw_writew(dws, DW_SPI_TXFLTR, 0); | 629 | dw_writew(dws, DW_SPI_TXFLTR, 0); |
630 | 630 | ||
631 | dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; | 631 | dws->fifo_len = (fifo == 1) ? 0 : fifo; |
632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); | 632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); |
633 | } | 633 | } |
634 | } | 634 | } |
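The spi-dw FIFO detection fix above probes the TX threshold register with values 1..255 and stops at the first value that does not read back; that value is taken as the FIFO depth, and failing already at 1 means no FIFO was detected, whereas the old 2..256 loop could end up one short. A standalone sketch of the probe against a simulated threshold register (the register model is invented and assumes a power-of-two depth):

#include <stdio.h>
#include <stdint.h>

/* Invented model: the threshold register only latches values < depth. */
struct fifo_model {
	uint32_t depth;     /* power of two in this toy */
	uint32_t txfltr;
};

static void reg_write(struct fifo_model *m, uint32_t val)
{
	if (val < m->depth)
		m->txfltr = val;
	else
		m->txfltr = val & (m->depth - 1);   /* extra bits are dropped */
}

static uint32_t reg_read(const struct fifo_model *m)
{
	return m->txfltr;
}

/* Mirror of the fixed probe loop: the first value that fails to read back
 * is the FIFO depth; failing already at 1 means no FIFO information. */
static uint32_t detect_fifo_len(struct fifo_model *m)
{
	uint32_t fifo;

	for (fifo = 1; fifo < 256; fifo++) {
		reg_write(m, fifo);
		if (fifo != reg_read(m))
			break;
	}
	reg_write(m, 0);                    /* restore the threshold */

	return (fifo == 1) ? 0 : fifo;
}

int main(void)
{
	struct fifo_model m = { .depth = 16, .txfltr = 0 };

	printf("detected FIFO size: %u\n", detect_fifo_len(&m));  /* 16 */
	return 0;
}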
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index c01567d53581..e649bc7d4c08 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c | |||
@@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master, | |||
459 | unsigned long flags; | 459 | unsigned long flags; |
460 | int ret; | 460 | int ret; |
461 | 461 | ||
462 | if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) { | ||
463 | dev_err(spfi->dev, | ||
464 | "Transfer length (%d) is greater than the max supported (%d)", | ||
465 | xfer->len, SPFI_TRANSACTION_TSIZE_MASK); | ||
466 | return -EINVAL; | ||
467 | } | ||
468 | |||
462 | /* | 469 | /* |
463 | * Stop all DMA and reset the controller if the previous transaction | 470 | * Stop all DMA and reset the controller if the previous transaction |
464 | * timed-out and never completed it's DMA. | 471 | * timed-out and never completed it's DMA. |
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 89ca162801da..ee513a85296b 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022) | |||
534 | pl022->cur_msg = NULL; | 534 | pl022->cur_msg = NULL; |
535 | pl022->cur_transfer = NULL; | 535 | pl022->cur_transfer = NULL; |
536 | pl022->cur_chip = NULL; | 536 | pl022->cur_chip = NULL; |
537 | spi_finalize_current_message(pl022->master); | ||
538 | 537 | ||
539 | /* disable the SPI/SSP operation */ | 538 | /* disable the SPI/SSP operation */ |
540 | writew((readw(SSP_CR1(pl022->virtbase)) & | 539 | writew((readw(SSP_CR1(pl022->virtbase)) & |
541 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); | 540 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); |
542 | 541 | ||
542 | spi_finalize_current_message(pl022->master); | ||
543 | } | 543 | } |
544 | 544 | ||
545 | /** | 545 | /** |
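The pl022 giveback reorder above is about ownership: spi_finalize_current_message() hands the message back to the core, which may immediately start the next transfer, so the controller has to be quiesced before that call rather than after. A small sketch of the "finish hardware work before releasing ownership" ordering with stubbed steps:

#include <stdio.h>
#include <stdbool.h>

static bool controller_enabled = true;

static void disable_controller(void)
{
	controller_enabled = false;    /* e.g. clear the SSE enable bit */
	puts("controller disabled");
}

/* Stand-in for spi_finalize_current_message(): once this returns, the
 * core owns the bus again and may start programming the next message. */
static void finalize_message(void)
{
	puts("message handed back to the core");
	if (controller_enabled)
		puts("WARNING: core saw the controller still running");
}

int main(void)
{
	/* Fixed order: quiesce the hardware first, then release ownership. */
	disable_controller();
	finalize_message();
	return 0;
}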
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index ff9cdbdb6672..2b2c359f5a50 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c | |||
@@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
498 | struct resource *res; | 498 | struct resource *res; |
499 | struct device *dev; | 499 | struct device *dev; |
500 | void __iomem *base; | 500 | void __iomem *base; |
501 | u32 max_freq, iomode; | 501 | u32 max_freq, iomode, num_cs; |
502 | int ret, irq, size; | 502 | int ret, irq, size; |
503 | 503 | ||
504 | dev = &pdev->dev; | 504 | dev = &pdev->dev; |
@@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev) | |||
550 | } | 550 | } |
551 | 551 | ||
552 | /* use num-cs unless not present or out of range */ | 552 | /* use num-cs unless not present or out of range */ |
553 | if (of_property_read_u16(dev->of_node, "num-cs", | 553 | if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) || |
554 | &master->num_chipselect) || | 554 | num_cs > SPI_NUM_CHIPSELECTS) |
555 | (master->num_chipselect > SPI_NUM_CHIPSELECTS)) | ||
556 | master->num_chipselect = SPI_NUM_CHIPSELECTS; | 555 | master->num_chipselect = SPI_NUM_CHIPSELECTS; |
556 | else | ||
557 | master->num_chipselect = num_cs; | ||
557 | 558 | ||
558 | master->bus_num = pdev->id; | 559 | master->bus_num = pdev->id; |
559 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; | 560 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; |
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 884a716e50cb..5c0616870358 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c | |||
@@ -101,6 +101,7 @@ struct ti_qspi { | |||
101 | #define QSPI_FLEN(n) ((n - 1) << 0) | 101 | #define QSPI_FLEN(n) ((n - 1) << 0) |
102 | 102 | ||
103 | /* STATUS REGISTER */ | 103 | /* STATUS REGISTER */ |
104 | #define BUSY 0x01 | ||
104 | #define WC 0x02 | 105 | #define WC 0x02 |
105 | 106 | ||
106 | /* INTERRUPT REGISTER */ | 107 | /* INTERRUPT REGISTER */ |
@@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi) | |||
199 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); | 200 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); |
200 | } | 201 | } |
201 | 202 | ||
203 | static inline u32 qspi_is_busy(struct ti_qspi *qspi) | ||
204 | { | ||
205 | u32 stat; | ||
206 | unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT; | ||
207 | |||
208 | stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
209 | while ((stat & BUSY) && time_after(timeout, jiffies)) { | ||
210 | cpu_relax(); | ||
211 | stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
212 | } | ||
213 | |||
214 | WARN(stat & BUSY, "qspi busy\n"); | ||
215 | return stat & BUSY; | ||
216 | } | ||
217 | |||
202 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | 218 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) |
203 | { | 219 | { |
204 | int wlen, count; | 220 | int wlen, count; |
@@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
211 | wlen = t->bits_per_word >> 3; /* in bytes */ | 227 | wlen = t->bits_per_word >> 3; /* in bytes */ |
212 | 228 | ||
213 | while (count) { | 229 | while (count) { |
230 | if (qspi_is_busy(qspi)) | ||
231 | return -EBUSY; | ||
232 | |||
214 | switch (wlen) { | 233 | switch (wlen) { |
215 | case 1: | 234 | case 1: |
216 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", | 235 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", |
@@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
266 | 285 | ||
267 | while (count) { | 286 | while (count) { |
268 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); | 287 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); |
288 | if (qspi_is_busy(qspi)) | ||
289 | return -EBUSY; | ||
290 | |||
269 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | 291 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); |
270 | if (!wait_for_completion_timeout(&qspi->transfer_complete, | 292 | if (!wait_for_completion_timeout(&qspi->transfer_complete, |
271 | QSPI_COMPLETION_TIMEOUT)) { | 293 | QSPI_COMPLETION_TIMEOUT)) { |
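The ti-qspi hunk above adds a BUSY poll before every command write: spin with cpu_relax() until the status register clears BUSY or a timeout expires, then warn and return so the caller can bail out with -EBUSY. A standalone sketch of the bounded-poll helper, using wall-clock time in place of jiffies and a stubbed status register:

#include <stdio.h>
#include <time.h>

#define STAT_BUSY 0x01           /* mirrors the BUSY bit in the status reg */

static int fake_reads;

/* Stub status register: pretend the controller stays busy for 3 reads. */
static unsigned int read_status(void)
{
	return (++fake_reads <= 3) ? STAT_BUSY : 0;
}

/* Poll until BUSY clears or the timeout expires; return the BUSY bit so
 * the caller can turn "still busy" into an error, as qspi_is_busy() does. */
static unsigned int wait_not_busy(double timeout_sec)
{
	clock_t deadline = clock() + (clock_t)(timeout_sec * CLOCKS_PER_SEC);
	unsigned int stat = read_status();

	while ((stat & STAT_BUSY) && clock() < deadline)
		stat = read_status();          /* cpu_relax() in the kernel */

	if (stat & STAT_BUSY)
		fprintf(stderr, "warning: controller still busy\n");
	return stat & STAT_BUSY;
}

int main(void)
{
	if (wait_not_busy(0.1))
		return 1;                      /* driver returns -EBUSY here */

	puts("controller idle, safe to issue the next command");
	return 0;
}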
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index c64a3e59fce3..57a195041dc7 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master) | |||
1105 | "failed to unprepare message: %d\n", ret); | 1105 | "failed to unprepare message: %d\n", ret); |
1106 | } | 1106 | } |
1107 | } | 1107 | } |
1108 | |||
1109 | trace_spi_message_done(mesg); | ||
1110 | |||
1108 | master->cur_msg_prepared = false; | 1111 | master->cur_msg_prepared = false; |
1109 | 1112 | ||
1110 | mesg->state = NULL; | 1113 | mesg->state = NULL; |
1111 | if (mesg->complete) | 1114 | if (mesg->complete) |
1112 | mesg->complete(mesg->context); | 1115 | mesg->complete(mesg->context); |
1113 | |||
1114 | trace_spi_message_done(mesg); | ||
1115 | } | 1116 | } |
1116 | EXPORT_SYMBOL_GPL(spi_finalize_current_message); | 1117 | EXPORT_SYMBOL_GPL(spi_finalize_current_message); |
1117 | 1118 | ||
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 4324282afe49..03b2a90b9ac0 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c | |||
@@ -330,16 +330,6 @@ static void device_init_registers(struct vnt_private *pDevice) | |||
330 | /* zonetype initial */ | 330 | /* zonetype initial */ |
331 | pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; | 331 | pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; |
332 | 332 | ||
333 | /* Get RFType */ | ||
334 | pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE); | ||
335 | |||
336 | /* force change RevID for VT3253 emu */ | ||
337 | if ((pDevice->byRFType & RF_EMU) != 0) | ||
338 | pDevice->byRevId = 0x80; | ||
339 | |||
340 | pDevice->byRFType &= RF_MASK; | ||
341 | pr_debug("pDevice->byRFType = %x\n", pDevice->byRFType); | ||
342 | |||
343 | if (!pDevice->bZoneRegExist) | 333 | if (!pDevice->bZoneRegExist) |
344 | pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; | 334 | pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE]; |
345 | 335 | ||
@@ -1187,12 +1177,14 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) | |||
1187 | { | 1177 | { |
1188 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 1178 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
1189 | PSTxDesc head_td; | 1179 | PSTxDesc head_td; |
1190 | u32 dma_idx = TYPE_AC0DMA; | 1180 | u32 dma_idx; |
1191 | unsigned long flags; | 1181 | unsigned long flags; |
1192 | 1182 | ||
1193 | spin_lock_irqsave(&priv->lock, flags); | 1183 | spin_lock_irqsave(&priv->lock, flags); |
1194 | 1184 | ||
1195 | if (!ieee80211_is_data(hdr->frame_control)) | 1185 | if (ieee80211_is_data(hdr->frame_control)) |
1186 | dma_idx = TYPE_AC0DMA; | ||
1187 | else | ||
1196 | dma_idx = TYPE_TXDMA0; | 1188 | dma_idx = TYPE_TXDMA0; |
1197 | 1189 | ||
1198 | if (AVAIL_TD(priv, dma_idx) < 1) { | 1190 | if (AVAIL_TD(priv, dma_idx) < 1) { |
@@ -1206,6 +1198,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) | |||
1206 | 1198 | ||
1207 | head_td->pTDInfo->skb = skb; | 1199 | head_td->pTDInfo->skb = skb; |
1208 | 1200 | ||
1201 | if (dma_idx == TYPE_AC0DMA) | ||
1202 | head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; | ||
1203 | |||
1209 | priv->iTDUsed[dma_idx]++; | 1204 | priv->iTDUsed[dma_idx]++; |
1210 | 1205 | ||
1211 | /* Take ownership */ | 1206 | /* Take ownership */ |
@@ -1234,13 +1229,10 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) | |||
1234 | 1229 | ||
1235 | head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); | 1230 | head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); |
1236 | 1231 | ||
1237 | if (dma_idx == TYPE_AC0DMA) { | 1232 | if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) |
1238 | head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; | ||
1239 | |||
1240 | MACvTransmitAC0(priv->PortOffset); | 1233 | MACvTransmitAC0(priv->PortOffset); |
1241 | } else { | 1234 | else |
1242 | MACvTransmit0(priv->PortOffset); | 1235 | MACvTransmit0(priv->PortOffset); |
1243 | } | ||
1244 | 1236 | ||
1245 | spin_unlock_irqrestore(&priv->lock, flags); | 1237 | spin_unlock_irqrestore(&priv->lock, flags); |
1246 | 1238 | ||
@@ -1778,6 +1770,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent) | |||
1778 | MACvInitialize(priv->PortOffset); | 1770 | MACvInitialize(priv->PortOffset); |
1779 | MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); | 1771 | MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); |
1780 | 1772 | ||
1773 | /* Get RFType */ | ||
1774 | priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE); | ||
1775 | priv->byRFType &= RF_MASK; | ||
1776 | |||
1777 | dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType); | ||
1778 | |||
1781 | device_get_options(priv); | 1779 | device_get_options(priv); |
1782 | device_set_options(priv); | 1780 | device_set_options(priv); |
1783 | /* Mask out the options cannot be set to the chip */ | 1781 | /* Mask out the options cannot be set to the chip */ |
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c index 941b2adca95a..7626f635f160 100644 --- a/drivers/staging/vt6655/rf.c +++ b/drivers/staging/vt6655/rf.c | |||
@@ -794,6 +794,7 @@ bool RFbSetPower( | |||
794 | break; | 794 | break; |
795 | case RATE_6M: | 795 | case RATE_6M: |
796 | case RATE_9M: | 796 | case RATE_9M: |
797 | case RATE_12M: | ||
797 | case RATE_18M: | 798 | case RATE_18M: |
798 | byPwr = priv->abyOFDMPwrTbl[uCH]; | 799 | byPwr = priv->abyOFDMPwrTbl[uCH]; |
799 | if (priv->byRFType == RF_UW2452) | 800 | if (priv->byRFType == RF_UW2452) |
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c index c42cde59f598..c4286ccac320 100644 --- a/drivers/staging/vt6656/rf.c +++ b/drivers/staging/vt6656/rf.c | |||
@@ -640,6 +640,7 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel) | |||
640 | break; | 640 | break; |
641 | case RATE_6M: | 641 | case RATE_6M: |
642 | case RATE_9M: | 642 | case RATE_9M: |
643 | case RATE_12M: | ||
643 | case RATE_18M: | 644 | case RATE_18M: |
644 | case RATE_24M: | 645 | case RATE_24M: |
645 | case RATE_36M: | 646 | case RATE_36M: |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 50bad55a0c42..2accb6e47beb 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -4256,11 +4256,17 @@ int iscsit_close_connection( | |||
4256 | pr_debug("Closing iSCSI connection CID %hu on SID:" | 4256 | pr_debug("Closing iSCSI connection CID %hu on SID:" |
4257 | " %u\n", conn->cid, sess->sid); | 4257 | " %u\n", conn->cid, sess->sid); |
4258 | /* | 4258 | /* |
4259 | * Always up conn_logout_comp just in case the RX Thread is sleeping | 4259 | * Always up conn_logout_comp for the traditional TCP case just in case |
4260 | * and the logout response never got sent because the connection | 4260 | * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout |
4261 | * failed. | 4261 | * response never got sent because the connection failed. |
4262 | * | ||
4263 | * However for iser-target, isert_wait4logout() is using conn_logout_comp | ||
4264 | * to signal logout response TX interrupt completion. Go ahead and skip | ||
4265 | * this for iser since isert_rx_opcode() does not wait on logout failure, | ||
4266 | * and to avoid iscsi_conn pointer dereference in iser-target code. | ||
4262 | */ | 4267 | */ |
4263 | complete(&conn->conn_logout_comp); | 4268 | if (conn->conn_transport->transport_type == ISCSI_TCP) |
4269 | complete(&conn->conn_logout_comp); | ||
4264 | 4270 | ||
4265 | iscsi_release_thread_set(conn); | 4271 | iscsi_release_thread_set(conn); |
4266 | 4272 | ||
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 1c197bad6132..bdd8731a4daa 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <target/target_core_fabric.h> | 22 | #include <target/target_core_fabric.h> |
23 | 23 | ||
24 | #include <target/iscsi/iscsi_target_core.h> | 24 | #include <target/iscsi/iscsi_target_core.h> |
25 | #include <target/iscsi/iscsi_transport.h> | ||
26 | #include "iscsi_target_seq_pdu_list.h" | 25 | #include "iscsi_target_seq_pdu_list.h" |
27 | #include "iscsi_target_tq.h" | 26 | #include "iscsi_target_tq.h" |
28 | #include "iscsi_target_erl0.h" | 27 | #include "iscsi_target_erl0.h" |
@@ -940,8 +939,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) | |||
940 | 939 | ||
941 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { | 940 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { |
942 | spin_unlock_bh(&conn->state_lock); | 941 | spin_unlock_bh(&conn->state_lock); |
943 | if (conn->conn_transport->transport_type == ISCSI_TCP) | 942 | iscsit_close_connection(conn); |
944 | iscsit_close_connection(conn); | ||
945 | return; | 943 | return; |
946 | } | 944 | } |
947 | 945 | ||
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 6b3c32954689..c36bd7c29136 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -953,11 +953,8 @@ static int tcm_loop_make_nexus( | |||
953 | transport_free_session(tl_nexus->se_sess); | 953 | transport_free_session(tl_nexus->se_sess); |
954 | goto out; | 954 | goto out; |
955 | } | 955 | } |
956 | /* | 956 | /* Now, register the SAS I_T Nexus as active. */ |
957 | * Now, register the SAS I_T Nexus as active with the call to | 957 | transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, |
958 | * transport_register_session() | ||
959 | */ | ||
960 | __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, | ||
961 | tl_nexus->se_sess, tl_nexus); | 958 | tl_nexus->se_sess, tl_nexus); |
962 | tl_tpg->tl_nexus = tl_nexus; | 959 | tl_tpg->tl_nexus = tl_nexus; |
963 | pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" | 960 | pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 58f49ff69b14..79b4ec3ca2db 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -650,6 +650,18 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) | |||
650 | return aligned_max_sectors; | 650 | return aligned_max_sectors; |
651 | } | 651 | } |
652 | 652 | ||
653 | bool se_dev_check_wce(struct se_device *dev) | ||
654 | { | ||
655 | bool wce = false; | ||
656 | |||
657 | if (dev->transport->get_write_cache) | ||
658 | wce = dev->transport->get_write_cache(dev); | ||
659 | else if (dev->dev_attrib.emulate_write_cache > 0) | ||
660 | wce = true; | ||
661 | |||
662 | return wce; | ||
663 | } | ||
664 | |||
653 | int se_dev_set_max_unmap_lba_count( | 665 | int se_dev_set_max_unmap_lba_count( |
654 | struct se_device *dev, | 666 | struct se_device *dev, |
655 | u32 max_unmap_lba_count) | 667 | u32 max_unmap_lba_count) |
@@ -767,6 +779,16 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | |||
767 | pr_err("Illegal value %d\n", flag); | 779 | pr_err("Illegal value %d\n", flag); |
768 | return -EINVAL; | 780 | return -EINVAL; |
769 | } | 781 | } |
782 | if (flag && | ||
783 | dev->transport->get_write_cache) { | ||
784 | pr_err("emulate_fua_write not supported for this device\n"); | ||
785 | return -EINVAL; | ||
786 | } | ||
787 | if (dev->export_count) { | ||
788 | pr_err("emulate_fua_write cannot be changed with active" | ||
789 | " exports: %d\n", dev->export_count); | ||
790 | return -EINVAL; | ||
791 | } | ||
770 | dev->dev_attrib.emulate_fua_write = flag; | 792 | dev->dev_attrib.emulate_fua_write = flag; |
771 | pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | 793 | pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", |
772 | dev, dev->dev_attrib.emulate_fua_write); | 794 | dev, dev->dev_attrib.emulate_fua_write); |
@@ -801,7 +823,11 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | |||
801 | pr_err("emulate_write_cache not supported for this device\n"); | 823 | pr_err("emulate_write_cache not supported for this device\n"); |
802 | return -EINVAL; | 824 | return -EINVAL; |
803 | } | 825 | } |
804 | 826 | if (dev->export_count) { | |
827 | pr_err("emulate_write_cache cannot be changed with active" | ||
828 | " exports: %d\n", dev->export_count); | ||
829 | return -EINVAL; | ||
830 | } | ||
805 | dev->dev_attrib.emulate_write_cache = flag; | 831 | dev->dev_attrib.emulate_write_cache = flag; |
806 | pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | 832 | pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", |
807 | dev, dev->dev_attrib.emulate_write_cache); | 833 | dev, dev->dev_attrib.emulate_write_cache); |
@@ -1534,8 +1560,6 @@ int target_configure_device(struct se_device *dev) | |||
1534 | ret = dev->transport->configure_device(dev); | 1560 | ret = dev->transport->configure_device(dev); |
1535 | if (ret) | 1561 | if (ret) |
1536 | goto out; | 1562 | goto out; |
1537 | dev->dev_flags |= DF_CONFIGURED; | ||
1538 | |||
1539 | /* | 1563 | /* |
1540 | * XXX: there is not much point to have two different values here.. | 1564 | * XXX: there is not much point to have two different values here.. |
1541 | */ | 1565 | */ |
@@ -1597,6 +1621,8 @@ int target_configure_device(struct se_device *dev) | |||
1597 | list_add_tail(&dev->g_dev_node, &g_device_list); | 1621 | list_add_tail(&dev->g_dev_node, &g_device_list); |
1598 | mutex_unlock(&g_device_mutex); | 1622 | mutex_unlock(&g_device_mutex); |
1599 | 1623 | ||
1624 | dev->dev_flags |= DF_CONFIGURED; | ||
1625 | |||
1600 | return 0; | 1626 | return 0; |
1601 | 1627 | ||
1602 | out_free_alua: | 1628 | out_free_alua: |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 1045dcd7bf65..f6c954c4635f 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -1121,7 +1121,7 @@ static u32 pscsi_get_device_type(struct se_device *dev) | |||
1121 | struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); | 1121 | struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); |
1122 | struct scsi_device *sd = pdv->pdv_sd; | 1122 | struct scsi_device *sd = pdv->pdv_sd; |
1123 | 1123 | ||
1124 | return sd->type; | 1124 | return (sd) ? sd->type : TYPE_NO_LUN; |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | static sector_t pscsi_get_blocks(struct se_device *dev) | 1127 | static sector_t pscsi_get_blocks(struct se_device *dev) |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 9a2f9d3a6e70..3e7297411110 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -708,8 +708,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) | |||
708 | } | 708 | } |
709 | } | 709 | } |
710 | if (cdb[1] & 0x8) { | 710 | if (cdb[1] & 0x8) { |
711 | if (!dev->dev_attrib.emulate_fua_write || | 711 | if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) { |
712 | !dev->dev_attrib.emulate_write_cache) { | ||
713 | pr_err("Got CDB: 0x%02x with FUA bit set, but device" | 712 | pr_err("Got CDB: 0x%02x with FUA bit set, but device" |
714 | " does not advertise support for FUA write\n", | 713 | " does not advertise support for FUA write\n", |
715 | cdb[0]); | 714 | cdb[0]); |
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 460e93109473..6c8bd6bc175c 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c | |||
@@ -454,19 +454,6 @@ check_scsi_name: | |||
454 | } | 454 | } |
455 | EXPORT_SYMBOL(spc_emulate_evpd_83); | 455 | EXPORT_SYMBOL(spc_emulate_evpd_83); |
456 | 456 | ||
457 | static bool | ||
458 | spc_check_dev_wce(struct se_device *dev) | ||
459 | { | ||
460 | bool wce = false; | ||
461 | |||
462 | if (dev->transport->get_write_cache) | ||
463 | wce = dev->transport->get_write_cache(dev); | ||
464 | else if (dev->dev_attrib.emulate_write_cache > 0) | ||
465 | wce = true; | ||
466 | |||
467 | return wce; | ||
468 | } | ||
469 | |||
470 | /* Extended INQUIRY Data VPD Page */ | 457 | /* Extended INQUIRY Data VPD Page */ |
471 | static sense_reason_t | 458 | static sense_reason_t |
472 | spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | 459 | spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) |
@@ -490,7 +477,7 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) | |||
490 | buf[5] = 0x07; | 477 | buf[5] = 0x07; |
491 | 478 | ||
492 | /* If WriteCache emulation is enabled, set V_SUP */ | 479 | /* If WriteCache emulation is enabled, set V_SUP */ |
493 | if (spc_check_dev_wce(dev)) | 480 | if (se_dev_check_wce(dev)) |
494 | buf[6] = 0x01; | 481 | buf[6] = 0x01; |
495 | /* If an LBA map is present set R_SUP */ | 482 | /* If an LBA map is present set R_SUP */ |
496 | spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); | 483 | spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); |
@@ -897,7 +884,7 @@ static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p) | |||
897 | if (pc == 1) | 884 | if (pc == 1) |
898 | goto out; | 885 | goto out; |
899 | 886 | ||
900 | if (spc_check_dev_wce(dev)) | 887 | if (se_dev_check_wce(dev)) |
901 | p[2] = 0x04; /* Write Cache Enable */ | 888 | p[2] = 0x04; /* Write Cache Enable */ |
902 | p[12] = 0x20; /* Disabled Read Ahead */ | 889 | p[12] = 0x20; /* Disabled Read Ahead */ |
903 | 890 | ||
@@ -1009,7 +996,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) | |||
1009 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) | 996 | (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) |
1010 | spc_modesense_write_protect(&buf[length], type); | 997 | spc_modesense_write_protect(&buf[length], type); |
1011 | 998 | ||
1012 | if ((spc_check_dev_wce(dev)) && | 999 | if ((se_dev_check_wce(dev)) && |
1013 | (dev->dev_attrib.emulate_fua_write > 0)) | 1000 | (dev->dev_attrib.emulate_fua_write > 0)) |
1014 | spc_modesense_dpofua(&buf[length], type); | 1001 | spc_modesense_dpofua(&buf[length], type); |
1015 | 1002 | ||
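
The target_core hunks above fold the old spc_check_dev_wce() into a shared se_dev_check_wce() helper, so sbc_check_dpofua(), the Extended INQUIRY page and MODE SENSE all agree on when a write cache is advertised. The standalone sketch below models that consolidated check and the FUA rule that now depends on it; the struct layouts and the fua_write_ok() name are trimmed stand-ins for illustration, not the kernel's real definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stubs only -- the real structures live in the target core
 * headers; fields are trimmed to what the sketch needs. */
struct se_device;

struct se_dev_attrib {
	int emulate_write_cache;
	int emulate_fua_write;
};

struct se_backend_ops {
	bool (*get_write_cache)(struct se_device *dev);
};

struct se_device {
	struct se_dev_attrib dev_attrib;
	const struct se_backend_ops *transport;
};

/* Same logic as the se_dev_check_wce() added above: trust the backend's own
 * report of its write cache when it provides one, else fall back to the
 * emulate_write_cache attribute. */
static bool se_dev_check_wce(struct se_device *dev)
{
	if (dev->transport->get_write_cache)
		return dev->transport->get_write_cache(dev);
	return dev->dev_attrib.emulate_write_cache > 0;
}

/* Models the reworked test in sbc_check_dpofua(): a FUA write is only
 * accepted when FUA emulation is on and a write cache is advertised. */
static bool fua_write_ok(struct se_device *dev)
{
	return dev->dev_attrib.emulate_fua_write && se_dev_check_wce(dev);
}

int main(void)
{
	static const struct se_backend_ops no_cache_report = { .get_write_cache = NULL };
	struct se_device dev = {
		.dev_attrib = { .emulate_write_cache = 1, .emulate_fua_write = 1 },
		.transport = &no_cache_report,
	};

	printf("WCE=%d, FUA write accepted=%d\n",
	       se_dev_check_wce(&dev), fua_write_ok(&dev));
	return 0;
}
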
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0adc0f650213..ac3cbabdbdf0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | |||
2389 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); | 2389 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); |
2390 | out: | 2390 | out: |
2391 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2391 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2392 | |||
2393 | if (ret && ack_kref) | ||
2394 | target_put_sess_cmd(se_sess, se_cmd); | ||
2395 | |||
2392 | return ret; | 2396 | return ret; |
2393 | } | 2397 | } |
2394 | EXPORT_SYMBOL(target_get_sess_cmd); | 2398 | EXPORT_SYMBOL(target_get_sess_cmd); |
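The target_get_sess_cmd() hunk above makes the function drop the extra reference it took for ack_kref whenever it returns an error, so callers no longer leak a reference on the failure path. The toy model below uses plain counters in place of the real kref and target_put_sess_cmd() primitives, and a simplified sess_tearing_down flag as the stand-in failure condition.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy reference bookkeeping; names prefixed toy_ are illustrative only. */
struct toy_cmd {
	int kref;
	bool sess_tearing_down;
};

static void toy_kref_get(struct toy_cmd *cmd)
{
	cmd->kref++;
}

static void toy_put_sess_cmd(struct toy_cmd *cmd)
{
	cmd->kref--;
}

/* Mirrors the fixed error path: if registration fails after the ack_kref
 * reference was taken, drop that reference before returning so the caller
 * is not left holding it. */
static int toy_get_sess_cmd(struct toy_cmd *cmd, bool ack_kref)
{
	int ret = 0;

	if (ack_kref)
		toy_kref_get(cmd);

	if (cmd->sess_tearing_down)
		ret = -ESHUTDOWN;

	if (ret && ack_kref)
		toy_put_sess_cmd(cmd);

	return ret;
}

int main(void)
{
	struct toy_cmd cmd = { .kref = 1, .sess_tearing_down = true };

	if (toy_get_sess_cmd(&cmd, true))
		printf("registration failed, kref back at %d (no leak)\n", cmd.kref);
	return 0;
}
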
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 97b486c3dda1..583e755d8091 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c | |||
@@ -359,7 +359,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd) | |||
359 | ep = fc_seq_exch(seq); | 359 | ep = fc_seq_exch(seq); |
360 | if (ep) { | 360 | if (ep) { |
361 | lport = ep->lp; | 361 | lport = ep->lp; |
362 | if (lport && (ep->xid <= lport->lro_xid)) | 362 | if (lport && (ep->xid <= lport->lro_xid)) { |
363 | /* | 363 | /* |
364 | * "ddp_done" trigger invalidation of HW | 364 | * "ddp_done" trigger invalidation of HW |
365 | * specific DDP context | 365 | * specific DDP context |
@@ -374,6 +374,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd) | |||
374 | * identified using ep->xid) | 374 | * identified using ep->xid) |
375 | */ | 375 | */ |
376 | cmd->was_ddp_setup = 0; | 376 | cmd->was_ddp_setup = 0; |
377 | } | ||
377 | } | 378 | } |
378 | } | 379 | } |
379 | } | 380 | } |
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 2ab229ddee38..6ae5b8560e4d 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
@@ -119,7 +119,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value) | |||
119 | dw8250_force_idle(p); | 119 | dw8250_force_idle(p); |
120 | writeb(value, p->membase + (UART_LCR << p->regshift)); | 120 | writeb(value, p->membase + (UART_LCR << p->regshift)); |
121 | } | 121 | } |
122 | dev_err(p->dev, "Couldn't set LCR to %d\n", value); | 122 | /* |
123 | * FIXME: this deadlocks if port->lock is already held | ||
124 | * dev_err(p->dev, "Couldn't set LCR to %d\n", value); | ||
125 | */ | ||
123 | } | 126 | } |
124 | } | 127 | } |
125 | 128 | ||
@@ -163,7 +166,10 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value) | |||
163 | __raw_writeq(value & 0xff, | 166 | __raw_writeq(value & 0xff, |
164 | p->membase + (UART_LCR << p->regshift)); | 167 | p->membase + (UART_LCR << p->regshift)); |
165 | } | 168 | } |
166 | dev_err(p->dev, "Couldn't set LCR to %d\n", value); | 169 | /* |
170 | * FIXME: this deadlocks if port->lock is already held | ||
171 | * dev_err(p->dev, "Couldn't set LCR to %d\n", value); | ||
172 | */ | ||
167 | } | 173 | } |
168 | } | 174 | } |
169 | #endif /* CONFIG_64BIT */ | 175 | #endif /* CONFIG_64BIT */ |
@@ -187,7 +193,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value) | |||
187 | dw8250_force_idle(p); | 193 | dw8250_force_idle(p); |
188 | writel(value, p->membase + (UART_LCR << p->regshift)); | 194 | writel(value, p->membase + (UART_LCR << p->regshift)); |
189 | } | 195 | } |
190 | dev_err(p->dev, "Couldn't set LCR to %d\n", value); | 196 | /* |
197 | * FIXME: this deadlocks if port->lock is already held | ||
198 | * dev_err(p->dev, "Couldn't set LCR to %d\n", value); | ||
199 | */ | ||
191 | } | 200 | } |
192 | } | 201 | } |
193 | 202 | ||
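
The FIXME added in the three dw8250_serial_out*() hunks above records why the dev_err() call had to go: the function can run with port->lock already held, and when this UART is also the system console the log message funnels back into the driver's own console write path, which needs the same lock. The userspace model below is only a sketch of that re-entry; a plain pthread mutex stands in for port->lock, and trylock is used so the demo reports the problem instead of hanging.

#include <pthread.h>
#include <stdio.h>

/* Non-recursive mutex standing in for port->lock. */
static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the console write path a dev_err() would reach when this
 * UART is the console; it also needs port->lock. */
static void console_write(const char *msg)
{
	if (pthread_mutex_trylock(&port_lock) != 0) {
		fprintf(stderr, "would deadlock: port->lock already held\n");
		return;
	}
	printf("%s", msg);
	pthread_mutex_unlock(&port_lock);
}

/* Models the situation in dw8250_serial_out(): called with port->lock held,
 * the LCR write fails, and logging the failure re-enters the same lock. */
static void serial_out_lcr_failed(void)
{
	pthread_mutex_lock(&port_lock);
	console_write("Couldn't set LCR\n");
	pthread_mutex_unlock(&port_lock);
}

int main(void)
{
	serial_out_lcr_failed();
	return 0;
}
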
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index ff451048c1ac..4bfb7ac0239f 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
@@ -929,6 +929,13 @@ __acquires(hwep->lock) | |||
929 | return retval; | 929 | return retval; |
930 | } | 930 | } |
931 | 931 | ||
932 | static int otg_a_alt_hnp_support(struct ci_hdrc *ci) | ||
933 | { | ||
934 | dev_warn(&ci->gadget.dev, | ||
935 | "connect the device to an alternate port if you want HNP\n"); | ||
936 | return isr_setup_status_phase(ci); | ||
937 | } | ||
938 | |||
932 | /** | 939 | /** |
933 | * isr_setup_packet_handler: setup packet handler | 940 | * isr_setup_packet_handler: setup packet handler |
934 | * @ci: UDC descriptor | 941 | * @ci: UDC descriptor |
@@ -1061,6 +1068,10 @@ __acquires(ci->lock) | |||
1061 | ci); | 1068 | ci); |
1062 | } | 1069 | } |
1063 | break; | 1070 | break; |
1071 | case USB_DEVICE_A_ALT_HNP_SUPPORT: | ||
1072 | if (ci_otg_is_fsm_mode(ci)) | ||
1073 | err = otg_a_alt_hnp_support(ci); | ||
1074 | break; | ||
1064 | default: | 1075 | default: |
1065 | goto delegate; | 1076 | goto delegate; |
1066 | } | 1077 | } |
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c index c6b35b77dab7..61d538aa2346 100644 --- a/drivers/usb/common/usb-otg-fsm.c +++ b/drivers/usb/common/usb-otg-fsm.c | |||
@@ -150,9 +150,9 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state) | |||
150 | break; | 150 | break; |
151 | case OTG_STATE_B_PERIPHERAL: | 151 | case OTG_STATE_B_PERIPHERAL: |
152 | otg_chrg_vbus(fsm, 0); | 152 | otg_chrg_vbus(fsm, 0); |
153 | otg_loc_conn(fsm, 1); | ||
154 | otg_loc_sof(fsm, 0); | 153 | otg_loc_sof(fsm, 0); |
155 | otg_set_protocol(fsm, PROTO_GADGET); | 154 | otg_set_protocol(fsm, PROTO_GADGET); |
155 | otg_loc_conn(fsm, 1); | ||
156 | break; | 156 | break; |
157 | case OTG_STATE_B_WAIT_ACON: | 157 | case OTG_STATE_B_WAIT_ACON: |
158 | otg_chrg_vbus(fsm, 0); | 158 | otg_chrg_vbus(fsm, 0); |
@@ -213,10 +213,10 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state) | |||
213 | 213 | ||
214 | break; | 214 | break; |
215 | case OTG_STATE_A_PERIPHERAL: | 215 | case OTG_STATE_A_PERIPHERAL: |
216 | otg_loc_conn(fsm, 1); | ||
217 | otg_loc_sof(fsm, 0); | 216 | otg_loc_sof(fsm, 0); |
218 | otg_set_protocol(fsm, PROTO_GADGET); | 217 | otg_set_protocol(fsm, PROTO_GADGET); |
219 | otg_drv_vbus(fsm, 1); | 218 | otg_drv_vbus(fsm, 1); |
219 | otg_loc_conn(fsm, 1); | ||
220 | otg_add_timer(fsm, A_BIDL_ADIS); | 220 | otg_add_timer(fsm, A_BIDL_ADIS); |
221 | break; | 221 | break; |
222 | case OTG_STATE_A_WAIT_VFALL: | 222 | case OTG_STATE_A_WAIT_VFALL: |
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index 02e3e2d4ea56..6cf047878dba 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c | |||
@@ -377,6 +377,9 @@ static void dwc2_handle_disconnect_intr(struct dwc2_hsotg *hsotg) | |||
377 | dwc2_is_host_mode(hsotg) ? "Host" : "Device", | 377 | dwc2_is_host_mode(hsotg) ? "Host" : "Device", |
378 | dwc2_op_state_str(hsotg)); | 378 | dwc2_op_state_str(hsotg)); |
379 | 379 | ||
380 | if (hsotg->op_state == OTG_STATE_A_HOST) | ||
381 | dwc2_hcd_disconnect(hsotg); | ||
382 | |||
380 | /* Change to L3 (OFF) state */ | 383 | /* Change to L3 (OFF) state */ |
381 | hsotg->lx_state = DWC2_L3; | 384 | hsotg->lx_state = DWC2_L3; |
382 | 385 | ||
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index af98b096af2f..175c9956cbe3 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -144,10 +144,9 @@ struct ffs_io_data { | |||
144 | bool read; | 144 | bool read; |
145 | 145 | ||
146 | struct kiocb *kiocb; | 146 | struct kiocb *kiocb; |
147 | const struct iovec *iovec; | 147 | struct iov_iter data; |
148 | unsigned long nr_segs; | 148 | const void *to_free; |
149 | char __user *buf; | 149 | char *buf; |
150 | size_t len; | ||
151 | 150 | ||
152 | struct mm_struct *mm; | 151 | struct mm_struct *mm; |
153 | struct work_struct work; | 152 | struct work_struct work; |
@@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work) | |||
649 | io_data->req->actual; | 648 | io_data->req->actual; |
650 | 649 | ||
651 | if (io_data->read && ret > 0) { | 650 | if (io_data->read && ret > 0) { |
652 | int i; | ||
653 | size_t pos = 0; | ||
654 | |||
655 | /* | ||
656 | * Since req->length may be bigger than io_data->len (after | ||
657 | * being rounded up to maxpacketsize), we may end up with more | ||
658 | * data than user space has space for. | ||
659 | */ | ||
660 | ret = min_t(int, ret, io_data->len); | ||
661 | |||
662 | use_mm(io_data->mm); | 651 | use_mm(io_data->mm); |
663 | for (i = 0; i < io_data->nr_segs; i++) { | 652 | ret = copy_to_iter(io_data->buf, ret, &io_data->data); |
664 | size_t len = min_t(size_t, ret - pos, | 653 | if (iov_iter_count(&io_data->data)) |
665 | io_data->iovec[i].iov_len); | 654 | ret = -EFAULT; |
666 | if (!len) | ||
667 | break; | ||
668 | if (unlikely(copy_to_user(io_data->iovec[i].iov_base, | ||
669 | &io_data->buf[pos], len))) { | ||
670 | ret = -EFAULT; | ||
671 | break; | ||
672 | } | ||
673 | pos += len; | ||
674 | } | ||
675 | unuse_mm(io_data->mm); | 655 | unuse_mm(io_data->mm); |
676 | } | 656 | } |
677 | 657 | ||
@@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work) | |||
684 | 664 | ||
685 | io_data->kiocb->private = NULL; | 665 | io_data->kiocb->private = NULL; |
686 | if (io_data->read) | 666 | if (io_data->read) |
687 | kfree(io_data->iovec); | 667 | kfree(io_data->to_free); |
688 | kfree(io_data->buf); | 668 | kfree(io_data->buf); |
689 | kfree(io_data); | 669 | kfree(io_data); |
690 | } | 670 | } |
@@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
743 | * before the waiting completes, so do not assign to 'gadget' earlier | 723 | * before the waiting completes, so do not assign to 'gadget' earlier |
744 | */ | 724 | */ |
745 | struct usb_gadget *gadget = epfile->ffs->gadget; | 725 | struct usb_gadget *gadget = epfile->ffs->gadget; |
726 | size_t copied; | ||
746 | 727 | ||
747 | spin_lock_irq(&epfile->ffs->eps_lock); | 728 | spin_lock_irq(&epfile->ffs->eps_lock); |
748 | /* In the meantime, endpoint got disabled or changed. */ | 729 | /* In the meantime, endpoint got disabled or changed. */ |
@@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
750 | spin_unlock_irq(&epfile->ffs->eps_lock); | 731 | spin_unlock_irq(&epfile->ffs->eps_lock); |
751 | return -ESHUTDOWN; | 732 | return -ESHUTDOWN; |
752 | } | 733 | } |
734 | data_len = iov_iter_count(&io_data->data); | ||
753 | /* | 735 | /* |
754 | * Controller may require buffer size to be aligned to | 736 | * Controller may require buffer size to be aligned to |
755 | * maxpacketsize of an out endpoint. | 737 | * maxpacketsize of an out endpoint. |
756 | */ | 738 | */ |
757 | data_len = io_data->read ? | 739 | if (io_data->read) |
758 | usb_ep_align_maybe(gadget, ep->ep, io_data->len) : | 740 | data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); |
759 | io_data->len; | ||
760 | spin_unlock_irq(&epfile->ffs->eps_lock); | 741 | spin_unlock_irq(&epfile->ffs->eps_lock); |
761 | 742 | ||
762 | data = kmalloc(data_len, GFP_KERNEL); | 743 | data = kmalloc(data_len, GFP_KERNEL); |
763 | if (unlikely(!data)) | 744 | if (unlikely(!data)) |
764 | return -ENOMEM; | 745 | return -ENOMEM; |
765 | if (io_data->aio && !io_data->read) { | 746 | if (!io_data->read) { |
766 | int i; | 747 | copied = copy_from_iter(data, data_len, &io_data->data); |
767 | size_t pos = 0; | 748 | if (copied != data_len) { |
768 | for (i = 0; i < io_data->nr_segs; i++) { | ||
769 | if (unlikely(copy_from_user(&data[pos], | ||
770 | io_data->iovec[i].iov_base, | ||
771 | io_data->iovec[i].iov_len))) { | ||
772 | ret = -EFAULT; | ||
773 | goto error; | ||
774 | } | ||
775 | pos += io_data->iovec[i].iov_len; | ||
776 | } | ||
777 | } else { | ||
778 | if (!io_data->read && | ||
779 | unlikely(__copy_from_user(data, io_data->buf, | ||
780 | io_data->len))) { | ||
781 | ret = -EFAULT; | 749 | ret = -EFAULT; |
782 | goto error; | 750 | goto error; |
783 | } | 751 | } |
@@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
876 | */ | 844 | */ |
877 | ret = ep->status; | 845 | ret = ep->status; |
878 | if (io_data->read && ret > 0) { | 846 | if (io_data->read && ret > 0) { |
879 | ret = min_t(size_t, ret, io_data->len); | 847 | ret = copy_to_iter(data, ret, &io_data->data); |
880 | 848 | if (unlikely(iov_iter_count(&io_data->data))) | |
881 | if (unlikely(copy_to_user(io_data->buf, | ||
882 | data, ret))) | ||
883 | ret = -EFAULT; | 849 | ret = -EFAULT; |
884 | } | 850 | } |
885 | } | 851 | } |
@@ -898,37 +864,6 @@ error: | |||
898 | return ret; | 864 | return ret; |
899 | } | 865 | } |
900 | 866 | ||
901 | static ssize_t | ||
902 | ffs_epfile_write(struct file *file, const char __user *buf, size_t len, | ||
903 | loff_t *ptr) | ||
904 | { | ||
905 | struct ffs_io_data io_data; | ||
906 | |||
907 | ENTER(); | ||
908 | |||
909 | io_data.aio = false; | ||
910 | io_data.read = false; | ||
911 | io_data.buf = (char * __user)buf; | ||
912 | io_data.len = len; | ||
913 | |||
914 | return ffs_epfile_io(file, &io_data); | ||
915 | } | ||
916 | |||
917 | static ssize_t | ||
918 | ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) | ||
919 | { | ||
920 | struct ffs_io_data io_data; | ||
921 | |||
922 | ENTER(); | ||
923 | |||
924 | io_data.aio = false; | ||
925 | io_data.read = true; | ||
926 | io_data.buf = buf; | ||
927 | io_data.len = len; | ||
928 | |||
929 | return ffs_epfile_io(file, &io_data); | ||
930 | } | ||
931 | |||
932 | static int | 867 | static int |
933 | ffs_epfile_open(struct inode *inode, struct file *file) | 868 | ffs_epfile_open(struct inode *inode, struct file *file) |
934 | { | 869 | { |
@@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb) | |||
965 | return value; | 900 | return value; |
966 | } | 901 | } |
967 | 902 | ||
968 | static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, | 903 | static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) |
969 | const struct iovec *iovec, | ||
970 | unsigned long nr_segs, loff_t loff) | ||
971 | { | 904 | { |
972 | struct ffs_io_data *io_data; | 905 | struct ffs_io_data io_data, *p = &io_data; |
906 | ssize_t res; | ||
973 | 907 | ||
974 | ENTER(); | 908 | ENTER(); |
975 | 909 | ||
976 | io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); | 910 | if (!is_sync_kiocb(kiocb)) { |
977 | if (unlikely(!io_data)) | 911 | p = kmalloc(sizeof(io_data), GFP_KERNEL); |
978 | return -ENOMEM; | 912 | if (unlikely(!p)) |
913 | return -ENOMEM; | ||
914 | p->aio = true; | ||
915 | } else { | ||
916 | p->aio = false; | ||
917 | } | ||
979 | 918 | ||
980 | io_data->aio = true; | 919 | p->read = false; |
981 | io_data->read = false; | 920 | p->kiocb = kiocb; |
982 | io_data->kiocb = kiocb; | 921 | p->data = *from; |
983 | io_data->iovec = iovec; | 922 | p->mm = current->mm; |
984 | io_data->nr_segs = nr_segs; | ||
985 | io_data->len = kiocb->ki_nbytes; | ||
986 | io_data->mm = current->mm; | ||
987 | 923 | ||
988 | kiocb->private = io_data; | 924 | kiocb->private = p; |
989 | 925 | ||
990 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); | 926 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); |
991 | 927 | ||
992 | return ffs_epfile_io(kiocb->ki_filp, io_data); | 928 | res = ffs_epfile_io(kiocb->ki_filp, p); |
929 | if (res == -EIOCBQUEUED) | ||
930 | return res; | ||
931 | if (p->aio) | ||
932 | kfree(p); | ||
933 | else | ||
934 | *from = p->data; | ||
935 | return res; | ||
993 | } | 936 | } |
994 | 937 | ||
995 | static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, | 938 | static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) |
996 | const struct iovec *iovec, | ||
997 | unsigned long nr_segs, loff_t loff) | ||
998 | { | 939 | { |
999 | struct ffs_io_data *io_data; | 940 | struct ffs_io_data io_data, *p = &io_data; |
1000 | struct iovec *iovec_copy; | 941 | ssize_t res; |
1001 | 942 | ||
1002 | ENTER(); | 943 | ENTER(); |
1003 | 944 | ||
1004 | iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); | 945 | if (!is_sync_kiocb(kiocb)) { |
1005 | if (unlikely(!iovec_copy)) | 946 | p = kmalloc(sizeof(io_data), GFP_KERNEL); |
1006 | return -ENOMEM; | 947 | if (unlikely(!p)) |
1007 | 948 | return -ENOMEM; | |
1008 | memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); | 949 | p->aio = true; |
1009 | 950 | } else { | |
1010 | io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); | 951 | p->aio = false; |
1011 | if (unlikely(!io_data)) { | ||
1012 | kfree(iovec_copy); | ||
1013 | return -ENOMEM; | ||
1014 | } | 952 | } |
1015 | 953 | ||
1016 | io_data->aio = true; | 954 | p->read = true; |
1017 | io_data->read = true; | 955 | p->kiocb = kiocb; |
1018 | io_data->kiocb = kiocb; | 956 | if (p->aio) { |
1019 | io_data->iovec = iovec_copy; | 957 | p->to_free = dup_iter(&p->data, to, GFP_KERNEL); |
1020 | io_data->nr_segs = nr_segs; | 958 | if (!p->to_free) { |
1021 | io_data->len = kiocb->ki_nbytes; | 959 | kfree(p); |
1022 | io_data->mm = current->mm; | 960 | return -ENOMEM; |
961 | } | ||
962 | } else { | ||
963 | p->data = *to; | ||
964 | p->to_free = NULL; | ||
965 | } | ||
966 | p->mm = current->mm; | ||
1023 | 967 | ||
1024 | kiocb->private = io_data; | 968 | kiocb->private = p; |
1025 | 969 | ||
1026 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); | 970 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); |
1027 | 971 | ||
1028 | return ffs_epfile_io(kiocb->ki_filp, io_data); | 972 | res = ffs_epfile_io(kiocb->ki_filp, p); |
973 | if (res == -EIOCBQUEUED) | ||
974 | return res; | ||
975 | |||
976 | if (p->aio) { | ||
977 | kfree(p->to_free); | ||
978 | kfree(p); | ||
979 | } else { | ||
980 | *to = p->data; | ||
981 | } | ||
982 | return res; | ||
1029 | } | 983 | } |
1030 | 984 | ||
1031 | static int | 985 | static int |
@@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = { | |||
1105 | .llseek = no_llseek, | 1059 | .llseek = no_llseek, |
1106 | 1060 | ||
1107 | .open = ffs_epfile_open, | 1061 | .open = ffs_epfile_open, |
1108 | .write = ffs_epfile_write, | 1062 | .write = new_sync_write, |
1109 | .read = ffs_epfile_read, | 1063 | .read = new_sync_read, |
1110 | .aio_write = ffs_epfile_aio_write, | 1064 | .write_iter = ffs_epfile_write_iter, |
1111 | .aio_read = ffs_epfile_aio_read, | 1065 | .read_iter = ffs_epfile_read_iter, |
1112 | .release = ffs_epfile_release, | 1066 | .release = ffs_epfile_release, |
1113 | .unlocked_ioctl = ffs_epfile_ioctl, | 1067 | .unlocked_ioctl = ffs_epfile_ioctl, |
1114 | }; | 1068 | }; |
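
The f_fs.c conversion above replaces the hand-rolled iovec walk with ->read_iter()/->write_iter() and copy_to_iter()/copy_from_iter(); after the copy, any room left in the iov_iter means the copy came up short, which ffs_user_copy_worker() now turns into -EFAULT. The sketch below is a toy model of that bookkeeping only: toy_seg, toy_iter and toy_copy_to_iter() are illustrative stand-ins, and unlike the kernel helpers they copy between plain memory buffers with no user-space access or fault handling.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_seg {
	char *base;
	size_t len;
};

struct toy_iter {
	struct toy_seg *segs;
	size_t nr_segs;
	size_t seg_idx;
	size_t seg_off;
	size_t count;		/* bytes of room remaining */
};

/* Walks the segment list, copying at most len bytes and advancing the
 * iterator; returns how much was actually copied. */
static size_t toy_copy_to_iter(const char *src, size_t len, struct toy_iter *it)
{
	size_t copied = 0;

	while (copied < len && it->count) {
		struct toy_seg *s = &it->segs[it->seg_idx];
		size_t room = s->len - it->seg_off;
		size_t n = (len - copied < room) ? len - copied : room;

		memcpy(s->base + it->seg_off, src + copied, n);
		copied += n;
		it->seg_off += n;
		it->count -= n;
		if (it->seg_off == s->len) {
			it->seg_idx++;
			it->seg_off = 0;
		}
	}
	return copied;
}

int main(void)
{
	char a[4], b[4];
	struct toy_seg segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct toy_iter it = { segs, 2, 0, 0, sizeof(a) + sizeof(b) };

	/* The request produced only 5 of the 8 requested bytes: the copy
	 * succeeds but leaves it.count != 0, the condition the reworked
	 * ffs_user_copy_worker() checks after copy_to_iter(). */
	size_t done = toy_copy_to_iter("HELLOxyz", 5, &it);

	printf("copied=%zu, room left=%zu\n", done, it.count);
	return 0;
}
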
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c index 298b46112b1a..39f49f1ad22f 100644 --- a/drivers/usb/gadget/function/f_loopback.c +++ b/drivers/usb/gadget/function/f_loopback.c | |||
@@ -289,8 +289,7 @@ static void disable_loopback(struct f_loopback *loop) | |||
289 | struct usb_composite_dev *cdev; | 289 | struct usb_composite_dev *cdev; |
290 | 290 | ||
291 | cdev = loop->function.config->cdev; | 291 | cdev = loop->function.config->cdev; |
292 | disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL, NULL, | 292 | disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL); |
293 | NULL); | ||
294 | VDBG(cdev, "%s disabled\n", loop->function.name); | 293 | VDBG(cdev, "%s disabled\n", loop->function.name); |
295 | } | 294 | } |
296 | 295 | ||
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index e3dae47baef3..3a5ae9900b1e 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c | |||
@@ -23,15 +23,6 @@ | |||
23 | #include "gadget_chips.h" | 23 | #include "gadget_chips.h" |
24 | #include "u_f.h" | 24 | #include "u_f.h" |
25 | 25 | ||
26 | #define USB_MS_TO_SS_INTERVAL(x) USB_MS_TO_HS_INTERVAL(x) | ||
27 | |||
28 | enum eptype { | ||
29 | EP_CONTROL = 0, | ||
30 | EP_BULK, | ||
31 | EP_ISOC, | ||
32 | EP_INTERRUPT, | ||
33 | }; | ||
34 | |||
35 | /* | 26 | /* |
36 | * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral | 27 | * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral |
37 | * controller drivers. | 28 | * controller drivers. |
@@ -64,8 +55,6 @@ struct f_sourcesink { | |||
64 | struct usb_ep *out_ep; | 55 | struct usb_ep *out_ep; |
65 | struct usb_ep *iso_in_ep; | 56 | struct usb_ep *iso_in_ep; |
66 | struct usb_ep *iso_out_ep; | 57 | struct usb_ep *iso_out_ep; |
67 | struct usb_ep *int_in_ep; | ||
68 | struct usb_ep *int_out_ep; | ||
69 | int cur_alt; | 58 | int cur_alt; |
70 | }; | 59 | }; |
71 | 60 | ||
@@ -79,10 +68,6 @@ static unsigned isoc_interval; | |||
79 | static unsigned isoc_maxpacket; | 68 | static unsigned isoc_maxpacket; |
80 | static unsigned isoc_mult; | 69 | static unsigned isoc_mult; |
81 | static unsigned isoc_maxburst; | 70 | static unsigned isoc_maxburst; |
82 | static unsigned int_interval; /* In ms */ | ||
83 | static unsigned int_maxpacket; | ||
84 | static unsigned int_mult; | ||
85 | static unsigned int_maxburst; | ||
86 | static unsigned buflen; | 71 | static unsigned buflen; |
87 | 72 | ||
88 | /*-------------------------------------------------------------------------*/ | 73 | /*-------------------------------------------------------------------------*/ |
@@ -107,16 +92,6 @@ static struct usb_interface_descriptor source_sink_intf_alt1 = { | |||
107 | /* .iInterface = DYNAMIC */ | 92 | /* .iInterface = DYNAMIC */ |
108 | }; | 93 | }; |
109 | 94 | ||
110 | static struct usb_interface_descriptor source_sink_intf_alt2 = { | ||
111 | .bLength = USB_DT_INTERFACE_SIZE, | ||
112 | .bDescriptorType = USB_DT_INTERFACE, | ||
113 | |||
114 | .bAlternateSetting = 2, | ||
115 | .bNumEndpoints = 2, | ||
116 | .bInterfaceClass = USB_CLASS_VENDOR_SPEC, | ||
117 | /* .iInterface = DYNAMIC */ | ||
118 | }; | ||
119 | |||
120 | /* full speed support: */ | 95 | /* full speed support: */ |
121 | 96 | ||
122 | static struct usb_endpoint_descriptor fs_source_desc = { | 97 | static struct usb_endpoint_descriptor fs_source_desc = { |
@@ -155,26 +130,6 @@ static struct usb_endpoint_descriptor fs_iso_sink_desc = { | |||
155 | .bInterval = 4, | 130 | .bInterval = 4, |
156 | }; | 131 | }; |
157 | 132 | ||
158 | static struct usb_endpoint_descriptor fs_int_source_desc = { | ||
159 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
160 | .bDescriptorType = USB_DT_ENDPOINT, | ||
161 | |||
162 | .bEndpointAddress = USB_DIR_IN, | ||
163 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
164 | .wMaxPacketSize = cpu_to_le16(64), | ||
165 | .bInterval = GZERO_INT_INTERVAL, | ||
166 | }; | ||
167 | |||
168 | static struct usb_endpoint_descriptor fs_int_sink_desc = { | ||
169 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
170 | .bDescriptorType = USB_DT_ENDPOINT, | ||
171 | |||
172 | .bEndpointAddress = USB_DIR_OUT, | ||
173 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
174 | .wMaxPacketSize = cpu_to_le16(64), | ||
175 | .bInterval = GZERO_INT_INTERVAL, | ||
176 | }; | ||
177 | |||
178 | static struct usb_descriptor_header *fs_source_sink_descs[] = { | 133 | static struct usb_descriptor_header *fs_source_sink_descs[] = { |
179 | (struct usb_descriptor_header *) &source_sink_intf_alt0, | 134 | (struct usb_descriptor_header *) &source_sink_intf_alt0, |
180 | (struct usb_descriptor_header *) &fs_sink_desc, | 135 | (struct usb_descriptor_header *) &fs_sink_desc, |
@@ -185,10 +140,6 @@ static struct usb_descriptor_header *fs_source_sink_descs[] = { | |||
185 | (struct usb_descriptor_header *) &fs_source_desc, | 140 | (struct usb_descriptor_header *) &fs_source_desc, |
186 | (struct usb_descriptor_header *) &fs_iso_sink_desc, | 141 | (struct usb_descriptor_header *) &fs_iso_sink_desc, |
187 | (struct usb_descriptor_header *) &fs_iso_source_desc, | 142 | (struct usb_descriptor_header *) &fs_iso_source_desc, |
188 | (struct usb_descriptor_header *) &source_sink_intf_alt2, | ||
189 | #define FS_ALT_IFC_2_OFFSET 8 | ||
190 | (struct usb_descriptor_header *) &fs_int_sink_desc, | ||
191 | (struct usb_descriptor_header *) &fs_int_source_desc, | ||
192 | NULL, | 143 | NULL, |
193 | }; | 144 | }; |
194 | 145 | ||
@@ -228,24 +179,6 @@ static struct usb_endpoint_descriptor hs_iso_sink_desc = { | |||
228 | .bInterval = 4, | 179 | .bInterval = 4, |
229 | }; | 180 | }; |
230 | 181 | ||
231 | static struct usb_endpoint_descriptor hs_int_source_desc = { | ||
232 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
233 | .bDescriptorType = USB_DT_ENDPOINT, | ||
234 | |||
235 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
236 | .wMaxPacketSize = cpu_to_le16(1024), | ||
237 | .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL), | ||
238 | }; | ||
239 | |||
240 | static struct usb_endpoint_descriptor hs_int_sink_desc = { | ||
241 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
242 | .bDescriptorType = USB_DT_ENDPOINT, | ||
243 | |||
244 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
245 | .wMaxPacketSize = cpu_to_le16(1024), | ||
246 | .bInterval = USB_MS_TO_HS_INTERVAL(GZERO_INT_INTERVAL), | ||
247 | }; | ||
248 | |||
249 | static struct usb_descriptor_header *hs_source_sink_descs[] = { | 182 | static struct usb_descriptor_header *hs_source_sink_descs[] = { |
250 | (struct usb_descriptor_header *) &source_sink_intf_alt0, | 183 | (struct usb_descriptor_header *) &source_sink_intf_alt0, |
251 | (struct usb_descriptor_header *) &hs_source_desc, | 184 | (struct usb_descriptor_header *) &hs_source_desc, |
@@ -256,10 +189,6 @@ static struct usb_descriptor_header *hs_source_sink_descs[] = { | |||
256 | (struct usb_descriptor_header *) &hs_sink_desc, | 189 | (struct usb_descriptor_header *) &hs_sink_desc, |
257 | (struct usb_descriptor_header *) &hs_iso_source_desc, | 190 | (struct usb_descriptor_header *) &hs_iso_source_desc, |
258 | (struct usb_descriptor_header *) &hs_iso_sink_desc, | 191 | (struct usb_descriptor_header *) &hs_iso_sink_desc, |
259 | (struct usb_descriptor_header *) &source_sink_intf_alt2, | ||
260 | #define HS_ALT_IFC_2_OFFSET 8 | ||
261 | (struct usb_descriptor_header *) &hs_int_source_desc, | ||
262 | (struct usb_descriptor_header *) &hs_int_sink_desc, | ||
263 | NULL, | 192 | NULL, |
264 | }; | 193 | }; |
265 | 194 | ||
@@ -335,42 +264,6 @@ static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = { | |||
335 | .wBytesPerInterval = cpu_to_le16(1024), | 264 | .wBytesPerInterval = cpu_to_le16(1024), |
336 | }; | 265 | }; |
337 | 266 | ||
338 | static struct usb_endpoint_descriptor ss_int_source_desc = { | ||
339 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
340 | .bDescriptorType = USB_DT_ENDPOINT, | ||
341 | |||
342 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
343 | .wMaxPacketSize = cpu_to_le16(1024), | ||
344 | .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), | ||
345 | }; | ||
346 | |||
347 | static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = { | ||
348 | .bLength = USB_DT_SS_EP_COMP_SIZE, | ||
349 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, | ||
350 | |||
351 | .bMaxBurst = 0, | ||
352 | .bmAttributes = 0, | ||
353 | .wBytesPerInterval = cpu_to_le16(1024), | ||
354 | }; | ||
355 | |||
356 | static struct usb_endpoint_descriptor ss_int_sink_desc = { | ||
357 | .bLength = USB_DT_ENDPOINT_SIZE, | ||
358 | .bDescriptorType = USB_DT_ENDPOINT, | ||
359 | |||
360 | .bmAttributes = USB_ENDPOINT_XFER_INT, | ||
361 | .wMaxPacketSize = cpu_to_le16(1024), | ||
362 | .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), | ||
363 | }; | ||
364 | |||
365 | static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = { | ||
366 | .bLength = USB_DT_SS_EP_COMP_SIZE, | ||
367 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, | ||
368 | |||
369 | .bMaxBurst = 0, | ||
370 | .bmAttributes = 0, | ||
371 | .wBytesPerInterval = cpu_to_le16(1024), | ||
372 | }; | ||
373 | |||
374 | static struct usb_descriptor_header *ss_source_sink_descs[] = { | 267 | static struct usb_descriptor_header *ss_source_sink_descs[] = { |
375 | (struct usb_descriptor_header *) &source_sink_intf_alt0, | 268 | (struct usb_descriptor_header *) &source_sink_intf_alt0, |
376 | (struct usb_descriptor_header *) &ss_source_desc, | 269 | (struct usb_descriptor_header *) &ss_source_desc, |
@@ -387,12 +280,6 @@ static struct usb_descriptor_header *ss_source_sink_descs[] = { | |||
387 | (struct usb_descriptor_header *) &ss_iso_source_comp_desc, | 280 | (struct usb_descriptor_header *) &ss_iso_source_comp_desc, |
388 | (struct usb_descriptor_header *) &ss_iso_sink_desc, | 281 | (struct usb_descriptor_header *) &ss_iso_sink_desc, |
389 | (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, | 282 | (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, |
390 | (struct usb_descriptor_header *) &source_sink_intf_alt2, | ||
391 | #define SS_ALT_IFC_2_OFFSET 14 | ||
392 | (struct usb_descriptor_header *) &ss_int_source_desc, | ||
393 | (struct usb_descriptor_header *) &ss_int_source_comp_desc, | ||
394 | (struct usb_descriptor_header *) &ss_int_sink_desc, | ||
395 | (struct usb_descriptor_header *) &ss_int_sink_comp_desc, | ||
396 | NULL, | 283 | NULL, |
397 | }; | 284 | }; |
398 | 285 | ||
@@ -414,21 +301,6 @@ static struct usb_gadget_strings *sourcesink_strings[] = { | |||
414 | }; | 301 | }; |
415 | 302 | ||
416 | /*-------------------------------------------------------------------------*/ | 303 | /*-------------------------------------------------------------------------*/ |
417 | static const char *get_ep_string(enum eptype ep_type) | ||
418 | { | ||
419 | switch (ep_type) { | ||
420 | case EP_ISOC: | ||
421 | return "ISOC-"; | ||
422 | case EP_INTERRUPT: | ||
423 | return "INTERRUPT-"; | ||
424 | case EP_CONTROL: | ||
425 | return "CTRL-"; | ||
426 | case EP_BULK: | ||
427 | return "BULK-"; | ||
428 | default: | ||
429 | return "UNKNOWN-"; | ||
430 | } | ||
431 | } | ||
432 | 304 | ||
433 | static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) | 305 | static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) |
434 | { | 306 | { |
@@ -456,8 +328,7 @@ static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) | |||
456 | 328 | ||
457 | void disable_endpoints(struct usb_composite_dev *cdev, | 329 | void disable_endpoints(struct usb_composite_dev *cdev, |
458 | struct usb_ep *in, struct usb_ep *out, | 330 | struct usb_ep *in, struct usb_ep *out, |
459 | struct usb_ep *iso_in, struct usb_ep *iso_out, | 331 | struct usb_ep *iso_in, struct usb_ep *iso_out) |
460 | struct usb_ep *int_in, struct usb_ep *int_out) | ||
461 | { | 332 | { |
462 | disable_ep(cdev, in); | 333 | disable_ep(cdev, in); |
463 | disable_ep(cdev, out); | 334 | disable_ep(cdev, out); |
@@ -465,10 +336,6 @@ void disable_endpoints(struct usb_composite_dev *cdev, | |||
465 | disable_ep(cdev, iso_in); | 336 | disable_ep(cdev, iso_in); |
466 | if (iso_out) | 337 | if (iso_out) |
467 | disable_ep(cdev, iso_out); | 338 | disable_ep(cdev, iso_out); |
468 | if (int_in) | ||
469 | disable_ep(cdev, int_in); | ||
470 | if (int_out) | ||
471 | disable_ep(cdev, int_out); | ||
472 | } | 339 | } |
473 | 340 | ||
474 | static int | 341 | static int |
@@ -485,7 +352,6 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f) | |||
485 | return id; | 352 | return id; |
486 | source_sink_intf_alt0.bInterfaceNumber = id; | 353 | source_sink_intf_alt0.bInterfaceNumber = id; |
487 | source_sink_intf_alt1.bInterfaceNumber = id; | 354 | source_sink_intf_alt1.bInterfaceNumber = id; |
488 | source_sink_intf_alt2.bInterfaceNumber = id; | ||
489 | 355 | ||
490 | /* allocate bulk endpoints */ | 356 | /* allocate bulk endpoints */ |
491 | ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); | 357 | ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); |
@@ -546,55 +412,14 @@ no_iso: | |||
546 | if (isoc_maxpacket > 1024) | 412 | if (isoc_maxpacket > 1024) |
547 | isoc_maxpacket = 1024; | 413 | isoc_maxpacket = 1024; |
548 | 414 | ||
549 | /* sanity check the interrupt module parameters */ | ||
550 | if (int_interval < 1) | ||
551 | int_interval = 1; | ||
552 | if (int_interval > 4096) | ||
553 | int_interval = 4096; | ||
554 | if (int_mult > 2) | ||
555 | int_mult = 2; | ||
556 | if (int_maxburst > 15) | ||
557 | int_maxburst = 15; | ||
558 | |||
559 | /* fill in the FS interrupt descriptors from the module parameters */ | ||
560 | fs_int_source_desc.wMaxPacketSize = int_maxpacket > 64 ? | ||
561 | 64 : int_maxpacket; | ||
562 | fs_int_source_desc.bInterval = int_interval > 255 ? | ||
563 | 255 : int_interval; | ||
564 | fs_int_sink_desc.wMaxPacketSize = int_maxpacket > 64 ? | ||
565 | 64 : int_maxpacket; | ||
566 | fs_int_sink_desc.bInterval = int_interval > 255 ? | ||
567 | 255 : int_interval; | ||
568 | |||
569 | /* allocate int endpoints */ | ||
570 | ss->int_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_source_desc); | ||
571 | if (!ss->int_in_ep) | ||
572 | goto no_int; | ||
573 | ss->int_in_ep->driver_data = cdev; /* claim */ | ||
574 | |||
575 | ss->int_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_int_sink_desc); | ||
576 | if (ss->int_out_ep) { | ||
577 | ss->int_out_ep->driver_data = cdev; /* claim */ | ||
578 | } else { | ||
579 | ss->int_in_ep->driver_data = NULL; | ||
580 | ss->int_in_ep = NULL; | ||
581 | no_int: | ||
582 | fs_source_sink_descs[FS_ALT_IFC_2_OFFSET] = NULL; | ||
583 | hs_source_sink_descs[HS_ALT_IFC_2_OFFSET] = NULL; | ||
584 | ss_source_sink_descs[SS_ALT_IFC_2_OFFSET] = NULL; | ||
585 | } | ||
586 | |||
587 | if (int_maxpacket > 1024) | ||
588 | int_maxpacket = 1024; | ||
589 | |||
590 | /* support high speed hardware */ | 415 | /* support high speed hardware */ |
591 | hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; | 416 | hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; |
592 | hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; | 417 | hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; |
593 | 418 | ||
594 | /* | 419 | /* |
595 | * Fill in the HS isoc and interrupt descriptors from the module | 420 | * Fill in the HS isoc descriptors from the module parameters. |
596 | * parameters. We assume that the user knows what they are doing and | 421 | * We assume that the user knows what they are doing and won't |
597 | * won't give parameters that their UDC doesn't support. | 422 | * give parameters that their UDC doesn't support. |
598 | */ | 423 | */ |
599 | hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; | 424 | hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; |
600 | hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; | 425 | hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; |
@@ -607,17 +432,6 @@ no_int: | |||
607 | hs_iso_sink_desc.bInterval = isoc_interval; | 432 | hs_iso_sink_desc.bInterval = isoc_interval; |
608 | hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; | 433 | hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; |
609 | 434 | ||
610 | hs_int_source_desc.wMaxPacketSize = int_maxpacket; | ||
611 | hs_int_source_desc.wMaxPacketSize |= int_mult << 11; | ||
612 | hs_int_source_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval); | ||
613 | hs_int_source_desc.bEndpointAddress = | ||
614 | fs_int_source_desc.bEndpointAddress; | ||
615 | |||
616 | hs_int_sink_desc.wMaxPacketSize = int_maxpacket; | ||
617 | hs_int_sink_desc.wMaxPacketSize |= int_mult << 11; | ||
618 | hs_int_sink_desc.bInterval = USB_MS_TO_HS_INTERVAL(int_interval); | ||
619 | hs_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress; | ||
620 | |||
621 | /* support super speed hardware */ | 435 | /* support super speed hardware */ |
622 | ss_source_desc.bEndpointAddress = | 436 | ss_source_desc.bEndpointAddress = |
623 | fs_source_desc.bEndpointAddress; | 437 | fs_source_desc.bEndpointAddress; |
@@ -625,9 +439,9 @@ no_int: | |||
625 | fs_sink_desc.bEndpointAddress; | 439 | fs_sink_desc.bEndpointAddress; |
626 | 440 | ||
627 | /* | 441 | /* |
628 | * Fill in the SS isoc and interrupt descriptors from the module | 442 | * Fill in the SS isoc descriptors from the module parameters. |
629 | * parameters. We assume that the user knows what they are doing and | 443 | * We assume that the user knows what they are doing and won't |
630 | * won't give parameters that their UDC doesn't support. | 444 | * give parameters that their UDC doesn't support. |
631 | */ | 445 | */ |
632 | ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; | 446 | ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; |
633 | ss_iso_source_desc.bInterval = isoc_interval; | 447 | ss_iso_source_desc.bInterval = isoc_interval; |
@@ -646,37 +460,17 @@ no_int: | |||
646 | isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); | 460 | isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); |
647 | ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; | 461 | ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; |
648 | 462 | ||
649 | ss_int_source_desc.wMaxPacketSize = int_maxpacket; | ||
650 | ss_int_source_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval); | ||
651 | ss_int_source_comp_desc.bmAttributes = int_mult; | ||
652 | ss_int_source_comp_desc.bMaxBurst = int_maxburst; | ||
653 | ss_int_source_comp_desc.wBytesPerInterval = | ||
654 | int_maxpacket * (int_mult + 1) * (int_maxburst + 1); | ||
655 | ss_int_source_desc.bEndpointAddress = | ||
656 | fs_int_source_desc.bEndpointAddress; | ||
657 | |||
658 | ss_int_sink_desc.wMaxPacketSize = int_maxpacket; | ||
659 | ss_int_sink_desc.bInterval = USB_MS_TO_SS_INTERVAL(int_interval); | ||
660 | ss_int_sink_comp_desc.bmAttributes = int_mult; | ||
661 | ss_int_sink_comp_desc.bMaxBurst = int_maxburst; | ||
662 | ss_int_sink_comp_desc.wBytesPerInterval = | ||
663 | int_maxpacket * (int_mult + 1) * (int_maxburst + 1); | ||
664 | ss_int_sink_desc.bEndpointAddress = fs_int_sink_desc.bEndpointAddress; | ||
665 | |||
666 | ret = usb_assign_descriptors(f, fs_source_sink_descs, | 463 | ret = usb_assign_descriptors(f, fs_source_sink_descs, |
667 | hs_source_sink_descs, ss_source_sink_descs); | 464 | hs_source_sink_descs, ss_source_sink_descs); |
668 | if (ret) | 465 | if (ret) |
669 | return ret; | 466 | return ret; |
670 | 467 | ||
671 | DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s, " | 468 | DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n", |
672 | "INT-IN/%s, INT-OUT/%s\n", | ||
673 | (gadget_is_superspeed(c->cdev->gadget) ? "super" : | 469 | (gadget_is_superspeed(c->cdev->gadget) ? "super" : |
674 | (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), | 470 | (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), |
675 | f->name, ss->in_ep->name, ss->out_ep->name, | 471 | f->name, ss->in_ep->name, ss->out_ep->name, |
676 | ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", | 472 | ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", |
677 | ss->iso_out_ep ? ss->iso_out_ep->name : "<none>", | 473 | ss->iso_out_ep ? ss->iso_out_ep->name : "<none>"); |
678 | ss->int_in_ep ? ss->int_in_ep->name : "<none>", | ||
679 | ss->int_out_ep ? ss->int_out_ep->name : "<none>"); | ||
680 | return 0; | 474 | return 0; |
681 | } | 475 | } |
682 | 476 | ||
@@ -807,15 +601,14 @@ static void source_sink_complete(struct usb_ep *ep, struct usb_request *req) | |||
807 | } | 601 | } |
808 | 602 | ||
809 | static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, | 603 | static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, |
810 | enum eptype ep_type, int speed) | 604 | bool is_iso, int speed) |
811 | { | 605 | { |
812 | struct usb_ep *ep; | 606 | struct usb_ep *ep; |
813 | struct usb_request *req; | 607 | struct usb_request *req; |
814 | int i, size, status; | 608 | int i, size, status; |
815 | 609 | ||
816 | for (i = 0; i < 8; i++) { | 610 | for (i = 0; i < 8; i++) { |
817 | switch (ep_type) { | 611 | if (is_iso) { |
818 | case EP_ISOC: | ||
819 | switch (speed) { | 612 | switch (speed) { |
820 | case USB_SPEED_SUPER: | 613 | case USB_SPEED_SUPER: |
821 | size = isoc_maxpacket * (isoc_mult + 1) * | 614 | size = isoc_maxpacket * (isoc_mult + 1) * |
@@ -831,28 +624,9 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, | |||
831 | } | 624 | } |
832 | ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; | 625 | ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; |
833 | req = ss_alloc_ep_req(ep, size); | 626 | req = ss_alloc_ep_req(ep, size); |
834 | break; | 627 | } else { |
835 | case EP_INTERRUPT: | ||
836 | switch (speed) { | ||
837 | case USB_SPEED_SUPER: | ||
838 | size = int_maxpacket * (int_mult + 1) * | ||
839 | (int_maxburst + 1); | ||
840 | break; | ||
841 | case USB_SPEED_HIGH: | ||
842 | size = int_maxpacket * (int_mult + 1); | ||
843 | break; | ||
844 | default: | ||
845 | size = int_maxpacket > 1023 ? | ||
846 | 1023 : int_maxpacket; | ||
847 | break; | ||
848 | } | ||
849 | ep = is_in ? ss->int_in_ep : ss->int_out_ep; | ||
850 | req = ss_alloc_ep_req(ep, size); | ||
851 | break; | ||
852 | default: | ||
853 | ep = is_in ? ss->in_ep : ss->out_ep; | 628 | ep = is_in ? ss->in_ep : ss->out_ep; |
854 | req = ss_alloc_ep_req(ep, 0); | 629 | req = ss_alloc_ep_req(ep, 0); |
855 | break; | ||
856 | } | 630 | } |
857 | 631 | ||
858 | if (!req) | 632 | if (!req) |
@@ -870,12 +644,12 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, | |||
870 | 644 | ||
871 | cdev = ss->function.config->cdev; | 645 | cdev = ss->function.config->cdev; |
872 | ERROR(cdev, "start %s%s %s --> %d\n", | 646 | ERROR(cdev, "start %s%s %s --> %d\n", |
873 | get_ep_string(ep_type), is_in ? "IN" : "OUT", | 647 | is_iso ? "ISO-" : "", is_in ? "IN" : "OUT", |
874 | ep->name, status); | 648 | ep->name, status); |
875 | free_ep_req(ep, req); | 649 | free_ep_req(ep, req); |
876 | } | 650 | } |
877 | 651 | ||
878 | if (!(ep_type == EP_ISOC)) | 652 | if (!is_iso) |
879 | break; | 653 | break; |
880 | } | 654 | } |
881 | 655 | ||
@@ -888,7 +662,7 @@ static void disable_source_sink(struct f_sourcesink *ss) | |||
888 | 662 | ||
889 | cdev = ss->function.config->cdev; | 663 | cdev = ss->function.config->cdev; |
890 | disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, | 664 | disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, |
891 | ss->iso_out_ep, ss->int_in_ep, ss->int_out_ep); | 665 | ss->iso_out_ep); |
892 | VDBG(cdev, "%s disabled\n", ss->function.name); | 666 | VDBG(cdev, "%s disabled\n", ss->function.name); |
893 | } | 667 | } |
894 | 668 | ||
@@ -900,62 +674,6 @@ enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss, | |||
900 | int speed = cdev->gadget->speed; | 674 | int speed = cdev->gadget->speed; |
901 | struct usb_ep *ep; | 675 | struct usb_ep *ep; |
902 | 676 | ||
903 | if (alt == 2) { | ||
904 | /* Configure for periodic interrupt endpoint */ | ||
905 | ep = ss->int_in_ep; | ||
906 | if (ep) { | ||
907 | result = config_ep_by_speed(cdev->gadget, | ||
908 | &(ss->function), ep); | ||
909 | if (result) | ||
910 | return result; | ||
911 | |||
912 | result = usb_ep_enable(ep); | ||
913 | if (result < 0) | ||
914 | return result; | ||
915 | |||
916 | ep->driver_data = ss; | ||
917 | result = source_sink_start_ep(ss, true, EP_INTERRUPT, | ||
918 | speed); | ||
919 | if (result < 0) { | ||
920 | fail1: | ||
921 | ep = ss->int_in_ep; | ||
922 | if (ep) { | ||
923 | usb_ep_disable(ep); | ||
924 | ep->driver_data = NULL; | ||
925 | } | ||
926 | return result; | ||
927 | } | ||
928 | } | ||
929 | |||
930 | /* | ||
931 | * one interrupt endpoint reads (sinks) anything OUT (from the | ||
932 | * host) | ||
933 | */ | ||
934 | ep = ss->int_out_ep; | ||
935 | if (ep) { | ||
936 | result = config_ep_by_speed(cdev->gadget, | ||
937 | &(ss->function), ep); | ||
938 | if (result) | ||
939 | goto fail1; | ||
940 | |||
941 | result = usb_ep_enable(ep); | ||
942 | if (result < 0) | ||
943 | goto fail1; | ||
944 | |||
945 | ep->driver_data = ss; | ||
946 | result = source_sink_start_ep(ss, false, EP_INTERRUPT, | ||
947 | speed); | ||
948 | if (result < 0) { | ||
949 | ep = ss->int_out_ep; | ||
950 | usb_ep_disable(ep); | ||
951 | ep->driver_data = NULL; | ||
952 | goto fail1; | ||
953 | } | ||
954 | } | ||
955 | |||
956 | goto out; | ||
957 | } | ||
958 | |||
959 | /* one bulk endpoint writes (sources) zeroes IN (to the host) */ | 677 | /* one bulk endpoint writes (sources) zeroes IN (to the host) */ |
960 | ep = ss->in_ep; | 678 | ep = ss->in_ep; |
961 | result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); | 679 | result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); |
@@ -966,7 +684,7 @@ fail1: | |||
966 | return result; | 684 | return result; |
967 | ep->driver_data = ss; | 685 | ep->driver_data = ss; |
968 | 686 | ||
969 | result = source_sink_start_ep(ss, true, EP_BULK, speed); | 687 | result = source_sink_start_ep(ss, true, false, speed); |
970 | if (result < 0) { | 688 | if (result < 0) { |
971 | fail: | 689 | fail: |
972 | ep = ss->in_ep; | 690 | ep = ss->in_ep; |
@@ -985,7 +703,7 @@ fail: | |||
985 | goto fail; | 703 | goto fail; |
986 | ep->driver_data = ss; | 704 | ep->driver_data = ss; |
987 | 705 | ||
988 | result = source_sink_start_ep(ss, false, EP_BULK, speed); | 706 | result = source_sink_start_ep(ss, false, false, speed); |
989 | if (result < 0) { | 707 | if (result < 0) { |
990 | fail2: | 708 | fail2: |
991 | ep = ss->out_ep; | 709 | ep = ss->out_ep; |
@@ -1008,7 +726,7 @@ fail2: | |||
1008 | goto fail2; | 726 | goto fail2; |
1009 | ep->driver_data = ss; | 727 | ep->driver_data = ss; |
1010 | 728 | ||
1011 | result = source_sink_start_ep(ss, true, EP_ISOC, speed); | 729 | result = source_sink_start_ep(ss, true, true, speed); |
1012 | if (result < 0) { | 730 | if (result < 0) { |
1013 | fail3: | 731 | fail3: |
1014 | ep = ss->iso_in_ep; | 732 | ep = ss->iso_in_ep; |
@@ -1031,14 +749,13 @@ fail3: | |||
1031 | goto fail3; | 749 | goto fail3; |
1032 | ep->driver_data = ss; | 750 | ep->driver_data = ss; |
1033 | 751 | ||
1034 | result = source_sink_start_ep(ss, false, EP_ISOC, speed); | 752 | result = source_sink_start_ep(ss, false, true, speed); |
1035 | if (result < 0) { | 753 | if (result < 0) { |
1036 | usb_ep_disable(ep); | 754 | usb_ep_disable(ep); |
1037 | ep->driver_data = NULL; | 755 | ep->driver_data = NULL; |
1038 | goto fail3; | 756 | goto fail3; |
1039 | } | 757 | } |
1040 | } | 758 | } |
1041 | |||
1042 | out: | 759 | out: |
1043 | ss->cur_alt = alt; | 760 | ss->cur_alt = alt; |
1044 | 761 | ||
@@ -1054,8 +771,6 @@ static int sourcesink_set_alt(struct usb_function *f, | |||
1054 | 771 | ||
1055 | if (ss->in_ep->driver_data) | 772 | if (ss->in_ep->driver_data) |
1056 | disable_source_sink(ss); | 773 | disable_source_sink(ss); |
1057 | else if (alt == 2 && ss->int_in_ep->driver_data) | ||
1058 | disable_source_sink(ss); | ||
1059 | return enable_source_sink(cdev, ss, alt); | 774 | return enable_source_sink(cdev, ss, alt); |
1060 | } | 775 | } |
1061 | 776 | ||
@@ -1168,10 +883,6 @@ static struct usb_function *source_sink_alloc_func( | |||
1168 | isoc_maxpacket = ss_opts->isoc_maxpacket; | 883 | isoc_maxpacket = ss_opts->isoc_maxpacket; |
1169 | isoc_mult = ss_opts->isoc_mult; | 884 | isoc_mult = ss_opts->isoc_mult; |
1170 | isoc_maxburst = ss_opts->isoc_maxburst; | 885 | isoc_maxburst = ss_opts->isoc_maxburst; |
1171 | int_interval = ss_opts->int_interval; | ||
1172 | int_maxpacket = ss_opts->int_maxpacket; | ||
1173 | int_mult = ss_opts->int_mult; | ||
1174 | int_maxburst = ss_opts->int_maxburst; | ||
1175 | buflen = ss_opts->bulk_buflen; | 886 | buflen = ss_opts->bulk_buflen; |
1176 | 887 | ||
1177 | ss->function.name = "source/sink"; | 888 | ss->function.name = "source/sink"; |
@@ -1468,182 +1179,6 @@ static struct f_ss_opts_attribute f_ss_opts_bulk_buflen = | |||
1468 | f_ss_opts_bulk_buflen_show, | 1179 | f_ss_opts_bulk_buflen_show, |
1469 | f_ss_opts_bulk_buflen_store); | 1180 | f_ss_opts_bulk_buflen_store); |
1470 | 1181 | ||
1471 | static ssize_t f_ss_opts_int_interval_show(struct f_ss_opts *opts, char *page) | ||
1472 | { | ||
1473 | int result; | ||
1474 | |||
1475 | mutex_lock(&opts->lock); | ||
1476 | result = sprintf(page, "%u", opts->int_interval); | ||
1477 | mutex_unlock(&opts->lock); | ||
1478 | |||
1479 | return result; | ||
1480 | } | ||
1481 | |||
1482 | static ssize_t f_ss_opts_int_interval_store(struct f_ss_opts *opts, | ||
1483 | const char *page, size_t len) | ||
1484 | { | ||
1485 | int ret; | ||
1486 | u32 num; | ||
1487 | |||
1488 | mutex_lock(&opts->lock); | ||
1489 | if (opts->refcnt) { | ||
1490 | ret = -EBUSY; | ||
1491 | goto end; | ||
1492 | } | ||
1493 | |||
1494 | ret = kstrtou32(page, 0, &num); | ||
1495 | if (ret) | ||
1496 | goto end; | ||
1497 | |||
1498 | if (num > 4096) { | ||
1499 | ret = -EINVAL; | ||
1500 | goto end; | ||
1501 | } | ||
1502 | |||
1503 | opts->int_interval = num; | ||
1504 | ret = len; | ||
1505 | end: | ||
1506 | mutex_unlock(&opts->lock); | ||
1507 | return ret; | ||
1508 | } | ||
1509 | |||
1510 | static struct f_ss_opts_attribute f_ss_opts_int_interval = | ||
1511 | __CONFIGFS_ATTR(int_interval, S_IRUGO | S_IWUSR, | ||
1512 | f_ss_opts_int_interval_show, | ||
1513 | f_ss_opts_int_interval_store); | ||
1514 | |||
1515 | static ssize_t f_ss_opts_int_maxpacket_show(struct f_ss_opts *opts, char *page) | ||
1516 | { | ||
1517 | int result; | ||
1518 | |||
1519 | mutex_lock(&opts->lock); | ||
1520 | result = sprintf(page, "%u", opts->int_maxpacket); | ||
1521 | mutex_unlock(&opts->lock); | ||
1522 | |||
1523 | return result; | ||
1524 | } | ||
1525 | |||
1526 | static ssize_t f_ss_opts_int_maxpacket_store(struct f_ss_opts *opts, | ||
1527 | const char *page, size_t len) | ||
1528 | { | ||
1529 | int ret; | ||
1530 | u16 num; | ||
1531 | |||
1532 | mutex_lock(&opts->lock); | ||
1533 | if (opts->refcnt) { | ||
1534 | ret = -EBUSY; | ||
1535 | goto end; | ||
1536 | } | ||
1537 | |||
1538 | ret = kstrtou16(page, 0, &num); | ||
1539 | if (ret) | ||
1540 | goto end; | ||
1541 | |||
1542 | if (num > 1024) { | ||
1543 | ret = -EINVAL; | ||
1544 | goto end; | ||
1545 | } | ||
1546 | |||
1547 | opts->int_maxpacket = num; | ||
1548 | ret = len; | ||
1549 | end: | ||
1550 | mutex_unlock(&opts->lock); | ||
1551 | return ret; | ||
1552 | } | ||
1553 | |||
1554 | static struct f_ss_opts_attribute f_ss_opts_int_maxpacket = | ||
1555 | __CONFIGFS_ATTR(int_maxpacket, S_IRUGO | S_IWUSR, | ||
1556 | f_ss_opts_int_maxpacket_show, | ||
1557 | f_ss_opts_int_maxpacket_store); | ||
1558 | |||
1559 | static ssize_t f_ss_opts_int_mult_show(struct f_ss_opts *opts, char *page) | ||
1560 | { | ||
1561 | int result; | ||
1562 | |||
1563 | mutex_lock(&opts->lock); | ||
1564 | result = sprintf(page, "%u", opts->int_mult); | ||
1565 | mutex_unlock(&opts->lock); | ||
1566 | |||
1567 | return result; | ||
1568 | } | ||
1569 | |||
1570 | static ssize_t f_ss_opts_int_mult_store(struct f_ss_opts *opts, | ||
1571 | const char *page, size_t len) | ||
1572 | { | ||
1573 | int ret; | ||
1574 | u8 num; | ||
1575 | |||
1576 | mutex_lock(&opts->lock); | ||
1577 | if (opts->refcnt) { | ||
1578 | ret = -EBUSY; | ||
1579 | goto end; | ||
1580 | } | ||
1581 | |||
1582 | ret = kstrtou8(page, 0, &num); | ||
1583 | if (ret) | ||
1584 | goto end; | ||
1585 | |||
1586 | if (num > 2) { | ||
1587 | ret = -EINVAL; | ||
1588 | goto end; | ||
1589 | } | ||
1590 | |||
1591 | opts->int_mult = num; | ||
1592 | ret = len; | ||
1593 | end: | ||
1594 | mutex_unlock(&opts->lock); | ||
1595 | return ret; | ||
1596 | } | ||
1597 | |||
1598 | static struct f_ss_opts_attribute f_ss_opts_int_mult = | ||
1599 | __CONFIGFS_ATTR(int_mult, S_IRUGO | S_IWUSR, | ||
1600 | f_ss_opts_int_mult_show, | ||
1601 | f_ss_opts_int_mult_store); | ||
1602 | |||
1603 | static ssize_t f_ss_opts_int_maxburst_show(struct f_ss_opts *opts, char *page) | ||
1604 | { | ||
1605 | int result; | ||
1606 | |||
1607 | mutex_lock(&opts->lock); | ||
1608 | result = sprintf(page, "%u", opts->int_maxburst); | ||
1609 | mutex_unlock(&opts->lock); | ||
1610 | |||
1611 | return result; | ||
1612 | } | ||
1613 | |||
1614 | static ssize_t f_ss_opts_int_maxburst_store(struct f_ss_opts *opts, | ||
1615 | const char *page, size_t len) | ||
1616 | { | ||
1617 | int ret; | ||
1618 | u8 num; | ||
1619 | |||
1620 | mutex_lock(&opts->lock); | ||
1621 | if (opts->refcnt) { | ||
1622 | ret = -EBUSY; | ||
1623 | goto end; | ||
1624 | } | ||
1625 | |||
1626 | ret = kstrtou8(page, 0, &num); | ||
1627 | if (ret) | ||
1628 | goto end; | ||
1629 | |||
1630 | if (num > 15) { | ||
1631 | ret = -EINVAL; | ||
1632 | goto end; | ||
1633 | } | ||
1634 | |||
1635 | opts->int_maxburst = num; | ||
1636 | ret = len; | ||
1637 | end: | ||
1638 | mutex_unlock(&opts->lock); | ||
1639 | return ret; | ||
1640 | } | ||
1641 | |||
1642 | static struct f_ss_opts_attribute f_ss_opts_int_maxburst = | ||
1643 | __CONFIGFS_ATTR(int_maxburst, S_IRUGO | S_IWUSR, | ||
1644 | f_ss_opts_int_maxburst_show, | ||
1645 | f_ss_opts_int_maxburst_store); | ||
1646 | |||
1647 | static struct configfs_attribute *ss_attrs[] = { | 1182 | static struct configfs_attribute *ss_attrs[] = { |
1648 | &f_ss_opts_pattern.attr, | 1183 | &f_ss_opts_pattern.attr, |
1649 | &f_ss_opts_isoc_interval.attr, | 1184 | &f_ss_opts_isoc_interval.attr, |
@@ -1651,10 +1186,6 @@ static struct configfs_attribute *ss_attrs[] = { | |||
1651 | &f_ss_opts_isoc_mult.attr, | 1186 | &f_ss_opts_isoc_mult.attr, |
1652 | &f_ss_opts_isoc_maxburst.attr, | 1187 | &f_ss_opts_isoc_maxburst.attr, |
1653 | &f_ss_opts_bulk_buflen.attr, | 1188 | &f_ss_opts_bulk_buflen.attr, |
1654 | &f_ss_opts_int_interval.attr, | ||
1655 | &f_ss_opts_int_maxpacket.attr, | ||
1656 | &f_ss_opts_int_mult.attr, | ||
1657 | &f_ss_opts_int_maxburst.attr, | ||
1658 | NULL, | 1189 | NULL, |
1659 | }; | 1190 | }; |
1660 | 1191 | ||
@@ -1684,8 +1215,6 @@ static struct usb_function_instance *source_sink_alloc_inst(void) | |||
1684 | ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; | 1215 | ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; |
1685 | ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; | 1216 | ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; |
1686 | ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; | 1217 | ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; |
1687 | ss_opts->int_interval = GZERO_INT_INTERVAL; | ||
1688 | ss_opts->int_maxpacket = GZERO_INT_MAXPACKET; | ||
1689 | 1218 | ||
1690 | config_group_init_type_name(&ss_opts->func_inst.group, "", | 1219 | config_group_init_type_name(&ss_opts->func_inst.group, "", |
1691 | &ss_func_type); | 1220 | &ss_func_type); |
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h index 2ce28b9d97cc..15f180904f8a 100644 --- a/drivers/usb/gadget/function/g_zero.h +++ b/drivers/usb/gadget/function/g_zero.h | |||
@@ -10,8 +10,6 @@ | |||
10 | #define GZERO_QLEN 32 | 10 | #define GZERO_QLEN 32 |
11 | #define GZERO_ISOC_INTERVAL 4 | 11 | #define GZERO_ISOC_INTERVAL 4 |
12 | #define GZERO_ISOC_MAXPACKET 1024 | 12 | #define GZERO_ISOC_MAXPACKET 1024 |
13 | #define GZERO_INT_INTERVAL 1 /* Default interrupt interval = 1 ms */ | ||
14 | #define GZERO_INT_MAXPACKET 1024 | ||
15 | 13 | ||
16 | struct usb_zero_options { | 14 | struct usb_zero_options { |
17 | unsigned pattern; | 15 | unsigned pattern; |
@@ -19,10 +17,6 @@ struct usb_zero_options { | |||
19 | unsigned isoc_maxpacket; | 17 | unsigned isoc_maxpacket; |
20 | unsigned isoc_mult; | 18 | unsigned isoc_mult; |
21 | unsigned isoc_maxburst; | 19 | unsigned isoc_maxburst; |
22 | unsigned int_interval; /* In ms */ | ||
23 | unsigned int_maxpacket; | ||
24 | unsigned int_mult; | ||
25 | unsigned int_maxburst; | ||
26 | unsigned bulk_buflen; | 20 | unsigned bulk_buflen; |
27 | unsigned qlen; | 21 | unsigned qlen; |
28 | }; | 22 | }; |
@@ -34,10 +28,6 @@ struct f_ss_opts { | |||
34 | unsigned isoc_maxpacket; | 28 | unsigned isoc_maxpacket; |
35 | unsigned isoc_mult; | 29 | unsigned isoc_mult; |
36 | unsigned isoc_maxburst; | 30 | unsigned isoc_maxburst; |
37 | unsigned int_interval; /* In ms */ | ||
38 | unsigned int_maxpacket; | ||
39 | unsigned int_mult; | ||
40 | unsigned int_maxburst; | ||
41 | unsigned bulk_buflen; | 31 | unsigned bulk_buflen; |
42 | 32 | ||
43 | /* | 33 | /* |
@@ -72,7 +62,6 @@ int lb_modinit(void); | |||
72 | void free_ep_req(struct usb_ep *ep, struct usb_request *req); | 62 | void free_ep_req(struct usb_ep *ep, struct usb_request *req); |
73 | void disable_endpoints(struct usb_composite_dev *cdev, | 63 | void disable_endpoints(struct usb_composite_dev *cdev, |
74 | struct usb_ep *in, struct usb_ep *out, | 64 | struct usb_ep *in, struct usb_ep *out, |
75 | struct usb_ep *iso_in, struct usb_ep *iso_out, | 65 | struct usb_ep *iso_in, struct usb_ep *iso_out); |
76 | struct usb_ep *int_in, struct usb_ep *int_out); | ||
77 | 66 | ||
78 | #endif /* __G_ZERO_H */ | 67 | #endif /* __G_ZERO_H */ |
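With the interrupt-endpoint plumbing gone from g_zero.h, the source/sink function keeps only the pattern, isoc_* and bulk_buflen/qlen knobs. A minimal standalone sketch of the trimmed option set and its remaining defaults follows; it is illustrative only, and GZERO_BULK_BUFLEN's value is an assumption, since it sits outside the hunks shown above.

    #include <stdio.h>

    /* Mirrors the trimmed struct usb_zero_options from g_zero.h above. */
    struct usb_zero_options {
            unsigned pattern;
            unsigned isoc_interval;
            unsigned isoc_maxpacket;
            unsigned isoc_mult;
            unsigned isoc_maxburst;
            unsigned bulk_buflen;
            unsigned qlen;
    };

    #define GZERO_QLEN              32
    #define GZERO_ISOC_INTERVAL     4
    #define GZERO_ISOC_MAXPACKET    1024
    #define GZERO_BULK_BUFLEN       4096    /* assumed default; not shown in this hunk */

    static const struct usb_zero_options gzero_defaults = {
            .isoc_interval  = GZERO_ISOC_INTERVAL,
            .isoc_maxpacket = GZERO_ISOC_MAXPACKET,
            .bulk_buflen    = GZERO_BULK_BUFLEN,
            .qlen           = GZERO_QLEN,
    };

    int main(void)
    {
            printf("isoc: interval %u, maxpacket %u; bulk: buflen %u, qlen %u\n",
                   gzero_defaults.isoc_interval, gzero_defaults.isoc_maxpacket,
                   gzero_defaults.bulk_buflen, gzero_defaults.qlen);
            return 0;
    }
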
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index db49ec4c748e..200f9a584064 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC); | |||
74 | MODULE_AUTHOR ("David Brownell"); | 74 | MODULE_AUTHOR ("David Brownell"); |
75 | MODULE_LICENSE ("GPL"); | 75 | MODULE_LICENSE ("GPL"); |
76 | 76 | ||
77 | static int ep_open(struct inode *, struct file *); | ||
78 | |||
77 | 79 | ||
78 | /*----------------------------------------------------------------------*/ | 80 | /*----------------------------------------------------------------------*/ |
79 | 81 | ||
@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req) | |||
283 | * still need dev->lock to use epdata->ep. | 285 | * still need dev->lock to use epdata->ep. |
284 | */ | 286 | */ |
285 | static int | 287 | static int |
286 | get_ready_ep (unsigned f_flags, struct ep_data *epdata) | 288 | get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write) |
287 | { | 289 | { |
288 | int val; | 290 | int val; |
289 | 291 | ||
290 | if (f_flags & O_NONBLOCK) { | 292 | if (f_flags & O_NONBLOCK) { |
291 | if (!mutex_trylock(&epdata->lock)) | 293 | if (!mutex_trylock(&epdata->lock)) |
292 | goto nonblock; | 294 | goto nonblock; |
293 | if (epdata->state != STATE_EP_ENABLED) { | 295 | if (epdata->state != STATE_EP_ENABLED && |
296 | (!is_write || epdata->state != STATE_EP_READY)) { | ||
294 | mutex_unlock(&epdata->lock); | 297 | mutex_unlock(&epdata->lock); |
295 | nonblock: | 298 | nonblock: |
296 | val = -EAGAIN; | 299 | val = -EAGAIN; |
@@ -305,18 +308,20 @@ nonblock: | |||
305 | 308 | ||
306 | switch (epdata->state) { | 309 | switch (epdata->state) { |
307 | case STATE_EP_ENABLED: | 310 | case STATE_EP_ENABLED: |
311 | return 0; | ||
312 | case STATE_EP_READY: /* not configured yet */ | ||
313 | if (is_write) | ||
314 | return 0; | ||
315 | // FALLTHRU | ||
316 | case STATE_EP_UNBOUND: /* clean disconnect */ | ||
308 | break; | 317 | break; |
309 | // case STATE_EP_DISABLED: /* "can't happen" */ | 318 | // case STATE_EP_DISABLED: /* "can't happen" */ |
310 | // case STATE_EP_READY: /* "can't happen" */ | ||
311 | default: /* error! */ | 319 | default: /* error! */ |
312 | pr_debug ("%s: ep %p not available, state %d\n", | 320 | pr_debug ("%s: ep %p not available, state %d\n", |
313 | shortname, epdata, epdata->state); | 321 | shortname, epdata, epdata->state); |
314 | // FALLTHROUGH | ||
315 | case STATE_EP_UNBOUND: /* clean disconnect */ | ||
316 | val = -ENODEV; | ||
317 | mutex_unlock(&epdata->lock); | ||
318 | } | 322 | } |
319 | return val; | 323 | mutex_unlock(&epdata->lock); |
324 | return -ENODEV; | ||
320 | } | 325 | } |
321 | 326 | ||
322 | static ssize_t | 327 | static ssize_t |
@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) | |||
363 | return value; | 368 | return value; |
364 | } | 369 | } |
365 | 370 | ||
366 | |||
367 | /* handle a synchronous OUT bulk/intr/iso transfer */ | ||
368 | static ssize_t | ||
369 | ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | ||
370 | { | ||
371 | struct ep_data *data = fd->private_data; | ||
372 | void *kbuf; | ||
373 | ssize_t value; | ||
374 | |||
375 | if ((value = get_ready_ep (fd->f_flags, data)) < 0) | ||
376 | return value; | ||
377 | |||
378 | /* halt any endpoint by doing a "wrong direction" i/o call */ | ||
379 | if (usb_endpoint_dir_in(&data->desc)) { | ||
380 | if (usb_endpoint_xfer_isoc(&data->desc)) { | ||
381 | mutex_unlock(&data->lock); | ||
382 | return -EINVAL; | ||
383 | } | ||
384 | DBG (data->dev, "%s halt\n", data->name); | ||
385 | spin_lock_irq (&data->dev->lock); | ||
386 | if (likely (data->ep != NULL)) | ||
387 | usb_ep_set_halt (data->ep); | ||
388 | spin_unlock_irq (&data->dev->lock); | ||
389 | mutex_unlock(&data->lock); | ||
390 | return -EBADMSG; | ||
391 | } | ||
392 | |||
393 | /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */ | ||
394 | |||
395 | value = -ENOMEM; | ||
396 | kbuf = kmalloc (len, GFP_KERNEL); | ||
397 | if (unlikely (!kbuf)) | ||
398 | goto free1; | ||
399 | |||
400 | value = ep_io (data, kbuf, len); | ||
401 | VDEBUG (data->dev, "%s read %zu OUT, status %d\n", | ||
402 | data->name, len, (int) value); | ||
403 | if (value >= 0 && copy_to_user (buf, kbuf, value)) | ||
404 | value = -EFAULT; | ||
405 | |||
406 | free1: | ||
407 | mutex_unlock(&data->lock); | ||
408 | kfree (kbuf); | ||
409 | return value; | ||
410 | } | ||
411 | |||
412 | /* handle a synchronous IN bulk/intr/iso transfer */ | ||
413 | static ssize_t | ||
414 | ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | ||
415 | { | ||
416 | struct ep_data *data = fd->private_data; | ||
417 | void *kbuf; | ||
418 | ssize_t value; | ||
419 | |||
420 | if ((value = get_ready_ep (fd->f_flags, data)) < 0) | ||
421 | return value; | ||
422 | |||
423 | /* halt any endpoint by doing a "wrong direction" i/o call */ | ||
424 | if (!usb_endpoint_dir_in(&data->desc)) { | ||
425 | if (usb_endpoint_xfer_isoc(&data->desc)) { | ||
426 | mutex_unlock(&data->lock); | ||
427 | return -EINVAL; | ||
428 | } | ||
429 | DBG (data->dev, "%s halt\n", data->name); | ||
430 | spin_lock_irq (&data->dev->lock); | ||
431 | if (likely (data->ep != NULL)) | ||
432 | usb_ep_set_halt (data->ep); | ||
433 | spin_unlock_irq (&data->dev->lock); | ||
434 | mutex_unlock(&data->lock); | ||
435 | return -EBADMSG; | ||
436 | } | ||
437 | |||
438 | /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */ | ||
439 | |||
440 | value = -ENOMEM; | ||
441 | kbuf = memdup_user(buf, len); | ||
442 | if (IS_ERR(kbuf)) { | ||
443 | value = PTR_ERR(kbuf); | ||
444 | kbuf = NULL; | ||
445 | goto free1; | ||
446 | } | ||
447 | |||
448 | value = ep_io (data, kbuf, len); | ||
449 | VDEBUG (data->dev, "%s write %zu IN, status %d\n", | ||
450 | data->name, len, (int) value); | ||
451 | free1: | ||
452 | mutex_unlock(&data->lock); | ||
453 | kfree (kbuf); | ||
454 | return value; | ||
455 | } | ||
456 | |||
457 | static int | 371 | static int |
458 | ep_release (struct inode *inode, struct file *fd) | 372 | ep_release (struct inode *inode, struct file *fd) |
459 | { | 373 | { |
@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value) | |||
481 | struct ep_data *data = fd->private_data; | 395 | struct ep_data *data = fd->private_data; |
482 | int status; | 396 | int status; |
483 | 397 | ||
484 | if ((status = get_ready_ep (fd->f_flags, data)) < 0) | 398 | if ((status = get_ready_ep (fd->f_flags, data, false)) < 0) |
485 | return status; | 399 | return status; |
486 | 400 | ||
487 | spin_lock_irq (&data->dev->lock); | 401 | spin_lock_irq (&data->dev->lock); |
@@ -517,8 +431,8 @@ struct kiocb_priv { | |||
517 | struct mm_struct *mm; | 431 | struct mm_struct *mm; |
518 | struct work_struct work; | 432 | struct work_struct work; |
519 | void *buf; | 433 | void *buf; |
520 | const struct iovec *iv; | 434 | struct iov_iter to; |
521 | unsigned long nr_segs; | 435 | const void *to_free; |
522 | unsigned actual; | 436 | unsigned actual; |
523 | }; | 437 | }; |
524 | 438 | ||
@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb) | |||
541 | return value; | 455 | return value; |
542 | } | 456 | } |
543 | 457 | ||
544 | static ssize_t ep_copy_to_user(struct kiocb_priv *priv) | ||
545 | { | ||
546 | ssize_t len, total; | ||
547 | void *to_copy; | ||
548 | int i; | ||
549 | |||
550 | /* copy stuff into user buffers */ | ||
551 | total = priv->actual; | ||
552 | len = 0; | ||
553 | to_copy = priv->buf; | ||
554 | for (i=0; i < priv->nr_segs; i++) { | ||
555 | ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total); | ||
556 | |||
557 | if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) { | ||
558 | if (len == 0) | ||
559 | len = -EFAULT; | ||
560 | break; | ||
561 | } | ||
562 | |||
563 | total -= this; | ||
564 | len += this; | ||
565 | to_copy += this; | ||
566 | if (total == 0) | ||
567 | break; | ||
568 | } | ||
569 | |||
570 | return len; | ||
571 | } | ||
572 | |||
573 | static void ep_user_copy_worker(struct work_struct *work) | 458 | static void ep_user_copy_worker(struct work_struct *work) |
574 | { | 459 | { |
575 | struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); | 460 | struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); |
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work) | |||
578 | size_t ret; | 463 | size_t ret; |
579 | 464 | ||
580 | use_mm(mm); | 465 | use_mm(mm); |
581 | ret = ep_copy_to_user(priv); | 466 | ret = copy_to_iter(priv->buf, priv->actual, &priv->to); |
582 | unuse_mm(mm); | 467 | unuse_mm(mm); |
468 | if (!ret) | ||
469 | ret = -EFAULT; | ||
583 | 470 | ||
584 | /* completing the iocb can drop the ctx and mm, don't touch mm after */ | 471 | /* completing the iocb can drop the ctx and mm, don't touch mm after */ |
585 | aio_complete(iocb, ret, ret); | 472 | aio_complete(iocb, ret, ret); |
586 | 473 | ||
587 | kfree(priv->buf); | 474 | kfree(priv->buf); |
475 | kfree(priv->to_free); | ||
588 | kfree(priv); | 476 | kfree(priv); |
589 | } | 477 | } |
590 | 478 | ||
@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
603 | * don't need to copy anything to userspace, so we can | 491 | * don't need to copy anything to userspace, so we can |
604 | * complete the aio request immediately. | 492 | * complete the aio request immediately. |
605 | */ | 493 | */ |
606 | if (priv->iv == NULL || unlikely(req->actual == 0)) { | 494 | if (priv->to_free == NULL || unlikely(req->actual == 0)) { |
607 | kfree(req->buf); | 495 | kfree(req->buf); |
496 | kfree(priv->to_free); | ||
608 | kfree(priv); | 497 | kfree(priv); |
609 | iocb->private = NULL; | 498 | iocb->private = NULL; |
610 | /* aio_complete() reports bytes-transferred _and_ faults */ | 499 | /* aio_complete() reports bytes-transferred _and_ faults */ |
@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
618 | 507 | ||
619 | priv->buf = req->buf; | 508 | priv->buf = req->buf; |
620 | priv->actual = req->actual; | 509 | priv->actual = req->actual; |
510 | INIT_WORK(&priv->work, ep_user_copy_worker); | ||
621 | schedule_work(&priv->work); | 511 | schedule_work(&priv->work); |
622 | } | 512 | } |
623 | spin_unlock(&epdata->dev->lock); | 513 | spin_unlock(&epdata->dev->lock); |
@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
626 | put_ep(epdata); | 516 | put_ep(epdata); |
627 | } | 517 | } |
628 | 518 | ||
629 | static ssize_t | 519 | static ssize_t ep_aio(struct kiocb *iocb, |
630 | ep_aio_rwtail( | 520 | struct kiocb_priv *priv, |
631 | struct kiocb *iocb, | 521 | struct ep_data *epdata, |
632 | char *buf, | 522 | char *buf, |
633 | size_t len, | 523 | size_t len) |
634 | struct ep_data *epdata, | ||
635 | const struct iovec *iv, | ||
636 | unsigned long nr_segs | ||
637 | ) | ||
638 | { | 524 | { |
639 | struct kiocb_priv *priv; | 525 | struct usb_request *req; |
640 | struct usb_request *req; | 526 | ssize_t value; |
641 | ssize_t value; | ||
642 | 527 | ||
643 | priv = kmalloc(sizeof *priv, GFP_KERNEL); | ||
644 | if (!priv) { | ||
645 | value = -ENOMEM; | ||
646 | fail: | ||
647 | kfree(buf); | ||
648 | return value; | ||
649 | } | ||
650 | iocb->private = priv; | 528 | iocb->private = priv; |
651 | priv->iocb = iocb; | 529 | priv->iocb = iocb; |
652 | priv->iv = iv; | ||
653 | priv->nr_segs = nr_segs; | ||
654 | INIT_WORK(&priv->work, ep_user_copy_worker); | ||
655 | |||
656 | value = get_ready_ep(iocb->ki_filp->f_flags, epdata); | ||
657 | if (unlikely(value < 0)) { | ||
658 | kfree(priv); | ||
659 | goto fail; | ||
660 | } | ||
661 | 530 | ||
662 | kiocb_set_cancel_fn(iocb, ep_aio_cancel); | 531 | kiocb_set_cancel_fn(iocb, ep_aio_cancel); |
663 | get_ep(epdata); | 532 | get_ep(epdata); |
@@ -669,75 +538,154 @@ fail: | |||
669 | * allocate or submit those if the host disconnected. | 538 | * allocate or submit those if the host disconnected. |
670 | */ | 539 | */ |
671 | spin_lock_irq(&epdata->dev->lock); | 540 | spin_lock_irq(&epdata->dev->lock); |
672 | if (likely(epdata->ep)) { | 541 | value = -ENODEV; |
673 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); | 542 | if (unlikely(epdata->ep)) |
674 | if (likely(req)) { | 543 | goto fail; |
675 | priv->req = req; | ||
676 | req->buf = buf; | ||
677 | req->length = len; | ||
678 | req->complete = ep_aio_complete; | ||
679 | req->context = iocb; | ||
680 | value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); | ||
681 | if (unlikely(0 != value)) | ||
682 | usb_ep_free_request(epdata->ep, req); | ||
683 | } else | ||
684 | value = -EAGAIN; | ||
685 | } else | ||
686 | value = -ENODEV; | ||
687 | spin_unlock_irq(&epdata->dev->lock); | ||
688 | 544 | ||
689 | mutex_unlock(&epdata->lock); | 545 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); |
546 | value = -ENOMEM; | ||
547 | if (unlikely(!req)) | ||
548 | goto fail; | ||
690 | 549 | ||
691 | if (unlikely(value)) { | 550 | priv->req = req; |
692 | kfree(priv); | 551 | req->buf = buf; |
693 | put_ep(epdata); | 552 | req->length = len; |
694 | } else | 553 | req->complete = ep_aio_complete; |
695 | value = -EIOCBQUEUED; | 554 | req->context = iocb; |
555 | value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); | ||
556 | if (unlikely(0 != value)) { | ||
557 | usb_ep_free_request(epdata->ep, req); | ||
558 | goto fail; | ||
559 | } | ||
560 | spin_unlock_irq(&epdata->dev->lock); | ||
561 | return -EIOCBQUEUED; | ||
562 | |||
563 | fail: | ||
564 | spin_unlock_irq(&epdata->dev->lock); | ||
565 | kfree(priv->to_free); | ||
566 | kfree(priv); | ||
567 | put_ep(epdata); | ||
696 | return value; | 568 | return value; |
697 | } | 569 | } |
698 | 570 | ||
699 | static ssize_t | 571 | static ssize_t |
700 | ep_aio_read(struct kiocb *iocb, const struct iovec *iov, | 572 | ep_read_iter(struct kiocb *iocb, struct iov_iter *to) |
701 | unsigned long nr_segs, loff_t o) | ||
702 | { | 573 | { |
703 | struct ep_data *epdata = iocb->ki_filp->private_data; | 574 | struct file *file = iocb->ki_filp; |
704 | char *buf; | 575 | struct ep_data *epdata = file->private_data; |
576 | size_t len = iov_iter_count(to); | ||
577 | ssize_t value; | ||
578 | char *buf; | ||
705 | 579 | ||
706 | if (unlikely(usb_endpoint_dir_in(&epdata->desc))) | 580 | if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0) |
707 | return -EINVAL; | 581 | return value; |
708 | 582 | ||
709 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); | 583 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
710 | if (unlikely(!buf)) | 584 | if (usb_endpoint_dir_in(&epdata->desc)) { |
711 | return -ENOMEM; | 585 | if (usb_endpoint_xfer_isoc(&epdata->desc) || |
586 | !is_sync_kiocb(iocb)) { | ||
587 | mutex_unlock(&epdata->lock); | ||
588 | return -EINVAL; | ||
589 | } | ||
590 | DBG (epdata->dev, "%s halt\n", epdata->name); | ||
591 | spin_lock_irq(&epdata->dev->lock); | ||
592 | if (likely(epdata->ep != NULL)) | ||
593 | usb_ep_set_halt(epdata->ep); | ||
594 | spin_unlock_irq(&epdata->dev->lock); | ||
595 | mutex_unlock(&epdata->lock); | ||
596 | return -EBADMSG; | ||
597 | } | ||
712 | 598 | ||
713 | return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); | 599 | buf = kmalloc(len, GFP_KERNEL); |
600 | if (unlikely(!buf)) { | ||
601 | mutex_unlock(&epdata->lock); | ||
602 | return -ENOMEM; | ||
603 | } | ||
604 | if (is_sync_kiocb(iocb)) { | ||
605 | value = ep_io(epdata, buf, len); | ||
606 | if (value >= 0 && copy_to_iter(buf, value, to)) | ||
607 | value = -EFAULT; | ||
608 | } else { | ||
609 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
610 | value = -ENOMEM; | ||
611 | if (!priv) | ||
612 | goto fail; | ||
613 | priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL); | ||
614 | if (!priv->to_free) { | ||
615 | kfree(priv); | ||
616 | goto fail; | ||
617 | } | ||
618 | value = ep_aio(iocb, priv, epdata, buf, len); | ||
619 | if (value == -EIOCBQUEUED) | ||
620 | buf = NULL; | ||
621 | } | ||
622 | fail: | ||
623 | kfree(buf); | ||
624 | mutex_unlock(&epdata->lock); | ||
625 | return value; | ||
714 | } | 626 | } |
715 | 627 | ||
628 | static ssize_t ep_config(struct ep_data *, const char *, size_t); | ||
629 | |||
716 | static ssize_t | 630 | static ssize_t |
717 | ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | 631 | ep_write_iter(struct kiocb *iocb, struct iov_iter *from) |
718 | unsigned long nr_segs, loff_t o) | ||
719 | { | 632 | { |
720 | struct ep_data *epdata = iocb->ki_filp->private_data; | 633 | struct file *file = iocb->ki_filp; |
721 | char *buf; | 634 | struct ep_data *epdata = file->private_data; |
722 | size_t len = 0; | 635 | size_t len = iov_iter_count(from); |
723 | int i = 0; | 636 | bool configured; |
637 | ssize_t value; | ||
638 | char *buf; | ||
639 | |||
640 | if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0) | ||
641 | return value; | ||
724 | 642 | ||
725 | if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) | 643 | configured = epdata->state == STATE_EP_ENABLED; |
726 | return -EINVAL; | ||
727 | 644 | ||
728 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); | 645 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
729 | if (unlikely(!buf)) | 646 | if (configured && !usb_endpoint_dir_in(&epdata->desc)) { |
647 | if (usb_endpoint_xfer_isoc(&epdata->desc) || | ||
648 | !is_sync_kiocb(iocb)) { | ||
649 | mutex_unlock(&epdata->lock); | ||
650 | return -EINVAL; | ||
651 | } | ||
652 | DBG (epdata->dev, "%s halt\n", epdata->name); | ||
653 | spin_lock_irq(&epdata->dev->lock); | ||
654 | if (likely(epdata->ep != NULL)) | ||
655 | usb_ep_set_halt(epdata->ep); | ||
656 | spin_unlock_irq(&epdata->dev->lock); | ||
657 | mutex_unlock(&epdata->lock); | ||
658 | return -EBADMSG; | ||
659 | } | ||
660 | |||
661 | buf = kmalloc(len, GFP_KERNEL); | ||
662 | if (unlikely(!buf)) { | ||
663 | mutex_unlock(&epdata->lock); | ||
730 | return -ENOMEM; | 664 | return -ENOMEM; |
665 | } | ||
731 | 666 | ||
732 | for (i=0; i < nr_segs; i++) { | 667 | if (unlikely(copy_from_iter(buf, len, from) != len)) { |
733 | if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, | 668 | value = -EFAULT; |
734 | iov[i].iov_len) != 0)) { | 669 | goto out; |
735 | kfree(buf); | 670 | } |
736 | return -EFAULT; | 671 | |
672 | if (unlikely(!configured)) { | ||
673 | value = ep_config(epdata, buf, len); | ||
674 | } else if (is_sync_kiocb(iocb)) { | ||
675 | value = ep_io(epdata, buf, len); | ||
676 | } else { | ||
677 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
678 | value = -ENOMEM; | ||
679 | if (priv) { | ||
680 | value = ep_aio(iocb, priv, epdata, buf, len); | ||
681 | if (value == -EIOCBQUEUED) | ||
682 | buf = NULL; | ||
737 | } | 683 | } |
738 | len += iov[i].iov_len; | ||
739 | } | 684 | } |
740 | return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); | 685 | out: |
686 | kfree(buf); | ||
687 | mutex_unlock(&epdata->lock); | ||
688 | return value; | ||
741 | } | 689 | } |
742 | 690 | ||
743 | /*----------------------------------------------------------------------*/ | 691 | /*----------------------------------------------------------------------*/ |
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
745 | /* used after endpoint configuration */ | 693 | /* used after endpoint configuration */ |
746 | static const struct file_operations ep_io_operations = { | 694 | static const struct file_operations ep_io_operations = { |
747 | .owner = THIS_MODULE, | 695 | .owner = THIS_MODULE, |
748 | .llseek = no_llseek, | ||
749 | 696 | ||
750 | .read = ep_read, | 697 | .open = ep_open, |
751 | .write = ep_write, | ||
752 | .unlocked_ioctl = ep_ioctl, | ||
753 | .release = ep_release, | 698 | .release = ep_release, |
754 | 699 | .llseek = no_llseek, | |
755 | .aio_read = ep_aio_read, | 700 | .read = new_sync_read, |
756 | .aio_write = ep_aio_write, | 701 | .write = new_sync_write, |
702 | .unlocked_ioctl = ep_ioctl, | ||
703 | .read_iter = ep_read_iter, | ||
704 | .write_iter = ep_write_iter, | ||
757 | }; | 705 | }; |
758 | 706 | ||
759 | /* ENDPOINT INITIALIZATION | 707 | /* ENDPOINT INITIALIZATION |
@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = { | |||
770 | * speed descriptor, then optional high speed descriptor. | 718 | * speed descriptor, then optional high speed descriptor. |
771 | */ | 719 | */ |
772 | static ssize_t | 720 | static ssize_t |
773 | ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | 721 | ep_config (struct ep_data *data, const char *buf, size_t len) |
774 | { | 722 | { |
775 | struct ep_data *data = fd->private_data; | ||
776 | struct usb_ep *ep; | 723 | struct usb_ep *ep; |
777 | u32 tag; | 724 | u32 tag; |
778 | int value, length = len; | 725 | int value, length = len; |
779 | 726 | ||
780 | value = mutex_lock_interruptible(&data->lock); | ||
781 | if (value < 0) | ||
782 | return value; | ||
783 | |||
784 | if (data->state != STATE_EP_READY) { | 727 | if (data->state != STATE_EP_READY) { |
785 | value = -EL2HLT; | 728 | value = -EL2HLT; |
786 | goto fail; | 729 | goto fail; |
@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
791 | goto fail0; | 734 | goto fail0; |
792 | 735 | ||
793 | /* we might need to change message format someday */ | 736 | /* we might need to change message format someday */ |
794 | if (copy_from_user (&tag, buf, 4)) { | 737 | memcpy(&tag, buf, 4); |
795 | goto fail1; | ||
796 | } | ||
797 | if (tag != 1) { | 738 | if (tag != 1) { |
798 | DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); | 739 | DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); |
799 | goto fail0; | 740 | goto fail0; |
@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
806 | */ | 747 | */ |
807 | 748 | ||
808 | /* full/low speed descriptor, then high speed */ | 749 | /* full/low speed descriptor, then high speed */ |
809 | if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { | 750 | memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE); |
810 | goto fail1; | ||
811 | } | ||
812 | if (data->desc.bLength != USB_DT_ENDPOINT_SIZE | 751 | if (data->desc.bLength != USB_DT_ENDPOINT_SIZE |
813 | || data->desc.bDescriptorType != USB_DT_ENDPOINT) | 752 | || data->desc.bDescriptorType != USB_DT_ENDPOINT) |
814 | goto fail0; | 753 | goto fail0; |
815 | if (len != USB_DT_ENDPOINT_SIZE) { | 754 | if (len != USB_DT_ENDPOINT_SIZE) { |
816 | if (len != 2 * USB_DT_ENDPOINT_SIZE) | 755 | if (len != 2 * USB_DT_ENDPOINT_SIZE) |
817 | goto fail0; | 756 | goto fail0; |
818 | if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, | 757 | memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, |
819 | USB_DT_ENDPOINT_SIZE)) { | 758 | USB_DT_ENDPOINT_SIZE); |
820 | goto fail1; | ||
821 | } | ||
822 | if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE | 759 | if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE |
823 | || data->hs_desc.bDescriptorType | 760 | || data->hs_desc.bDescriptorType |
824 | != USB_DT_ENDPOINT) { | 761 | != USB_DT_ENDPOINT) { |
@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
840 | case USB_SPEED_LOW: | 777 | case USB_SPEED_LOW: |
841 | case USB_SPEED_FULL: | 778 | case USB_SPEED_FULL: |
842 | ep->desc = &data->desc; | 779 | ep->desc = &data->desc; |
843 | value = usb_ep_enable(ep); | ||
844 | if (value == 0) | ||
845 | data->state = STATE_EP_ENABLED; | ||
846 | break; | 780 | break; |
847 | case USB_SPEED_HIGH: | 781 | case USB_SPEED_HIGH: |
848 | /* fails if caller didn't provide that descriptor... */ | 782 | /* fails if caller didn't provide that descriptor... */ |
849 | ep->desc = &data->hs_desc; | 783 | ep->desc = &data->hs_desc; |
850 | value = usb_ep_enable(ep); | ||
851 | if (value == 0) | ||
852 | data->state = STATE_EP_ENABLED; | ||
853 | break; | 784 | break; |
854 | default: | 785 | default: |
855 | DBG(data->dev, "unconnected, %s init abandoned\n", | 786 | DBG(data->dev, "unconnected, %s init abandoned\n", |
856 | data->name); | 787 | data->name); |
857 | value = -EINVAL; | 788 | value = -EINVAL; |
789 | goto gone; | ||
858 | } | 790 | } |
791 | value = usb_ep_enable(ep); | ||
859 | if (value == 0) { | 792 | if (value == 0) { |
860 | fd->f_op = &ep_io_operations; | 793 | data->state = STATE_EP_ENABLED; |
861 | value = length; | 794 | value = length; |
862 | } | 795 | } |
863 | gone: | 796 | gone: |
@@ -867,14 +800,10 @@ fail: | |||
867 | data->desc.bDescriptorType = 0; | 800 | data->desc.bDescriptorType = 0; |
868 | data->hs_desc.bDescriptorType = 0; | 801 | data->hs_desc.bDescriptorType = 0; |
869 | } | 802 | } |
870 | mutex_unlock(&data->lock); | ||
871 | return value; | 803 | return value; |
872 | fail0: | 804 | fail0: |
873 | value = -EINVAL; | 805 | value = -EINVAL; |
874 | goto fail; | 806 | goto fail; |
875 | fail1: | ||
876 | value = -EFAULT; | ||
877 | goto fail; | ||
878 | } | 807 | } |
879 | 808 | ||
880 | static int | 809 | static int |
@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd) | |||
902 | return value; | 831 | return value; |
903 | } | 832 | } |
904 | 833 | ||
905 | /* used before endpoint configuration */ | ||
906 | static const struct file_operations ep_config_operations = { | ||
907 | .llseek = no_llseek, | ||
908 | |||
909 | .open = ep_open, | ||
910 | .write = ep_config, | ||
911 | .release = ep_release, | ||
912 | }; | ||
913 | |||
914 | /*----------------------------------------------------------------------*/ | 834 | /*----------------------------------------------------------------------*/ |
915 | 835 | ||
916 | /* EP0 IMPLEMENTATION can be partly in userspace. | 836 | /* EP0 IMPLEMENTATION can be partly in userspace. |
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
989 | enum ep0_state state; | 909 | enum ep0_state state; |
990 | 910 | ||
991 | spin_lock_irq (&dev->lock); | 911 | spin_lock_irq (&dev->lock); |
912 | if (dev->state <= STATE_DEV_OPENED) { | ||
913 | retval = -EINVAL; | ||
914 | goto done; | ||
915 | } | ||
992 | 916 | ||
993 | /* report fd mode change before acting on it */ | 917 | /* report fd mode change before acting on it */ |
994 | if (dev->setup_abort) { | 918 | if (dev->setup_abort) { |
@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1187 | struct dev_data *dev = fd->private_data; | 1111 | struct dev_data *dev = fd->private_data; |
1188 | ssize_t retval = -ESRCH; | 1112 | ssize_t retval = -ESRCH; |
1189 | 1113 | ||
1190 | spin_lock_irq (&dev->lock); | ||
1191 | |||
1192 | /* report fd mode change before acting on it */ | 1114 | /* report fd mode change before acting on it */ |
1193 | if (dev->setup_abort) { | 1115 | if (dev->setup_abort) { |
1194 | dev->setup_abort = 0; | 1116 | dev->setup_abort = 0; |
@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1234 | } else | 1156 | } else |
1235 | DBG (dev, "fail %s, state %d\n", __func__, dev->state); | 1157 | DBG (dev, "fail %s, state %d\n", __func__, dev->state); |
1236 | 1158 | ||
1237 | spin_unlock_irq (&dev->lock); | ||
1238 | return retval; | 1159 | return retval; |
1239 | } | 1160 | } |
1240 | 1161 | ||
@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait) | |||
1281 | struct dev_data *dev = fd->private_data; | 1202 | struct dev_data *dev = fd->private_data; |
1282 | int mask = 0; | 1203 | int mask = 0; |
1283 | 1204 | ||
1205 | if (dev->state <= STATE_DEV_OPENED) | ||
1206 | return DEFAULT_POLLMASK; | ||
1207 | |||
1284 | poll_wait(fd, &dev->wait, wait); | 1208 | poll_wait(fd, &dev->wait, wait); |
1285 | 1209 | ||
1286 | spin_lock_irq (&dev->lock); | 1210 | spin_lock_irq (&dev->lock); |
@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value) | |||
1316 | return ret; | 1240 | return ret; |
1317 | } | 1241 | } |
1318 | 1242 | ||
1319 | /* used after device configuration */ | ||
1320 | static const struct file_operations ep0_io_operations = { | ||
1321 | .owner = THIS_MODULE, | ||
1322 | .llseek = no_llseek, | ||
1323 | |||
1324 | .read = ep0_read, | ||
1325 | .write = ep0_write, | ||
1326 | .fasync = ep0_fasync, | ||
1327 | .poll = ep0_poll, | ||
1328 | .unlocked_ioctl = dev_ioctl, | ||
1329 | .release = dev_release, | ||
1330 | }; | ||
1331 | |||
1332 | /*----------------------------------------------------------------------*/ | 1243 | /*----------------------------------------------------------------------*/ |
1333 | 1244 | ||
1334 | /* The in-kernel gadget driver handles most ep0 issues, in particular | 1245 | /* The in-kernel gadget driver handles most ep0 issues, in particular |
@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev) | |||
1650 | goto enomem1; | 1561 | goto enomem1; |
1651 | 1562 | ||
1652 | data->dentry = gadgetfs_create_file (dev->sb, data->name, | 1563 | data->dentry = gadgetfs_create_file (dev->sb, data->name, |
1653 | data, &ep_config_operations); | 1564 | data, &ep_io_operations); |
1654 | if (!data->dentry) | 1565 | if (!data->dentry) |
1655 | goto enomem2; | 1566 | goto enomem2; |
1656 | list_add_tail (&data->epfiles, &dev->epfiles); | 1567 | list_add_tail (&data->epfiles, &dev->epfiles); |
@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1852 | u32 tag; | 1763 | u32 tag; |
1853 | char *kbuf; | 1764 | char *kbuf; |
1854 | 1765 | ||
1766 | spin_lock_irq(&dev->lock); | ||
1767 | if (dev->state > STATE_DEV_OPENED) { | ||
1768 | value = ep0_write(fd, buf, len, ptr); | ||
1769 | spin_unlock_irq(&dev->lock); | ||
1770 | return value; | ||
1771 | } | ||
1772 | spin_unlock_irq(&dev->lock); | ||
1773 | |||
1855 | if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) | 1774 | if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) |
1856 | return -EINVAL; | 1775 | return -EINVAL; |
1857 | 1776 | ||
@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1925 | * on, they can work ... except in cleanup paths that | 1844 | * on, they can work ... except in cleanup paths that |
1926 | * kick in after the ep0 descriptor is closed. | 1845 | * kick in after the ep0 descriptor is closed. |
1927 | */ | 1846 | */ |
1928 | fd->f_op = &ep0_io_operations; | ||
1929 | value = len; | 1847 | value = len; |
1930 | } | 1848 | } |
1931 | return value; | 1849 | return value; |
@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd) | |||
1956 | return value; | 1874 | return value; |
1957 | } | 1875 | } |
1958 | 1876 | ||
1959 | static const struct file_operations dev_init_operations = { | 1877 | static const struct file_operations ep0_operations = { |
1960 | .llseek = no_llseek, | 1878 | .llseek = no_llseek, |
1961 | 1879 | ||
1962 | .open = dev_open, | 1880 | .open = dev_open, |
1881 | .read = ep0_read, | ||
1963 | .write = dev_config, | 1882 | .write = dev_config, |
1964 | .fasync = ep0_fasync, | 1883 | .fasync = ep0_fasync, |
1884 | .poll = ep0_poll, | ||
1965 | .unlocked_ioctl = dev_ioctl, | 1885 | .unlocked_ioctl = dev_ioctl, |
1966 | .release = dev_release, | 1886 | .release = dev_release, |
1967 | }; | 1887 | }; |
@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent) | |||
2077 | goto Enomem; | 1997 | goto Enomem; |
2078 | 1998 | ||
2079 | dev->sb = sb; | 1999 | dev->sb = sb; |
2080 | dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); | 2000 | dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations); |
2081 | if (!dev->dentry) { | 2001 | if (!dev->dentry) { |
2082 | put_dev(dev); | 2002 | put_dev(dev); |
2083 | goto Enomem; | 2003 | goto Enomem; |
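With ep_config() folded into the write path and the endpoint files created with ep_io_operations from the start, a userspace gadget driver configures an endpoint by writing the tag-plus-descriptor blob to the same fd it later uses for data; the kernel no longer swaps f_op behind its back. A rough sketch of that flow is below. It assumes gadgetfs is already mounted and the device has been set up through the ep0 file, and the endpoint file name, address and packet sizes are placeholders.

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/usb/ch9.h>

    int configure_and_write(const char *epfile)
    {
            /* Full-speed descriptor first, then the optional high-speed one,
             * exactly as ep_config() expects after the 4-byte tag (== 1). */
            struct usb_endpoint_descriptor fs = {
                    .bLength          = USB_DT_ENDPOINT_SIZE,
                    .bDescriptorType  = USB_DT_ENDPOINT,
                    .bEndpointAddress = USB_DIR_IN | 1,   /* placeholder */
                    .bmAttributes     = USB_ENDPOINT_XFER_BULK,
                    .wMaxPacketSize   = __cpu_to_le16(64),
            };
            struct usb_endpoint_descriptor hs = fs;
            uint8_t cfg[4 + 2 * USB_DT_ENDPOINT_SIZE];
            uint32_t tag = 1;
            char data[64] = "hello";
            int fd;

            hs.wMaxPacketSize = __cpu_to_le16(512);

            memcpy(cfg, &tag, 4);
            memcpy(cfg + 4, &fs, USB_DT_ENDPOINT_SIZE);
            memcpy(cfg + 4 + USB_DT_ENDPOINT_SIZE, &hs, USB_DT_ENDPOINT_SIZE);

            fd = open(epfile, O_RDWR);
            if (fd < 0)
                    return -1;

            /* First write hits the STATE_EP_READY branch and configures the ep. */
            if (write(fd, cfg, sizeof(cfg)) != (ssize_t)sizeof(cfg)) {
                    close(fd);
                    return -1;
            }
            /* Subsequent writes go through ep_write_iter() as real IN data. */
            write(fd, data, sizeof(data));
            close(fd);
            return 0;
    }
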
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index 3a494168661e..6e0a019aad54 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c | |||
@@ -1740,10 +1740,9 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name) | |||
1740 | goto err_session; | 1740 | goto err_session; |
1741 | } | 1741 | } |
1742 | /* | 1742 | /* |
1743 | * Now register the TCM vHost virtual I_T Nexus as active with the | 1743 | * Now register the TCM vHost virtual I_T Nexus as active. |
1744 | * call to __transport_register_session() | ||
1745 | */ | 1744 | */ |
1746 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, | 1745 | transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, |
1747 | tv_nexus->tvn_se_sess, tv_nexus); | 1746 | tv_nexus->tvn_se_sess, tv_nexus); |
1748 | tpg->tpg_nexus = tv_nexus; | 1747 | tpg->tpg_nexus = tv_nexus; |
1749 | mutex_unlock(&tpg->tpg_mutex); | 1748 | mutex_unlock(&tpg->tpg_mutex); |
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c index ff97ac93ac03..5ee95152493c 100644 --- a/drivers/usb/gadget/legacy/zero.c +++ b/drivers/usb/gadget/legacy/zero.c | |||
@@ -68,8 +68,6 @@ static struct usb_zero_options gzero_options = { | |||
68 | .isoc_maxpacket = GZERO_ISOC_MAXPACKET, | 68 | .isoc_maxpacket = GZERO_ISOC_MAXPACKET, |
69 | .bulk_buflen = GZERO_BULK_BUFLEN, | 69 | .bulk_buflen = GZERO_BULK_BUFLEN, |
70 | .qlen = GZERO_QLEN, | 70 | .qlen = GZERO_QLEN, |
71 | .int_interval = GZERO_INT_INTERVAL, | ||
72 | .int_maxpacket = GZERO_INT_MAXPACKET, | ||
73 | }; | 71 | }; |
74 | 72 | ||
75 | /*-------------------------------------------------------------------------*/ | 73 | /*-------------------------------------------------------------------------*/ |
@@ -268,21 +266,6 @@ module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint, | |||
268 | S_IRUGO|S_IWUSR); | 266 | S_IRUGO|S_IWUSR); |
269 | MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); | 267 | MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)"); |
270 | 268 | ||
271 | module_param_named(int_interval, gzero_options.int_interval, uint, | ||
272 | S_IRUGO|S_IWUSR); | ||
273 | MODULE_PARM_DESC(int_interval, "1 - 16"); | ||
274 | |||
275 | module_param_named(int_maxpacket, gzero_options.int_maxpacket, uint, | ||
276 | S_IRUGO|S_IWUSR); | ||
277 | MODULE_PARM_DESC(int_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)"); | ||
278 | |||
279 | module_param_named(int_mult, gzero_options.int_mult, uint, S_IRUGO|S_IWUSR); | ||
280 | MODULE_PARM_DESC(int_mult, "0 - 2 (hs/ss only)"); | ||
281 | |||
282 | module_param_named(int_maxburst, gzero_options.int_maxburst, uint, | ||
283 | S_IRUGO|S_IWUSR); | ||
284 | MODULE_PARM_DESC(int_maxburst, "0 - 15 (ss only)"); | ||
285 | |||
286 | static struct usb_function *func_lb; | 269 | static struct usb_function *func_lb; |
287 | static struct usb_function_instance *func_inst_lb; | 270 | static struct usb_function_instance *func_inst_lb; |
288 | 271 | ||
@@ -318,10 +301,6 @@ static int __init zero_bind(struct usb_composite_dev *cdev) | |||
318 | ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; | 301 | ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; |
319 | ss_opts->isoc_mult = gzero_options.isoc_mult; | 302 | ss_opts->isoc_mult = gzero_options.isoc_mult; |
320 | ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; | 303 | ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; |
321 | ss_opts->int_interval = gzero_options.int_interval; | ||
322 | ss_opts->int_maxpacket = gzero_options.int_maxpacket; | ||
323 | ss_opts->int_mult = gzero_options.int_mult; | ||
324 | ss_opts->int_maxburst = gzero_options.int_maxburst; | ||
325 | ss_opts->bulk_buflen = gzero_options.bulk_buflen; | 304 | ss_opts->bulk_buflen = gzero_options.bulk_buflen; |
326 | 305 | ||
327 | func_ss = usb_get_function(func_inst_ss); | 306 | func_ss = usb_get_function(func_inst_ss); |
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c index 663f7908b15c..be0964a801e8 100644 --- a/drivers/usb/host/ehci-atmel.c +++ b/drivers/usb/host/ehci-atmel.c | |||
@@ -34,7 +34,6 @@ static const char hcd_name[] = "ehci-atmel"; | |||
34 | 34 | ||
35 | struct atmel_ehci_priv { | 35 | struct atmel_ehci_priv { |
36 | struct clk *iclk; | 36 | struct clk *iclk; |
37 | struct clk *fclk; | ||
38 | struct clk *uclk; | 37 | struct clk *uclk; |
39 | bool clocked; | 38 | bool clocked; |
40 | }; | 39 | }; |
@@ -51,12 +50,9 @@ static void atmel_start_clock(struct atmel_ehci_priv *atmel_ehci) | |||
51 | { | 50 | { |
52 | if (atmel_ehci->clocked) | 51 | if (atmel_ehci->clocked) |
53 | return; | 52 | return; |
54 | if (IS_ENABLED(CONFIG_COMMON_CLK)) { | 53 | |
55 | clk_set_rate(atmel_ehci->uclk, 48000000); | 54 | clk_prepare_enable(atmel_ehci->uclk); |
56 | clk_prepare_enable(atmel_ehci->uclk); | ||
57 | } | ||
58 | clk_prepare_enable(atmel_ehci->iclk); | 55 | clk_prepare_enable(atmel_ehci->iclk); |
59 | clk_prepare_enable(atmel_ehci->fclk); | ||
60 | atmel_ehci->clocked = true; | 56 | atmel_ehci->clocked = true; |
61 | } | 57 | } |
62 | 58 | ||
@@ -64,10 +60,9 @@ static void atmel_stop_clock(struct atmel_ehci_priv *atmel_ehci) | |||
64 | { | 60 | { |
65 | if (!atmel_ehci->clocked) | 61 | if (!atmel_ehci->clocked) |
66 | return; | 62 | return; |
67 | clk_disable_unprepare(atmel_ehci->fclk); | 63 | |
68 | clk_disable_unprepare(atmel_ehci->iclk); | 64 | clk_disable_unprepare(atmel_ehci->iclk); |
69 | if (IS_ENABLED(CONFIG_COMMON_CLK)) | 65 | clk_disable_unprepare(atmel_ehci->uclk); |
70 | clk_disable_unprepare(atmel_ehci->uclk); | ||
71 | atmel_ehci->clocked = false; | 66 | atmel_ehci->clocked = false; |
72 | } | 67 | } |
73 | 68 | ||
@@ -146,20 +141,13 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev) | |||
146 | retval = -ENOENT; | 141 | retval = -ENOENT; |
147 | goto fail_request_resource; | 142 | goto fail_request_resource; |
148 | } | 143 | } |
149 | atmel_ehci->fclk = devm_clk_get(&pdev->dev, "uhpck"); | 144 | |
150 | if (IS_ERR(atmel_ehci->fclk)) { | 145 | atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk"); |
151 | dev_err(&pdev->dev, "Error getting function clock\n"); | 146 | if (IS_ERR(atmel_ehci->uclk)) { |
152 | retval = -ENOENT; | 147 | dev_err(&pdev->dev, "failed to get uclk\n"); |
148 | retval = PTR_ERR(atmel_ehci->uclk); | ||
153 | goto fail_request_resource; | 149 | goto fail_request_resource; |
154 | } | 150 | } |
155 | if (IS_ENABLED(CONFIG_COMMON_CLK)) { | ||
156 | atmel_ehci->uclk = devm_clk_get(&pdev->dev, "usb_clk"); | ||
157 | if (IS_ERR(atmel_ehci->uclk)) { | ||
158 | dev_err(&pdev->dev, "failed to get uclk\n"); | ||
159 | retval = PTR_ERR(atmel_ehci->uclk); | ||
160 | goto fail_request_resource; | ||
161 | } | ||
162 | } | ||
163 | 151 | ||
164 | ehci = hcd_to_ehci(hcd); | 152 | ehci = hcd_to_ehci(hcd); |
165 | /* registers start at offset 0x0 */ | 153 | /* registers start at offset 0x0 */ |
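The resulting Atmel EHCI clock handling boils down to two clocks, obtained unconditionally and enabled/disabled as a pair. Condensed from the hunks above into one place as a sketch of the end state, not a verbatim copy; probe-time error handling and the rest of the driver are omitted.

    #include <linux/clk.h>
    #include <linux/types.h>

    struct atmel_ehci_priv {
            struct clk *iclk;       /* peripheral (interface) clock */
            struct clk *uclk;       /* "usb_clk", the 48 MHz USB clock */
            bool clocked;
    };

    static void atmel_start_clock(struct atmel_ehci_priv *p)
    {
            if (p->clocked)
                    return;
            clk_prepare_enable(p->uclk);
            clk_prepare_enable(p->iclk);
            p->clocked = true;
    }

    static void atmel_stop_clock(struct atmel_ehci_priv *p)
    {
            if (!p->clocked)
                    return;
            clk_disable_unprepare(p->iclk);
            clk_disable_unprepare(p->uclk);
            p->clocked = false;
    }
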
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 5fb66db89e05..73485fa4372f 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -1729,7 +1729,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, | |||
1729 | if (!command) | 1729 | if (!command) |
1730 | return; | 1730 | return; |
1731 | 1731 | ||
1732 | ep->ep_state |= EP_HALTED | EP_RECENTLY_HALTED; | 1732 | ep->ep_state |= EP_HALTED; |
1733 | ep->stopped_stream = stream_id; | 1733 | ep->stopped_stream = stream_id; |
1734 | 1734 | ||
1735 | xhci_queue_reset_ep(xhci, command, slot_id, ep_index); | 1735 | xhci_queue_reset_ep(xhci, command, slot_id, ep_index); |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index b06d1a53652d..ec8ac1674854 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -1338,12 +1338,6 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
1338 | goto exit; | 1338 | goto exit; |
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | /* Reject urb if endpoint is in soft reset, queue must stay empty */ | ||
1342 | if (xhci->devs[slot_id]->eps[ep_index].ep_state & EP_CONFIG_PENDING) { | ||
1343 | xhci_warn(xhci, "Can't enqueue URB while ep is in soft reset\n"); | ||
1344 | ret = -EINVAL; | ||
1345 | } | ||
1346 | |||
1347 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) | 1341 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) |
1348 | size = urb->number_of_packets; | 1342 | size = urb->number_of_packets; |
1349 | else | 1343 | else |
@@ -2954,36 +2948,23 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, | |||
2954 | } | 2948 | } |
2955 | } | 2949 | } |
2956 | 2950 | ||
2957 | /* Called after clearing a halted device. USB core should have sent the control | 2951 | /* Called when clearing halted device. The core should have sent the control |
2958 | * message to clear the device halt condition. The host side of the halt should | 2952 | * message to clear the device halt condition. The host side of the halt should |
2959 | * already be cleared with a reset endpoint command issued immediately when the | 2953 | * already be cleared with a reset endpoint command issued when the STALL tx |
2960 | * STALL tx event was received. | 2954 | * event was received. |
2955 | * | ||
2956 | * Context: in_interrupt | ||
2961 | */ | 2957 | */ |
2962 | 2958 | ||
2963 | void xhci_endpoint_reset(struct usb_hcd *hcd, | 2959 | void xhci_endpoint_reset(struct usb_hcd *hcd, |
2964 | struct usb_host_endpoint *ep) | 2960 | struct usb_host_endpoint *ep) |
2965 | { | 2961 | { |
2966 | struct xhci_hcd *xhci; | 2962 | struct xhci_hcd *xhci; |
2967 | struct usb_device *udev; | ||
2968 | struct xhci_virt_device *virt_dev; | ||
2969 | struct xhci_virt_ep *virt_ep; | ||
2970 | struct xhci_input_control_ctx *ctrl_ctx; | ||
2971 | struct xhci_command *command; | ||
2972 | unsigned int ep_index, ep_state; | ||
2973 | unsigned long flags; | ||
2974 | u32 ep_flag; | ||
2975 | 2963 | ||
2976 | xhci = hcd_to_xhci(hcd); | 2964 | xhci = hcd_to_xhci(hcd); |
2977 | udev = (struct usb_device *) ep->hcpriv; | ||
2978 | if (!ep->hcpriv) | ||
2979 | return; | ||
2980 | virt_dev = xhci->devs[udev->slot_id]; | ||
2981 | ep_index = xhci_get_endpoint_index(&ep->desc); | ||
2982 | virt_ep = &virt_dev->eps[ep_index]; | ||
2983 | ep_state = virt_ep->ep_state; | ||
2984 | 2965 | ||
2985 | /* | 2966 | /* |
2986 | * Implement the config ep command in xhci 4.6.8 additional note: | 2967 | * We might need to implement the config ep cmd in xhci 4.8.1 note: |
2987 | * The Reset Endpoint Command may only be issued to endpoints in the | 2968 | * The Reset Endpoint Command may only be issued to endpoints in the |
2988 | * Halted state. If software wishes reset the Data Toggle or Sequence | 2969 | * Halted state. If software wishes reset the Data Toggle or Sequence |
2989 | * Number of an endpoint that isn't in the Halted state, then software | 2970 | * Number of an endpoint that isn't in the Halted state, then software |
@@ -2991,72 +2972,9 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, | |||
2991 | * for the target endpoint. that is in the Stopped state. | 2972 | * for the target endpoint. that is in the Stopped state. |
2992 | */ | 2973 | */ |
2993 | 2974 | ||
2994 | if (ep_state & SET_DEQ_PENDING || ep_state & EP_RECENTLY_HALTED) { | 2975 | /* For now just print debug to follow the situation */ |
2995 | virt_ep->ep_state &= ~EP_RECENTLY_HALTED; | 2976 | xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", |
2996 | xhci_dbg(xhci, "ep recently halted, no toggle reset needed\n"); | 2977 | ep->desc.bEndpointAddress); |
2997 | return; | ||
2998 | } | ||
2999 | |||
3000 | /* Only interrupt and bulk ep's use Data toggle, USB2 spec 5.5.4-> */ | ||
3001 | if (usb_endpoint_xfer_control(&ep->desc) || | ||
3002 | usb_endpoint_xfer_isoc(&ep->desc)) | ||
3003 | return; | ||
3004 | |||
3005 | ep_flag = xhci_get_endpoint_flag(&ep->desc); | ||
3006 | |||
3007 | if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG) | ||
3008 | return; | ||
3009 | |||
3010 | command = xhci_alloc_command(xhci, true, true, GFP_NOWAIT); | ||
3011 | if (!command) { | ||
3012 | xhci_err(xhci, "Could not allocate xHCI command structure.\n"); | ||
3013 | return; | ||
3014 | } | ||
3015 | |||
3016 | spin_lock_irqsave(&xhci->lock, flags); | ||
3017 | |||
3018 | /* block ringing ep doorbell */ | ||
3019 | virt_ep->ep_state |= EP_CONFIG_PENDING; | ||
3020 | |||
3021 | /* | ||
3022 | * Make sure endpoint ring is empty before resetting the toggle/seq. | ||
3023 | * Driver is required to synchronously cancel all transfer request. | ||
3024 | * | ||
3025 | * xhci 4.6.6 says we can issue a configure endpoint command on a | ||
3026 | * running endpoint ring as long as it's idle (queue empty) | ||
3027 | */ | ||
3028 | |||
3029 | if (!list_empty(&virt_ep->ring->td_list)) { | ||
3030 | dev_err(&udev->dev, "EP not empty, refuse reset\n"); | ||
3031 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3032 | goto cleanup; | ||
3033 | } | ||
3034 | |||
3035 | xhci_dbg(xhci, "Reset toggle/seq for slot %d, ep_index: %d\n", | ||
3036 | udev->slot_id, ep_index); | ||
3037 | |||
3038 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); | ||
3039 | if (!ctrl_ctx) { | ||
3040 | xhci_err(xhci, "Could not get input context, bad type. virt_dev: %p, in_ctx %p\n", | ||
3041 | virt_dev, virt_dev->in_ctx); | ||
3042 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3043 | goto cleanup; | ||
3044 | } | ||
3045 | xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, | ||
3046 | virt_dev->out_ctx, ctrl_ctx, | ||
3047 | ep_flag, ep_flag); | ||
3048 | xhci_endpoint_copy(xhci, command->in_ctx, virt_dev->out_ctx, ep_index); | ||
3049 | |||
3050 | xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma, | ||
3051 | udev->slot_id, false); | ||
3052 | xhci_ring_cmd_db(xhci); | ||
3053 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
3054 | |||
3055 | wait_for_completion(command->completion); | ||
3056 | |||
3057 | cleanup: | ||
3058 | virt_ep->ep_state &= ~EP_CONFIG_PENDING; | ||
3059 | xhci_free_command(xhci, command); | ||
3060 | } | 2978 | } |
3061 | 2979 | ||
3062 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, | 2980 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 265ab1771d24..8e421b89632d 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -865,8 +865,6 @@ struct xhci_virt_ep { | |||
865 | #define EP_HAS_STREAMS (1 << 4) | 865 | #define EP_HAS_STREAMS (1 << 4) |
866 | /* Transitioning the endpoint to not using streams, don't enqueue URBs */ | 866 | /* Transitioning the endpoint to not using streams, don't enqueue URBs */ |
867 | #define EP_GETTING_NO_STREAMS (1 << 5) | 867 | #define EP_GETTING_NO_STREAMS (1 << 5) |
868 | #define EP_RECENTLY_HALTED (1 << 6) | ||
869 | #define EP_CONFIG_PENDING (1 << 7) | ||
870 | /* ---- Related to URB cancellation ---- */ | 868 | /* ---- Related to URB cancellation ---- */ |
871 | struct list_head cancelled_td_list; | 869 | struct list_head cancelled_td_list; |
872 | struct xhci_td *stopped_td; | 870 | struct xhci_td *stopped_td; |
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c index b9827556455f..bfa402cf3a27 100644 --- a/drivers/usb/isp1760/isp1760-core.c +++ b/drivers/usb/isp1760/isp1760-core.c | |||
@@ -151,8 +151,7 @@ int isp1760_register(struct resource *mem, int irq, unsigned long irqflags, | |||
151 | } | 151 | } |
152 | 152 | ||
153 | if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { | 153 | if (IS_ENABLED(CONFIG_USB_ISP1761_UDC) && !udc_disabled) { |
154 | ret = isp1760_udc_register(isp, irq, irqflags | IRQF_SHARED | | 154 | ret = isp1760_udc_register(isp, irq, irqflags); |
155 | IRQF_DISABLED); | ||
156 | if (ret < 0) { | 155 | if (ret < 0) { |
157 | isp1760_hcd_unregister(&isp->hcd); | 156 | isp1760_hcd_unregister(&isp->hcd); |
158 | return ret; | 157 | return ret; |
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c index 9612d7990565..f32c292cc868 100644 --- a/drivers/usb/isp1760/isp1760-udc.c +++ b/drivers/usb/isp1760/isp1760-udc.c | |||
@@ -1191,6 +1191,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1191 | struct usb_gadget_driver *driver) | 1191 | struct usb_gadget_driver *driver) |
1192 | { | 1192 | { |
1193 | struct isp1760_udc *udc = gadget_to_udc(gadget); | 1193 | struct isp1760_udc *udc = gadget_to_udc(gadget); |
1194 | unsigned long flags; | ||
1194 | 1195 | ||
1195 | /* The hardware doesn't support low speed. */ | 1196 | /* The hardware doesn't support low speed. */ |
1196 | if (driver->max_speed < USB_SPEED_FULL) { | 1197 | if (driver->max_speed < USB_SPEED_FULL) { |
@@ -1198,7 +1199,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1198 | return -EINVAL; | 1199 | return -EINVAL; |
1199 | } | 1200 | } |
1200 | 1201 | ||
1201 | spin_lock(&udc->lock); | 1202 | spin_lock_irqsave(&udc->lock, flags); |
1202 | 1203 | ||
1203 | if (udc->driver) { | 1204 | if (udc->driver) { |
1204 | dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); | 1205 | dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); |
@@ -1208,7 +1209,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1208 | 1209 | ||
1209 | udc->driver = driver; | 1210 | udc->driver = driver; |
1210 | 1211 | ||
1211 | spin_unlock(&udc->lock); | 1212 | spin_unlock_irqrestore(&udc->lock, flags); |
1212 | 1213 | ||
1213 | dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", | 1214 | dev_dbg(udc->isp->dev, "starting UDC with driver %s\n", |
1214 | driver->function); | 1215 | driver->function); |
@@ -1232,6 +1233,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget, | |||
1232 | static int isp1760_udc_stop(struct usb_gadget *gadget) | 1233 | static int isp1760_udc_stop(struct usb_gadget *gadget) |
1233 | { | 1234 | { |
1234 | struct isp1760_udc *udc = gadget_to_udc(gadget); | 1235 | struct isp1760_udc *udc = gadget_to_udc(gadget); |
1236 | unsigned long flags; | ||
1235 | 1237 | ||
1236 | dev_dbg(udc->isp->dev, "%s\n", __func__); | 1238 | dev_dbg(udc->isp->dev, "%s\n", __func__); |
1237 | 1239 | ||
@@ -1239,9 +1241,9 @@ static int isp1760_udc_stop(struct usb_gadget *gadget) | |||
1239 | 1241 | ||
1240 | isp1760_udc_write(udc, DC_MODE, 0); | 1242 | isp1760_udc_write(udc, DC_MODE, 0); |
1241 | 1243 | ||
1242 | spin_lock(&udc->lock); | 1244 | spin_lock_irqsave(&udc->lock, flags); |
1243 | udc->driver = NULL; | 1245 | udc->driver = NULL; |
1244 | spin_unlock(&udc->lock); | 1246 | spin_unlock_irqrestore(&udc->lock, flags); |
1245 | 1247 | ||
1246 | return 0; | 1248 | return 0; |
1247 | } | 1249 | } |
@@ -1411,7 +1413,7 @@ static int isp1760_udc_init(struct isp1760_udc *udc) | |||
1411 | return -ENODEV; | 1413 | return -ENODEV; |
1412 | } | 1414 | } |
1413 | 1415 | ||
1414 | if (chipid != 0x00011582) { | 1416 | if (chipid != 0x00011582 && chipid != 0x00158210) { |
1415 | dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); | 1417 | dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid); |
1416 | return -ENODEV; | 1418 | return -ENODEV; |
1417 | } | 1419 | } |
@@ -1451,8 +1453,8 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq, | |||
1451 | 1453 | ||
1452 | sprintf(udc->irqname, "%s (udc)", devname); | 1454 | sprintf(udc->irqname, "%s (udc)", devname); |
1453 | 1455 | ||
1454 | ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | IRQF_DISABLED | | 1456 | ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | irqflags, |
1455 | irqflags, udc->irqname, udc); | 1457 | udc->irqname, udc); |
1456 | if (ret < 0) | 1458 | if (ret < 0) |
1457 | goto error; | 1459 | goto error; |
1458 | 1460 | ||
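
The isp1760 hunks above convert plain spin_lock()/spin_unlock() to the irqsave variants in the gadget start/stop paths and drop the long-removed IRQF_DISABLED flag from request_irq(). A minimal sketch of that locking pattern, using a hypothetical my_dev structure rather than the driver's real state:

/* Sketch only: why process-context paths use the irqsave variants. */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct my_dev {				/* hypothetical device state */
	spinlock_t lock;
	int state;
};

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	spin_lock(&dev->lock);		/* already in hardirq context */
	dev->state++;
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}

static void my_start(struct my_dev *dev)
{
	unsigned long flags;

	/* Process context: disable local interrupts so my_irq() cannot
	 * be entered on this CPU while the lock is held. */
	spin_lock_irqsave(&dev->lock, flags);
	dev->state = 0;
	spin_unlock_irqrestore(&dev->lock, flags);
}
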
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 14e1628483d9..39db8b603627 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig | |||
@@ -79,7 +79,8 @@ config USB_MUSB_TUSB6010 | |||
79 | 79 | ||
80 | config USB_MUSB_OMAP2PLUS | 80 | config USB_MUSB_OMAP2PLUS |
81 | tristate "OMAP2430 and onwards" | 81 | tristate "OMAP2430 and onwards" |
82 | depends on ARCH_OMAP2PLUS && USB && OMAP_CONTROL_PHY | 82 | depends on ARCH_OMAP2PLUS && USB |
83 | depends on OMAP_CONTROL_PHY || !OMAP_CONTROL_PHY | ||
83 | select GENERIC_PHY | 84 | select GENERIC_PHY |
84 | 85 | ||
85 | config USB_MUSB_AM35X | 86 | config USB_MUSB_AM35X |
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c index 403fab772724..7b3035ff9434 100644 --- a/drivers/usb/phy/phy-am335x-control.c +++ b/drivers/usb/phy/phy-am335x-control.c | |||
@@ -126,6 +126,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev) | |||
126 | return NULL; | 126 | return NULL; |
127 | 127 | ||
128 | dev = bus_find_device(&platform_bus_type, NULL, node, match); | 128 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
129 | if (!dev) | ||
130 | return NULL; | ||
131 | |||
129 | ctrl_usb = dev_get_drvdata(dev); | 132 | ctrl_usb = dev_get_drvdata(dev); |
130 | if (!ctrl_usb) | 133 | if (!ctrl_usb) |
131 | return NULL; | 134 | return NULL; |
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 82570425fdfe..c85ea530085f 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
@@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, | |||
113 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 113 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
114 | US_FL_NO_ATA_1X), | 114 | US_FL_NO_ATA_1X), |
115 | 115 | ||
116 | /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ | ||
117 | UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, | ||
118 | "Initio Corporation", | ||
119 | "", | ||
120 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
121 | US_FL_NO_ATA_1X), | ||
122 | |||
116 | /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ | 123 | /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ |
117 | UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, | 124 | UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, |
118 | "JMicron", | 125 | "JMicron", |
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index f88bfdf5b6a0..2027a27546ef 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
@@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, | |||
868 | func = vfio_pci_set_err_trigger; | 868 | func = vfio_pci_set_err_trigger; |
869 | break; | 869 | break; |
870 | } | 870 | } |
871 | break; | ||
871 | case VFIO_PCI_REQ_IRQ_INDEX: | 872 | case VFIO_PCI_REQ_IRQ_INDEX: |
872 | switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { | 873 | switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { |
873 | case VFIO_IRQ_SET_ACTION_TRIGGER: | 874 | case VFIO_IRQ_SET_ACTION_TRIGGER: |
874 | func = vfio_pci_set_req_trigger; | 875 | func = vfio_pci_set_req_trigger; |
875 | break; | 876 | break; |
876 | } | 877 | } |
878 | break; | ||
877 | } | 879 | } |
878 | 880 | ||
879 | if (!func) | 881 | if (!func) |
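
The vfio_pci change adds the break statements that keep one outer switch case from falling through into the next and overwriting the handler that was just chosen. A standalone illustration of the bug class (names are made up, not taken from the driver):

#include <stdio.h>

static void handler_a(void) { puts("a"); }
static void handler_b(void) { puts("b"); }

int main(void)
{
	void (*func)(void) = NULL;
	int index = 0;			/* selects handler_a */

	switch (index) {
	case 0:
		func = handler_a;
		break;			/* without this, case 1 overwrites func */
	case 1:
		func = handler_b;
		break;
	}

	if (func)
		func();
	return 0;
}
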
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 8d4f3f1ff799..71df240a467a 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -1956,10 +1956,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, | |||
1956 | goto out; | 1956 | goto out; |
1957 | } | 1957 | } |
1958 | /* | 1958 | /* |
1959 | * Now register the TCM vhost virtual I_T Nexus as active with the | 1959 | * Now register the TCM vhost virtual I_T Nexus as active. |
1960 | * call to __transport_register_session() | ||
1961 | */ | 1960 | */ |
1962 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, | 1961 | transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, |
1963 | tv_nexus->tvn_se_sess, tv_nexus); | 1962 | tv_nexus->tvn_se_sess, tv_nexus); |
1964 | tpg->tpg_nexus = tv_nexus; | 1963 | tpg->tpg_nexus = tv_nexus; |
1965 | 1964 | ||
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 32c0b6b28097..9362424c2340 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c | |||
@@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, | |||
599 | 599 | ||
600 | len = clcdfb_snprintf_mode(NULL, 0, mode); | 600 | len = clcdfb_snprintf_mode(NULL, 0, mode); |
601 | name = devm_kzalloc(dev, len + 1, GFP_KERNEL); | 601 | name = devm_kzalloc(dev, len + 1, GFP_KERNEL); |
602 | if (!name) | ||
603 | return -ENOMEM; | ||
604 | |||
602 | clcdfb_snprintf_mode(name, len + 1, mode); | 605 | clcdfb_snprintf_mode(name, len + 1, mode); |
603 | mode->name = name; | 606 | mode->name = name; |
604 | 607 | ||
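
The amba-clcd hunk checks the devm_kzalloc() result before the mode name is written into it. A small sketch of the same check in a hypothetical helper (the helper name and arguments are illustrative, not driver code):

/* Sketch: a kernel allocation can fail and must be checked first. */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static int build_mode_name(struct device *dev, const char *src, char **out)
{
	size_t len = strlen(src);
	char *name = devm_kzalloc(dev, len + 1, GFP_KERNEL);

	if (!name)
		return -ENOMEM;		/* propagate instead of oopsing */

	memcpy(name, src, len);		/* devm_kzalloc() zeroed the NUL */
	*out = name;
	return 0;
}
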
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 95338593ebf4..868facdec638 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c | |||
@@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, | |||
624 | int num = 0, i, first = 1; | 624 | int num = 0, i, first = 1; |
625 | int ver, rev; | 625 | int ver, rev; |
626 | 626 | ||
627 | ver = edid[EDID_STRUCT_VERSION]; | ||
628 | rev = edid[EDID_STRUCT_REVISION]; | ||
629 | |||
630 | mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); | 627 | mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); |
631 | if (mode == NULL) | 628 | if (mode == NULL) |
632 | return NULL; | 629 | return NULL; |
@@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, | |||
637 | return NULL; | 634 | return NULL; |
638 | } | 635 | } |
639 | 636 | ||
637 | ver = edid[EDID_STRUCT_VERSION]; | ||
638 | rev = edid[EDID_STRUCT_REVISION]; | ||
639 | |||
640 | *dbsize = 0; | 640 | *dbsize = 0; |
641 | 641 | ||
642 | DPRINTK(" Detailed Timings\n"); | 642 | DPRINTK(" Detailed Timings\n"); |
diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c index 5a2095a98ed8..12186557a9d4 100644 --- a/drivers/video/fbdev/omap2/dss/display-sysfs.c +++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c | |||
@@ -28,44 +28,22 @@ | |||
28 | #include <video/omapdss.h> | 28 | #include <video/omapdss.h> |
29 | #include "dss.h" | 29 | #include "dss.h" |
30 | 30 | ||
31 | static struct omap_dss_device *to_dss_device_sysfs(struct device *dev) | 31 | static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) |
32 | { | 32 | { |
33 | struct omap_dss_device *dssdev = NULL; | ||
34 | |||
35 | for_each_dss_dev(dssdev) { | ||
36 | if (dssdev->dev == dev) { | ||
37 | omap_dss_put_device(dssdev); | ||
38 | return dssdev; | ||
39 | } | ||
40 | } | ||
41 | |||
42 | return NULL; | ||
43 | } | ||
44 | |||
45 | static ssize_t display_name_show(struct device *dev, | ||
46 | struct device_attribute *attr, char *buf) | ||
47 | { | ||
48 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
49 | |||
50 | return snprintf(buf, PAGE_SIZE, "%s\n", | 33 | return snprintf(buf, PAGE_SIZE, "%s\n", |
51 | dssdev->name ? | 34 | dssdev->name ? |
52 | dssdev->name : ""); | 35 | dssdev->name : ""); |
53 | } | 36 | } |
54 | 37 | ||
55 | static ssize_t display_enabled_show(struct device *dev, | 38 | static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf) |
56 | struct device_attribute *attr, char *buf) | ||
57 | { | 39 | { |
58 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
59 | |||
60 | return snprintf(buf, PAGE_SIZE, "%d\n", | 40 | return snprintf(buf, PAGE_SIZE, "%d\n", |
61 | omapdss_device_is_enabled(dssdev)); | 41 | omapdss_device_is_enabled(dssdev)); |
62 | } | 42 | } |
63 | 43 | ||
64 | static ssize_t display_enabled_store(struct device *dev, | 44 | static ssize_t display_enabled_store(struct omap_dss_device *dssdev, |
65 | struct device_attribute *attr, | ||
66 | const char *buf, size_t size) | 45 | const char *buf, size_t size) |
67 | { | 46 | { |
68 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
69 | int r; | 47 | int r; |
70 | bool enable; | 48 | bool enable; |
71 | 49 | ||
@@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev, | |||
90 | return size; | 68 | return size; |
91 | } | 69 | } |
92 | 70 | ||
93 | static ssize_t display_tear_show(struct device *dev, | 71 | static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf) |
94 | struct device_attribute *attr, char *buf) | ||
95 | { | 72 | { |
96 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
97 | return snprintf(buf, PAGE_SIZE, "%d\n", | 73 | return snprintf(buf, PAGE_SIZE, "%d\n", |
98 | dssdev->driver->get_te ? | 74 | dssdev->driver->get_te ? |
99 | dssdev->driver->get_te(dssdev) : 0); | 75 | dssdev->driver->get_te(dssdev) : 0); |
100 | } | 76 | } |
101 | 77 | ||
102 | static ssize_t display_tear_store(struct device *dev, | 78 | static ssize_t display_tear_store(struct omap_dss_device *dssdev, |
103 | struct device_attribute *attr, const char *buf, size_t size) | 79 | const char *buf, size_t size) |
104 | { | 80 | { |
105 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
106 | int r; | 81 | int r; |
107 | bool te; | 82 | bool te; |
108 | 83 | ||
@@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev, | |||
120 | return size; | 95 | return size; |
121 | } | 96 | } |
122 | 97 | ||
123 | static ssize_t display_timings_show(struct device *dev, | 98 | static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf) |
124 | struct device_attribute *attr, char *buf) | ||
125 | { | 99 | { |
126 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
127 | struct omap_video_timings t; | 100 | struct omap_video_timings t; |
128 | 101 | ||
129 | if (!dssdev->driver->get_timings) | 102 | if (!dssdev->driver->get_timings) |
@@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev, | |||
137 | t.y_res, t.vfp, t.vbp, t.vsw); | 110 | t.y_res, t.vfp, t.vbp, t.vsw); |
138 | } | 111 | } |
139 | 112 | ||
140 | static ssize_t display_timings_store(struct device *dev, | 113 | static ssize_t display_timings_store(struct omap_dss_device *dssdev, |
141 | struct device_attribute *attr, const char *buf, size_t size) | 114 | const char *buf, size_t size) |
142 | { | 115 | { |
143 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
144 | struct omap_video_timings t = dssdev->panel.timings; | 116 | struct omap_video_timings t = dssdev->panel.timings; |
145 | int r, found; | 117 | int r, found; |
146 | 118 | ||
@@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev, | |||
176 | return size; | 148 | return size; |
177 | } | 149 | } |
178 | 150 | ||
179 | static ssize_t display_rotate_show(struct device *dev, | 151 | static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf) |
180 | struct device_attribute *attr, char *buf) | ||
181 | { | 152 | { |
182 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
183 | int rotate; | 153 | int rotate; |
184 | if (!dssdev->driver->get_rotate) | 154 | if (!dssdev->driver->get_rotate) |
185 | return -ENOENT; | 155 | return -ENOENT; |
@@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev, | |||
187 | return snprintf(buf, PAGE_SIZE, "%u\n", rotate); | 157 | return snprintf(buf, PAGE_SIZE, "%u\n", rotate); |
188 | } | 158 | } |
189 | 159 | ||
190 | static ssize_t display_rotate_store(struct device *dev, | 160 | static ssize_t display_rotate_store(struct omap_dss_device *dssdev, |
191 | struct device_attribute *attr, const char *buf, size_t size) | 161 | const char *buf, size_t size) |
192 | { | 162 | { |
193 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
194 | int rot, r; | 163 | int rot, r; |
195 | 164 | ||
196 | if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) | 165 | if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) |
@@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev, | |||
207 | return size; | 176 | return size; |
208 | } | 177 | } |
209 | 178 | ||
210 | static ssize_t display_mirror_show(struct device *dev, | 179 | static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf) |
211 | struct device_attribute *attr, char *buf) | ||
212 | { | 180 | { |
213 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
214 | int mirror; | 181 | int mirror; |
215 | if (!dssdev->driver->get_mirror) | 182 | if (!dssdev->driver->get_mirror) |
216 | return -ENOENT; | 183 | return -ENOENT; |
@@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct device *dev, | |||
218 | return snprintf(buf, PAGE_SIZE, "%u\n", mirror); | 185 | return snprintf(buf, PAGE_SIZE, "%u\n", mirror); |
219 | } | 186 | } |
220 | 187 | ||
221 | static ssize_t display_mirror_store(struct device *dev, | 188 | static ssize_t display_mirror_store(struct omap_dss_device *dssdev, |
222 | struct device_attribute *attr, const char *buf, size_t size) | 189 | const char *buf, size_t size) |
223 | { | 190 | { |
224 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
225 | int r; | 191 | int r; |
226 | bool mirror; | 192 | bool mirror; |
227 | 193 | ||
@@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev, | |||
239 | return size; | 205 | return size; |
240 | } | 206 | } |
241 | 207 | ||
242 | static ssize_t display_wss_show(struct device *dev, | 208 | static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf) |
243 | struct device_attribute *attr, char *buf) | ||
244 | { | 209 | { |
245 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
246 | unsigned int wss; | 210 | unsigned int wss; |
247 | 211 | ||
248 | if (!dssdev->driver->get_wss) | 212 | if (!dssdev->driver->get_wss) |
@@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev, | |||
253 | return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); | 217 | return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); |
254 | } | 218 | } |
255 | 219 | ||
256 | static ssize_t display_wss_store(struct device *dev, | 220 | static ssize_t display_wss_store(struct omap_dss_device *dssdev, |
257 | struct device_attribute *attr, const char *buf, size_t size) | 221 | const char *buf, size_t size) |
258 | { | 222 | { |
259 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
260 | u32 wss; | 223 | u32 wss; |
261 | int r; | 224 | int r; |
262 | 225 | ||
@@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev, | |||
277 | return size; | 240 | return size; |
278 | } | 241 | } |
279 | 242 | ||
280 | static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL); | 243 | struct display_attribute { |
281 | static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, | 244 | struct attribute attr; |
245 | ssize_t (*show)(struct omap_dss_device *, char *); | ||
246 | ssize_t (*store)(struct omap_dss_device *, const char *, size_t); | ||
247 | }; | ||
248 | |||
249 | #define DISPLAY_ATTR(_name, _mode, _show, _store) \ | ||
250 | struct display_attribute display_attr_##_name = \ | ||
251 | __ATTR(_name, _mode, _show, _store) | ||
252 | |||
253 | static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL); | ||
254 | static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL); | ||
255 | static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR, | ||
282 | display_enabled_show, display_enabled_store); | 256 | display_enabled_show, display_enabled_store); |
283 | static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, | 257 | static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR, |
284 | display_tear_show, display_tear_store); | 258 | display_tear_show, display_tear_store); |
285 | static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, | 259 | static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR, |
286 | display_timings_show, display_timings_store); | 260 | display_timings_show, display_timings_store); |
287 | static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR, | 261 | static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR, |
288 | display_rotate_show, display_rotate_store); | 262 | display_rotate_show, display_rotate_store); |
289 | static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR, | 263 | static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR, |
290 | display_mirror_show, display_mirror_store); | 264 | display_mirror_show, display_mirror_store); |
291 | static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, | 265 | static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR, |
292 | display_wss_show, display_wss_store); | 266 | display_wss_show, display_wss_store); |
293 | 267 | ||
294 | static const struct attribute *display_sysfs_attrs[] = { | 268 | static struct attribute *display_sysfs_attrs[] = { |
295 | &dev_attr_display_name.attr, | 269 | &display_attr_name.attr, |
296 | &dev_attr_enabled.attr, | 270 | &display_attr_display_name.attr, |
297 | &dev_attr_tear_elim.attr, | 271 | &display_attr_enabled.attr, |
298 | &dev_attr_timings.attr, | 272 | &display_attr_tear_elim.attr, |
299 | &dev_attr_rotate.attr, | 273 | &display_attr_timings.attr, |
300 | &dev_attr_mirror.attr, | 274 | &display_attr_rotate.attr, |
301 | &dev_attr_wss.attr, | 275 | &display_attr_mirror.attr, |
276 | &display_attr_wss.attr, | ||
302 | NULL | 277 | NULL |
303 | }; | 278 | }; |
304 | 279 | ||
280 | static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr, | ||
281 | char *buf) | ||
282 | { | ||
283 | struct omap_dss_device *dssdev; | ||
284 | struct display_attribute *display_attr; | ||
285 | |||
286 | dssdev = container_of(kobj, struct omap_dss_device, kobj); | ||
287 | display_attr = container_of(attr, struct display_attribute, attr); | ||
288 | |||
289 | if (!display_attr->show) | ||
290 | return -ENOENT; | ||
291 | |||
292 | return display_attr->show(dssdev, buf); | ||
293 | } | ||
294 | |||
295 | static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr, | ||
296 | const char *buf, size_t size) | ||
297 | { | ||
298 | struct omap_dss_device *dssdev; | ||
299 | struct display_attribute *display_attr; | ||
300 | |||
301 | dssdev = container_of(kobj, struct omap_dss_device, kobj); | ||
302 | display_attr = container_of(attr, struct display_attribute, attr); | ||
303 | |||
304 | if (!display_attr->store) | ||
305 | return -ENOENT; | ||
306 | |||
307 | return display_attr->store(dssdev, buf, size); | ||
308 | } | ||
309 | |||
310 | static const struct sysfs_ops display_sysfs_ops = { | ||
311 | .show = display_attr_show, | ||
312 | .store = display_attr_store, | ||
313 | }; | ||
314 | |||
315 | static struct kobj_type display_ktype = { | ||
316 | .sysfs_ops = &display_sysfs_ops, | ||
317 | .default_attrs = display_sysfs_attrs, | ||
318 | }; | ||
319 | |||
305 | int display_init_sysfs(struct platform_device *pdev) | 320 | int display_init_sysfs(struct platform_device *pdev) |
306 | { | 321 | { |
307 | struct omap_dss_device *dssdev = NULL; | 322 | struct omap_dss_device *dssdev = NULL; |
308 | int r; | 323 | int r; |
309 | 324 | ||
310 | for_each_dss_dev(dssdev) { | 325 | for_each_dss_dev(dssdev) { |
311 | struct kobject *kobj = &dssdev->dev->kobj; | 326 | r = kobject_init_and_add(&dssdev->kobj, &display_ktype, |
312 | 327 | &pdev->dev.kobj, dssdev->alias); | |
313 | r = sysfs_create_files(kobj, display_sysfs_attrs); | ||
314 | if (r) { | 328 | if (r) { |
315 | DSSERR("failed to create sysfs files\n"); | 329 | DSSERR("failed to create sysfs files\n"); |
316 | goto err; | 330 | omap_dss_put_device(dssdev); |
317 | } | ||
318 | |||
319 | r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias); | ||
320 | if (r) { | ||
321 | sysfs_remove_files(kobj, display_sysfs_attrs); | ||
322 | |||
323 | DSSERR("failed to create sysfs display link\n"); | ||
324 | goto err; | 331 | goto err; |
325 | } | 332 | } |
326 | } | 333 | } |
@@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev) | |||
338 | struct omap_dss_device *dssdev = NULL; | 345 | struct omap_dss_device *dssdev = NULL; |
339 | 346 | ||
340 | for_each_dss_dev(dssdev) { | 347 | for_each_dss_dev(dssdev) { |
341 | sysfs_remove_link(&pdev->dev.kobj, dssdev->alias); | 348 | if (kobject_name(&dssdev->kobj) == NULL) |
342 | sysfs_remove_files(&dssdev->dev->kobj, | 349 | continue; |
343 | display_sysfs_attrs); | 350 | |
351 | kobject_del(&dssdev->kobj); | ||
352 | kobject_put(&dssdev->kobj); | ||
353 | |||
354 | memset(&dssdev->kobj, 0, sizeof(dssdev->kobj)); | ||
344 | } | 355 | } |
345 | } | 356 | } |
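
The display-sysfs rewrite drops the per-device DEVICE_ATTRs in favour of a private kobject embedded in omap_dss_device, with a driver-defined attribute type whose show/store callbacks are dispatched through sysfs_ops. A condensed sketch of that pattern for a hypothetical struct foo (not omapdss code; .default_attrs is the kobj_type field this kernel generation uses):

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/errno.h>

struct foo {				/* hypothetical object with sysfs files */
	struct kobject kobj;
	int value;
};

struct foo_attribute {
	struct attribute attr;
	ssize_t (*show)(struct foo *, char *);
	ssize_t (*store)(struct foo *, const char *, size_t);
};

static ssize_t foo_value_show(struct foo *foo, char *buf)
{
	return sprintf(buf, "%d\n", foo->value);
}

static struct foo_attribute foo_attr_value =
	__ATTR(value, S_IRUGO, foo_value_show, NULL);

static struct attribute *foo_attrs[] = {
	&foo_attr_value.attr,
	NULL,
};

static ssize_t foo_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct foo *foo = container_of(kobj, struct foo, kobj);
	struct foo_attribute *fattr =
		container_of(attr, struct foo_attribute, attr);

	return fattr->show ? fattr->show(foo, buf) : -ENOENT;
}

static const struct sysfs_ops foo_sysfs_ops = {
	.show	= foo_attr_show,
};

static struct kobj_type foo_ktype = {
	.sysfs_ops	= &foo_sysfs_ops,
	.default_attrs	= foo_attrs,
};

/* Registration mirrors the hunk above:
 *	kobject_init_and_add(&foo->kobj, &foo_ktype, parent_kobj, "foo");
 * and teardown is kobject_del() followed by kobject_put().
 */
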
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 0413157f3b49..6a356e344f82 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/balloon_compaction.h> | 30 | #include <linux/balloon_compaction.h> |
31 | #include <linux/oom.h> | 31 | #include <linux/oom.h> |
32 | #include <linux/wait.h> | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * Balloon device works in 4K page units. So each page is pointed to by | 35 | * Balloon device works in 4K page units. So each page is pointed to by |
@@ -334,17 +335,25 @@ static int virtballoon_oom_notify(struct notifier_block *self, | |||
334 | static int balloon(void *_vballoon) | 335 | static int balloon(void *_vballoon) |
335 | { | 336 | { |
336 | struct virtio_balloon *vb = _vballoon; | 337 | struct virtio_balloon *vb = _vballoon; |
338 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | ||
337 | 339 | ||
338 | set_freezable(); | 340 | set_freezable(); |
339 | while (!kthread_should_stop()) { | 341 | while (!kthread_should_stop()) { |
340 | s64 diff; | 342 | s64 diff; |
341 | 343 | ||
342 | try_to_freeze(); | 344 | try_to_freeze(); |
343 | wait_event_interruptible(vb->config_change, | 345 | |
344 | (diff = towards_target(vb)) != 0 | 346 | add_wait_queue(&vb->config_change, &wait); |
345 | || vb->need_stats_update | 347 | for (;;) { |
346 | || kthread_should_stop() | 348 | if ((diff = towards_target(vb)) != 0 || |
347 | || freezing(current)); | 349 | vb->need_stats_update || |
350 | kthread_should_stop() || | ||
351 | freezing(current)) | ||
352 | break; | ||
353 | wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | ||
354 | } | ||
355 | remove_wait_queue(&vb->config_change, &wait); | ||
356 | |||
348 | if (vb->need_stats_update) | 357 | if (vb->need_stats_update) |
349 | stats_handle_request(vb); | 358 | stats_handle_request(vb); |
350 | if (diff > 0) | 359 | if (diff > 0) |
@@ -499,6 +508,8 @@ static int virtballoon_probe(struct virtio_device *vdev) | |||
499 | if (err < 0) | 508 | if (err < 0) |
500 | goto out_oom_notify; | 509 | goto out_oom_notify; |
501 | 510 | ||
511 | virtio_device_ready(vdev); | ||
512 | |||
502 | vb->thread = kthread_run(balloon, vb, "vballoon"); | 513 | vb->thread = kthread_run(balloon, vb, "vballoon"); |
503 | if (IS_ERR(vb->thread)) { | 514 | if (IS_ERR(vb->thread)) { |
504 | err = PTR_ERR(vb->thread); | 515 | err = PTR_ERR(vb->thread); |
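
The balloon thread switches from wait_event_interruptible() to an open-coded add_wait_queue()/wait_woken() loop, so a wakeup that races with the condition check is not lost while the thread prepares to sleep. A hedged sketch of the idiom, with an invented context structure standing in for the balloon state:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct my_ctx {				/* invented context */
	wait_queue_head_t wq;
	bool work_pending;
};

static void do_work(struct my_ctx *ctx)
{
	ctx->work_pending = false;	/* stand-in for the real work */
}

static int my_thread(void *data)
{
	struct my_ctx *ctx = data;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!kthread_should_stop()) {
		add_wait_queue(&ctx->wq, &wait);
		for (;;) {
			if (ctx->work_pending || kthread_should_stop())
				break;
			/* Returns immediately if a wakeup already arrived. */
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		}
		remove_wait_queue(&ctx->wq, &wait);

		do_work(ctx);
	}
	return 0;
}
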
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index cad569890908..6010d7ec0a0f 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c | |||
@@ -156,22 +156,95 @@ static void vm_get(struct virtio_device *vdev, unsigned offset, | |||
156 | void *buf, unsigned len) | 156 | void *buf, unsigned len) |
157 | { | 157 | { |
158 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); | 158 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); |
159 | u8 *ptr = buf; | 159 | void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG; |
160 | int i; | 160 | u8 b; |
161 | __le16 w; | ||
162 | __le32 l; | ||
161 | 163 | ||
162 | for (i = 0; i < len; i++) | 164 | if (vm_dev->version == 1) { |
163 | ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); | 165 | u8 *ptr = buf; |
166 | int i; | ||
167 | |||
168 | for (i = 0; i < len; i++) | ||
169 | ptr[i] = readb(base + offset + i); | ||
170 | return; | ||
171 | } | ||
172 | |||
173 | switch (len) { | ||
174 | case 1: | ||
175 | b = readb(base + offset); | ||
176 | memcpy(buf, &b, sizeof b); | ||
177 | break; | ||
178 | case 2: | ||
179 | w = cpu_to_le16(readw(base + offset)); | ||
180 | memcpy(buf, &w, sizeof w); | ||
181 | break; | ||
182 | case 4: | ||
183 | l = cpu_to_le32(readl(base + offset)); | ||
184 | memcpy(buf, &l, sizeof l); | ||
185 | break; | ||
186 | case 8: | ||
187 | l = cpu_to_le32(readl(base + offset)); | ||
188 | memcpy(buf, &l, sizeof l); | ||
189 | l = cpu_to_le32(ioread32(base + offset + sizeof l)); | ||
190 | memcpy(buf + sizeof l, &l, sizeof l); | ||
191 | break; | ||
192 | default: | ||
193 | BUG(); | ||
194 | } | ||
164 | } | 195 | } |
165 | 196 | ||
166 | static void vm_set(struct virtio_device *vdev, unsigned offset, | 197 | static void vm_set(struct virtio_device *vdev, unsigned offset, |
167 | const void *buf, unsigned len) | 198 | const void *buf, unsigned len) |
168 | { | 199 | { |
169 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); | 200 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); |
170 | const u8 *ptr = buf; | 201 | void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG; |
171 | int i; | 202 | u8 b; |
203 | __le16 w; | ||
204 | __le32 l; | ||
172 | 205 | ||
173 | for (i = 0; i < len; i++) | 206 | if (vm_dev->version == 1) { |
174 | writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); | 207 | const u8 *ptr = buf; |
208 | int i; | ||
209 | |||
210 | for (i = 0; i < len; i++) | ||
211 | writeb(ptr[i], base + offset + i); | ||
212 | |||
213 | return; | ||
214 | } | ||
215 | |||
216 | switch (len) { | ||
217 | case 1: | ||
218 | memcpy(&b, buf, sizeof b); | ||
219 | writeb(b, base + offset); | ||
220 | break; | ||
221 | case 2: | ||
222 | memcpy(&w, buf, sizeof w); | ||
223 | writew(le16_to_cpu(w), base + offset); | ||
224 | break; | ||
225 | case 4: | ||
226 | memcpy(&l, buf, sizeof l); | ||
227 | writel(le32_to_cpu(l), base + offset); | ||
228 | break; | ||
229 | case 8: | ||
230 | memcpy(&l, buf, sizeof l); | ||
231 | writel(le32_to_cpu(l), base + offset); | ||
232 | memcpy(&l, buf + sizeof l, sizeof l); | ||
233 | writel(le32_to_cpu(l), base + offset + sizeof l); | ||
234 | break; | ||
235 | default: | ||
236 | BUG(); | ||
237 | } | ||
238 | } | ||
239 | |||
240 | static u32 vm_generation(struct virtio_device *vdev) | ||
241 | { | ||
242 | struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); | ||
243 | |||
244 | if (vm_dev->version == 1) | ||
245 | return 0; | ||
246 | else | ||
247 | return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION); | ||
175 | } | 248 | } |
176 | 249 | ||
177 | static u8 vm_get_status(struct virtio_device *vdev) | 250 | static u8 vm_get_status(struct virtio_device *vdev) |
@@ -440,6 +513,7 @@ static const char *vm_bus_name(struct virtio_device *vdev) | |||
440 | static const struct virtio_config_ops virtio_mmio_config_ops = { | 513 | static const struct virtio_config_ops virtio_mmio_config_ops = { |
441 | .get = vm_get, | 514 | .get = vm_get, |
442 | .set = vm_set, | 515 | .set = vm_set, |
516 | .generation = vm_generation, | ||
443 | .get_status = vm_get_status, | 517 | .get_status = vm_get_status, |
444 | .set_status = vm_set_status, | 518 | .set_status = vm_set_status, |
445 | .reset = vm_reset, | 519 | .reset = vm_reset, |
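
For version 2 virtio-mmio devices the config window has to be accessed with the field's natural width and kept little-endian, rather than the byte-at-a-time loop used for version 1. A reduced sketch of the 32-bit read and write paths, assuming base already points at a valid config region:

#include <linux/io.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* 32-bit config field access for a "modern" (version 2) device. */
static void cfg_read32(void __iomem *base, unsigned int offset, void *buf)
{
	/* readl() gives a CPU-endian value; virtio config space is
	 * little-endian, so convert before copying the bytes out. */
	__le32 v = cpu_to_le32(readl(base + offset));

	memcpy(buf, &v, sizeof(v));
}

static void cfg_write32(void __iomem *base, unsigned int offset,
			const void *buf)
{
	__le32 v;

	memcpy(&v, buf, sizeof(v));
	writel(le32_to_cpu(v), base + offset);
}
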
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c index c8def68d9e4c..0deaa4f971f5 100644 --- a/drivers/watchdog/imgpdc_wdt.c +++ b/drivers/watchdog/imgpdc_wdt.c | |||
@@ -42,10 +42,10 @@ | |||
42 | #define PDC_WDT_MIN_TIMEOUT 1 | 42 | #define PDC_WDT_MIN_TIMEOUT 1 |
43 | #define PDC_WDT_DEF_TIMEOUT 64 | 43 | #define PDC_WDT_DEF_TIMEOUT 64 |
44 | 44 | ||
45 | static int heartbeat; | 45 | static int heartbeat = PDC_WDT_DEF_TIMEOUT; |
46 | module_param(heartbeat, int, 0); | 46 | module_param(heartbeat, int, 0); |
47 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. " | 47 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds " |
48 | "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); | 48 | "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); |
49 | 49 | ||
50 | static bool nowayout = WATCHDOG_NOWAYOUT; | 50 | static bool nowayout = WATCHDOG_NOWAYOUT; |
51 | module_param(nowayout, bool, 0); | 51 | module_param(nowayout, bool, 0); |
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev) | |||
191 | pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; | 191 | pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; |
192 | pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; | 192 | pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; |
193 | pdc_wdt->wdt_dev.parent = &pdev->dev; | 193 | pdc_wdt->wdt_dev.parent = &pdev->dev; |
194 | watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); | ||
194 | 195 | ||
195 | ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); | 196 | ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); |
196 | if (ret < 0) { | 197 | if (ret < 0) { |
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev) | |||
232 | watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); | 233 | watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); |
233 | 234 | ||
234 | platform_set_drvdata(pdev, pdc_wdt); | 235 | platform_set_drvdata(pdev, pdc_wdt); |
235 | watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); | ||
236 | 236 | ||
237 | ret = watchdog_register_device(&pdc_wdt->wdt_dev); | 237 | ret = watchdog_register_device(&pdc_wdt->wdt_dev); |
238 | if (ret) | 238 | if (ret) |
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index a87f6df6e85f..938b987de551 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c | |||
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev) | |||
133 | u32 reg; | 133 | u32 reg; |
134 | struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); | 134 | struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); |
135 | void __iomem *wdt_base = mtk_wdt->wdt_base; | 135 | void __iomem *wdt_base = mtk_wdt->wdt_base; |
136 | u32 ret; | 136 | int ret; |
137 | 137 | ||
138 | ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); | 138 | ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); |
139 | if (ret < 0) | 139 | if (ret < 0) |
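
The mtk_wdt fix is about signedness: with ret declared u32 the if (ret < 0) test can never be true, so set_timeout failures were silently ignored. A standalone example of the bug class:

#include <stdio.h>

static int may_fail(void)
{
	return -1;			/* error */
}

int main(void)
{
	unsigned int bad = may_fail();	/* -1 becomes 0xffffffff */
	int good = may_fail();

	if (bad < 0)			/* always false; compilers warn here */
		puts("unsigned: error detected");
	if (good < 0)
		puts("signed: error detected");
	return 0;
}
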
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index b4bca2d4a7e5..70fba973a107 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq) | |||
526 | pirq_query_unmask(irq); | 526 | pirq_query_unmask(irq); |
527 | 527 | ||
528 | rc = set_evtchn_to_irq(evtchn, irq); | 528 | rc = set_evtchn_to_irq(evtchn, irq); |
529 | if (rc != 0) { | 529 | if (rc) |
530 | pr_err("irq%d: Failed to set port to irq mapping (%d)\n", | 530 | goto err; |
531 | irq, rc); | 531 | |
532 | xen_evtchn_close(evtchn); | ||
533 | return 0; | ||
534 | } | ||
535 | bind_evtchn_to_cpu(evtchn, 0); | 532 | bind_evtchn_to_cpu(evtchn, 0); |
536 | info->evtchn = evtchn; | 533 | info->evtchn = evtchn; |
537 | 534 | ||
535 | rc = xen_evtchn_port_setup(info); | ||
536 | if (rc) | ||
537 | goto err; | ||
538 | |||
538 | out: | 539 | out: |
539 | unmask_evtchn(evtchn); | 540 | unmask_evtchn(evtchn); |
540 | eoi_pirq(irq_get_irq_data(irq)); | 541 | eoi_pirq(irq_get_irq_data(irq)); |
541 | 542 | ||
542 | return 0; | 543 | return 0; |
544 | |||
545 | err: | ||
546 | pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); | ||
547 | xen_evtchn_close(evtchn); | ||
548 | return 0; | ||
543 | } | 549 | } |
544 | 550 | ||
545 | static unsigned int startup_pirq(struct irq_data *data) | 551 | static unsigned int startup_pirq(struct irq_data *data) |
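
__startup_pirq now routes both failure points through a single err: label, so the error message and the xen_evtchn_close() rollback live in exactly one place. A generic, self-contained sketch of that goto-unwind shape with placeholder steps (none of these helpers are Xen APIs):

#include <stdio.h>

/* Placeholder steps; each returns 0 on success, negative on error. */
static int map_channel(int chn, int irq)      { (void)chn; (void)irq; return 0; }
static int configure_channel(int chn)         { (void)chn; return -1; }
static void close_channel(int chn)            { (void)chn; }

static int setup_channel(int chn, int irq)
{
	int rc;

	rc = map_channel(chn, irq);
	if (rc)
		goto err;

	rc = configure_channel(chn);
	if (rc)
		goto err;

	return 0;

err:
	/* One place reports the error and rolls back, as in the Xen hunk. */
	fprintf(stderr, "chn%d: setup failed (%d)\n", chn, rc);
	close_channel(chn);
	return rc;
}

int main(void)
{
	return setup_channel(3, 42) ? 1 : 0;
}
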
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 46ae0f9f02ad..75fe3d466515 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include "conf_space.h" | 16 | #include "conf_space.h" |
17 | #include "conf_space_quirks.h" | 17 | #include "conf_space_quirks.h" |
18 | 18 | ||
19 | static bool permissive; | 19 | bool permissive; |
20 | module_param(permissive, bool, 0644); | 20 | module_param(permissive, bool, 0644); |
21 | 21 | ||
22 | /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, | 22 | /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, |
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h index e56c934ad137..2e1d73d1d5d0 100644 --- a/drivers/xen/xen-pciback/conf_space.h +++ b/drivers/xen/xen-pciback/conf_space.h | |||
@@ -64,6 +64,8 @@ struct config_field_entry { | |||
64 | void *data; | 64 | void *data; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | extern bool permissive; | ||
68 | |||
67 | #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) | 69 | #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) |
68 | 70 | ||
69 | /* Add fields to a device - the add_fields macro expects to get a pointer to | 71 | /* Add fields to a device - the add_fields macro expects to get a pointer to |
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index c5ee82587e8c..2d7369391472 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c | |||
@@ -11,6 +11,10 @@ | |||
11 | #include "pciback.h" | 11 | #include "pciback.h" |
12 | #include "conf_space.h" | 12 | #include "conf_space.h" |
13 | 13 | ||
14 | struct pci_cmd_info { | ||
15 | u16 val; | ||
16 | }; | ||
17 | |||
14 | struct pci_bar_info { | 18 | struct pci_bar_info { |
15 | u32 val; | 19 | u32 val; |
16 | u32 len_val; | 20 | u32 len_val; |
@@ -20,22 +24,36 @@ struct pci_bar_info { | |||
20 | #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) | 24 | #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) |
21 | #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) | 25 | #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) |
22 | 26 | ||
23 | static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) | 27 | /* Bits guests are allowed to control in permissive mode. */ |
28 | #define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \ | ||
29 | PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \ | ||
30 | PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK) | ||
31 | |||
32 | static void *command_init(struct pci_dev *dev, int offset) | ||
24 | { | 33 | { |
25 | int i; | 34 | struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); |
26 | int ret; | 35 | int err; |
27 | 36 | ||
28 | ret = xen_pcibk_read_config_word(dev, offset, value, data); | 37 | if (!cmd) |
29 | if (!pci_is_enabled(dev)) | 38 | return ERR_PTR(-ENOMEM); |
30 | return ret; | 39 | |
31 | 40 | err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val); | |
32 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | 41 | if (err) { |
33 | if (dev->resource[i].flags & IORESOURCE_IO) | 42 | kfree(cmd); |
34 | *value |= PCI_COMMAND_IO; | 43 | return ERR_PTR(err); |
35 | if (dev->resource[i].flags & IORESOURCE_MEM) | ||
36 | *value |= PCI_COMMAND_MEMORY; | ||
37 | } | 44 | } |
38 | 45 | ||
46 | return cmd; | ||
47 | } | ||
48 | |||
49 | static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) | ||
50 | { | ||
51 | int ret = pci_read_config_word(dev, offset, value); | ||
52 | const struct pci_cmd_info *cmd = data; | ||
53 | |||
54 | *value &= PCI_COMMAND_GUEST; | ||
55 | *value |= cmd->val & ~PCI_COMMAND_GUEST; | ||
56 | |||
39 | return ret; | 57 | return ret; |
40 | } | 58 | } |
41 | 59 | ||
@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) | |||
43 | { | 61 | { |
44 | struct xen_pcibk_dev_data *dev_data; | 62 | struct xen_pcibk_dev_data *dev_data; |
45 | int err; | 63 | int err; |
64 | u16 val; | ||
65 | struct pci_cmd_info *cmd = data; | ||
46 | 66 | ||
47 | dev_data = pci_get_drvdata(dev); | 67 | dev_data = pci_get_drvdata(dev); |
48 | if (!pci_is_enabled(dev) && is_enable_cmd(value)) { | 68 | if (!pci_is_enabled(dev) && is_enable_cmd(value)) { |
@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) | |||
83 | } | 103 | } |
84 | } | 104 | } |
85 | 105 | ||
106 | cmd->val = value; | ||
107 | |||
108 | if (!permissive && (!dev_data || !dev_data->permissive)) | ||
109 | return 0; | ||
110 | |||
111 | /* Only allow the guest to control certain bits. */ | ||
112 | err = pci_read_config_word(dev, offset, &val); | ||
113 | if (err || val == value) | ||
114 | return err; | ||
115 | |||
116 | value &= PCI_COMMAND_GUEST; | ||
117 | value |= val & ~PCI_COMMAND_GUEST; | ||
118 | |||
86 | return pci_write_config_word(dev, offset, value); | 119 | return pci_write_config_word(dev, offset, value); |
87 | } | 120 | } |
88 | 121 | ||
@@ -282,6 +315,8 @@ static const struct config_field header_common[] = { | |||
282 | { | 315 | { |
283 | .offset = PCI_COMMAND, | 316 | .offset = PCI_COMMAND, |
284 | .size = 2, | 317 | .size = 2, |
318 | .init = command_init, | ||
319 | .release = bar_release, | ||
285 | .u.w.read = command_read, | 320 | .u.w.read = command_read, |
286 | .u.w.write = command_write, | 321 | .u.w.write = command_write, |
287 | }, | 322 | }, |
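
command_read()/command_write() now expose only a whitelisted subset of PCI_COMMAND bits to the guest and fold the host-owned bits back in with a read-modify-write. A small standalone sketch of the masking arithmetic; GUEST_BITS is an illustrative stand-in, not the kernel's PCI_COMMAND_GUEST macro:

#include <stdio.h>
#include <stdint.h>

#define GUEST_BITS 0x02bcu	/* illustrative whitelist of guest-writable bits */

/* Merge the guest-requested value with the bits the host keeps control of. */
static uint16_t merge_command(uint16_t hw_val, uint16_t guest_val)
{
	return (guest_val & GUEST_BITS) | (hw_val & ~GUEST_BITS);
}

int main(void)
{
	uint16_t hw = 0x0103;		/* e.g. I/O, memory and SERR enabled */
	uint16_t guest = 0xffff;	/* guest asks for every bit */

	printf("value written to hardware: 0x%04x\n", merge_command(hw, guest));
	return 0;
}
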
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 9faca6a60bb0..42bd55a6c237 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
@@ -1659,11 +1659,8 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg, | |||
1659 | name); | 1659 | name); |
1660 | goto out; | 1660 | goto out; |
1661 | } | 1661 | } |
1662 | /* | 1662 | /* Now register the TCM pvscsi virtual I_T Nexus as active. */ |
1663 | * Now register the TCM pvscsi virtual I_T Nexus as active with the | 1663 | transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, |
1664 | * call to __transport_register_session() | ||
1665 | */ | ||
1666 | __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, | ||
1667 | tv_nexus->tvn_se_sess, tv_nexus); | 1664 | tv_nexus->tvn_se_sess, tv_nexus); |
1668 | tpg->tpg_nexus = tv_nexus; | 1665 | tpg->tpg_nexus = tv_nexus; |
1669 | 1666 | ||
diff --git a/fs/affs/file.c b/fs/affs/file.c index d2468bf95669..a91795e01a7f 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c | |||
@@ -699,8 +699,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
699 | boff = tmp % bsize; | 699 | boff = tmp % bsize; |
700 | if (boff) { | 700 | if (boff) { |
701 | bh = affs_bread_ino(inode, bidx, 0); | 701 | bh = affs_bread_ino(inode, bidx, 0); |
702 | if (IS_ERR(bh)) | 702 | if (IS_ERR(bh)) { |
703 | return PTR_ERR(bh); | 703 | written = PTR_ERR(bh); |
704 | goto err_first_bh; | ||
705 | } | ||
704 | tmp = min(bsize - boff, to - from); | 706 | tmp = min(bsize - boff, to - from); |
705 | BUG_ON(boff + tmp > bsize || tmp > bsize); | 707 | BUG_ON(boff + tmp > bsize || tmp > bsize); |
706 | memcpy(AFFS_DATA(bh) + boff, data + from, tmp); | 708 | memcpy(AFFS_DATA(bh) + boff, data + from, tmp); |
@@ -712,14 +714,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
712 | bidx++; | 714 | bidx++; |
713 | } else if (bidx) { | 715 | } else if (bidx) { |
714 | bh = affs_bread_ino(inode, bidx - 1, 0); | 716 | bh = affs_bread_ino(inode, bidx - 1, 0); |
715 | if (IS_ERR(bh)) | 717 | if (IS_ERR(bh)) { |
716 | return PTR_ERR(bh); | 718 | written = PTR_ERR(bh); |
719 | goto err_first_bh; | ||
720 | } | ||
717 | } | 721 | } |
718 | while (from + bsize <= to) { | 722 | while (from + bsize <= to) { |
719 | prev_bh = bh; | 723 | prev_bh = bh; |
720 | bh = affs_getemptyblk_ino(inode, bidx); | 724 | bh = affs_getemptyblk_ino(inode, bidx); |
721 | if (IS_ERR(bh)) | 725 | if (IS_ERR(bh)) |
722 | goto out; | 726 | goto err_bh; |
723 | memcpy(AFFS_DATA(bh), data + from, bsize); | 727 | memcpy(AFFS_DATA(bh), data + from, bsize); |
724 | if (buffer_new(bh)) { | 728 | if (buffer_new(bh)) { |
725 | AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); | 729 | AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); |
@@ -751,7 +755,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
751 | prev_bh = bh; | 755 | prev_bh = bh; |
752 | bh = affs_bread_ino(inode, bidx, 1); | 756 | bh = affs_bread_ino(inode, bidx, 1); |
753 | if (IS_ERR(bh)) | 757 | if (IS_ERR(bh)) |
754 | goto out; | 758 | goto err_bh; |
755 | tmp = min(bsize, to - from); | 759 | tmp = min(bsize, to - from); |
756 | BUG_ON(tmp > bsize); | 760 | BUG_ON(tmp > bsize); |
757 | memcpy(AFFS_DATA(bh), data + from, tmp); | 761 | memcpy(AFFS_DATA(bh), data + from, tmp); |
@@ -790,12 +794,13 @@ done: | |||
790 | if (tmp > inode->i_size) | 794 | if (tmp > inode->i_size) |
791 | inode->i_size = AFFS_I(inode)->mmu_private = tmp; | 795 | inode->i_size = AFFS_I(inode)->mmu_private = tmp; |
792 | 796 | ||
797 | err_first_bh: | ||
793 | unlock_page(page); | 798 | unlock_page(page); |
794 | page_cache_release(page); | 799 | page_cache_release(page); |
795 | 800 | ||
796 | return written; | 801 | return written; |
797 | 802 | ||
798 | out: | 803 | err_bh: |
799 | bh = prev_bh; | 804 | bh = prev_bh; |
800 | if (!written) | 805 | if (!written) |
801 | written = PTR_ERR(bh); | 806 | written = PTR_ERR(bh); |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 84c3b00f3de8..f9c89cae39ee 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -3387,6 +3387,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, | |||
3387 | 3387 | ||
3388 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | 3388 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, |
3389 | struct btrfs_root *root); | 3389 | struct btrfs_root *root); |
3390 | int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, | ||
3391 | struct btrfs_root *root); | ||
3390 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); | 3392 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); |
3391 | int btrfs_free_block_groups(struct btrfs_fs_info *info); | 3393 | int btrfs_free_block_groups(struct btrfs_fs_info *info); |
3392 | int btrfs_read_block_groups(struct btrfs_root *root); | 3394 | int btrfs_read_block_groups(struct btrfs_root *root); |
@@ -3909,6 +3911,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode, | |||
3909 | loff_t actual_len, u64 *alloc_hint); | 3911 | loff_t actual_len, u64 *alloc_hint); |
3910 | int btrfs_inode_check_errors(struct inode *inode); | 3912 | int btrfs_inode_check_errors(struct inode *inode); |
3911 | extern const struct dentry_operations btrfs_dentry_operations; | 3913 | extern const struct dentry_operations btrfs_dentry_operations; |
3914 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | ||
3915 | void btrfs_test_inode_set_ops(struct inode *inode); | ||
3916 | #endif | ||
3912 | 3917 | ||
3913 | /* ioctl.c */ | 3918 | /* ioctl.c */ |
3914 | long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | 3919 | long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f79f38542a73..639f2663ed3f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -3921,7 +3921,7 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, | |||
3921 | } | 3921 | } |
3922 | if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) | 3922 | if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) |
3923 | + sizeof(struct btrfs_chunk)) { | 3923 | + sizeof(struct btrfs_chunk)) { |
3924 | printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n", | 3924 | printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n", |
3925 | btrfs_super_sys_array_size(sb), | 3925 | btrfs_super_sys_array_size(sb), |
3926 | sizeof(struct btrfs_disk_key) | 3926 | sizeof(struct btrfs_disk_key) |
3927 | + sizeof(struct btrfs_chunk)); | 3927 | + sizeof(struct btrfs_chunk)); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 6f080451fcb1..8b353ad02f03 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -3325,6 +3325,32 @@ out: | |||
3325 | return ret; | 3325 | return ret; |
3326 | } | 3326 | } |
3327 | 3327 | ||
3328 | int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, | ||
3329 | struct btrfs_root *root) | ||
3330 | { | ||
3331 | struct btrfs_block_group_cache *cache, *tmp; | ||
3332 | struct btrfs_transaction *cur_trans = trans->transaction; | ||
3333 | struct btrfs_path *path; | ||
3334 | |||
3335 | if (list_empty(&cur_trans->dirty_bgs) || | ||
3336 | !btrfs_test_opt(root, SPACE_CACHE)) | ||
3337 | return 0; | ||
3338 | |||
3339 | path = btrfs_alloc_path(); | ||
3340 | if (!path) | ||
3341 | return -ENOMEM; | ||
3342 | |||
3343 | /* Could add new block groups, use _safe just in case */ | ||
3344 | list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, | ||
3345 | dirty_list) { | ||
3346 | if (cache->disk_cache_state == BTRFS_DC_CLEAR) | ||
3347 | cache_save_setup(cache, trans, path); | ||
3348 | } | ||
3349 | |||
3350 | btrfs_free_path(path); | ||
3351 | return 0; | ||
3352 | } | ||
3353 | |||
3328 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | 3354 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, |
3329 | struct btrfs_root *root) | 3355 | struct btrfs_root *root) |
3330 | { | 3356 | { |
@@ -5110,7 +5136,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |||
5110 | num_bytes = ALIGN(num_bytes, root->sectorsize); | 5136 | num_bytes = ALIGN(num_bytes, root->sectorsize); |
5111 | 5137 | ||
5112 | spin_lock(&BTRFS_I(inode)->lock); | 5138 | spin_lock(&BTRFS_I(inode)->lock); |
5113 | BTRFS_I(inode)->outstanding_extents++; | 5139 | nr_extents = (unsigned)div64_u64(num_bytes + |
5140 | BTRFS_MAX_EXTENT_SIZE - 1, | ||
5141 | BTRFS_MAX_EXTENT_SIZE); | ||
5142 | BTRFS_I(inode)->outstanding_extents += nr_extents; | ||
5143 | nr_extents = 0; | ||
5114 | 5144 | ||
5115 | if (BTRFS_I(inode)->outstanding_extents > | 5145 | if (BTRFS_I(inode)->outstanding_extents > |
5116 | BTRFS_I(inode)->reserved_extents) | 5146 | BTRFS_I(inode)->reserved_extents) |
@@ -5255,6 +5285,9 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) | |||
5255 | if (dropped > 0) | 5285 | if (dropped > 0) |
5256 | to_free += btrfs_calc_trans_metadata_size(root, dropped); | 5286 | to_free += btrfs_calc_trans_metadata_size(root, dropped); |
5257 | 5287 | ||
5288 | if (btrfs_test_is_dummy_root(root)) | ||
5289 | return; | ||
5290 | |||
5258 | trace_btrfs_space_reservation(root->fs_info, "delalloc", | 5291 | trace_btrfs_space_reservation(root->fs_info, "delalloc", |
5259 | btrfs_ino(inode), to_free, 0); | 5292 | btrfs_ino(inode), to_free, 0); |
5260 | if (root->fs_info->quota_enabled) { | 5293 | if (root->fs_info->quota_enabled) { |
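
btrfs_delalloc_reserve_metadata() now derives the number of outstanding extents by rounding num_bytes up to whole BTRFS_MAX_EXTENT_SIZE units. A worked example of that ceiling division, assuming the 128 MiB value the constant has in this series (treat the exact size as an assumption):

#include <stdio.h>
#include <stdint.h>

#define MAX_EXTENT_SIZE (128ULL * 1024 * 1024)	/* assumed BTRFS_MAX_EXTENT_SIZE */

static uint64_t nr_extents(uint64_t num_bytes)
{
	/* Same ceiling division as div64_u64(num_bytes + MAX - 1, MAX). */
	return (num_bytes + MAX_EXTENT_SIZE - 1) / MAX_EXTENT_SIZE;
}

int main(void)
{
	/* 256 MiB + 4 KiB of dirty data needs three max-sized extents. */
	uint64_t bytes = 256ULL * 1024 * 1024 + 4096;

	printf("%llu bytes -> %llu extents\n",
	       (unsigned long long)bytes,
	       (unsigned long long)nr_extents(bytes));
	return 0;
}
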
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c7233ff1d533..d688cfe5d496 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -4968,6 +4968,12 @@ static int release_extent_buffer(struct extent_buffer *eb) | |||
4968 | 4968 | ||
4969 | /* Should be safe to release our pages at this point */ | 4969 | /* Should be safe to release our pages at this point */ |
4970 | btrfs_release_extent_buffer_page(eb); | 4970 | btrfs_release_extent_buffer_page(eb); |
4971 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | ||
4972 | if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) { | ||
4973 | __free_extent_buffer(eb); | ||
4974 | return 1; | ||
4975 | } | ||
4976 | #endif | ||
4971 | call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); | 4977 | call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); |
4972 | return 1; | 4978 | return 1; |
4973 | } | 4979 | } |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index da828cf5e8f8..d2e732d7af52 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -108,6 +108,13 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start, | |||
108 | 108 | ||
109 | static int btrfs_dirty_inode(struct inode *inode); | 109 | static int btrfs_dirty_inode(struct inode *inode); |
110 | 110 | ||
111 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | ||
112 | void btrfs_test_inode_set_ops(struct inode *inode) | ||
113 | { | ||
114 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | ||
115 | } | ||
116 | #endif | ||
117 | |||
111 | static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, | 118 | static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, |
112 | struct inode *inode, struct inode *dir, | 119 | struct inode *inode, struct inode *dir, |
113 | const struct qstr *qstr) | 120 | const struct qstr *qstr) |
@@ -1542,30 +1549,17 @@ static void btrfs_split_extent_hook(struct inode *inode, | |||
1542 | u64 new_size; | 1549 | u64 new_size; |
1543 | 1550 | ||
1544 | /* | 1551 | /* |
1545 | * We need the largest size of the remaining extent to see if we | 1552 | * See the explanation in btrfs_merge_extent_hook, the same |
1546 | * need to add a new outstanding extent. Think of the following | 1553 | * applies here, just in reverse. |
1547 | * case | ||
1548 | * | ||
1549 | * [MEAX_EXTENT_SIZEx2 - 4k][4k] | ||
1550 | * | ||
1551 | * The new_size would just be 4k and we'd think we had enough | ||
1552 | * outstanding extents for this if we only took one side of the | ||
1553 | * split, same goes for the other direction. We need to see if | ||
1554 | * the larger size still is the same amount of extents as the | ||
1555 | * original size, because if it is we need to add a new | ||
1556 | * outstanding extent. But if we split up and the larger size | ||
1557 | * is less than the original then we are good to go since we've | ||
1558 | * already accounted for the extra extent in our original | ||
1559 | * accounting. | ||
1560 | */ | 1554 | */ |
1561 | new_size = orig->end - split + 1; | 1555 | new_size = orig->end - split + 1; |
1562 | if ((split - orig->start) > new_size) | 1556 | num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1563 | new_size = split - orig->start; | ||
1564 | |||
1565 | num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, | ||
1566 | BTRFS_MAX_EXTENT_SIZE); | 1557 | BTRFS_MAX_EXTENT_SIZE); |
1567 | if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, | 1558 | new_size = split - orig->start; |
1568 | BTRFS_MAX_EXTENT_SIZE) < num_extents) | 1559 | num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1560 | BTRFS_MAX_EXTENT_SIZE); | ||
1561 | if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, | ||
1562 | BTRFS_MAX_EXTENT_SIZE) >= num_extents) | ||
1569 | return; | 1563 | return; |
1570 | } | 1564 | } |
1571 | 1565 | ||
@@ -1591,8 +1585,10 @@ static void btrfs_merge_extent_hook(struct inode *inode, | |||
1591 | if (!(other->state & EXTENT_DELALLOC)) | 1585 | if (!(other->state & EXTENT_DELALLOC)) |
1592 | return; | 1586 | return; |
1593 | 1587 | ||
1594 | old_size = other->end - other->start + 1; | 1588 | if (new->start > other->start) |
1595 | new_size = old_size + (new->end - new->start + 1); | 1589 | new_size = new->end - other->start + 1; |
1590 | else | ||
1591 | new_size = other->end - new->start + 1; | ||
1596 | 1592 | ||
1597 | /* we're not bigger than the max, unreserve the space and go */ | 1593 | /* we're not bigger than the max, unreserve the space and go */ |
1598 | if (new_size <= BTRFS_MAX_EXTENT_SIZE) { | 1594 | if (new_size <= BTRFS_MAX_EXTENT_SIZE) { |
@@ -1603,13 +1599,32 @@ static void btrfs_merge_extent_hook(struct inode *inode, | |||
1603 | } | 1599 | } |
1604 | 1600 | ||
1605 | /* | 1601 | /* |
1606 | * If we grew by another max_extent, just return, we want to keep that | 1602 | * We have to add up either side to figure out how many extents were |
1607 | * reserved amount. | 1603 | * accounted for before we merged into one big extent. If the number of |
1604 | * extents we accounted for is <= the amount we need for the new range | ||
1605 | * then we can return, otherwise drop. Think of it like this | ||
1606 | * | ||
1607 | * [ 4k][MAX_SIZE] | ||
1608 | * | ||
1609 | * So we've grown the extent by a MAX_SIZE extent, this would mean we | ||
1610 | * need 2 outstanding extents, on one side we have 1 and the other side | ||
1611 | * we have 1 so they are == and we can return. But in this case | ||
1612 | * | ||
1613 | * [MAX_SIZE+4k][MAX_SIZE+4k] | ||
1614 | * | ||
1615 | * Each range on their own accounts for 2 extents, but merged together | ||
1616 | * they are only 3 extents worth of accounting, so we need to drop in | ||
1617 | * this case. | ||
1608 | */ | 1618 | */ |
1619 | old_size = other->end - other->start + 1; | ||
1609 | num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, | 1620 | num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1610 | BTRFS_MAX_EXTENT_SIZE); | 1621 | BTRFS_MAX_EXTENT_SIZE); |
1622 | old_size = new->end - new->start + 1; | ||
1623 | num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, | ||
1624 | BTRFS_MAX_EXTENT_SIZE); | ||
1625 | |||
1611 | if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, | 1626 | if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, |
1612 | BTRFS_MAX_EXTENT_SIZE) > num_extents) | 1627 | BTRFS_MAX_EXTENT_SIZE) >= num_extents) |
1613 | return; | 1628 | return; |
1614 | 1629 | ||
1615 | spin_lock(&BTRFS_I(inode)->lock); | 1630 | spin_lock(&BTRFS_I(inode)->lock); |
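Editor's note: the split and merge hooks above both reduce to the same ceiling division — a delalloc range of len bytes pins ceil(len / BTRFS_MAX_EXTENT_SIZE) outstanding extents, and the hooks compare the per-side counts with the count for the combined range. A minimal userspace sketch of that arithmetic (not kernel code; BTRFS_MAX_EXTENT_SIZE is assumed to be 128 MiB here purely for illustration):

/* Standalone sketch of the outstanding-extent arithmetic used by the
 * split/merge hooks above; not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define MAX_EXTENT_SIZE (128ULL * 1024 * 1024)   /* assumed value */
#define SZ_4K 4096ULL

/* ceil(len / MAX_EXTENT_SIZE): extents a delalloc range must reserve. */
static uint64_t extents_for(uint64_t len)
{
    return (len + MAX_EXTENT_SIZE - 1) / MAX_EXTENT_SIZE;
}

int main(void)
{
    uint64_t left = MAX_EXTENT_SIZE + SZ_4K;
    uint64_t right = MAX_EXTENT_SIZE + SZ_4K;

    /* Separately the two ranges reserve 2 + 2 = 4 extents ...        */
    uint64_t separate = extents_for(left) + extents_for(right);
    /* ... but merged they only need ceil((2*MAX + 8k)/MAX) = 3, so   */
    /* the merge hook has to drop one reservation.                    */
    uint64_t merged = extents_for(left + right);

    printf("separate=%llu merged=%llu drop=%llu\n",
           (unsigned long long)separate,
           (unsigned long long)merged,
           (unsigned long long)(separate - merged));

    /* The [4k][MAX_SIZE] case from the comment: 1 + 1 == 2 before and
     * ceil((MAX + 4k)/MAX) == 2 after, so nothing is dropped.        */
    printf("small case: separate=%llu merged=%llu\n",
           (unsigned long long)(extents_for(SZ_4K) + extents_for(MAX_EXTENT_SIZE)),
           (unsigned long long)extents_for(SZ_4K + MAX_EXTENT_SIZE));
    return 0;
}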
@@ -1686,6 +1701,10 @@ static void btrfs_set_bit_hook(struct inode *inode, | |||
1686 | spin_unlock(&BTRFS_I(inode)->lock); | 1701 | spin_unlock(&BTRFS_I(inode)->lock); |
1687 | } | 1702 | } |
1688 | 1703 | ||
1704 | /* For sanity tests */ | ||
1705 | if (btrfs_test_is_dummy_root(root)) | ||
1706 | return; | ||
1707 | |||
1689 | __percpu_counter_add(&root->fs_info->delalloc_bytes, len, | 1708 | __percpu_counter_add(&root->fs_info->delalloc_bytes, len, |
1690 | root->fs_info->delalloc_batch); | 1709 | root->fs_info->delalloc_batch); |
1691 | spin_lock(&BTRFS_I(inode)->lock); | 1710 | spin_lock(&BTRFS_I(inode)->lock); |
@@ -1741,6 +1760,10 @@ static void btrfs_clear_bit_hook(struct inode *inode, | |||
1741 | root != root->fs_info->tree_root) | 1760 | root != root->fs_info->tree_root) |
1742 | btrfs_delalloc_release_metadata(inode, len); | 1761 | btrfs_delalloc_release_metadata(inode, len); |
1743 | 1762 | ||
1763 | /* For sanity tests. */ | ||
1764 | if (btrfs_test_is_dummy_root(root)) | ||
1765 | return; | ||
1766 | |||
1744 | if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID | 1767 | if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID |
1745 | && do_list && !(state->state & EXTENT_NORESERVE)) | 1768 | && do_list && !(state->state & EXTENT_NORESERVE)) |
1746 | btrfs_free_reserved_data_space(inode, len); | 1769 | btrfs_free_reserved_data_space(inode, len); |
@@ -7213,7 +7236,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7213 | u64 start = iblock << inode->i_blkbits; | 7236 | u64 start = iblock << inode->i_blkbits; |
7214 | u64 lockstart, lockend; | 7237 | u64 lockstart, lockend; |
7215 | u64 len = bh_result->b_size; | 7238 | u64 len = bh_result->b_size; |
7216 | u64 orig_len = len; | 7239 | u64 *outstanding_extents = NULL; |
7217 | int unlock_bits = EXTENT_LOCKED; | 7240 | int unlock_bits = EXTENT_LOCKED; |
7218 | int ret = 0; | 7241 | int ret = 0; |
7219 | 7242 | ||
@@ -7225,6 +7248,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, | |||
7225 | lockstart = start; | 7248 | lockstart = start; |
7226 | lockend = start + len - 1; | 7249 | lockend = start + len - 1; |
7227 | 7250 | ||
7251 | if (current->journal_info) { | ||
7252 | /* | ||
7253 | * Need to pull our outstanding extents and set journal_info to NULL so | ||
7254 | * that anything that needs to check if there's a transction doesn't get | ||
7255 | * confused. | ||
7256 | */ | ||
7257 | outstanding_extents = current->journal_info; | ||
7258 | current->journal_info = NULL; | ||
7259 | } | ||
7260 | |||
7228 | /* | 7261 | /* |
7229 | * If this errors out it's because we couldn't invalidate pagecache for | 7262 | * If this errors out it's because we couldn't invalidate pagecache for |
7230 | * this range and we need to fallback to buffered. | 7263 | * this range and we need to fallback to buffered. |
@@ -7348,11 +7381,20 @@ unlock: | |||
7348 | if (start + len > i_size_read(inode)) | 7381 | if (start + len > i_size_read(inode)) |
7349 | i_size_write(inode, start + len); | 7382 | i_size_write(inode, start + len); |
7350 | 7383 | ||
7351 | if (len < orig_len) { | 7384 | /* |
7385 | * If we have an outstanding_extents count still set then we're | ||
7386 | * within our reservation, otherwise we need to adjust our inode | ||
7387 | * counter appropriately. | ||
7388 | */ | ||
7389 | if (*outstanding_extents) { | ||
7390 | (*outstanding_extents)--; | ||
7391 | } else { | ||
7352 | spin_lock(&BTRFS_I(inode)->lock); | 7392 | spin_lock(&BTRFS_I(inode)->lock); |
7353 | BTRFS_I(inode)->outstanding_extents++; | 7393 | BTRFS_I(inode)->outstanding_extents++; |
7354 | spin_unlock(&BTRFS_I(inode)->lock); | 7394 | spin_unlock(&BTRFS_I(inode)->lock); |
7355 | } | 7395 | } |
7396 | |||
7397 | current->journal_info = outstanding_extents; | ||
7356 | btrfs_free_reserved_data_space(inode, len); | 7398 | btrfs_free_reserved_data_space(inode, len); |
7357 | } | 7399 | } |
7358 | 7400 | ||
@@ -7376,6 +7418,8 @@ unlock: | |||
7376 | unlock_err: | 7418 | unlock_err: |
7377 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, | 7419 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, |
7378 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); | 7420 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); |
7421 | if (outstanding_extents) | ||
7422 | current->journal_info = outstanding_extents; | ||
7379 | return ret; | 7423 | return ret; |
7380 | } | 7424 | } |
7381 | 7425 | ||
@@ -8075,6 +8119,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |||
8075 | { | 8119 | { |
8076 | struct file *file = iocb->ki_filp; | 8120 | struct file *file = iocb->ki_filp; |
8077 | struct inode *inode = file->f_mapping->host; | 8121 | struct inode *inode = file->f_mapping->host; |
8122 | u64 outstanding_extents = 0; | ||
8078 | size_t count = 0; | 8123 | size_t count = 0; |
8079 | int flags = 0; | 8124 | int flags = 0; |
8080 | bool wakeup = true; | 8125 | bool wakeup = true; |
@@ -8112,6 +8157,16 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |||
8112 | ret = btrfs_delalloc_reserve_space(inode, count); | 8157 | ret = btrfs_delalloc_reserve_space(inode, count); |
8113 | if (ret) | 8158 | if (ret) |
8114 | goto out; | 8159 | goto out; |
8160 | outstanding_extents = div64_u64(count + | ||
8161 | BTRFS_MAX_EXTENT_SIZE - 1, | ||
8162 | BTRFS_MAX_EXTENT_SIZE); | ||
8163 | |||
8164 | /* | ||
8165 | * We need to know how many extents we reserved so that we can | ||
8166 | * do the accounting properly if we go over the number we | ||
8167 | * originally calculated. Abuse current->journal_info for this. | ||
8168 | */ | ||
8169 | current->journal_info = &outstanding_extents; | ||
8115 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, | 8170 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, |
8116 | &BTRFS_I(inode)->runtime_flags)) { | 8171 | &BTRFS_I(inode)->runtime_flags)) { |
8117 | inode_dio_done(inode); | 8172 | inode_dio_done(inode); |
@@ -8124,6 +8179,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, | |||
8124 | iter, offset, btrfs_get_blocks_direct, NULL, | 8179 | iter, offset, btrfs_get_blocks_direct, NULL, |
8125 | btrfs_submit_direct, flags); | 8180 | btrfs_submit_direct, flags); |
8126 | if (rw & WRITE) { | 8181 | if (rw & WRITE) { |
8182 | current->journal_info = NULL; | ||
8127 | if (ret < 0 && ret != -EIOCBQUEUED) | 8183 | if (ret < 0 && ret != -EIOCBQUEUED) |
8128 | btrfs_delalloc_release_space(inode, count); | 8184 | btrfs_delalloc_release_space(inode, count); |
8129 | else if (ret >= 0 && (size_t)ret < count) | 8185 | else if (ret >= 0 && (size_t)ret < count) |
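Editor's note: the direct-IO hunks use current->journal_info as a scratch slot — btrfs_direct_IO stashes a pointer to the number of extents it reserved, and each btrfs_get_blocks_direct call either consumes one unit of that budget or charges an extra outstanding extent to the inode. A userspace sketch of the same bookkeeping, with invented names (io_budget, map_one_extent) standing in for the kernel pieces:

#include <stdio.h>
#include <stdint.h>

#define MAX_EXTENT_SIZE (128ULL * 1024 * 1024)   /* assumed value */

static uint64_t *io_budget;        /* stands in for current->journal_info */
static uint64_t inode_outstanding; /* BTRFS_I(inode)->outstanding_extents */

/* One call per extent the filesystem maps for the direct write. */
static void map_one_extent(void)
{
    uint64_t *budget = io_budget;

    io_budget = NULL;              /* don't confuse "is there a transaction?" checks */
    if (budget && *budget)
        (*budget)--;               /* still inside the up-front reservation */
    else
        inode_outstanding++;       /* went past it: charge the inode */
    io_budget = budget;            /* restore for the next extent */
}

int main(void)
{
    uint64_t count = 3 * MAX_EXTENT_SIZE;  /* size of the direct write */
    uint64_t reserved = (count + MAX_EXTENT_SIZE - 1) / MAX_EXTENT_SIZE;

    io_budget = &reserved;
    for (int i = 0; i < 5; i++)    /* the fs ends up mapping 5 extents */
        map_one_extent();
    io_budget = NULL;

    printf("budget left=%llu, extra charged to inode=%llu\n",
           (unsigned long long)reserved,
           (unsigned long long)inode_outstanding);   /* 0 and 2 */
    return 0;
}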
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 97159a8e91d4..058c79eecbfb 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
@@ -1259,7 +1259,7 @@ static int comp_oper(struct btrfs_qgroup_operation *oper1, | |||
1259 | if (oper1->seq < oper2->seq) | 1259 | if (oper1->seq < oper2->seq) |
1260 | return -1; | 1260 | return -1; |
1261 | if (oper1->seq > oper2->seq) | 1261 | if (oper1->seq > oper2->seq) |
1262 | return -1; | 1262 | return 1; |
1263 | if (oper1->ref_root < oper2->ref_root) | 1263 | if (oper1->ref_root < oper2->ref_root) |
1264 | return -1; | 1264 | return -1; |
1265 | if (oper1->ref_root > oper2->ref_root) | 1265 | if (oper1->ref_root > oper2->ref_root) |
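Editor's note: the qgroup hunk is a one-character comparator fix — the old code returned -1 both when oper1->seq was smaller and when it was larger, so the ordering was inconsistent and tree insertion and lookup could disagree. A tiny standalone version of the corrected three-way compare ('struct oper' is a stand-in for btrfs_qgroup_operation):

#include <stdio.h>

struct oper { unsigned long long seq, ref_root; };

static int comp_oper(const struct oper *a, const struct oper *b)
{
    if (a->seq < b->seq)
        return -1;
    if (a->seq > b->seq)
        return 1;        /* was -1 before the fix: "a < b" and "b < a" */
    if (a->ref_root < b->ref_root)
        return -1;
    if (a->ref_root > b->ref_root)
        return 1;
    return 0;
}

int main(void)
{
    struct oper x = { .seq = 2 }, y = { .seq = 1 };

    /* A consistent comparator must satisfy comp(x,y) == -comp(y,x). */
    printf("comp(x,y)=%d comp(y,x)=%d\n", comp_oper(&x, &y), comp_oper(&y, &x));
    return 0;
}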
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index a116b55ce788..054fc0d97131 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c | |||
@@ -911,6 +911,197 @@ out: | |||
911 | return ret; | 911 | return ret; |
912 | } | 912 | } |
913 | 913 | ||
914 | static int test_extent_accounting(void) | ||
915 | { | ||
916 | struct inode *inode = NULL; | ||
917 | struct btrfs_root *root = NULL; | ||
918 | int ret = -ENOMEM; | ||
919 | |||
920 | inode = btrfs_new_test_inode(); | ||
921 | if (!inode) { | ||
922 | test_msg("Couldn't allocate inode\n"); | ||
923 | return ret; | ||
924 | } | ||
925 | |||
926 | root = btrfs_alloc_dummy_root(); | ||
927 | if (IS_ERR(root)) { | ||
928 | test_msg("Couldn't allocate root\n"); | ||
929 | goto out; | ||
930 | } | ||
931 | |||
932 | root->fs_info = btrfs_alloc_dummy_fs_info(); | ||
933 | if (!root->fs_info) { | ||
934 | test_msg("Couldn't allocate dummy fs info\n"); | ||
935 | goto out; | ||
936 | } | ||
937 | |||
938 | BTRFS_I(inode)->root = root; | ||
939 | btrfs_test_inode_set_ops(inode); | ||
940 | |||
941 | /* [BTRFS_MAX_EXTENT_SIZE] */ | ||
942 | BTRFS_I(inode)->outstanding_extents++; | ||
943 | ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, | ||
944 | NULL); | ||
945 | if (ret) { | ||
946 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
947 | goto out; | ||
948 | } | ||
949 | if (BTRFS_I(inode)->outstanding_extents != 1) { | ||
950 | ret = -EINVAL; | ||
951 | test_msg("Miscount, wanted 1, got %u\n", | ||
952 | BTRFS_I(inode)->outstanding_extents); | ||
953 | goto out; | ||
954 | } | ||
955 | |||
956 | /* [BTRFS_MAX_EXTENT_SIZE][4k] */ | ||
957 | BTRFS_I(inode)->outstanding_extents++; | ||
958 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, | ||
959 | BTRFS_MAX_EXTENT_SIZE + 4095, NULL); | ||
960 | if (ret) { | ||
961 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
962 | goto out; | ||
963 | } | ||
964 | if (BTRFS_I(inode)->outstanding_extents != 2) { | ||
965 | ret = -EINVAL; | ||
966 | test_msg("Miscount, wanted 2, got %u\n", | ||
967 | BTRFS_I(inode)->outstanding_extents); | ||
968 | goto out; | ||
969 | } | ||
970 | |||
971 | /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */ | ||
972 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, | ||
973 | BTRFS_MAX_EXTENT_SIZE >> 1, | ||
974 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | ||
975 | EXTENT_DELALLOC | EXTENT_DIRTY | | ||
976 | EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, | ||
977 | NULL, GFP_NOFS); | ||
978 | if (ret) { | ||
979 | test_msg("clear_extent_bit returned %d\n", ret); | ||
980 | goto out; | ||
981 | } | ||
982 | if (BTRFS_I(inode)->outstanding_extents != 2) { | ||
983 | ret = -EINVAL; | ||
984 | test_msg("Miscount, wanted 2, got %u\n", | ||
985 | BTRFS_I(inode)->outstanding_extents); | ||
986 | goto out; | ||
987 | } | ||
988 | |||
989 | /* [BTRFS_MAX_EXTENT_SIZE][4K] */ | ||
990 | BTRFS_I(inode)->outstanding_extents++; | ||
991 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, | ||
992 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | ||
993 | NULL); | ||
994 | if (ret) { | ||
995 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
996 | goto out; | ||
997 | } | ||
998 | if (BTRFS_I(inode)->outstanding_extents != 2) { | ||
999 | ret = -EINVAL; | ||
1000 | test_msg("Miscount, wanted 2, got %u\n", | ||
1001 | BTRFS_I(inode)->outstanding_extents); | ||
1002 | goto out; | ||
1003 | } | ||
1004 | |||
1005 | /* | ||
1006 | * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K] | ||
1007 | * | ||
1008 | * I'm artificially adding 2 to outstanding_extents because in the | ||
1009 | * buffered IO case we'd add things up as we go, but I don't feel like | ||
1010 | * doing that here, this isn't the interesting case we want to test. | ||
1011 | */ | ||
1012 | BTRFS_I(inode)->outstanding_extents += 2; | ||
1013 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192, | ||
1014 | (BTRFS_MAX_EXTENT_SIZE << 1) + 12287, | ||
1015 | NULL); | ||
1016 | if (ret) { | ||
1017 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
1018 | goto out; | ||
1019 | } | ||
1020 | if (BTRFS_I(inode)->outstanding_extents != 4) { | ||
1021 | ret = -EINVAL; | ||
1022 | test_msg("Miscount, wanted 4, got %u\n", | ||
1023 | BTRFS_I(inode)->outstanding_extents); | ||
1024 | goto out; | ||
1025 | } | ||
1026 | |||
1027 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */ | ||
1028 | BTRFS_I(inode)->outstanding_extents++; | ||
1029 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, | ||
1030 | BTRFS_MAX_EXTENT_SIZE+8191, NULL); | ||
1031 | if (ret) { | ||
1032 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
1033 | goto out; | ||
1034 | } | ||
1035 | if (BTRFS_I(inode)->outstanding_extents != 3) { | ||
1036 | ret = -EINVAL; | ||
1037 | test_msg("Miscount, wanted 3, got %u\n", | ||
1038 | BTRFS_I(inode)->outstanding_extents); | ||
1039 | goto out; | ||
1040 | } | ||
1041 | |||
1042 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ | ||
1043 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, | ||
1044 | BTRFS_MAX_EXTENT_SIZE+4096, | ||
1045 | BTRFS_MAX_EXTENT_SIZE+8191, | ||
1046 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
1047 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | ||
1048 | NULL, GFP_NOFS); | ||
1049 | if (ret) { | ||
1050 | test_msg("clear_extent_bit returned %d\n", ret); | ||
1051 | goto out; | ||
1052 | } | ||
1053 | if (BTRFS_I(inode)->outstanding_extents != 4) { | ||
1054 | ret = -EINVAL; | ||
1055 | test_msg("Miscount, wanted 4, got %u\n", | ||
1056 | BTRFS_I(inode)->outstanding_extents); | ||
1057 | goto out; | ||
1058 | } | ||
1059 | |||
1060 | /* | ||
1061 | * Refill the hole again just for good measure, because I thought it | ||
1062 | * might fail and I'd rather satisfy my paranoia at this point. | ||
1063 | */ | ||
1064 | BTRFS_I(inode)->outstanding_extents++; | ||
1065 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, | ||
1066 | BTRFS_MAX_EXTENT_SIZE+8191, NULL); | ||
1067 | if (ret) { | ||
1068 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | ||
1069 | goto out; | ||
1070 | } | ||
1071 | if (BTRFS_I(inode)->outstanding_extents != 3) { | ||
1072 | ret = -EINVAL; | ||
1073 | test_msg("Miscount, wanted 3, got %u\n", | ||
1074 | BTRFS_I(inode)->outstanding_extents); | ||
1075 | goto out; | ||
1076 | } | ||
1077 | |||
1078 | /* Empty */ | ||
1079 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, | ||
1080 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
1081 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | ||
1082 | NULL, GFP_NOFS); | ||
1083 | if (ret) { | ||
1084 | test_msg("clear_extent_bit returned %d\n", ret); | ||
1085 | goto out; | ||
1086 | } | ||
1087 | if (BTRFS_I(inode)->outstanding_extents) { | ||
1088 | ret = -EINVAL; | ||
1089 | test_msg("Miscount, wanted 0, got %u\n", | ||
1090 | BTRFS_I(inode)->outstanding_extents); | ||
1091 | goto out; | ||
1092 | } | ||
1093 | ret = 0; | ||
1094 | out: | ||
1095 | if (ret) | ||
1096 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, | ||
1097 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
1098 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | ||
1099 | NULL, GFP_NOFS); | ||
1100 | iput(inode); | ||
1101 | btrfs_free_dummy_root(root); | ||
1102 | return ret; | ||
1103 | } | ||
1104 | |||
914 | int btrfs_test_inodes(void) | 1105 | int btrfs_test_inodes(void) |
915 | { | 1106 | { |
916 | int ret; | 1107 | int ret; |
@@ -924,5 +1115,9 @@ int btrfs_test_inodes(void) | |||
924 | if (ret) | 1115 | if (ret) |
925 | return ret; | 1116 | return ret; |
926 | test_msg("Running hole first btrfs_get_extent test\n"); | 1117 | test_msg("Running hole first btrfs_get_extent test\n"); |
927 | return test_hole_first(); | 1118 | ret = test_hole_first(); |
1119 | if (ret) | ||
1120 | return ret; | ||
1121 | test_msg("Running outstanding_extents tests\n"); | ||
1122 | return test_extent_accounting(); | ||
928 | } | 1123 | } |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 88e51aded6bd..8be4278e25e8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -1023,17 +1023,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
1023 | u64 old_root_bytenr; | 1023 | u64 old_root_bytenr; |
1024 | u64 old_root_used; | 1024 | u64 old_root_used; |
1025 | struct btrfs_root *tree_root = root->fs_info->tree_root; | 1025 | struct btrfs_root *tree_root = root->fs_info->tree_root; |
1026 | bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID); | ||
1027 | 1026 | ||
1028 | old_root_used = btrfs_root_used(&root->root_item); | 1027 | old_root_used = btrfs_root_used(&root->root_item); |
1029 | btrfs_write_dirty_block_groups(trans, root); | ||
1030 | 1028 | ||
1031 | while (1) { | 1029 | while (1) { |
1032 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); | 1030 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); |
1033 | if (old_root_bytenr == root->node->start && | 1031 | if (old_root_bytenr == root->node->start && |
1034 | old_root_used == btrfs_root_used(&root->root_item) && | 1032 | old_root_used == btrfs_root_used(&root->root_item)) |
1035 | (!extent_root || | ||
1036 | list_empty(&trans->transaction->dirty_bgs))) | ||
1037 | break; | 1033 | break; |
1038 | 1034 | ||
1039 | btrfs_set_root_node(&root->root_item, root->node); | 1035 | btrfs_set_root_node(&root->root_item, root->node); |
@@ -1044,14 +1040,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
1044 | return ret; | 1040 | return ret; |
1045 | 1041 | ||
1046 | old_root_used = btrfs_root_used(&root->root_item); | 1042 | old_root_used = btrfs_root_used(&root->root_item); |
1047 | if (extent_root) { | ||
1048 | ret = btrfs_write_dirty_block_groups(trans, root); | ||
1049 | if (ret) | ||
1050 | return ret; | ||
1051 | } | ||
1052 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
1053 | if (ret) | ||
1054 | return ret; | ||
1055 | } | 1043 | } |
1056 | 1044 | ||
1057 | return 0; | 1045 | return 0; |
@@ -1068,6 +1056,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
1068 | struct btrfs_root *root) | 1056 | struct btrfs_root *root) |
1069 | { | 1057 | { |
1070 | struct btrfs_fs_info *fs_info = root->fs_info; | 1058 | struct btrfs_fs_info *fs_info = root->fs_info; |
1059 | struct list_head *dirty_bgs = &trans->transaction->dirty_bgs; | ||
1071 | struct list_head *next; | 1060 | struct list_head *next; |
1072 | struct extent_buffer *eb; | 1061 | struct extent_buffer *eb; |
1073 | int ret; | 1062 | int ret; |
@@ -1095,11 +1084,15 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
1095 | if (ret) | 1084 | if (ret) |
1096 | return ret; | 1085 | return ret; |
1097 | 1086 | ||
1087 | ret = btrfs_setup_space_cache(trans, root); | ||
1088 | if (ret) | ||
1089 | return ret; | ||
1090 | |||
1098 | /* run_qgroups might have added some more refs */ | 1091 | /* run_qgroups might have added some more refs */ |
1099 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | 1092 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); |
1100 | if (ret) | 1093 | if (ret) |
1101 | return ret; | 1094 | return ret; |
1102 | 1095 | again: | |
1103 | while (!list_empty(&fs_info->dirty_cowonly_roots)) { | 1096 | while (!list_empty(&fs_info->dirty_cowonly_roots)) { |
1104 | next = fs_info->dirty_cowonly_roots.next; | 1097 | next = fs_info->dirty_cowonly_roots.next; |
1105 | list_del_init(next); | 1098 | list_del_init(next); |
@@ -1112,8 +1105,23 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
1112 | ret = update_cowonly_root(trans, root); | 1105 | ret = update_cowonly_root(trans, root); |
1113 | if (ret) | 1106 | if (ret) |
1114 | return ret; | 1107 | return ret; |
1108 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
1109 | if (ret) | ||
1110 | return ret; | ||
1115 | } | 1111 | } |
1116 | 1112 | ||
1113 | while (!list_empty(dirty_bgs)) { | ||
1114 | ret = btrfs_write_dirty_block_groups(trans, root); | ||
1115 | if (ret) | ||
1116 | return ret; | ||
1117 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
1118 | if (ret) | ||
1119 | return ret; | ||
1120 | } | ||
1121 | |||
1122 | if (!list_empty(&fs_info->dirty_cowonly_roots)) | ||
1123 | goto again; | ||
1124 | |||
1117 | list_add_tail(&fs_info->extent_root->dirty_list, | 1125 | list_add_tail(&fs_info->extent_root->dirty_list, |
1118 | &trans->transaction->switch_commits); | 1126 | &trans->transaction->switch_commits); |
1119 | btrfs_after_dev_replace_commit(fs_info); | 1127 | btrfs_after_dev_replace_commit(fs_info); |
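Editor's note: the transaction.c rework replaces the "write dirty block groups inside update_cowonly_root" special case with an outer convergence loop — flush the dirty cow-only roots, then the dirty block groups, and start over if one pass re-dirtied the other list. A stripped-down model of that loop, with counters standing in for the two lists (illustration only; the re-dirtying pattern is invented so the toy converges):

#include <stdio.h>

static int dirty_roots = 2;       /* stands in for fs_info->dirty_cowonly_roots */
static int dirty_bgs;             /* stands in for trans->transaction->dirty_bgs */
static int extent_root_redirtied;

static void update_one_root(void)
{
    dirty_roots--;
    dirty_bgs++;                  /* COWing a root dirties block groups */
}

static void write_dirty_bgs(void)
{
    dirty_bgs--;
    if (!extent_root_redirtied) { /* writing bgs can dirty the extent root */
        extent_root_redirtied = 1;
        dirty_roots++;
    }
}

int main(void)
{
    int passes = 0;

again:
    passes++;
    while (dirty_roots > 0)
        update_one_root();        /* may add more dirty block groups */

    while (dirty_bgs > 0)
        write_dirty_bgs();        /* may re-dirty a cow-only root */

    if (dirty_roots > 0)
        goto again;

    printf("converged after %d passes\n", passes);   /* 2 */
    return 0;
}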
@@ -1811,6 +1819,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1811 | 1819 | ||
1812 | wait_for_commit(root, cur_trans); | 1820 | wait_for_commit(root, cur_trans); |
1813 | 1821 | ||
1822 | if (unlikely(cur_trans->aborted)) | ||
1823 | ret = cur_trans->aborted; | ||
1824 | |||
1814 | btrfs_put_transaction(cur_trans); | 1825 | btrfs_put_transaction(cur_trans); |
1815 | 1826 | ||
1816 | return ret; | 1827 | return ret; |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ed19a7d622fa..39706c57ad3c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -890,8 +890,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) | |||
890 | 890 | ||
891 | newpage = buf->page; | 891 | newpage = buf->page; |
892 | 892 | ||
893 | if (WARN_ON(!PageUptodate(newpage))) | 893 | if (!PageUptodate(newpage)) |
894 | return -EIO; | 894 | SetPageUptodate(newpage); |
895 | 895 | ||
896 | ClearPageMappedToDisk(newpage); | 896 | ClearPageMappedToDisk(newpage); |
897 | 897 | ||
@@ -1353,6 +1353,17 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, | |||
1353 | return err; | 1353 | return err; |
1354 | } | 1354 | } |
1355 | 1355 | ||
1356 | static int fuse_dev_open(struct inode *inode, struct file *file) | ||
1357 | { | ||
1358 | /* | ||
1359 | * The fuse device's file's private_data is used to hold | ||
1360 | * the fuse_conn(ection) when it is mounted, and is used to | ||
1361 | * keep track of whether the file has been mounted already. | ||
1362 | */ | ||
1363 | file->private_data = NULL; | ||
1364 | return 0; | ||
1365 | } | ||
1366 | |||
1356 | static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, | 1367 | static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, |
1357 | unsigned long nr_segs, loff_t pos) | 1368 | unsigned long nr_segs, loff_t pos) |
1358 | { | 1369 | { |
@@ -1797,6 +1808,9 @@ copy_finish: | |||
1797 | static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, | 1808 | static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, |
1798 | unsigned int size, struct fuse_copy_state *cs) | 1809 | unsigned int size, struct fuse_copy_state *cs) |
1799 | { | 1810 | { |
1811 | /* Don't try to move pages (yet) */ | ||
1812 | cs->move_pages = 0; | ||
1813 | |||
1800 | switch (code) { | 1814 | switch (code) { |
1801 | case FUSE_NOTIFY_POLL: | 1815 | case FUSE_NOTIFY_POLL: |
1802 | return fuse_notify_poll(fc, size, cs); | 1816 | return fuse_notify_poll(fc, size, cs); |
@@ -2217,6 +2231,7 @@ static int fuse_dev_fasync(int fd, struct file *file, int on) | |||
2217 | 2231 | ||
2218 | const struct file_operations fuse_dev_operations = { | 2232 | const struct file_operations fuse_dev_operations = { |
2219 | .owner = THIS_MODULE, | 2233 | .owner = THIS_MODULE, |
2234 | .open = fuse_dev_open, | ||
2220 | .llseek = no_llseek, | 2235 | .llseek = no_llseek, |
2221 | .read = do_sync_read, | 2236 | .read = do_sync_read, |
2222 | .aio_read = fuse_dev_read, | 2237 | .aio_read = fuse_dev_read, |
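Editor's note: the fuse_dev_open comment explains that file->private_data doubles as the "already attached to a mount" marker once it holds the fuse_conn. A generic sketch of that claim-on-first-use pattern; the struct names, helper names, and the -EINVAL return are illustrative, not the fuse API:

#include <stdio.h>
#include <errno.h>

struct file { void *private_data; };
struct conn { int id; };

static int dev_open(struct file *f)
{
    f->private_data = NULL;          /* not attached to any mount yet */
    return 0;
}

static int dev_attach(struct file *f, struct conn *c)
{
    if (f->private_data)             /* already claimed by a mount */
        return -EINVAL;
    f->private_data = c;
    return 0;
}

int main(void)
{
    struct file f;
    struct conn c1 = { .id = 1 }, c2 = { .id = 2 };

    dev_open(&f);
    printf("first attach:  %d\n", dev_attach(&f, &c1));  /* 0 */
    printf("second attach: %d\n", dev_attach(&f, &c2));  /* negative */
    return 0;
}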
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c index 6e560d56094b..754fdf8c6356 100644 --- a/fs/hfsplus/brec.c +++ b/fs/hfsplus/brec.c | |||
@@ -131,13 +131,16 @@ skip: | |||
131 | hfs_bnode_write(node, entry, data_off + key_len, entry_len); | 131 | hfs_bnode_write(node, entry, data_off + key_len, entry_len); |
132 | hfs_bnode_dump(node); | 132 | hfs_bnode_dump(node); |
133 | 133 | ||
134 | if (new_node) { | 134 | /* |
135 | /* update parent key if we inserted a key | 135 | * update parent key if we inserted a key |
136 | * at the start of the first node | 136 | * at the start of the node and it is not the new node |
137 | */ | 137 | */ |
138 | if (!rec && new_node != node) | 138 | if (!rec && new_node != node) { |
139 | hfs_brec_update_parent(fd); | 139 | hfs_bnode_read_key(node, fd->search_key, data_off + size); |
140 | hfs_brec_update_parent(fd); | ||
141 | } | ||
140 | 142 | ||
143 | if (new_node) { | ||
141 | hfs_bnode_put(fd->bnode); | 144 | hfs_bnode_put(fd->bnode); |
142 | if (!new_node->parent) { | 145 | if (!new_node->parent) { |
143 | hfs_btree_inc_height(tree); | 146 | hfs_btree_inc_height(tree); |
@@ -168,9 +171,6 @@ skip: | |||
168 | goto again; | 171 | goto again; |
169 | } | 172 | } |
170 | 173 | ||
171 | if (!rec) | ||
172 | hfs_brec_update_parent(fd); | ||
173 | |||
174 | return 0; | 174 | return 0; |
175 | } | 175 | } |
176 | 176 | ||
@@ -370,6 +370,8 @@ again: | |||
370 | if (IS_ERR(parent)) | 370 | if (IS_ERR(parent)) |
371 | return PTR_ERR(parent); | 371 | return PTR_ERR(parent); |
372 | __hfs_brec_find(parent, fd, hfs_find_rec_by_key); | 372 | __hfs_brec_find(parent, fd, hfs_find_rec_by_key); |
373 | if (fd->record < 0) | ||
374 | return -ENOENT; | ||
373 | hfs_bnode_dump(parent); | 375 | hfs_bnode_dump(parent); |
374 | rec = fd->record; | 376 | rec = fd->record; |
375 | 377 | ||
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index b684e8a132e6..2bacb9988566 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c | |||
@@ -207,6 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of, | |||
207 | goto out_free; | 207 | goto out_free; |
208 | } | 208 | } |
209 | 209 | ||
210 | of->event = atomic_read(&of->kn->attr.open->event); | ||
210 | ops = kernfs_ops(of->kn); | 211 | ops = kernfs_ops(of->kn); |
211 | if (ops->read) | 212 | if (ops->read) |
212 | len = ops->read(of, buf, len, *ppos); | 213 | len = ops->read(of, buf, len, *ppos); |
diff --git a/fs/locks.c b/fs/locks.c index f1bad681fc1c..528fedfda15e 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -1728,7 +1728,7 @@ static int generic_delete_lease(struct file *filp, void *owner) | |||
1728 | break; | 1728 | break; |
1729 | } | 1729 | } |
1730 | } | 1730 | } |
1731 | trace_generic_delete_lease(inode, fl); | 1731 | trace_generic_delete_lease(inode, victim); |
1732 | if (victim) | 1732 | if (victim) |
1733 | error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); | 1733 | error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); |
1734 | spin_unlock(&ctx->flc_lock); | 1734 | spin_unlock(&ctx->flc_lock); |
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 3c1bfa155571..1028a0629543 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c | |||
@@ -587,8 +587,6 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) | |||
587 | 587 | ||
588 | rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); | 588 | rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); |
589 | 589 | ||
590 | nfsd4_cb_layout_fail(ls); | ||
591 | |||
592 | printk(KERN_WARNING | 590 | printk(KERN_WARNING |
593 | "nfsd: client %s failed to respond to layout recall. " | 591 | "nfsd: client %s failed to respond to layout recall. " |
594 | " Fencing..\n", addr_str); | 592 | " Fencing..\n", addr_str); |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 469086b9f99b..0c3f303baf32 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, | |||
1907 | struct the_nilfs *nilfs) | 1907 | struct the_nilfs *nilfs) |
1908 | { | 1908 | { |
1909 | struct nilfs_inode_info *ii, *n; | 1909 | struct nilfs_inode_info *ii, *n; |
1910 | int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE); | ||
1910 | int defer_iput = false; | 1911 | int defer_iput = false; |
1911 | 1912 | ||
1912 | spin_lock(&nilfs->ns_inode_lock); | 1913 | spin_lock(&nilfs->ns_inode_lock); |
@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, | |||
1919 | brelse(ii->i_bh); | 1920 | brelse(ii->i_bh); |
1920 | ii->i_bh = NULL; | 1921 | ii->i_bh = NULL; |
1921 | list_del_init(&ii->i_dirty); | 1922 | list_del_init(&ii->i_dirty); |
1922 | if (!ii->vfs_inode.i_nlink) { | 1923 | if (!ii->vfs_inode.i_nlink || during_mount) { |
1923 | /* | 1924 | /* |
1924 | * Defer calling iput() to avoid a deadlock | 1925 | * Defer calling iput() to avoid deadlocks if |
1925 | * over I_SYNC flag for inodes with i_nlink == 0 | 1926 | * i_nlink == 0 or mount is not yet finished. |
1926 | */ | 1927 | */ |
1927 | list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); | 1928 | list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); |
1928 | defer_iput = true; | 1929 | defer_iput = true; |
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 9a66ff79ff27..d2f97ecca6a5 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c | |||
@@ -143,7 +143,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | |||
143 | !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) | 143 | !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) |
144 | return false; | 144 | return false; |
145 | 145 | ||
146 | if (event_mask & marks_mask & ~marks_ignored_mask) | 146 | if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask & |
147 | ~marks_ignored_mask) | ||
147 | return true; | 148 | return true; |
148 | 149 | ||
149 | return false; | 150 | return false; |
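Editor's note: the fanotify change ANDs the event mask with FAN_ALL_OUTGOING_EVENTS before comparing against the mark's mask, so bits that are never reported to userspace can no longer make the test succeed on their own. A small sketch of that three-mask test; the bit values are invented, only the shape of the expression follows the kernel code:

#include <stdbool.h>
#include <stdio.h>

#define EV_OPEN        0x01   /* reportable to userspace */
#define EV_CLOSE       0x02   /* reportable to userspace */
#define EV_INTERNAL    0x80   /* internal-only flag, never reportable */
#define ALL_OUTGOING  (EV_OPEN | EV_CLOSE)

static bool should_send(unsigned int event_mask, unsigned int marks_mask,
                        unsigned int marks_ignored_mask)
{
    return event_mask & ALL_OUTGOING & marks_mask & ~marks_ignored_mask;
}

int main(void)
{
    /* Without the ALL_OUTGOING filter, an event carrying only the
     * internal bit could match a mark that happens to have it set. */
    printf("internal-only event: %d\n",
           should_send(EV_INTERNAL, EV_INTERNAL | EV_OPEN, 0));  /* 0 */
    printf("open event:          %d\n",
           should_send(EV_OPEN, EV_OPEN, 0));                    /* 1 */
    printf("ignored open:        %d\n",
           should_send(EV_OPEN, EV_OPEN, EV_OPEN));              /* 0 */
    return 0;
}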
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 8490c64d34fe..460c6c37e683 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -502,7 +502,7 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb) | |||
502 | 502 | ||
503 | static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) | 503 | static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) |
504 | { | 504 | { |
505 | if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) | 505 | if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO) |
506 | return 1; | 506 | return 1; |
507 | return 0; | 507 | return 0; |
508 | } | 508 | } |
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 20e37a3ed26f..db64ce2d4667 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h | |||
@@ -102,11 +102,11 @@ | |||
102 | | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ | 102 | | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ |
103 | | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ | 103 | | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ |
104 | | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ | 104 | | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ |
105 | | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO) | 105 | | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \ |
106 | | OCFS2_FEATURE_INCOMPAT_APPEND_DIO) | ||
106 | #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ | 107 | #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ |
107 | | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ | 108 | | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ |
108 | | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \ | 109 | | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) |
109 | | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) | ||
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Heartbeat-only devices are missing journals and other files. The | 112 | * Heartbeat-only devices are missing journals and other files. The |
@@ -179,6 +179,11 @@ | |||
179 | #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 | 179 | #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 |
180 | 180 | ||
181 | /* | 181 | /* |
182 | * Append Direct IO support | ||
183 | */ | ||
184 | #define OCFS2_FEATURE_INCOMPAT_APPEND_DIO 0x8000 | ||
185 | |||
186 | /* | ||
182 | * backup superblock flag is used to indicate that this volume | 187 | * backup superblock flag is used to indicate that this volume |
183 | * has backup superblocks. | 188 | * has backup superblocks. |
184 | */ | 189 | */ |
@@ -200,10 +205,6 @@ | |||
200 | #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 | 205 | #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 |
201 | #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 | 206 | #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 |
202 | 207 | ||
203 | /* | ||
204 | * Append Direct IO support | ||
205 | */ | ||
206 | #define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO 0x0008 | ||
207 | 208 | ||
208 | /* The byte offset of the first backup block will be 1G. | 209 | /* The byte offset of the first backup block will be 1G. |
209 | * The following will be 4G, 16G, 64G, 256G and 1T. | 210 | * The following will be 4G, 16G, 64G, 256G and 1T. |
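Editor's note: moving APPEND_DIO from the ro_compat to the incompat supported set changes how a kernel that does not know the bit reacts to such a volume — under the usual feature-flag policy, an unknown ro_compat bit only forces a read-only mount, while an unknown incompat bit refuses the mount entirely. A compact sketch of that policy (the constants and helper are invented; this is the conventional semantics, not ocfs2 code):

#include <stdio.h>

#define SUPP_INCOMPAT   0x00ff   /* incompat bits this "kernel" knows  */
#define SUPP_RO_COMPAT  0x000f   /* ro_compat bits it knows            */

/* Returns 0 = mount rw, 1 = force read-only, -1 = refuse the mount. */
static int check_features(unsigned int incompat, unsigned int ro_compat,
                          int want_rw)
{
    if (incompat & ~SUPP_INCOMPAT)
        return -1;                    /* unknown incompat: no mount */
    if (want_rw && (ro_compat & ~SUPP_RO_COMPAT))
        return 1;                     /* unknown ro_compat: ro only */
    return 0;
}

int main(void)
{
    unsigned int append_dio = 0x8000;     /* pretend this is the new bit */

    printf("as ro_compat bit: %d\n", check_features(0, append_dio, 1));  /*  1 */
    printf("as incompat bit:  %d\n", check_features(append_dio, 0, 1));  /* -1 */
    return 0;
}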
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index b90952f528b1..5f0d1993e6e3 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
@@ -529,8 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data) | |||
529 | { | 529 | { |
530 | struct ovl_fs *ufs = sb->s_fs_info; | 530 | struct ovl_fs *ufs = sb->s_fs_info; |
531 | 531 | ||
532 | if (!(*flags & MS_RDONLY) && | 532 | if (!(*flags & MS_RDONLY) && !ufs->upper_mnt) |
533 | (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY))) | ||
534 | return -EROFS; | 533 | return -EROFS; |
535 | 534 | ||
536 | return 0; | 535 | return 0; |
@@ -615,9 +614,19 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) | |||
615 | break; | 614 | break; |
616 | 615 | ||
617 | default: | 616 | default: |
617 | pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p); | ||
618 | return -EINVAL; | 618 | return -EINVAL; |
619 | } | 619 | } |
620 | } | 620 | } |
621 | |||
622 | /* Workdir is useless in non-upper mount */ | ||
623 | if (!config->upperdir && config->workdir) { | ||
624 | pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignore\n", | ||
625 | config->workdir); | ||
626 | kfree(config->workdir); | ||
627 | config->workdir = NULL; | ||
628 | } | ||
629 | |||
621 | return 0; | 630 | return 0; |
622 | } | 631 | } |
623 | 632 | ||
@@ -837,7 +846,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
837 | 846 | ||
838 | sb->s_stack_depth = 0; | 847 | sb->s_stack_depth = 0; |
839 | if (ufs->config.upperdir) { | 848 | if (ufs->config.upperdir) { |
840 | /* FIXME: workdir is not needed for a R/O mount */ | ||
841 | if (!ufs->config.workdir) { | 849 | if (!ufs->config.workdir) { |
842 | pr_err("overlayfs: missing 'workdir'\n"); | 850 | pr_err("overlayfs: missing 'workdir'\n"); |
843 | goto out_free_config; | 851 | goto out_free_config; |
@@ -847,6 +855,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
847 | if (err) | 855 | if (err) |
848 | goto out_free_config; | 856 | goto out_free_config; |
849 | 857 | ||
858 | /* Upper fs should not be r/o */ | ||
859 | if (upperpath.mnt->mnt_sb->s_flags & MS_RDONLY) { | ||
860 | pr_err("overlayfs: upper fs is r/o, try multi-lower layers mount\n"); | ||
861 | err = -EINVAL; | ||
862 | goto out_put_upperpath; | ||
863 | } | ||
864 | |||
850 | err = ovl_mount_dir(ufs->config.workdir, &workpath); | 865 | err = ovl_mount_dir(ufs->config.workdir, &workpath); |
851 | if (err) | 866 | if (err) |
852 | goto out_put_upperpath; | 867 | goto out_put_upperpath; |
@@ -869,8 +884,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
869 | 884 | ||
870 | err = -EINVAL; | 885 | err = -EINVAL; |
871 | stacklen = ovl_split_lowerdirs(lowertmp); | 886 | stacklen = ovl_split_lowerdirs(lowertmp); |
872 | if (stacklen > OVL_MAX_STACK) | 887 | if (stacklen > OVL_MAX_STACK) { |
888 | pr_err("overlayfs: too many lower directries, limit is %d\n", | ||
889 | OVL_MAX_STACK); | ||
873 | goto out_free_lowertmp; | 890 | goto out_free_lowertmp; |
891 | } else if (!ufs->config.upperdir && stacklen == 1) { | ||
892 | pr_err("overlayfs: at least 2 lowerdir are needed while upperdir nonexistent\n"); | ||
893 | goto out_free_lowertmp; | ||
894 | } | ||
874 | 895 | ||
875 | stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); | 896 | stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); |
876 | if (!stack) | 897 | if (!stack) |
@@ -932,8 +953,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
932 | ufs->numlower++; | 953 | ufs->numlower++; |
933 | } | 954 | } |
934 | 955 | ||
935 | /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ | 956 | /* If the upper fs is nonexistent, we mark overlayfs r/o too */ |
936 | if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) | 957 | if (!ufs->upper_mnt) |
937 | sb->s_flags |= MS_RDONLY; | 958 | sb->s_flags |= MS_RDONLY; |
938 | 959 | ||
939 | sb->s_d_op = &ovl_dentry_operations; | 960 | sb->s_d_op = &ovl_dentry_operations; |
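Editor's note: the overlayfs hunks add three mount-time rules — a workdir given without an upperdir is ignored, a read-only upper filesystem is rejected, and a mount with no upper layer needs at least two lower layers. A condensed sketch of those checks with a simplified config struct (names and return values are stand-ins for the real ovl_parse_opt/ovl_fill_super logic):

#include <stdbool.h>
#include <stdio.h>

struct ovl_cfg {
    bool upperdir;
    bool workdir;
    bool upper_ro;     /* upper fs is mounted read-only */
    int  numlower;
};

static int ovl_check(struct ovl_cfg *c)
{
    if (!c->upperdir && c->workdir)
        c->workdir = false;          /* useless without an upper layer */
    if (c->upperdir && c->upper_ro)
        return -1;                   /* upper fs is r/o: reject */
    if (!c->upperdir && c->numlower < 2)
        return -1;                   /* pure-lower mount needs >= 2 layers */
    return 0;
}

int main(void)
{
    struct ovl_cfg ro_upper  = { .upperdir = true, .upper_ro = true, .numlower = 1 };
    struct ovl_cfg one_lower = { .numlower = 1 };
    struct ovl_cfg two_lower = { .numlower = 2 };

    printf("%d %d %d\n", ovl_check(&ro_upper),
           ovl_check(&one_lower), ovl_check(&two_lower));  /* -1 -1 0 */
    return 0;
}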
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 956b75d61809..6dee68d013ff 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -1325,6 +1325,9 @@ out: | |||
1325 | 1325 | ||
1326 | static int pagemap_open(struct inode *inode, struct file *file) | 1326 | static int pagemap_open(struct inode *inode, struct file *file) |
1327 | { | 1327 | { |
1328 | /* do not disclose physical addresses: attack vector */ | ||
1329 | if (!capable(CAP_SYS_ADMIN)) | ||
1330 | return -EPERM; | ||
1328 | pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " | 1331 | pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " |
1329 | "to stop being page-shift some time soon. See the " | 1332 | "to stop being page-shift some time soon. See the " |
1330 | "linux/Documentation/vm/pagemap.txt for details.\n"); | 1333 | "linux/Documentation/vm/pagemap.txt for details.\n"); |
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h index 2fbc804e1a45..226f77246a70 100644 --- a/include/dt-bindings/pinctrl/am33xx.h +++ b/include/dt-bindings/pinctrl/am33xx.h | |||
@@ -13,7 +13,8 @@ | |||
13 | 13 | ||
14 | #define PULL_DISABLE (1 << 3) | 14 | #define PULL_DISABLE (1 << 3) |
15 | #define INPUT_EN (1 << 5) | 15 | #define INPUT_EN (1 << 5) |
16 | #define SLEWCTRL_FAST (1 << 6) | 16 | #define SLEWCTRL_SLOW (1 << 6) |
17 | #define SLEWCTRL_FAST 0 | ||
17 | 18 | ||
18 | /* update macro depending on INPUT_EN and PULL_ENA */ | 19 | /* update macro depending on INPUT_EN and PULL_ENA */ |
19 | #undef PIN_OUTPUT | 20 | #undef PIN_OUTPUT |
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h index 9c2e4f82381e..5f4d01898c9c 100644 --- a/include/dt-bindings/pinctrl/am43xx.h +++ b/include/dt-bindings/pinctrl/am43xx.h | |||
@@ -18,7 +18,8 @@ | |||
18 | #define PULL_DISABLE (1 << 16) | 18 | #define PULL_DISABLE (1 << 16) |
19 | #define PULL_UP (1 << 17) | 19 | #define PULL_UP (1 << 17) |
20 | #define INPUT_EN (1 << 18) | 20 | #define INPUT_EN (1 << 18) |
21 | #define SLEWCTRL_FAST (1 << 19) | 21 | #define SLEWCTRL_SLOW (1 << 19) |
22 | #define SLEWCTRL_FAST 0 | ||
22 | #define DS0_PULL_UP_DOWN_EN (1 << 27) | 23 | #define DS0_PULL_UP_DOWN_EN (1 << 27) |
23 | 24 | ||
24 | #define PIN_OUTPUT (PULL_DISABLE) | 25 | #define PIN_OUTPUT (PULL_DISABLE) |
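Editor's note: after the am33xx/am43xx header changes, SLEWCTRL_SLOW carries the real bit and SLEWCTRL_FAST is a documentation-only 0, which matches hardware where a set slew-control bit means "slow". A tiny check that OR-ing SLEWCTRL_FAST no longer changes the pin configuration value (the am33xx values from the hunk above are reused; the program is illustration only):

#include <stdio.h>

#define PULL_DISABLE   (1 << 3)
#define INPUT_EN       (1 << 5)
#define SLEWCTRL_SLOW  (1 << 6)   /* bit set selects the slow slew rate */
#define SLEWCTRL_FAST  0          /* fast is the hardware default       */

int main(void)
{
    unsigned int fast_pin = PULL_DISABLE | INPUT_EN | SLEWCTRL_FAST;
    unsigned int slow_pin = PULL_DISABLE | INPUT_EN | SLEWCTRL_SLOW;

    /* OR-ing SLEWCTRL_FAST is now a no-op: a device tree that really
     * wants the slow rate has to say SLEWCTRL_SLOW explicitly. */
    printf("fast: 0x%02x  slow: 0x%02x\n", fast_pin, slow_pin);
    return 0;
}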
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 7c55dd5dd2c9..66203b268984 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -114,6 +114,7 @@ struct vgic_ops { | |||
114 | void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); | 114 | void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); |
115 | u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); | 115 | u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); |
116 | u64 (*get_eisr)(const struct kvm_vcpu *vcpu); | 116 | u64 (*get_eisr)(const struct kvm_vcpu *vcpu); |
117 | void (*clear_eisr)(struct kvm_vcpu *vcpu); | ||
117 | u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); | 118 | u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); |
118 | void (*enable_underflow)(struct kvm_vcpu *vcpu); | 119 | void (*enable_underflow)(struct kvm_vcpu *vcpu); |
119 | void (*disable_underflow)(struct kvm_vcpu *vcpu); | 120 | void (*disable_underflow)(struct kvm_vcpu *vcpu); |
diff --git a/include/linux/clk.h b/include/linux/clk.h index 8381bbfbc308..68c16a6bedb3 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h | |||
@@ -125,6 +125,19 @@ int clk_set_phase(struct clk *clk, int degrees); | |||
125 | */ | 125 | */ |
126 | int clk_get_phase(struct clk *clk); | 126 | int clk_get_phase(struct clk *clk); |
127 | 127 | ||
128 | /** | ||
129 | * clk_is_match - check if two clk's point to the same hardware clock | ||
130 | * @p: clk compared against q | ||
131 | * @q: clk compared against p | ||
132 | * | ||
133 | * Returns true if the two struct clk pointers both point to the same hardware | ||
134 | * clock node. Put differently, returns true if struct clk *p and struct clk *q | ||
135 | * share the same struct clk_core object. | ||
136 | * | ||
137 | * Returns false otherwise. Note that two NULL clks are treated as matching. | ||
138 | */ | ||
139 | bool clk_is_match(const struct clk *p, const struct clk *q); | ||
140 | |||
128 | #else | 141 | #else |
129 | 142 | ||
130 | static inline long clk_get_accuracy(struct clk *clk) | 143 | static inline long clk_get_accuracy(struct clk *clk) |
@@ -142,6 +155,11 @@ static inline long clk_get_phase(struct clk *clk) | |||
142 | return -ENOTSUPP; | 155 | return -ENOTSUPP; |
143 | } | 156 | } |
144 | 157 | ||
158 | static inline bool clk_is_match(const struct clk *p, const struct clk *q) | ||
159 | { | ||
160 | return p == q; | ||
161 | } | ||
162 | |||
145 | #endif | 163 | #endif |
146 | 164 | ||
147 | /** | 165 | /** |
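Editor's note: per its kernel-doc above, clk_is_match() exists because two struct clk pointers can wrap the same struct clk_core, so raw pointer comparison can report "different" for the same hardware clock; the !CONFIG_COMMON_CLK stub simply falls back to pointer equality. A userspace mock of that distinction (the structs and the _mock helpers are stand-ins, not the CCF API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct clk_core { const char *name; };
struct clk      { struct clk_core *core; };   /* per-consumer cookie */

static struct clk *clk_get_mock(struct clk_core *core)
{
    struct clk *c = malloc(sizeof(*c));
    c->core = core;
    return c;
}

static bool clk_is_match_mock(const struct clk *p, const struct clk *q)
{
    if (p == q)                 /* also covers two NULL clks */
        return true;
    if (p && q && p->core == q->core)
        return true;
    return false;
}

int main(void)
{
    struct clk_core pll = { "pll1" };
    struct clk *a = clk_get_mock(&pll);   /* clock handle for device A   */
    struct clk *b = clk_get_mock(&pll);   /* same hardware, fresh cookie */

    printf("pointer compare: %d\n", a == b);                   /* 0 */
    printf("clk_is_match:    %d\n", clk_is_match_mock(a, b));  /* 1 */
    free(a);
    free(b);
    return 0;
}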
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 2646aed1d3fe..fd23978d93fe 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -375,6 +375,7 @@ int dm_create(int minor, struct mapped_device **md); | |||
375 | */ | 375 | */ |
376 | struct mapped_device *dm_get_md(dev_t dev); | 376 | struct mapped_device *dm_get_md(dev_t dev); |
377 | void dm_get(struct mapped_device *md); | 377 | void dm_get(struct mapped_device *md); |
378 | int dm_hold(struct mapped_device *md); | ||
378 | void dm_put(struct mapped_device *md); | 379 | void dm_put(struct mapped_device *md); |
379 | 380 | ||
380 | /* | 381 | /* |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 800544bc7bfd..781974afff9f 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -166,6 +166,11 @@ | |||
166 | 166 | ||
167 | #define GITS_TRANSLATER 0x10040 | 167 | #define GITS_TRANSLATER 0x10040 |
168 | 168 | ||
169 | #define GITS_CTLR_ENABLE (1U << 0) | ||
170 | #define GITS_CTLR_QUIESCENT (1U << 31) | ||
171 | |||
172 | #define GITS_TYPER_DEVBITS_SHIFT 13 | ||
173 | #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) | ||
169 | #define GITS_TYPER_PTA (1UL << 19) | 174 | #define GITS_TYPER_PTA (1UL << 19) |
170 | 175 | ||
171 | #define GITS_CBASER_VALID (1UL << 63) | 176 | #define GITS_CBASER_VALID (1UL << 63) |
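Editor's note: GITS_TYPER_DEVBITS() extracts the 5-bit DevBits field from the ITS TYPER register and adds one, yielding the number of device-ID bits the ITS implements. A quick standalone check of that arithmetic; only the two macros come from the header above, the sample register value is made up:

#include <stdio.h>
#include <stdint.h>

#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)

int main(void)
{
    /* Pretend the ITS reports a raw DevBits field of 19 (0x13),
     * i.e. 20 usable device-ID bits. */
    uint64_t typer = (uint64_t)0x13 << GITS_TYPER_DEVBITS_SHIFT;

    printf("device ID bits: %d\n", (int)GITS_TYPER_DEVBITS(typer));  /* 20 */
    return 0;
}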
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 72ba725ddf9c..5bb074431eb0 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | struct kmem_cache; | 6 | struct kmem_cache; |
7 | struct page; | 7 | struct page; |
8 | struct vm_struct; | ||
8 | 9 | ||
9 | #ifdef CONFIG_KASAN | 10 | #ifdef CONFIG_KASAN |
10 | 11 | ||
@@ -49,15 +50,11 @@ void kasan_krealloc(const void *object, size_t new_size); | |||
49 | void kasan_slab_alloc(struct kmem_cache *s, void *object); | 50 | void kasan_slab_alloc(struct kmem_cache *s, void *object); |
50 | void kasan_slab_free(struct kmem_cache *s, void *object); | 51 | void kasan_slab_free(struct kmem_cache *s, void *object); |
51 | 52 | ||
52 | #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) | ||
53 | |||
54 | int kasan_module_alloc(void *addr, size_t size); | 53 | int kasan_module_alloc(void *addr, size_t size); |
55 | void kasan_module_free(void *addr); | 54 | void kasan_free_shadow(const struct vm_struct *vm); |
56 | 55 | ||
57 | #else /* CONFIG_KASAN */ | 56 | #else /* CONFIG_KASAN */ |
58 | 57 | ||
59 | #define MODULE_ALIGN 1 | ||
60 | |||
61 | static inline void kasan_unpoison_shadow(const void *address, size_t size) {} | 58 | static inline void kasan_unpoison_shadow(const void *address, size_t size) {} |
62 | 59 | ||
63 | static inline void kasan_enable_current(void) {} | 60 | static inline void kasan_enable_current(void) {} |
@@ -82,7 +79,7 @@ static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {} | |||
82 | static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} | 79 | static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} |
83 | 80 | ||
84 | static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } | 81 | static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } |
85 | static inline void kasan_module_free(void *addr) {} | 82 | static inline void kasan_free_shadow(const struct vm_struct *vm) {} |
86 | 83 | ||
87 | #endif /* CONFIG_KASAN */ | 84 | #endif /* CONFIG_KASAN */ |
88 | 85 | ||
diff --git a/include/linux/libata.h b/include/linux/libata.h index fc03efa64ffe..6b08cc106c21 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -232,6 +232,7 @@ enum { | |||
232 | * led */ | 232 | * led */ |
233 | ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ | 233 | ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ |
234 | ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ | 234 | ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ |
235 | ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */ | ||
235 | 236 | ||
236 | /* bits 24:31 of ap->flags are reserved for LLD specific flags */ | 237 | /* bits 24:31 of ap->flags are reserved for LLD specific flags */ |
237 | 238 | ||
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index fb0390a1a498..ee7b1ce7a6f8 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h | |||
@@ -2999,6 +2999,9 @@ enum usb_irq_events { | |||
2999 | #define PALMAS_GPADC_TRIM15 0x0E | 2999 | #define PALMAS_GPADC_TRIM15 0x0E |
3000 | #define PALMAS_GPADC_TRIM16 0x0F | 3000 | #define PALMAS_GPADC_TRIM16 0x0F |
3001 | 3001 | ||
3002 | /* TPS659038 regen2_ctrl offset is different from palmas */ | ||
3003 | #define TPS659038_REGEN2_CTRL 0x12 | ||
3004 | |||
3002 | /* TPS65917 Interrupt registers */ | 3005 | /* TPS65917 Interrupt registers */ |
3003 | 3006 | ||
3004 | /* Registers for function INTERRUPT */ | 3007 | /* Registers for function INTERRUPT */ |
diff --git a/include/linux/module.h b/include/linux/module.h index 42999fe2dbd0..b03485bcb82a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -344,6 +344,10 @@ struct module { | |||
344 | unsigned long *ftrace_callsites; | 344 | unsigned long *ftrace_callsites; |
345 | #endif | 345 | #endif |
346 | 346 | ||
347 | #ifdef CONFIG_LIVEPATCH | ||
348 | bool klp_alive; | ||
349 | #endif | ||
350 | |||
347 | #ifdef CONFIG_MODULE_UNLOAD | 351 | #ifdef CONFIG_MODULE_UNLOAD |
348 | /* What modules depend on me? */ | 352 | /* What modules depend on me? */ |
349 | struct list_head source_list; | 353 | struct list_head source_list; |
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index f7556261fe3c..4d0cb9bba93e 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h | |||
@@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod); | |||
84 | 84 | ||
85 | /* Any cleanup before freeing mod->module_init */ | 85 | /* Any cleanup before freeing mod->module_init */ |
86 | void module_arch_freeing_init(struct module *mod); | 86 | void module_arch_freeing_init(struct module *mod); |
87 | |||
88 | #ifdef CONFIG_KASAN | ||
89 | #include <linux/kasan.h> | ||
90 | #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) | ||
91 | #else | ||
92 | #define MODULE_ALIGN PAGE_SIZE | ||
93 | #endif | ||
94 | |||
87 | #endif | 95 | #endif |
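Editor's note: with the kasan.h and moduleloader.h changes, MODULE_ALIGN has a single home and is PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT when KASAN is enabled, so every module's shadow region starts page-aligned. A small numeric check under assumed values (4 KiB pages, shadow scale shift of 3, one shadow byte per 8 bytes); illustration only:

#include <stdio.h>

#define PAGE_SIZE                4096UL   /* assumed */
#define KASAN_SHADOW_SCALE_SHIFT 3        /* assumed */
#define MODULE_ALIGN             (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)

int main(void)
{
    unsigned long module_base = 5 * MODULE_ALIGN;   /* any aligned address */
    unsigned long shadow_off  = module_base >> KASAN_SHADOW_SCALE_SHIFT;

    printf("MODULE_ALIGN = %lu bytes\n", MODULE_ALIGN);          /* 32768 */
    printf("shadow offset %% PAGE_SIZE = %lu\n",
           shadow_off % PAGE_SIZE);                              /* 0 */
    return 0;
}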
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 429d1790a27e..dcf6ec27739b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -965,9 +965,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
965 | * Used to add FDB entries to dump requests. Implementers should add | 965 | * Used to add FDB entries to dump requests. Implementers should add |
966 | * entries to skb and update idx with the number of entries. | 966 | * entries to skb and update idx with the number of entries. |
967 | * | 967 | * |
968 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) | 968 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, |
969 | * u16 flags) | ||
969 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, | 970 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
970 | * struct net_device *dev, u32 filter_mask) | 971 | * struct net_device *dev, u32 filter_mask) |
972 | * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, | ||
973 | * u16 flags); | ||
971 | * | 974 | * |
972 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); | 975 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); |
973 | * Called to change device carrier. Soft-devices (like dummy, team, etc) | 976 | * Called to change device carrier. Soft-devices (like dummy, team, etc) |
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 8a860f096c35..611a691145c4 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
@@ -84,7 +84,7 @@ static inline int of_platform_populate(struct device_node *root, | |||
84 | static inline void of_platform_depopulate(struct device *parent) { } | 84 | static inline void of_platform_depopulate(struct device *parent) { } |
85 | #endif | 85 | #endif |
86 | 86 | ||
87 | #ifdef CONFIG_OF_DYNAMIC | 87 | #if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS) |
88 | extern void of_platform_register_reconfig_notifier(void); | 88 | extern void of_platform_register_reconfig_notifier(void); |
89 | #else | 89 | #else |
90 | static inline void of_platform_register_reconfig_notifier(void) { } | 90 | static inline void of_platform_register_reconfig_notifier(void) { } |
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 72c0415d6c21..18eccefea06e 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h | |||
@@ -82,7 +82,7 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio) | |||
82 | 82 | ||
83 | static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) | 83 | static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) |
84 | { | 84 | { |
85 | return ERR_PTR(-ENOSYS); | 85 | return NULL; |
86 | } | 86 | } |
87 | 87 | ||
88 | static inline void pinctrl_put(struct pinctrl *p) | 88 | static inline void pinctrl_put(struct pinctrl *p) |
@@ -93,7 +93,7 @@ static inline struct pinctrl_state * __must_check pinctrl_lookup_state( | |||
93 | struct pinctrl *p, | 93 | struct pinctrl *p, |
94 | const char *name) | 94 | const char *name) |
95 | { | 95 | { |
96 | return ERR_PTR(-ENOSYS); | 96 | return NULL; |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline int pinctrl_select_state(struct pinctrl *p, | 99 | static inline int pinctrl_select_state(struct pinctrl *p, |
@@ -104,7 +104,7 @@ static inline int pinctrl_select_state(struct pinctrl *p, | |||
104 | 104 | ||
105 | static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) | 105 | static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) |
106 | { | 106 | { |
107 | return ERR_PTR(-ENOSYS); | 107 | return NULL; |
108 | } | 108 | } |
109 | 109 | ||
110 | static inline void devm_pinctrl_put(struct pinctrl *p) | 110 | static inline void devm_pinctrl_put(struct pinctrl *p) |
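Editor's note: returning NULL instead of ERR_PTR(-ENOSYS) from the !CONFIG_PINCTRL consumer stubs lets drivers that treat pin control as optional keep probing — IS_ERR(NULL) is false, so the caller carries on and the remaining stub calls simply succeed. A userspace illustration of that difference; IS_ERR/ERR_PTR are re-created locally and the probe function is invented:

#include <stdio.h>

#define MAX_ERRNO     4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define MY_ENOSYS     38

static int probe_with(void *pinctrl_handle)
{
    if (IS_ERR(pinctrl_handle))
        return -1;          /* the old stubs forced every caller down here */
    /* NULL handle: pinctrl support absent, carry on without it. */
    return 0;
}

int main(void)
{
    printf("stub returns ERR_PTR(-ENOSYS): probe -> %d\n",
           probe_with(ERR_PTR(-MY_ENOSYS)));   /* -1 */
    printf("stub returns NULL:             probe -> %d\n",
           probe_with(NULL));                  /*  0 */
    return 0;
}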
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index d4ad5b5a02bb..045f709cb89b 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
@@ -316,7 +316,7 @@ struct regulator_desc { | |||
316 | * @driver_data: private regulator data | 316 | * @driver_data: private regulator data |
317 | * @of_node: OpenFirmware node to parse for device tree bindings (may be | 317 | * @of_node: OpenFirmware node to parse for device tree bindings (may be |
318 | * NULL). | 318 | * NULL). |
319 | * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is | 319 | * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is |
320 | * insufficient. | 320 | * insufficient. |
321 | * @ena_gpio_initialized: GPIO controlling regulator enable was properly | 321 | * @ena_gpio_initialized: GPIO controlling regulator enable was properly |
322 | * initialized, meaning that >= 0 is a valid gpio | 322 | * initialized, meaning that >= 0 is a valid gpio |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 6d77432e14ff..a419b65770d6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1625,11 +1625,11 @@ struct task_struct { | |||
1625 | 1625 | ||
1626 | /* | 1626 | /* |
1627 | * numa_faults_locality tracks if faults recorded during the last | 1627 | * numa_faults_locality tracks if faults recorded during the last |
1628 | * scan window were remote/local. The task scan period is adapted | 1628 | * scan window were remote/local or failed to migrate. The task scan |
1629 | * based on the locality of the faults with different weights | 1629 | * period is adapted based on the locality of the faults with different |
1630 | * depending on whether they were shared or private faults | 1630 | * weights depending on whether they were shared or private faults |
1631 | */ | 1631 | */ |
1632 | unsigned long numa_faults_locality[2]; | 1632 | unsigned long numa_faults_locality[3]; |
1633 | 1633 | ||
1634 | unsigned long numa_pages_migrated; | 1634 | unsigned long numa_pages_migrated; |
1635 | #endif /* CONFIG_NUMA_BALANCING */ | 1635 | #endif /* CONFIG_NUMA_BALANCING */ |
@@ -1719,6 +1719,7 @@ struct task_struct { | |||
1719 | #define TNF_NO_GROUP 0x02 | 1719 | #define TNF_NO_GROUP 0x02 |
1720 | #define TNF_SHARED 0x04 | 1720 | #define TNF_SHARED 0x04 |
1721 | #define TNF_FAULT_LOCAL 0x08 | 1721 | #define TNF_FAULT_LOCAL 0x08 |
1722 | #define TNF_MIGRATE_FAIL 0x10 | ||
1722 | 1723 | ||
1723 | #ifdef CONFIG_NUMA_BALANCING | 1724 | #ifdef CONFIG_NUMA_BALANCING |
1724 | extern void task_numa_fault(int last_node, int node, int pages, int flags); | 1725 | extern void task_numa_fault(int last_node, int node, int pages, int flags); |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 30007afe70b3..f54d6659713a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -948,6 +948,13 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) | |||
948 | to->l4_hash = from->l4_hash; | 948 | to->l4_hash = from->l4_hash; |
949 | }; | 949 | }; |
950 | 950 | ||
951 | static inline void skb_sender_cpu_clear(struct sk_buff *skb) | ||
952 | { | ||
953 | #ifdef CONFIG_XPS | ||
954 | skb->sender_cpu = 0; | ||
955 | #endif | ||
956 | } | ||
957 | |||
951 | #ifdef NET_SKBUFF_DATA_USES_OFFSET | 958 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
952 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) | 959 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) |
953 | { | 960 | { |
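The new skb_sender_cpu_clear() helper drops the sender_cpu hint that XPS records for transmit-queue selection, and compiles away entirely without CONFIG_XPS. A hedged sketch of where such a call plausibly belongs, with foo_forward() and its policy purely illustrative: when an skb is handed to a different device for retransmission, clearing the stale hint lets the new device's queue selection start clean.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void foo_forward(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb_sender_cpu_clear(skb);	/* no-op unless CONFIG_XPS is enabled */
	dev_queue_xmit(skb);
}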
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index ed9489d893a4..856d34dde79b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -649,7 +649,7 @@ struct spi_transfer { | |||
649 | * sequence completes. On some systems, many such sequences can execute as | 649 | * sequence completes. On some systems, many such sequences can execute as |
650 | * as single programmed DMA transfer. On all systems, these messages are | 650 | * as single programmed DMA transfer. On all systems, these messages are |
651 | * queued, and might complete after transactions to other devices. Messages | 651 | * queued, and might complete after transactions to other devices. Messages |
652 | * sent to a given spi_device are alway executed in FIFO order. | 652 | * sent to a given spi_device are always executed in FIFO order. |
653 | * | 653 | * |
654 | * The code that submits an spi_message (and its spi_transfers) | 654 | * The code that submits an spi_message (and its spi_transfers) |
655 | * to the lower layers is responsible for managing its memory. | 655 | * to the lower layers is responsible for managing its memory. |
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index aadc6a04e1ac..807371357160 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h | |||
@@ -142,12 +142,18 @@ typedef __be32 rpc_fraghdr; | |||
142 | (RPC_REPHDRSIZE + (2 + RPC_MAX_AUTH_SIZE/4)) | 142 | (RPC_REPHDRSIZE + (2 + RPC_MAX_AUTH_SIZE/4)) |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * RFC1833/RFC3530 rpcbind (v3+) well-known netid's. | 145 | * Well-known netids. See: |
146 | * | ||
147 | * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml | ||
146 | */ | 148 | */ |
147 | #define RPCBIND_NETID_UDP "udp" | 149 | #define RPCBIND_NETID_UDP "udp" |
148 | #define RPCBIND_NETID_TCP "tcp" | 150 | #define RPCBIND_NETID_TCP "tcp" |
151 | #define RPCBIND_NETID_RDMA "rdma" | ||
152 | #define RPCBIND_NETID_SCTP "sctp" | ||
149 | #define RPCBIND_NETID_UDP6 "udp6" | 153 | #define RPCBIND_NETID_UDP6 "udp6" |
150 | #define RPCBIND_NETID_TCP6 "tcp6" | 154 | #define RPCBIND_NETID_TCP6 "tcp6" |
155 | #define RPCBIND_NETID_RDMA6 "rdma6" | ||
156 | #define RPCBIND_NETID_SCTP6 "sctp6" | ||
151 | #define RPCBIND_NETID_LOCAL "local" | 157 | #define RPCBIND_NETID_LOCAL "local" |
152 | 158 | ||
153 | /* | 159 | /* |
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index 64a0a0a97b23..c984c85981ea 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h | |||
@@ -41,11 +41,6 @@ | |||
41 | #define _LINUX_SUNRPC_XPRTRDMA_H | 41 | #define _LINUX_SUNRPC_XPRTRDMA_H |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * rpcbind (v3+) RDMA netid. | ||
45 | */ | ||
46 | #define RPCBIND_NETID_RDMA "rdma" | ||
47 | |||
48 | /* | ||
49 | * Constants. Max RPC/NFS header is big enough to account for | 44 | * Constants. Max RPC/NFS header is big enough to account for |
50 | * additional marshaling buffers passed down by Linux client. | 45 | * additional marshaling buffers passed down by Linux client. |
51 | * | 46 | * |
diff --git a/include/linux/uio.h b/include/linux/uio.h index 07a022641996..71880299ed48 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
@@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, | |||
98 | size_t maxsize, size_t *start); | 98 | size_t maxsize, size_t *start); |
99 | int iov_iter_npages(const struct iov_iter *i, int maxpages); | 99 | int iov_iter_npages(const struct iov_iter *i, int maxpages); |
100 | 100 | ||
101 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); | ||
102 | |||
101 | static inline size_t iov_iter_count(struct iov_iter *i) | 103 | static inline size_t iov_iter_count(struct iov_iter *i) |
102 | { | 104 | { |
103 | return i->count; | 105 | return i->count; |
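dup_iter() copies an iov_iter, including a duplicate of its segment array, and returns that duplicate (NULL on allocation failure) for the caller to free later. A hedged usage sketch under that reading of the interface; foo_submit() is an assumed helper that may advance or consume the iterator it is given:

#include <linux/slab.h>
#include <linux/uio.h>

static int foo_submit(struct iov_iter *iter);	/* assumed helper */

static int foo_submit_with_retry(struct iov_iter *iter)
{
	struct iov_iter copy;
	const void *dup;
	int ret;

	dup = dup_iter(&copy, iter, GFP_KERNEL);
	if (!dup)
		return -ENOMEM;

	ret = foo_submit(iter);			/* may consume 'iter' */
	if (ret == -EAGAIN)
		ret = foo_submit(&copy);	/* retry from the saved position */

	kfree(dup);				/* release the duplicated segments */
	return ret;
}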
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 7d7acb35603d..0ec598381f97 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -17,6 +17,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ | |||
17 | #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ | 17 | #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ |
18 | #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ | 18 | #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ |
19 | #define VM_NO_GUARD 0x00000040 /* don't add guard page */ | 19 | #define VM_NO_GUARD 0x00000040 /* don't add guard page */ |
20 | #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ | ||
20 | /* bits [20..32] reserved for arch specific ioremap internals */ | 21 | /* bits [20..32] reserved for arch specific ioremap internals */ |
21 | 22 | ||
22 | /* | 23 | /* |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 74db135f9957..f597846ff605 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -70,7 +70,8 @@ enum { | |||
70 | /* data contains off-queue information when !WORK_STRUCT_PWQ */ | 70 | /* data contains off-queue information when !WORK_STRUCT_PWQ */ |
71 | WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, | 71 | WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, |
72 | 72 | ||
73 | WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), | 73 | __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, |
74 | WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING), | ||
74 | 75 | ||
75 | /* | 76 | /* |
76 | * When a work item is off queue, its high bits point to the last | 77 | * When a work item is off queue, its high bits point to the last |
diff --git a/include/net/dst.h b/include/net/dst.h index a8ae4e760778..0fb99a26e973 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -481,6 +481,7 @@ void dst_init(void); | |||
481 | enum { | 481 | enum { |
482 | XFRM_LOOKUP_ICMP = 1 << 0, | 482 | XFRM_LOOKUP_ICMP = 1 << 0, |
483 | XFRM_LOOKUP_QUEUE = 1 << 1, | 483 | XFRM_LOOKUP_QUEUE = 1 << 1, |
484 | XFRM_LOOKUP_KEEP_DST_REF = 1 << 2, | ||
484 | }; | 485 | }; |
485 | 486 | ||
486 | struct flowi; | 487 | struct flowi; |
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 534e1f2ac4fc..57639fca223a 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h | |||
@@ -79,6 +79,16 @@ void nf_log_packet(struct net *net, | |||
79 | const struct nf_loginfo *li, | 79 | const struct nf_loginfo *li, |
80 | const char *fmt, ...); | 80 | const char *fmt, ...); |
81 | 81 | ||
82 | __printf(8, 9) | ||
83 | void nf_log_trace(struct net *net, | ||
84 | u_int8_t pf, | ||
85 | unsigned int hooknum, | ||
86 | const struct sk_buff *skb, | ||
87 | const struct net_device *in, | ||
88 | const struct net_device *out, | ||
89 | const struct nf_loginfo *li, | ||
90 | const char *fmt, ...); | ||
91 | |||
82 | struct nf_log_buf; | 92 | struct nf_log_buf; |
83 | 93 | ||
84 | struct nf_log_buf *nf_log_buf_open(void); | 94 | struct nf_log_buf *nf_log_buf_open(void); |
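nf_log_trace() mirrors nf_log_packet(): eight fixed arguments followed by printf-style varargs, which is exactly what the __printf(8, 9) annotation asks the compiler to type-check. A hedged call sketch; the wrapper, its arguments and the format text are illustrative rather than quoted from any particular ruleset code:

#include <net/netfilter/nf_log.h>

static void foo_trace_rule(struct net *net, u_int8_t pf, unsigned int hooknum,
			   const struct sk_buff *skb,
			   const struct net_device *in,
			   const struct net_device *out,
			   const struct nf_loginfo *loginfo,
			   const char *chain)
{
	/* The format string is argument 8; varargs begin at 9. */
	nf_log_trace(net, pf, hooknum, skb, in, out, loginfo,
		     "TRACE: %s ", chain);
}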
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 9eaaa7884586..decb9a095ae7 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -119,6 +119,22 @@ int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg, | |||
119 | const struct nft_data *data, | 119 | const struct nft_data *data, |
120 | enum nft_data_types type); | 120 | enum nft_data_types type); |
121 | 121 | ||
122 | |||
123 | /** | ||
124 | * struct nft_userdata - user defined data associated with an object | ||
125 | * | ||
126 | * @len: length of the data | ||
127 | * @data: content | ||
128 | * | ||
129 | * The presence of user data is indicated in an object specific fashion, | ||
130 | * so a length of zero can't occur and the value "len" indicates data | ||
131 | * of length len + 1. | ||
132 | */ | ||
133 | struct nft_userdata { | ||
134 | u8 len; | ||
135 | unsigned char data[0]; | ||
136 | }; | ||
137 | |||
122 | /** | 138 | /** |
123 | * struct nft_set_elem - generic representation of set elements | 139 | * struct nft_set_elem - generic representation of set elements |
124 | * | 140 | * |
@@ -380,7 +396,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr) | |||
380 | * @handle: rule handle | 396 | * @handle: rule handle |
381 | * @genmask: generation mask | 397 | * @genmask: generation mask |
382 | * @dlen: length of expression data | 398 | * @dlen: length of expression data |
383 | * @ulen: length of user data (used for comments) | 399 | * @udata: user data is appended to the rule |
384 | * @data: expression data | 400 | * @data: expression data |
385 | */ | 401 | */ |
386 | struct nft_rule { | 402 | struct nft_rule { |
@@ -388,7 +404,7 @@ struct nft_rule { | |||
388 | u64 handle:42, | 404 | u64 handle:42, |
389 | genmask:2, | 405 | genmask:2, |
390 | dlen:12, | 406 | dlen:12, |
391 | ulen:8; | 407 | udata:1; |
392 | unsigned char data[] | 408 | unsigned char data[] |
393 | __attribute__((aligned(__alignof__(struct nft_expr)))); | 409 | __attribute__((aligned(__alignof__(struct nft_expr)))); |
394 | }; | 410 | }; |
@@ -476,7 +492,7 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule) | |||
476 | return (struct nft_expr *)&rule->data[rule->dlen]; | 492 | return (struct nft_expr *)&rule->data[rule->dlen]; |
477 | } | 493 | } |
478 | 494 | ||
479 | static inline void *nft_userdata(const struct nft_rule *rule) | 495 | static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) |
480 | { | 496 | { |
481 | return (void *)&rule->data[rule->dlen]; | 497 | return (void *)&rule->data[rule->dlen]; |
482 | } | 498 | } |
diff --git a/include/net/vxlan.h b/include/net/vxlan.h index eabd3a038674..c73e7abbbaa5 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h | |||
@@ -91,6 +91,7 @@ struct vxlanhdr { | |||
91 | 91 | ||
92 | #define VXLAN_N_VID (1u << 24) | 92 | #define VXLAN_N_VID (1u << 24) |
93 | #define VXLAN_VID_MASK (VXLAN_N_VID - 1) | 93 | #define VXLAN_VID_MASK (VXLAN_N_VID - 1) |
94 | #define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8) | ||
94 | #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) | 95 | #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) |
95 | 96 | ||
96 | struct vxlan_metadata { | 97 | struct vxlan_metadata { |
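Because the 24-bit VNI sits in the upper three bytes of the 32-bit vx_vni field, VXLAN_VNI_MASK works out to 0x00ffffff << 8 == 0xffffff00 and selects exactly those bits. A hedged extraction sketch; foo_vxlan_vni() is illustrative:

#include <net/vxlan.h>

static u32 foo_vxlan_vni(const struct vxlanhdr *vxh)
{
	u32 vni_field = ntohl(vxh->vx_vni);

	/* The reserved low byte should already be zero; the mask drops it. */
	return (vni_field & VXLAN_VNI_MASK) >> 8;
}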
diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h index 0210797abf2e..dc10c52e0e91 100644 --- a/include/soc/at91/at91sam9_ddrsdr.h +++ b/include/soc/at91/at91sam9_ddrsdr.h | |||
@@ -92,7 +92,7 @@ | |||
92 | #define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ | 92 | #define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ |
93 | 93 | ||
94 | #define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ | 94 | #define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ |
95 | #define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */ | 95 | #define AT91_DDRSDRC_MD (7 << 0) /* Memory Device Type */ |
96 | #define AT91_DDRSDRC_MD_SDR 0 | 96 | #define AT91_DDRSDRC_MD_SDR 0 |
97 | #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 | 97 | #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 |
98 | #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 | 98 | #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 |
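Widening AT91_DDRSDRC_MD from a two-bit to a three-bit mask matters because the memory-device type field can hold values above 3; with the old mask, a type encoded as, say, 6 would silently read back as 2. A hedged decode sketch, where foo_ddrsdr_memtype() and the bare readl() access are illustrative:

#include <linux/io.h>
#include <soc/at91/at91sam9_ddrsdr.h>

static unsigned int foo_ddrsdr_memtype(void __iomem *ddrc_base)
{
	/* Keep all three bits of the MD field rather than truncating it. */
	return readl(ddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
}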
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index db81c65b8f48..d61be7297b2c 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
@@ -111,6 +111,7 @@ void array_free(void *array, int n); | |||
111 | void target_core_setup_sub_cits(struct se_subsystem_api *); | 111 | void target_core_setup_sub_cits(struct se_subsystem_api *); |
112 | 112 | ||
113 | /* attribute helpers from target_core_device.c for backend drivers */ | 113 | /* attribute helpers from target_core_device.c for backend drivers */ |
114 | bool se_dev_check_wce(struct se_device *); | ||
114 | int se_dev_set_max_unmap_lba_count(struct se_device *, u32); | 115 | int se_dev_set_max_unmap_lba_count(struct se_device *, u32); |
115 | int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); | 116 | int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); |
116 | int se_dev_set_unmap_granularity(struct se_device *, u32); | 117 | int se_dev_set_unmap_granularity(struct se_device *, u32); |
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h index 23d561512f64..22317d2b52ab 100644 --- a/include/trace/events/regmap.h +++ b/include/trace/events/regmap.h | |||
@@ -7,27 +7,26 @@ | |||
7 | #include <linux/ktime.h> | 7 | #include <linux/ktime.h> |
8 | #include <linux/tracepoint.h> | 8 | #include <linux/tracepoint.h> |
9 | 9 | ||
10 | struct device; | 10 | #include "../../../drivers/base/regmap/internal.h" |
11 | struct regmap; | ||
12 | 11 | ||
13 | /* | 12 | /* |
14 | * Log register events | 13 | * Log register events |
15 | */ | 14 | */ |
16 | DECLARE_EVENT_CLASS(regmap_reg, | 15 | DECLARE_EVENT_CLASS(regmap_reg, |
17 | 16 | ||
18 | TP_PROTO(struct device *dev, unsigned int reg, | 17 | TP_PROTO(struct regmap *map, unsigned int reg, |
19 | unsigned int val), | 18 | unsigned int val), |
20 | 19 | ||
21 | TP_ARGS(dev, reg, val), | 20 | TP_ARGS(map, reg, val), |
22 | 21 | ||
23 | TP_STRUCT__entry( | 22 | TP_STRUCT__entry( |
24 | __string( name, dev_name(dev) ) | 23 | __string( name, regmap_name(map) ) |
25 | __field( unsigned int, reg ) | 24 | __field( unsigned int, reg ) |
26 | __field( unsigned int, val ) | 25 | __field( unsigned int, val ) |
27 | ), | 26 | ), |
28 | 27 | ||
29 | TP_fast_assign( | 28 | TP_fast_assign( |
30 | __assign_str(name, dev_name(dev)); | 29 | __assign_str(name, regmap_name(map)); |
31 | __entry->reg = reg; | 30 | __entry->reg = reg; |
32 | __entry->val = val; | 31 | __entry->val = val; |
33 | ), | 32 | ), |
@@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg, | |||
39 | 38 | ||
40 | DEFINE_EVENT(regmap_reg, regmap_reg_write, | 39 | DEFINE_EVENT(regmap_reg, regmap_reg_write, |
41 | 40 | ||
42 | TP_PROTO(struct device *dev, unsigned int reg, | 41 | TP_PROTO(struct regmap *map, unsigned int reg, |
43 | unsigned int val), | 42 | unsigned int val), |
44 | 43 | ||
45 | TP_ARGS(dev, reg, val) | 44 | TP_ARGS(map, reg, val) |
46 | 45 | ||
47 | ); | 46 | ); |
48 | 47 | ||
49 | DEFINE_EVENT(regmap_reg, regmap_reg_read, | 48 | DEFINE_EVENT(regmap_reg, regmap_reg_read, |
50 | 49 | ||
51 | TP_PROTO(struct device *dev, unsigned int reg, | 50 | TP_PROTO(struct regmap *map, unsigned int reg, |
52 | unsigned int val), | 51 | unsigned int val), |
53 | 52 | ||
54 | TP_ARGS(dev, reg, val) | 53 | TP_ARGS(map, reg, val) |
55 | 54 | ||
56 | ); | 55 | ); |
57 | 56 | ||
58 | DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, | 57 | DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, |
59 | 58 | ||
60 | TP_PROTO(struct device *dev, unsigned int reg, | 59 | TP_PROTO(struct regmap *map, unsigned int reg, |
61 | unsigned int val), | 60 | unsigned int val), |
62 | 61 | ||
63 | TP_ARGS(dev, reg, val) | 62 | TP_ARGS(map, reg, val) |
64 | 63 | ||
65 | ); | 64 | ); |
66 | 65 | ||
67 | DECLARE_EVENT_CLASS(regmap_block, | 66 | DECLARE_EVENT_CLASS(regmap_block, |
68 | 67 | ||
69 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 68 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
70 | 69 | ||
71 | TP_ARGS(dev, reg, count), | 70 | TP_ARGS(map, reg, count), |
72 | 71 | ||
73 | TP_STRUCT__entry( | 72 | TP_STRUCT__entry( |
74 | __string( name, dev_name(dev) ) | 73 | __string( name, regmap_name(map) ) |
75 | __field( unsigned int, reg ) | 74 | __field( unsigned int, reg ) |
76 | __field( int, count ) | 75 | __field( int, count ) |
77 | ), | 76 | ), |
78 | 77 | ||
79 | TP_fast_assign( | 78 | TP_fast_assign( |
80 | __assign_str(name, dev_name(dev)); | 79 | __assign_str(name, regmap_name(map)); |
81 | __entry->reg = reg; | 80 | __entry->reg = reg; |
82 | __entry->count = count; | 81 | __entry->count = count; |
83 | ), | 82 | ), |
@@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block, | |||
89 | 88 | ||
90 | DEFINE_EVENT(regmap_block, regmap_hw_read_start, | 89 | DEFINE_EVENT(regmap_block, regmap_hw_read_start, |
91 | 90 | ||
92 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 91 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
93 | 92 | ||
94 | TP_ARGS(dev, reg, count) | 93 | TP_ARGS(map, reg, count) |
95 | ); | 94 | ); |
96 | 95 | ||
97 | DEFINE_EVENT(regmap_block, regmap_hw_read_done, | 96 | DEFINE_EVENT(regmap_block, regmap_hw_read_done, |
98 | 97 | ||
99 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 98 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
100 | 99 | ||
101 | TP_ARGS(dev, reg, count) | 100 | TP_ARGS(map, reg, count) |
102 | ); | 101 | ); |
103 | 102 | ||
104 | DEFINE_EVENT(regmap_block, regmap_hw_write_start, | 103 | DEFINE_EVENT(regmap_block, regmap_hw_write_start, |
105 | 104 | ||
106 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 105 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
107 | 106 | ||
108 | TP_ARGS(dev, reg, count) | 107 | TP_ARGS(map, reg, count) |
109 | ); | 108 | ); |
110 | 109 | ||
111 | DEFINE_EVENT(regmap_block, regmap_hw_write_done, | 110 | DEFINE_EVENT(regmap_block, regmap_hw_write_done, |
112 | 111 | ||
113 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 112 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
114 | 113 | ||
115 | TP_ARGS(dev, reg, count) | 114 | TP_ARGS(map, reg, count) |
116 | ); | 115 | ); |
117 | 116 | ||
118 | TRACE_EVENT(regcache_sync, | 117 | TRACE_EVENT(regcache_sync, |
119 | 118 | ||
120 | TP_PROTO(struct device *dev, const char *type, | 119 | TP_PROTO(struct regmap *map, const char *type, |
121 | const char *status), | 120 | const char *status), |
122 | 121 | ||
123 | TP_ARGS(dev, type, status), | 122 | TP_ARGS(map, type, status), |
124 | 123 | ||
125 | TP_STRUCT__entry( | 124 | TP_STRUCT__entry( |
126 | __string( name, dev_name(dev) ) | 125 | __string( name, regmap_name(map) ) |
127 | __string( status, status ) | 126 | __string( status, status ) |
128 | __string( type, type ) | 127 | __string( type, type ) |
129 | __field( int, type ) | 128 | __field( int, type ) |
130 | ), | 129 | ), |
131 | 130 | ||
132 | TP_fast_assign( | 131 | TP_fast_assign( |
133 | __assign_str(name, dev_name(dev)); | 132 | __assign_str(name, regmap_name(map)); |
134 | __assign_str(status, status); | 133 | __assign_str(status, status); |
135 | __assign_str(type, type); | 134 | __assign_str(type, type); |
136 | ), | 135 | ), |
@@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync, | |||
141 | 140 | ||
142 | DECLARE_EVENT_CLASS(regmap_bool, | 141 | DECLARE_EVENT_CLASS(regmap_bool, |
143 | 142 | ||
144 | TP_PROTO(struct device *dev, bool flag), | 143 | TP_PROTO(struct regmap *map, bool flag), |
145 | 144 | ||
146 | TP_ARGS(dev, flag), | 145 | TP_ARGS(map, flag), |
147 | 146 | ||
148 | TP_STRUCT__entry( | 147 | TP_STRUCT__entry( |
149 | __string( name, dev_name(dev) ) | 148 | __string( name, regmap_name(map) ) |
150 | __field( int, flag ) | 149 | __field( int, flag ) |
151 | ), | 150 | ), |
152 | 151 | ||
153 | TP_fast_assign( | 152 | TP_fast_assign( |
154 | __assign_str(name, dev_name(dev)); | 153 | __assign_str(name, regmap_name(map)); |
155 | __entry->flag = flag; | 154 | __entry->flag = flag; |
156 | ), | 155 | ), |
157 | 156 | ||
@@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool, | |||
161 | 160 | ||
162 | DEFINE_EVENT(regmap_bool, regmap_cache_only, | 161 | DEFINE_EVENT(regmap_bool, regmap_cache_only, |
163 | 162 | ||
164 | TP_PROTO(struct device *dev, bool flag), | 163 | TP_PROTO(struct regmap *map, bool flag), |
165 | 164 | ||
166 | TP_ARGS(dev, flag) | 165 | TP_ARGS(map, flag) |
167 | 166 | ||
168 | ); | 167 | ); |
169 | 168 | ||
170 | DEFINE_EVENT(regmap_bool, regmap_cache_bypass, | 169 | DEFINE_EVENT(regmap_bool, regmap_cache_bypass, |
171 | 170 | ||
172 | TP_PROTO(struct device *dev, bool flag), | 171 | TP_PROTO(struct regmap *map, bool flag), |
173 | 172 | ||
174 | TP_ARGS(dev, flag) | 173 | TP_ARGS(map, flag) |
175 | 174 | ||
176 | ); | 175 | ); |
177 | 176 | ||
178 | DECLARE_EVENT_CLASS(regmap_async, | 177 | DECLARE_EVENT_CLASS(regmap_async, |
179 | 178 | ||
180 | TP_PROTO(struct device *dev), | 179 | TP_PROTO(struct regmap *map), |
181 | 180 | ||
182 | TP_ARGS(dev), | 181 | TP_ARGS(map), |
183 | 182 | ||
184 | TP_STRUCT__entry( | 183 | TP_STRUCT__entry( |
185 | __string( name, dev_name(dev) ) | 184 | __string( name, regmap_name(map) ) |
186 | ), | 185 | ), |
187 | 186 | ||
188 | TP_fast_assign( | 187 | TP_fast_assign( |
189 | __assign_str(name, dev_name(dev)); | 188 | __assign_str(name, regmap_name(map)); |
190 | ), | 189 | ), |
191 | 190 | ||
192 | TP_printk("%s", __get_str(name)) | 191 | TP_printk("%s", __get_str(name)) |
@@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async, | |||
194 | 193 | ||
195 | DEFINE_EVENT(regmap_block, regmap_async_write_start, | 194 | DEFINE_EVENT(regmap_block, regmap_async_write_start, |
196 | 195 | ||
197 | TP_PROTO(struct device *dev, unsigned int reg, int count), | 196 | TP_PROTO(struct regmap *map, unsigned int reg, int count), |
198 | 197 | ||
199 | TP_ARGS(dev, reg, count) | 198 | TP_ARGS(map, reg, count) |
200 | ); | 199 | ); |
201 | 200 | ||
202 | DEFINE_EVENT(regmap_async, regmap_async_io_complete, | 201 | DEFINE_EVENT(regmap_async, regmap_async_io_complete, |
203 | 202 | ||
204 | TP_PROTO(struct device *dev), | 203 | TP_PROTO(struct regmap *map), |
205 | 204 | ||
206 | TP_ARGS(dev) | 205 | TP_ARGS(map) |
207 | 206 | ||
208 | ); | 207 | ); |
209 | 208 | ||
210 | DEFINE_EVENT(regmap_async, regmap_async_complete_start, | 209 | DEFINE_EVENT(regmap_async, regmap_async_complete_start, |
211 | 210 | ||
212 | TP_PROTO(struct device *dev), | 211 | TP_PROTO(struct regmap *map), |
213 | 212 | ||
214 | TP_ARGS(dev) | 213 | TP_ARGS(map) |
215 | 214 | ||
216 | ); | 215 | ); |
217 | 216 | ||
218 | DEFINE_EVENT(regmap_async, regmap_async_complete_done, | 217 | DEFINE_EVENT(regmap_async, regmap_async_complete_done, |
219 | 218 | ||
220 | TP_PROTO(struct device *dev), | 219 | TP_PROTO(struct regmap *map), |
221 | 220 | ||
222 | TP_ARGS(dev) | 221 | TP_ARGS(map) |
223 | 222 | ||
224 | ); | 223 | ); |
225 | 224 | ||
226 | TRACE_EVENT(regcache_drop_region, | 225 | TRACE_EVENT(regcache_drop_region, |
227 | 226 | ||
228 | TP_PROTO(struct device *dev, unsigned int from, | 227 | TP_PROTO(struct regmap *map, unsigned int from, |
229 | unsigned int to), | 228 | unsigned int to), |
230 | 229 | ||
231 | TP_ARGS(dev, from, to), | 230 | TP_ARGS(map, from, to), |
232 | 231 | ||
233 | TP_STRUCT__entry( | 232 | TP_STRUCT__entry( |
234 | __string( name, dev_name(dev) ) | 233 | __string( name, regmap_name(map) ) |
235 | __field( unsigned int, from ) | 234 | __field( unsigned int, from ) |
236 | __field( unsigned int, to ) | 235 | __field( unsigned int, to ) |
237 | ), | 236 | ), |
238 | 237 | ||
239 | TP_fast_assign( | 238 | TP_fast_assign( |
240 | __assign_str(name, dev_name(dev)); | 239 | __assign_str(name, regmap_name(map)); |
241 | __entry->from = from; | 240 | __entry->from = from; |
242 | __entry->to = to; | 241 | __entry->to = to; |
243 | ), | 242 | ), |
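The regmap tracepoints now take the struct regmap itself and derive the reported name via regmap_name(), which is why the event header pulls in the subsystem's private internal.h. Presumably the call sites inside drivers/base/regmap change in lock-step, roughly like this (a sketch, not quoted from the regmap core):

/* before: the device was passed and dev_name() supplied the event name */
trace_regmap_reg_write(map->dev, reg, val);

/* after: the map is passed and regmap_name() resolves the name */
trace_regmap_reg_write(map, reg, val);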
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index 3c53eec4ae22..19c66fcbab8a 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h | |||
@@ -60,7 +60,7 @@ struct virtio_blk_config { | |||
60 | __u32 size_max; | 60 | __u32 size_max; |
61 | /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ | 61 | /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ |
62 | __u32 seg_max; | 62 | __u32 seg_max; |
63 | /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */ | 63 | /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */ |
64 | struct virtio_blk_geometry { | 64 | struct virtio_blk_geometry { |
65 | __u16 cylinders; | 65 | __u16 cylinders; |
66 | __u8 heads; | 66 | __u8 heads; |
@@ -119,7 +119,11 @@ struct virtio_blk_config { | |||
119 | #define VIRTIO_BLK_T_BARRIER 0x80000000 | 119 | #define VIRTIO_BLK_T_BARRIER 0x80000000 |
120 | #endif /* !VIRTIO_BLK_NO_LEGACY */ | 120 | #endif /* !VIRTIO_BLK_NO_LEGACY */ |
121 | 121 | ||
122 | /* This is the first element of the read scatter-gather list. */ | 122 | /* |
123 | * This comes first in the read scatter-gather list. | ||
124 | * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, | ||
125 | * this is the first element of the read scatter-gather list. | ||
126 | */ | ||
123 | struct virtio_blk_outhdr { | 127 | struct virtio_blk_outhdr { |
124 | /* VIRTIO_BLK_T* */ | 128 | /* VIRTIO_BLK_T* */ |
125 | __virtio32 type; | 129 | __virtio32 type; |
diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h index 42b9370771b0..cc18ef8825c0 100644 --- a/include/uapi/linux/virtio_scsi.h +++ b/include/uapi/linux/virtio_scsi.h | |||
@@ -29,8 +29,16 @@ | |||
29 | 29 | ||
30 | #include <linux/virtio_types.h> | 30 | #include <linux/virtio_types.h> |
31 | 31 | ||
32 | #define VIRTIO_SCSI_CDB_SIZE 32 | 32 | /* Default values of the CDB and sense data size configuration fields */ |
33 | #define VIRTIO_SCSI_SENSE_SIZE 96 | 33 | #define VIRTIO_SCSI_CDB_DEFAULT_SIZE 32 |
34 | #define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96 | ||
35 | |||
36 | #ifndef VIRTIO_SCSI_CDB_SIZE | ||
37 | #define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE | ||
38 | #endif | ||
39 | #ifndef VIRTIO_SCSI_SENSE_SIZE | ||
40 | #define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE | ||
41 | #endif | ||
34 | 42 | ||
35 | /* SCSI command request, followed by data-out */ | 43 | /* SCSI command request, followed by data-out */ |
36 | struct virtio_scsi_cmd_req { | 44 | struct virtio_scsi_cmd_req { |
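Turning the fixed CDB and sense sizes into overridable defaults lets a consumer pick its own values simply by defining them before the include; the #ifndef guards then leave the caller's choice in place, while everyone else keeps 32 and 96. A hedged usage sketch (the 64/128 values are arbitrary):

/* Override the sizes before the header is pulled in. */
#define VIRTIO_SCSI_CDB_SIZE	64
#define VIRTIO_SCSI_SENSE_SIZE	128
#include <linux/virtio_scsi.h>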
diff --git a/include/video/omapdss.h b/include/video/omapdss.h index 60de61fea8e3..c8ed15daad02 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h | |||
@@ -689,6 +689,7 @@ struct omapdss_dsi_ops { | |||
689 | }; | 689 | }; |
690 | 690 | ||
691 | struct omap_dss_device { | 691 | struct omap_dss_device { |
692 | struct kobject kobj; | ||
692 | struct device *dev; | 693 | struct device *dev; |
693 | 694 | ||
694 | struct module *owner; | 695 | struct module *owner; |
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index b78f21caf55a..b0f1c9e5d687 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h | |||
@@ -114,9 +114,9 @@ int __must_check __xenbus_register_backend(struct xenbus_driver *drv, | |||
114 | const char *mod_name); | 114 | const char *mod_name); |
115 | 115 | ||
116 | #define xenbus_register_frontend(drv) \ | 116 | #define xenbus_register_frontend(drv) \ |
117 | __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); | 117 | __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME) |
118 | #define xenbus_register_backend(drv) \ | 118 | #define xenbus_register_backend(drv) \ |
119 | __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); | 119 | __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME) |
120 | 120 | ||
121 | void xenbus_unregister_driver(struct xenbus_driver *drv); | 121 | void xenbus_unregister_driver(struct xenbus_driver *drv); |
122 | 122 | ||
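Dropping the trailing semicolons from the xenbus_register_frontend()/xenbus_register_backend() macros is standard function-like macro hygiene: with them, every expansion leaves an empty statement behind, which breaks an unbraced if/else around the call. A hedged illustration; foo_register() is hypothetical:

#include <xen/xenbus.h>

static int foo_register(struct xenbus_driver *drv, bool frontend)
{
	/*
	 * With the old trailing semicolons this expanded to
	 * "return __xenbus_register_frontend(...);;", and the stray empty
	 * statement detached the else from its if, failing to compile.
	 */
	if (frontend)
		return xenbus_register_frontend(drv);
	else
		return xenbus_register_backend(drv);
}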
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 1d1fe9361d29..fc7f4748d34a 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr, | |||
548 | 548 | ||
549 | rcu_read_lock(); | 549 | rcu_read_lock(); |
550 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { | 550 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
551 | if (cp == root_cs) | ||
552 | continue; | ||
553 | |||
554 | /* skip the whole subtree if @cp doesn't have any CPU */ | 551 | /* skip the whole subtree if @cp doesn't have any CPU */ |
555 | if (cpumask_empty(cp->cpus_allowed)) { | 552 | if (cpumask_empty(cp->cpus_allowed)) { |
556 | pos_css = css_rightmost_descendant(pos_css); | 553 | pos_css = css_rightmost_descendant(pos_css); |
@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) | |||
873 | * If it becomes empty, inherit the effective mask of the | 870 | * If it becomes empty, inherit the effective mask of the |
874 | * parent, which is guaranteed to have some CPUs. | 871 | * parent, which is guaranteed to have some CPUs. |
875 | */ | 872 | */ |
876 | if (cpumask_empty(new_cpus)) | 873 | if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus)) |
877 | cpumask_copy(new_cpus, parent->effective_cpus); | 874 | cpumask_copy(new_cpus, parent->effective_cpus); |
878 | 875 | ||
879 | /* Skip the whole subtree if the cpumask remains the same. */ | 876 | /* Skip the whole subtree if the cpumask remains the same. */ |
@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) | |||
1129 | * If it becomes empty, inherit the effective mask of the | 1126 | * If it becomes empty, inherit the effective mask of the |
1130 | * parent, which is guaranteed to have some MEMs. | 1127 | * parent, which is guaranteed to have some MEMs. |
1131 | */ | 1128 | */ |
1132 | if (nodes_empty(*new_mems)) | 1129 | if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems)) |
1133 | *new_mems = parent->effective_mems; | 1130 | *new_mems = parent->effective_mems; |
1134 | 1131 | ||
1135 | /* Skip the whole subtree if the nodemask remains the same. */ | 1132 | /* Skip the whole subtree if the nodemask remains the same. */ |
@@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) | |||
1979 | 1976 | ||
1980 | spin_lock_irq(&callback_lock); | 1977 | spin_lock_irq(&callback_lock); |
1981 | cs->mems_allowed = parent->mems_allowed; | 1978 | cs->mems_allowed = parent->mems_allowed; |
1979 | cs->effective_mems = parent->mems_allowed; | ||
1982 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); | 1980 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
1981 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); | ||
1983 | spin_unlock_irq(&callback_lock); | 1982 | spin_unlock_irq(&callback_lock); |
1984 | out_unlock: | 1983 | out_unlock: |
1985 | mutex_unlock(&cpuset_mutex); | 1984 | mutex_unlock(&cpuset_mutex); |
diff --git a/kernel/events/core.c b/kernel/events/core.c index f04daabfd1cf..2fabc0627165 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -3591,7 +3591,7 @@ static void put_event(struct perf_event *event) | |||
3591 | ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); | 3591 | ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); |
3592 | WARN_ON_ONCE(ctx->parent_ctx); | 3592 | WARN_ON_ONCE(ctx->parent_ctx); |
3593 | perf_remove_from_context(event, true); | 3593 | perf_remove_from_context(event, true); |
3594 | mutex_unlock(&ctx->mutex); | 3594 | perf_event_ctx_unlock(event, ctx); |
3595 | 3595 | ||
3596 | _free_event(event); | 3596 | _free_event(event); |
3597 | } | 3597 | } |
@@ -4574,6 +4574,13 @@ static void perf_pending_event(struct irq_work *entry) | |||
4574 | { | 4574 | { |
4575 | struct perf_event *event = container_of(entry, | 4575 | struct perf_event *event = container_of(entry, |
4576 | struct perf_event, pending); | 4576 | struct perf_event, pending); |
4577 | int rctx; | ||
4578 | |||
4579 | rctx = perf_swevent_get_recursion_context(); | ||
4580 | /* | ||
4581 | * If we 'fail' here, that's OK, it means recursion is already disabled | ||
4582 | * and we won't recurse 'further'. | ||
4583 | */ | ||
4577 | 4584 | ||
4578 | if (event->pending_disable) { | 4585 | if (event->pending_disable) { |
4579 | event->pending_disable = 0; | 4586 | event->pending_disable = 0; |
@@ -4584,6 +4591,9 @@ static void perf_pending_event(struct irq_work *entry) | |||
4584 | event->pending_wakeup = 0; | 4591 | event->pending_wakeup = 0; |
4585 | perf_event_wakeup(event); | 4592 | perf_event_wakeup(event); |
4586 | } | 4593 | } |
4594 | |||
4595 | if (rctx >= 0) | ||
4596 | perf_swevent_put_recursion_context(rctx); | ||
4587 | } | 4597 | } |
4588 | 4598 | ||
4589 | /* | 4599 | /* |
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 01ca08804f51..3f9f1d6b4c2e 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c | |||
@@ -89,16 +89,28 @@ static bool klp_is_object_loaded(struct klp_object *obj) | |||
89 | /* sets obj->mod if object is not vmlinux and module is found */ | 89 | /* sets obj->mod if object is not vmlinux and module is found */ |
90 | static void klp_find_object_module(struct klp_object *obj) | 90 | static void klp_find_object_module(struct klp_object *obj) |
91 | { | 91 | { |
92 | struct module *mod; | ||
93 | |||
92 | if (!klp_is_module(obj)) | 94 | if (!klp_is_module(obj)) |
93 | return; | 95 | return; |
94 | 96 | ||
95 | mutex_lock(&module_mutex); | 97 | mutex_lock(&module_mutex); |
96 | /* | 98 | /* |
97 | * We don't need to take a reference on the module here because we have | 99 | * We do not want to block removal of patched modules and therefore |
98 | * the klp_mutex, which is also taken by the module notifier. This | 100 | * we do not take a reference here. The patches are removed by |
99 | * prevents any module from unloading until we release the klp_mutex. | 101 | * a going module handler instead. |
102 | */ | ||
103 | mod = find_module(obj->name); | ||
104 | /* | ||
105 | * Do not mess work of the module coming and going notifiers. | ||
106 | * Note that the patch might still be needed before the going handler | ||
107 | * is called. Module functions can be called even in the GOING state | ||
108 | * until mod->exit() finishes. This is especially important for | ||
109 | * patches that modify semantic of the functions. | ||
100 | */ | 110 | */ |
101 | obj->mod = find_module(obj->name); | 111 | if (mod && mod->klp_alive) |
112 | obj->mod = mod; | ||
113 | |||
102 | mutex_unlock(&module_mutex); | 114 | mutex_unlock(&module_mutex); |
103 | } | 115 | } |
104 | 116 | ||
@@ -767,6 +779,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) | |||
767 | return -EINVAL; | 779 | return -EINVAL; |
768 | 780 | ||
769 | obj->state = KLP_DISABLED; | 781 | obj->state = KLP_DISABLED; |
782 | obj->mod = NULL; | ||
770 | 783 | ||
771 | klp_find_object_module(obj); | 784 | klp_find_object_module(obj); |
772 | 785 | ||
@@ -961,6 +974,15 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action, | |||
961 | 974 | ||
962 | mutex_lock(&klp_mutex); | 975 | mutex_lock(&klp_mutex); |
963 | 976 | ||
977 | /* | ||
978 | * Each module has to know that the notifier has been called. | ||
979 | * We never know what module will get patched by a new patch. | ||
980 | */ | ||
981 | if (action == MODULE_STATE_COMING) | ||
982 | mod->klp_alive = true; | ||
983 | else /* MODULE_STATE_GOING */ | ||
984 | mod->klp_alive = false; | ||
985 | |||
964 | list_for_each_entry(patch, &klp_patches, list) { | 986 | list_for_each_entry(patch, &klp_patches, list) { |
965 | for (obj = patch->objs; obj->funcs; obj++) { | 987 | for (obj = patch->objs; obj->funcs; obj++) { |
966 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) | 988 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 88d0d4420ad2..ba77ab5f64dd 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class) | |||
633 | if (!new_class->name) | 633 | if (!new_class->name) |
634 | return 0; | 634 | return 0; |
635 | 635 | ||
636 | list_for_each_entry(class, &all_lock_classes, lock_entry) { | 636 | list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) { |
637 | if (new_class->key - new_class->subclass == class->key) | 637 | if (new_class->key - new_class->subclass == class->key) |
638 | return class->name_version; | 638 | return class->name_version; |
639 | if (class->name && !strcmp(class->name, new_class->name)) | 639 | if (class->name && !strcmp(class->name, new_class->name)) |
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
700 | hash_head = classhashentry(key); | 700 | hash_head = classhashentry(key); |
701 | 701 | ||
702 | /* | 702 | /* |
703 | * We can walk the hash lockfree, because the hash only | 703 | * We do an RCU walk of the hash, see lockdep_free_key_range(). |
704 | * grows, and we are careful when adding entries to the end: | ||
705 | */ | 704 | */ |
706 | list_for_each_entry(class, hash_head, hash_entry) { | 705 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
706 | return NULL; | ||
707 | |||
708 | list_for_each_entry_rcu(class, hash_head, hash_entry) { | ||
707 | if (class->key == key) { | 709 | if (class->key == key) { |
708 | /* | 710 | /* |
709 | * Huh! same key, different name? Did someone trample | 711 | * Huh! same key, different name? Did someone trample |
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
728 | struct lockdep_subclass_key *key; | 730 | struct lockdep_subclass_key *key; |
729 | struct list_head *hash_head; | 731 | struct list_head *hash_head; |
730 | struct lock_class *class; | 732 | struct lock_class *class; |
731 | unsigned long flags; | 733 | |
734 | DEBUG_LOCKS_WARN_ON(!irqs_disabled()); | ||
732 | 735 | ||
733 | class = look_up_lock_class(lock, subclass); | 736 | class = look_up_lock_class(lock, subclass); |
734 | if (likely(class)) | 737 | if (likely(class)) |
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
750 | key = lock->key->subkeys + subclass; | 753 | key = lock->key->subkeys + subclass; |
751 | hash_head = classhashentry(key); | 754 | hash_head = classhashentry(key); |
752 | 755 | ||
753 | raw_local_irq_save(flags); | ||
754 | if (!graph_lock()) { | 756 | if (!graph_lock()) { |
755 | raw_local_irq_restore(flags); | ||
756 | return NULL; | 757 | return NULL; |
757 | } | 758 | } |
758 | /* | 759 | /* |
759 | * We have to do the hash-walk again, to avoid races | 760 | * We have to do the hash-walk again, to avoid races |
760 | * with another CPU: | 761 | * with another CPU: |
761 | */ | 762 | */ |
762 | list_for_each_entry(class, hash_head, hash_entry) | 763 | list_for_each_entry_rcu(class, hash_head, hash_entry) { |
763 | if (class->key == key) | 764 | if (class->key == key) |
764 | goto out_unlock_set; | 765 | goto out_unlock_set; |
766 | } | ||
767 | |||
765 | /* | 768 | /* |
766 | * Allocate a new key from the static array, and add it to | 769 | * Allocate a new key from the static array, and add it to |
767 | * the hash: | 770 | * the hash: |
768 | */ | 771 | */ |
769 | if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { | 772 | if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { |
770 | if (!debug_locks_off_graph_unlock()) { | 773 | if (!debug_locks_off_graph_unlock()) { |
771 | raw_local_irq_restore(flags); | ||
772 | return NULL; | 774 | return NULL; |
773 | } | 775 | } |
774 | raw_local_irq_restore(flags); | ||
775 | 776 | ||
776 | print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); | 777 | print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); |
777 | dump_stack(); | 778 | dump_stack(); |
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
798 | 799 | ||
799 | if (verbose(class)) { | 800 | if (verbose(class)) { |
800 | graph_unlock(); | 801 | graph_unlock(); |
801 | raw_local_irq_restore(flags); | ||
802 | 802 | ||
803 | printk("\nnew class %p: %s", class->key, class->name); | 803 | printk("\nnew class %p: %s", class->key, class->name); |
804 | if (class->name_version > 1) | 804 | if (class->name_version > 1) |
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
806 | printk("\n"); | 806 | printk("\n"); |
807 | dump_stack(); | 807 | dump_stack(); |
808 | 808 | ||
809 | raw_local_irq_save(flags); | ||
810 | if (!graph_lock()) { | 809 | if (!graph_lock()) { |
811 | raw_local_irq_restore(flags); | ||
812 | return NULL; | 810 | return NULL; |
813 | } | 811 | } |
814 | } | 812 | } |
815 | out_unlock_set: | 813 | out_unlock_set: |
816 | graph_unlock(); | 814 | graph_unlock(); |
817 | raw_local_irq_restore(flags); | ||
818 | 815 | ||
819 | out_set_class_cache: | 816 | out_set_class_cache: |
820 | if (!subclass || force) | 817 | if (!subclass || force) |
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | |||
870 | entry->distance = distance; | 867 | entry->distance = distance; |
871 | entry->trace = *trace; | 868 | entry->trace = *trace; |
872 | /* | 869 | /* |
873 | * Since we never remove from the dependency list, the list can | 870 | * Both allocation and removal are done under the graph lock; but |
874 | * be walked lockless by other CPUs, it's only allocation | 871 | * iteration is under RCU-sched; see look_up_lock_class() and |
875 | * that must be protected by the spinlock. But this also means | 872 | * lockdep_free_key_range(). |
876 | * we must make new entries visible only once writes to the | ||
877 | * entry become visible - hence the RCU op: | ||
878 | */ | 873 | */ |
879 | list_add_tail_rcu(&entry->entry, head); | 874 | list_add_tail_rcu(&entry->entry, head); |
880 | 875 | ||
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry, | |||
1025 | else | 1020 | else |
1026 | head = &lock->class->locks_before; | 1021 | head = &lock->class->locks_before; |
1027 | 1022 | ||
1028 | list_for_each_entry(entry, head, entry) { | 1023 | DEBUG_LOCKS_WARN_ON(!irqs_disabled()); |
1024 | |||
1025 | list_for_each_entry_rcu(entry, head, entry) { | ||
1029 | if (!lock_accessed(entry)) { | 1026 | if (!lock_accessed(entry)) { |
1030 | unsigned int cq_depth; | 1027 | unsigned int cq_depth; |
1031 | mark_lock_accessed(entry, lock); | 1028 | mark_lock_accessed(entry, lock); |
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, | |||
2022 | * We can walk it lock-free, because entries only get added | 2019 | * We can walk it lock-free, because entries only get added |
2023 | * to the hash: | 2020 | * to the hash: |
2024 | */ | 2021 | */ |
2025 | list_for_each_entry(chain, hash_head, entry) { | 2022 | list_for_each_entry_rcu(chain, hash_head, entry) { |
2026 | if (chain->chain_key == chain_key) { | 2023 | if (chain->chain_key == chain_key) { |
2027 | cache_hit: | 2024 | cache_hit: |
2028 | debug_atomic_inc(chain_lookup_hits); | 2025 | debug_atomic_inc(chain_lookup_hits); |
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
2996 | if (unlikely(!debug_locks)) | 2993 | if (unlikely(!debug_locks)) |
2997 | return; | 2994 | return; |
2998 | 2995 | ||
2999 | if (subclass) | 2996 | if (subclass) { |
2997 | unsigned long flags; | ||
2998 | |||
2999 | if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) | ||
3000 | return; | ||
3001 | |||
3002 | raw_local_irq_save(flags); | ||
3003 | current->lockdep_recursion = 1; | ||
3000 | register_lock_class(lock, subclass, 1); | 3004 | register_lock_class(lock, subclass, 1); |
3005 | current->lockdep_recursion = 0; | ||
3006 | raw_local_irq_restore(flags); | ||
3007 | } | ||
3001 | } | 3008 | } |
3002 | EXPORT_SYMBOL_GPL(lockdep_init_map); | 3009 | EXPORT_SYMBOL_GPL(lockdep_init_map); |
3003 | 3010 | ||
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size) | |||
3887 | return addr >= start && addr < start + size; | 3894 | return addr >= start && addr < start + size; |
3888 | } | 3895 | } |
3889 | 3896 | ||
3897 | /* | ||
3898 | * Used in module.c to remove lock classes from memory that is going to be | ||
3899 | * freed; and possibly re-used by other modules. | ||
3900 | * | ||
3901 | * We will have had one sync_sched() before getting here, so we're guaranteed | ||
3902 | * nobody will look up these exact classes -- they're properly dead but still | ||
3903 | * allocated. | ||
3904 | */ | ||
3890 | void lockdep_free_key_range(void *start, unsigned long size) | 3905 | void lockdep_free_key_range(void *start, unsigned long size) |
3891 | { | 3906 | { |
3892 | struct lock_class *class, *next; | 3907 | struct lock_class *class; |
3893 | struct list_head *head; | 3908 | struct list_head *head; |
3894 | unsigned long flags; | 3909 | unsigned long flags; |
3895 | int i; | 3910 | int i; |
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size) | |||
3905 | head = classhash_table + i; | 3920 | head = classhash_table + i; |
3906 | if (list_empty(head)) | 3921 | if (list_empty(head)) |
3907 | continue; | 3922 | continue; |
3908 | list_for_each_entry_safe(class, next, head, hash_entry) { | 3923 | list_for_each_entry_rcu(class, head, hash_entry) { |
3909 | if (within(class->key, start, size)) | 3924 | if (within(class->key, start, size)) |
3910 | zap_class(class); | 3925 | zap_class(class); |
3911 | else if (within(class->name, start, size)) | 3926 | else if (within(class->name, start, size)) |
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size) | |||
3916 | if (locked) | 3931 | if (locked) |
3917 | graph_unlock(); | 3932 | graph_unlock(); |
3918 | raw_local_irq_restore(flags); | 3933 | raw_local_irq_restore(flags); |
3934 | |||
3935 | /* | ||
3936 | * Wait for any possible iterators from look_up_lock_class() to pass | ||
3937 | * before continuing to free the memory they refer to. | ||
3938 | * | ||
3939 | * sync_sched() is sufficient because the read-side is IRQ disable. | ||
3940 | */ | ||
3941 | synchronize_sched(); | ||
3942 | |||
3943 | /* | ||
3944 | * XXX at this point we could return the resources to the pool; | ||
3945 | * instead we leak them. We would need to change to bitmap allocators | ||
3946 | * instead of the linear allocators we have now. | ||
3947 | */ | ||
3919 | } | 3948 | } |
3920 | 3949 | ||
3921 | void lockdep_reset_lock(struct lockdep_map *lock) | 3950 | void lockdep_reset_lock(struct lockdep_map *lock) |
3922 | { | 3951 | { |
3923 | struct lock_class *class, *next; | 3952 | struct lock_class *class; |
3924 | struct list_head *head; | 3953 | struct list_head *head; |
3925 | unsigned long flags; | 3954 | unsigned long flags; |
3926 | int i, j; | 3955 | int i, j; |
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock) | |||
3948 | head = classhash_table + i; | 3977 | head = classhash_table + i; |
3949 | if (list_empty(head)) | 3978 | if (list_empty(head)) |
3950 | continue; | 3979 | continue; |
3951 | list_for_each_entry_safe(class, next, head, hash_entry) { | 3980 | list_for_each_entry_rcu(class, head, hash_entry) { |
3952 | int match = 0; | 3981 | int match = 0; |
3953 | 3982 | ||
3954 | for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) | 3983 | for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) |
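The lockdep changes above follow one pattern: list mutation stays under the graph lock but uses the _rcu list helpers, lookups run with interrupts disabled as an RCU-sched read side, and lockdep_free_key_range() waits with synchronize_sched() before the backing memory may be reused. A generic, hedged sketch of that pattern on an invented foo_entry list, not the lockdep code itself:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_entry {
	struct list_head node;
	int key;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

static void foo_add(struct foo_entry *e)
{
	spin_lock(&foo_lock);
	list_add_tail_rcu(&e->node, &foo_list);	/* writers serialize on the lock */
	spin_unlock(&foo_lock);
}

static struct foo_entry *foo_lookup(int key)
{
	struct foo_entry *e;

	/* Caller runs with IRQs disabled, acting as the RCU-sched read side. */
	list_for_each_entry_rcu(e, &foo_list, node)
		if (e->key == key)
			return e;
	return NULL;
}

static void foo_zap(struct foo_entry *e)
{
	spin_lock(&foo_lock);
	list_del_rcu(&e->node);
	spin_unlock(&foo_lock);

	synchronize_sched();	/* wait for IRQ-disabled walkers to finish */
	kfree(e);
}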
diff --git a/kernel/module.c b/kernel/module.c index cc93cf68653c..99fdf94efce8 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -56,7 +56,6 @@ | |||
56 | #include <linux/async.h> | 56 | #include <linux/async.h> |
57 | #include <linux/percpu.h> | 57 | #include <linux/percpu.h> |
58 | #include <linux/kmemleak.h> | 58 | #include <linux/kmemleak.h> |
59 | #include <linux/kasan.h> | ||
60 | #include <linux/jump_label.h> | 59 | #include <linux/jump_label.h> |
61 | #include <linux/pfn.h> | 60 | #include <linux/pfn.h> |
62 | #include <linux/bsearch.h> | 61 | #include <linux/bsearch.h> |
@@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { } | |||
1814 | void __weak module_memfree(void *module_region) | 1813 | void __weak module_memfree(void *module_region) |
1815 | { | 1814 | { |
1816 | vfree(module_region); | 1815 | vfree(module_region); |
1817 | kasan_module_free(module_region); | ||
1818 | } | 1816 | } |
1819 | 1817 | ||
1820 | void __weak module_arch_cleanup(struct module *mod) | 1818 | void __weak module_arch_cleanup(struct module *mod) |
@@ -1867,7 +1865,7 @@ static void free_module(struct module *mod) | |||
1867 | kfree(mod->args); | 1865 | kfree(mod->args); |
1868 | percpu_modfree(mod); | 1866 | percpu_modfree(mod); |
1869 | 1867 | ||
1870 | /* Free lock-classes: */ | 1868 | /* Free lock-classes; relies on the preceding sync_rcu(). */ |
1871 | lockdep_free_key_range(mod->module_core, mod->core_size); | 1869 | lockdep_free_key_range(mod->module_core, mod->core_size); |
1872 | 1870 | ||
1873 | /* Finally, free the core (containing the module structure) */ | 1871 | /* Finally, free the core (containing the module structure) */ |
@@ -3351,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3351 | module_bug_cleanup(mod); | 3349 | module_bug_cleanup(mod); |
3352 | mutex_unlock(&module_mutex); | 3350 | mutex_unlock(&module_mutex); |
3353 | 3351 | ||
3354 | /* Free lock-classes: */ | ||
3355 | lockdep_free_key_range(mod->module_core, mod->core_size); | ||
3356 | |||
3357 | /* we can't deallocate the module until we clear memory protection */ | 3352 | /* we can't deallocate the module until we clear memory protection */ |
3358 | unset_module_init_ro_nx(mod); | 3353 | unset_module_init_ro_nx(mod); |
3359 | unset_module_core_ro_nx(mod); | 3354 | unset_module_core_ro_nx(mod); |
@@ -3377,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3377 | synchronize_rcu(); | 3372 | synchronize_rcu(); |
3378 | mutex_unlock(&module_mutex); | 3373 | mutex_unlock(&module_mutex); |
3379 | free_module: | 3374 | free_module: |
3375 | /* Free lock-classes; relies on the preceding sync_rcu() */ | ||
3376 | lockdep_free_key_range(mod->module_core, mod->core_size); | ||
3377 | |||
3380 | module_deallocate(mod, info); | 3378 | module_deallocate(mod, info); |
3381 | free_copy: | 3379 | free_copy: |
3382 | free_copy(info); | 3380 | free_copy(info); |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f0f831e8a345..62671f53202a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
3034 | } else { | 3034 | } else { |
3035 | if (dl_prio(oldprio)) | 3035 | if (dl_prio(oldprio)) |
3036 | p->dl.dl_boosted = 0; | 3036 | p->dl.dl_boosted = 0; |
3037 | if (rt_prio(oldprio)) | ||
3038 | p->rt.timeout = 0; | ||
3037 | p->sched_class = &fair_sched_class; | 3039 | p->sched_class = &fair_sched_class; |
3038 | } | 3040 | } |
3039 | 3041 | ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7ce18f3c097a..bcfe32088b37 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -1609,9 +1609,11 @@ static void update_task_scan_period(struct task_struct *p, | |||
1609 | /* | 1609 | /* |
1610 | * If there were no record hinting faults then either the task is | 1610 | * If there were no record hinting faults then either the task is |
1611 | * completely idle or all activity is areas that are not of interest | 1611 | * completely idle or all activity is areas that are not of interest |
1612 | * to automatic numa balancing. Scan slower | 1612 | * to automatic numa balancing. Related to that, if there were failed |
1613 | * migration then it implies we are migrating too quickly or the local | ||
1614 | * node is overloaded. In either case, scan slower | ||
1613 | */ | 1615 | */ |
1614 | if (local + shared == 0) { | 1616 | if (local + shared == 0 || p->numa_faults_locality[2]) { |
1615 | p->numa_scan_period = min(p->numa_scan_period_max, | 1617 | p->numa_scan_period = min(p->numa_scan_period_max, |
1616 | p->numa_scan_period << 1); | 1618 | p->numa_scan_period << 1); |
1617 | 1619 | ||
@@ -2080,6 +2082,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) | |||
2080 | 2082 | ||
2081 | if (migrated) | 2083 | if (migrated) |
2082 | p->numa_pages_migrated += pages; | 2084 | p->numa_pages_migrated += pages; |
2085 | if (flags & TNF_MIGRATE_FAIL) | ||
2086 | p->numa_faults_locality[2] += pages; | ||
2083 | 2087 | ||
2084 | p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; | 2088 | p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; |
2085 | p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; | 2089 | p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; |
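With numa_faults_locality[] grown to three slots, the extra bucket counts pages whose migration attempt failed, and update_task_scan_period() doubles the scan period (capped at the maximum) whenever it is non-zero. The flag is raised by the NUMA hinting fault path in the memory-management code; a hedged sketch of that side, with foo_handle_numa_hint_fault() and foo_migrate_misplaced_page() standing in for the real callers:

#include <linux/mm.h>
#include <linux/sched.h>

static bool foo_migrate_misplaced_page(struct page *page, int nid);	/* assumed */

static void foo_handle_numa_hint_fault(struct page *page, int target_nid,
				       int last_cpupid, int page_nid)
{
	int flags = 0;

	if (foo_migrate_misplaced_page(page, target_nid)) {
		page_nid = target_nid;
		flags |= TNF_MIGRATED;
	} else {
		flags |= TNF_MIGRATE_FAIL;	/* feeds numa_faults_locality[2] */
	}

	task_numa_fault(last_cpupid, page_nid, 1, flags);
}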
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c index eb682d5c697c..6aac4beedbbe 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c | |||
@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode, | |||
49 | */ | 49 | */ |
50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | 50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) |
51 | { | 51 | { |
52 | int bc_moved; | ||
52 | /* | 53 | /* |
53 | * We try to cancel the timer first. If the callback is on | 54 | * We try to cancel the timer first. If the callback is on |
54 | * flight on some other cpu then we let it handle it. If we | 55 | * flight on some other cpu then we let it handle it. If we |
@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | |||
60 | * restart the timer because we are in the callback, but we | 61 | * restart the timer because we are in the callback, but we |
61 | * can set the expiry time and let the callback return | 62 | * can set the expiry time and let the callback return |
62 | * HRTIMER_RESTART. | 63 | * HRTIMER_RESTART. |
64 | * | ||
65 | * Since we are in the idle loop at this point and because | ||
66 | * hrtimer_{start/cancel} functions call into tracing, | ||
67 | * calls to these functions must be bound within RCU_NONIDLE. | ||
63 | */ | 68 | */ |
64 | if (hrtimer_try_to_cancel(&bctimer) >= 0) { | 69 | RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ? |
65 | hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); | 70 | !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) : |
71 | 0); | ||
72 | if (bc_moved) { | ||
66 | /* Bind the "device" to the cpu */ | 73 | /* Bind the "device" to the cpu */ |
67 | bc->bound_on = smp_processor_id(); | 74 | bc->bound_on = smp_processor_id(); |
68 | } else if (bc->bound_on == smp_processor_id()) { | 75 | } else if (bc->bound_on == smp_processor_id()) { |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 45e5cb143d17..4f228024055b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | |||
1059 | 1059 | ||
1060 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | 1060 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; |
1061 | 1061 | ||
1062 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1063 | static int ftrace_graph_active; | ||
1064 | #else | ||
1065 | # define ftrace_graph_active 0 | ||
1066 | #endif | ||
1067 | |||
1062 | #ifdef CONFIG_DYNAMIC_FTRACE | 1068 | #ifdef CONFIG_DYNAMIC_FTRACE |
1063 | 1069 | ||
1064 | static struct ftrace_ops *removed_ops; | 1070 | static struct ftrace_ops *removed_ops; |
@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
2041 | if (!ftrace_rec_count(rec)) | 2047 | if (!ftrace_rec_count(rec)) |
2042 | rec->flags = 0; | 2048 | rec->flags = 0; |
2043 | else | 2049 | else |
2044 | /* Just disable the record (keep REGS state) */ | 2050 | /* |
2045 | rec->flags &= ~FTRACE_FL_ENABLED; | 2051 | * Just disable the record, but keep the ops TRAMP |
2052 | * and REGS states. The _EN flags must be disabled though. | ||
2053 | */ | ||
2054 | rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | | ||
2055 | FTRACE_FL_REGS_EN); | ||
2046 | } | 2056 | } |
2047 | 2057 | ||
2048 | return FTRACE_UPDATE_MAKE_NOP; | 2058 | return FTRACE_UPDATE_MAKE_NOP; |
@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2688 | 2698 | ||
2689 | static void ftrace_startup_sysctl(void) | 2699 | static void ftrace_startup_sysctl(void) |
2690 | { | 2700 | { |
2701 | int command; | ||
2702 | |||
2691 | if (unlikely(ftrace_disabled)) | 2703 | if (unlikely(ftrace_disabled)) |
2692 | return; | 2704 | return; |
2693 | 2705 | ||
2694 | /* Force update next time */ | 2706 | /* Force update next time */ |
2695 | saved_ftrace_func = NULL; | 2707 | saved_ftrace_func = NULL; |
2696 | /* ftrace_start_up is true if we want ftrace running */ | 2708 | /* ftrace_start_up is true if we want ftrace running */ |
2697 | if (ftrace_start_up) | 2709 | if (ftrace_start_up) { |
2698 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | 2710 | command = FTRACE_UPDATE_CALLS; |
2711 | if (ftrace_graph_active) | ||
2712 | command |= FTRACE_START_FUNC_RET; | ||
2713 | ftrace_startup_enable(command); | ||
2714 | } | ||
2699 | } | 2715 | } |
2700 | 2716 | ||
2701 | static void ftrace_shutdown_sysctl(void) | 2717 | static void ftrace_shutdown_sysctl(void) |
2702 | { | 2718 | { |
2719 | int command; | ||
2720 | |||
2703 | if (unlikely(ftrace_disabled)) | 2721 | if (unlikely(ftrace_disabled)) |
2704 | return; | 2722 | return; |
2705 | 2723 | ||
2706 | /* ftrace_start_up is true if ftrace is running */ | 2724 | /* ftrace_start_up is true if ftrace is running */ |
2707 | if (ftrace_start_up) | 2725 | if (ftrace_start_up) { |
2708 | ftrace_run_update_code(FTRACE_DISABLE_CALLS); | 2726 | command = FTRACE_DISABLE_CALLS; |
2727 | if (ftrace_graph_active) | ||
2728 | command |= FTRACE_STOP_FUNC_RET; | ||
2729 | ftrace_run_update_code(command); | ||
2730 | } | ||
2709 | } | 2731 | } |
2710 | 2732 | ||
2711 | static cycle_t ftrace_update_time; | 2733 | static cycle_t ftrace_update_time; |
@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
5558 | 5580 | ||
5559 | if (ftrace_enabled) { | 5581 | if (ftrace_enabled) { |
5560 | 5582 | ||
5561 | ftrace_startup_sysctl(); | ||
5562 | |||
5563 | /* we are starting ftrace again */ | 5583 | /* we are starting ftrace again */ |
5564 | if (ftrace_ops_list != &ftrace_list_end) | 5584 | if (ftrace_ops_list != &ftrace_list_end) |
5565 | update_ftrace_function(); | 5585 | update_ftrace_function(); |
5566 | 5586 | ||
5587 | ftrace_startup_sysctl(); | ||
5588 | |||
5567 | } else { | 5589 | } else { |
5568 | /* stopping ftrace calls (just send to ftrace_stub) */ | 5590 | /* stopping ftrace calls (just send to ftrace_stub) */ |
5569 | ftrace_trace_function = ftrace_stub; | 5591 | ftrace_trace_function = ftrace_stub; |
@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = { | |||
5590 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | 5612 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) |
5591 | }; | 5613 | }; |
5592 | 5614 | ||
5593 | static int ftrace_graph_active; | ||
5594 | |||
5595 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | 5615 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
5596 | { | 5616 | { |
5597 | return 0; | 5617 | return 0; |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f28849394791..41ff75b478c6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work) | |||
2728 | } | 2728 | } |
2729 | EXPORT_SYMBOL_GPL(flush_work); | 2729 | EXPORT_SYMBOL_GPL(flush_work); |
2730 | 2730 | ||
2731 | struct cwt_wait { | ||
2732 | wait_queue_t wait; | ||
2733 | struct work_struct *work; | ||
2734 | }; | ||
2735 | |||
2736 | static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) | ||
2737 | { | ||
2738 | struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); | ||
2739 | |||
2740 | if (cwait->work != key) | ||
2741 | return 0; | ||
2742 | return autoremove_wake_function(wait, mode, sync, key); | ||
2743 | } | ||
2744 | |||
2731 | static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) | 2745 | static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) |
2732 | { | 2746 | { |
2747 | static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); | ||
2733 | unsigned long flags; | 2748 | unsigned long flags; |
2734 | int ret; | 2749 | int ret; |
2735 | 2750 | ||
2736 | do { | 2751 | do { |
2737 | ret = try_to_grab_pending(work, is_dwork, &flags); | 2752 | ret = try_to_grab_pending(work, is_dwork, &flags); |
2738 | /* | 2753 | /* |
2739 | * If someone else is canceling, wait for the same event it | 2754 | * If someone else is already canceling, wait for it to |
2740 | * would be waiting for before retrying. | 2755 | * finish. flush_work() doesn't work for PREEMPT_NONE |
2756 | * because we may get scheduled between @work's completion | ||
2757 | * and the other canceling task resuming and clearing | ||
2758 | * CANCELING - flush_work() will return false immediately | ||
2759 | * as @work is no longer busy, try_to_grab_pending() will | ||
2760 | * return -ENOENT as @work is still being canceled and the | ||
2761 | * other canceling task won't be able to clear CANCELING as | ||
2762 | * we're hogging the CPU. | ||
2763 | * | ||
2764 | * Let's wait for completion using a waitqueue. As this | ||
2765 | * may lead to the thundering herd problem, use a custom | ||
2766 | * wake function which matches @work along with exclusive | ||
2767 | * wait and wakeup. | ||
2741 | */ | 2768 | */ |
2742 | if (unlikely(ret == -ENOENT)) | 2769 | if (unlikely(ret == -ENOENT)) { |
2743 | flush_work(work); | 2770 | struct cwt_wait cwait; |
2771 | |||
2772 | init_wait(&cwait.wait); | ||
2773 | cwait.wait.func = cwt_wakefn; | ||
2774 | cwait.work = work; | ||
2775 | |||
2776 | prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, | ||
2777 | TASK_UNINTERRUPTIBLE); | ||
2778 | if (work_is_canceling(work)) | ||
2779 | schedule(); | ||
2780 | finish_wait(&cancel_waitq, &cwait.wait); | ||
2781 | } | ||
2744 | } while (unlikely(ret < 0)); | 2782 | } while (unlikely(ret < 0)); |
2745 | 2783 | ||
2746 | /* tell other tasks trying to grab @work to back off */ | 2784 | /* tell other tasks trying to grab @work to back off */ |
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) | |||
2749 | 2787 | ||
2750 | flush_work(work); | 2788 | flush_work(work); |
2751 | clear_work_data(work); | 2789 | clear_work_data(work); |
2790 | |||
2791 | /* | ||
2792 | * Paired with prepare_to_wait() above so that either | ||
2793 | * waitqueue_active() is visible here or !work_is_canceling() is | ||
2794 | * visible there. | ||
2795 | */ | ||
2796 | smp_mb(); | ||
2797 | if (waitqueue_active(&cancel_waitq)) | ||
2798 | __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); | ||
2799 | |||
2752 | return ret; | 2800 | return ret; |
2753 | } | 2801 | } |
2754 | 2802 | ||
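The __cancel_work_timer() change above replaces a busy retry loop with a real sleep: a second canceler now waits on a waitqueue until the first canceler clears the CANCELING state, using an exclusive wait plus a custom wake function so only the waiter matching @work is woken. The user-space sketch below shows only the sleep-until-predicate part of that idea with a pthread condition variable; it deliberately omits the keyed, exclusive wakeup, and every name in it is made up for illustration.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cancel_done = PTHREAD_COND_INITIALIZER;
    static bool canceling = true;       /* stands in for the CANCELING bit */

    /* The thread that already owns the cancel: finish and wake waiters. */
    static void *first_canceler(void *arg)
    {
            (void)arg;
            usleep(10000);              /* pretend to flush the work item */
            pthread_mutex_lock(&lock);
            canceling = false;
            pthread_cond_broadcast(&cancel_done);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, first_canceler, NULL);

            /* Second canceler: sleep instead of spinning until the first
             * one clears the canceling state, then proceed. */
            pthread_mutex_lock(&lock);
            while (canceling)
                    pthread_cond_wait(&cancel_done, &lock);
            pthread_mutex_unlock(&lock);

            pthread_join(t, NULL);
            puts("cancel completed");
            return 0;
    }

Compile with -pthread; the waiting thread only proceeds once the flag is cleared, which is the property the kernel change restores for PREEMPT_NONE.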
diff --git a/lib/Makefile b/lib/Makefile index 87eb3bffc283..58f74d2dd396 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -24,7 +24,7 @@ obj-y += lockref.o | |||
24 | 24 | ||
25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
26 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ | 26 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ |
27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \ | 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ |
28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ | 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ |
29 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o | 29 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o |
30 | obj-y += string_helpers.o | 30 | obj-y += string_helpers.o |
diff --git a/mm/iov_iter.c b/lib/iov_iter.c index 827732047da1..9d96e283520c 100644 --- a/mm/iov_iter.c +++ b/lib/iov_iter.c | |||
@@ -751,3 +751,18 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) | |||
751 | return npages; | 751 | return npages; |
752 | } | 752 | } |
753 | EXPORT_SYMBOL(iov_iter_npages); | 753 | EXPORT_SYMBOL(iov_iter_npages); |
754 | |||
755 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) | ||
756 | { | ||
757 | *new = *old; | ||
758 | if (new->type & ITER_BVEC) | ||
759 | return new->bvec = kmemdup(new->bvec, | ||
760 | new->nr_segs * sizeof(struct bio_vec), | ||
761 | flags); | ||
762 | else | ||
763 | /* iovec and kvec have identical layout */ | ||
764 | return new->iov = kmemdup(new->iov, | ||
765 | new->nr_segs * sizeof(struct iovec), | ||
766 | flags); | ||
767 | } | ||
768 | EXPORT_SYMBOL(dup_iter); | ||
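For context, dup_iter() above copies the iterator struct by value and then kmemdup()s the segment array it points to, so the clone stays valid independently of the caller's array. A rough user-space analog of that deep copy follows; dup_iovec() is a hypothetical helper, not a kernel or libc API.

    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Clone a segment array so the copy remains usable after the
     * original array goes away. */
    static struct iovec *dup_iovec(const struct iovec *iov, size_t nr_segs)
    {
            struct iovec *copy = malloc(nr_segs * sizeof(*copy));

            if (!copy)
                    return NULL;
            memcpy(copy, iov, nr_segs * sizeof(*copy));
            return copy;
    }

    int main(void)
    {
            char hello[] = "hi";
            struct iovec one = { .iov_base = hello, .iov_len = sizeof(hello) };
            struct iovec *copy = dup_iovec(&one, 1);

            free(copy);
            return 0;
    }

The caller owns the returned array and frees it when done, much as a kernel caller of dup_iter() is expected to release the duplicated segments.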
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 7a85967060a5..f0f5c5c3de12 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c | |||
@@ -139,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize) | |||
139 | /* Error: request to write beyond destination buffer */ | 139 | /* Error: request to write beyond destination buffer */ |
140 | if (cpy > oend) | 140 | if (cpy > oend) |
141 | goto _output_error; | 141 | goto _output_error; |
142 | if ((ref + COPYLENGTH) > oend || | ||
143 | (op + COPYLENGTH) > oend) | ||
144 | goto _output_error; | ||
142 | LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); | 145 | LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); |
143 | while (op < cpy) | 146 | while (op < cpy) |
144 | *op++ = *ref++; | 147 | *op++ = *ref++; |
diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 88c0854bd752..5c94e1012a91 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c | |||
@@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) | |||
61 | 61 | ||
62 | if (s->len < s->size) { | 62 | if (s->len < s->size) { |
63 | len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); | 63 | len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); |
64 | if (seq_buf_can_fit(s, len)) { | 64 | if (s->len + len < s->size) { |
65 | s->len += len; | 65 | s->len += len; |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
@@ -118,7 +118,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) | |||
118 | 118 | ||
119 | if (s->len < s->size) { | 119 | if (s->len < s->size) { |
120 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | 120 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); |
121 | if (seq_buf_can_fit(s, ret)) { | 121 | if (s->len + ret < s->size) { |
122 | s->len += ret; | 122 | s->len += ret; |
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
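Both hunks above replace the seq_buf_can_fit() call with an open-coded strict bound, `s->len + len < s->size`, so the write only counts when the formatted text fits strictly below the buffer size. A minimal user-space sketch of the same pattern around vsnprintf(), using an invented strbuf type rather than the kernel's seq_buf:

    #include <stdarg.h>
    #include <stdio.h>

    struct strbuf {
            char   buf[64];
            size_t size;    /* capacity of buf */
            size_t len;     /* bytes used so far */
    };

    /* Append only if the formatted text fits entirely; otherwise mark the
     * buffer as full by pushing len to size, mirroring the check above. */
    static int strbuf_printf(struct strbuf *s, const char *fmt, ...)
    {
            va_list args;
            int len;

            if (s->len >= s->size)
                    return -1;
            va_start(args, fmt);
            len = vsnprintf(s->buf + s->len, s->size - s->len, fmt, args);
            va_end(args);
            if (len >= 0 && s->len + (size_t)len < s->size) {
                    s->len += len;
                    return 0;
            }
            s->len = s->size;       /* overflowed: remember the truncation */
            return -1;
    }

    int main(void)
    {
            struct strbuf s = { .size = sizeof(s.buf), .len = 0 };

            strbuf_printf(&s, "hello %d", 42);
            printf("%s (len=%zu)\n", s.buf, s.len);
            return 0;
    }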
diff --git a/mm/Makefile b/mm/Makefile index 3c1caa2693bd..15dbe9903c27 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ | |||
21 | mm_init.o mmu_context.o percpu.o slab_common.o \ | 21 | mm_init.o mmu_context.o percpu.o slab_common.o \ |
22 | compaction.o vmacache.o \ | 22 | compaction.o vmacache.o \ |
23 | interval_tree.o list_lru.o workingset.o \ | 23 | interval_tree.o list_lru.o workingset.o \ |
24 | iov_iter.o debug.o $(mmu-y) | 24 | debug.o $(mmu-y) |
25 | 25 | ||
26 | obj-y += init-mm.o | 26 | obj-y += init-mm.o |
27 | 27 | ||
diff --git a/mm/cma.c b/mm/cma.c --- a/mm/cma.c +++ b/mm/cma.c | |||
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order) | |||
64 | return (1UL << (align_order - cma->order_per_bit)) - 1; | 64 | return (1UL << (align_order - cma->order_per_bit)) - 1; |
65 | } | 65 | } |
66 | 66 | ||
67 | /* | ||
68 | * Find a PFN aligned to the specified order and return an offset represented in | ||
69 | * order_per_bits. | ||
70 | */ | ||
67 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) | 71 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) |
68 | { | 72 | { |
69 | unsigned int alignment; | ||
70 | |||
71 | if (align_order <= cma->order_per_bit) | 73 | if (align_order <= cma->order_per_bit) |
72 | return 0; | 74 | return 0; |
73 | alignment = 1UL << (align_order - cma->order_per_bit); | 75 | |
74 | return ALIGN(cma->base_pfn, alignment) - | 76 | return (ALIGN(cma->base_pfn, (1UL << align_order)) |
75 | (cma->base_pfn >> cma->order_per_bit); | 77 | - cma->base_pfn) >> cma->order_per_bit; |
76 | } | 78 | } |
77 | 79 | ||
78 | static unsigned long cma_bitmap_maxno(struct cma *cma) | 80 | static unsigned long cma_bitmap_maxno(struct cma *cma) |
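The rewritten cma_bitmap_aligned_offset() above first rounds base_pfn up to the requested alignment and only then converts the distance into bitmap granules, instead of the previous mix of page-frame and granule units. A small user-space sketch of that arithmetic; the function name and the example values are made up.

    #include <stdio.h>

    #define ALIGN(x, a)  (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

    /* Offset, in bitmap granules of 2^order_per_bit pages, from base_pfn
     * to the first PFN aligned to 2^align_order pages. */
    static unsigned long aligned_offset(unsigned long base_pfn,
                                        int order_per_bit, int align_order)
    {
            if (align_order <= order_per_bit)
                    return 0;
            return (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
                    >> order_per_bit;
    }

    int main(void)
    {
            /* base_pfn 0x2f900, one page per bit, order-10 alignment */
            printf("%lu\n", aligned_offset(0x2f900, 0, 10));
            return 0;
    }

With order_per_bit = 0 each bit covers one page, so the printed offset (768 here) is simply the number of pages between base_pfn and its next order-10 boundary.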
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fc00c8cb5a82..6817b0350c71 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1260 | int target_nid, last_cpupid = -1; | 1260 | int target_nid, last_cpupid = -1; |
1261 | bool page_locked; | 1261 | bool page_locked; |
1262 | bool migrated = false; | 1262 | bool migrated = false; |
1263 | bool was_writable; | ||
1263 | int flags = 0; | 1264 | int flags = 0; |
1264 | 1265 | ||
1265 | /* A PROT_NONE fault should not end up here */ | 1266 | /* A PROT_NONE fault should not end up here */ |
@@ -1291,12 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1291 | flags |= TNF_FAULT_LOCAL; | 1292 | flags |= TNF_FAULT_LOCAL; |
1292 | } | 1293 | } |
1293 | 1294 | ||
1294 | /* | 1295 | /* See similar comment in do_numa_page for explanation */ |
1295 | * Avoid grouping on DSO/COW pages in specific and RO pages | 1296 | if (!(vma->vm_flags & VM_WRITE)) |
1296 | * in general, RO pages shouldn't hurt as much anyway since | ||
1297 | * they can be in shared cache state. | ||
1298 | */ | ||
1299 | if (!pmd_write(pmd)) | ||
1300 | flags |= TNF_NO_GROUP; | 1297 | flags |= TNF_NO_GROUP; |
1301 | 1298 | ||
1302 | /* | 1299 | /* |
@@ -1353,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1353 | if (migrated) { | 1350 | if (migrated) { |
1354 | flags |= TNF_MIGRATED; | 1351 | flags |= TNF_MIGRATED; |
1355 | page_nid = target_nid; | 1352 | page_nid = target_nid; |
1356 | } | 1353 | } else |
1354 | flags |= TNF_MIGRATE_FAIL; | ||
1357 | 1355 | ||
1358 | goto out; | 1356 | goto out; |
1359 | clear_pmdnuma: | 1357 | clear_pmdnuma: |
1360 | BUG_ON(!PageLocked(page)); | 1358 | BUG_ON(!PageLocked(page)); |
1359 | was_writable = pmd_write(pmd); | ||
1361 | pmd = pmd_modify(pmd, vma->vm_page_prot); | 1360 | pmd = pmd_modify(pmd, vma->vm_page_prot); |
1361 | pmd = pmd_mkyoung(pmd); | ||
1362 | if (was_writable) | ||
1363 | pmd = pmd_mkwrite(pmd); | ||
1362 | set_pmd_at(mm, haddr, pmdp, pmd); | 1364 | set_pmd_at(mm, haddr, pmdp, pmd); |
1363 | update_mmu_cache_pmd(vma, addr, pmdp); | 1365 | update_mmu_cache_pmd(vma, addr, pmdp); |
1364 | unlock_page(page); | 1366 | unlock_page(page); |
@@ -1482,6 +1484,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1482 | 1484 | ||
1483 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { | 1485 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { |
1484 | pmd_t entry; | 1486 | pmd_t entry; |
1487 | bool preserve_write = prot_numa && pmd_write(*pmd); | ||
1488 | ret = 1; | ||
1485 | 1489 | ||
1486 | /* | 1490 | /* |
1487 | * Avoid trapping faults against the zero page. The read-only | 1491 | * Avoid trapping faults against the zero page. The read-only |
@@ -1490,16 +1494,17 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1490 | */ | 1494 | */ |
1491 | if (prot_numa && is_huge_zero_pmd(*pmd)) { | 1495 | if (prot_numa && is_huge_zero_pmd(*pmd)) { |
1492 | spin_unlock(ptl); | 1496 | spin_unlock(ptl); |
1493 | return 0; | 1497 | return ret; |
1494 | } | 1498 | } |
1495 | 1499 | ||
1496 | if (!prot_numa || !pmd_protnone(*pmd)) { | 1500 | if (!prot_numa || !pmd_protnone(*pmd)) { |
1497 | ret = 1; | ||
1498 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); | 1501 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); |
1499 | entry = pmd_modify(entry, newprot); | 1502 | entry = pmd_modify(entry, newprot); |
1503 | if (preserve_write) | ||
1504 | entry = pmd_mkwrite(entry); | ||
1500 | ret = HPAGE_PMD_NR; | 1505 | ret = HPAGE_PMD_NR; |
1501 | set_pmd_at(mm, addr, pmd, entry); | 1506 | set_pmd_at(mm, addr, pmd, entry); |
1502 | BUG_ON(pmd_write(entry)); | 1507 | BUG_ON(!preserve_write && pmd_write(entry)); |
1503 | } | 1508 | } |
1504 | spin_unlock(ptl); | 1509 | spin_unlock(ptl); |
1505 | } | 1510 | } |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0a9ac6c26832..c41b2a0ee273 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) | |||
917 | __SetPageHead(page); | 917 | __SetPageHead(page); |
918 | __ClearPageReserved(page); | 918 | __ClearPageReserved(page); |
919 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { | 919 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { |
920 | __SetPageTail(p); | ||
921 | /* | 920 | /* |
922 | * For gigantic hugepages allocated through bootmem at | 921 | * For gigantic hugepages allocated through bootmem at |
923 | * boot, it's safer to be consistent with the not-gigantic | 922 | * boot, it's safer to be consistent with the not-gigantic |
@@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) | |||
933 | __ClearPageReserved(p); | 932 | __ClearPageReserved(p); |
934 | set_page_count(p, 0); | 933 | set_page_count(p, 0); |
935 | p->first_page = page; | 934 | p->first_page = page; |
935 | /* Make sure p->first_page is always valid for PageTail() */ | ||
936 | smp_wmb(); | ||
937 | __SetPageTail(p); | ||
936 | } | 938 | } |
937 | } | 939 | } |
938 | 940 | ||
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 78fee632a7ee..936d81661c47 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/stacktrace.h> | 29 | #include <linux/stacktrace.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
32 | #include <linux/vmalloc.h> | ||
32 | #include <linux/kasan.h> | 33 | #include <linux/kasan.h> |
33 | 34 | ||
34 | #include "kasan.h" | 35 | #include "kasan.h" |
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size) | |||
414 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | 415 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, |
415 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, | 416 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, |
416 | __builtin_return_address(0)); | 417 | __builtin_return_address(0)); |
417 | return ret ? 0 : -ENOMEM; | 418 | |
419 | if (ret) { | ||
420 | find_vm_area(addr)->flags |= VM_KASAN; | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | return -ENOMEM; | ||
418 | } | 425 | } |
419 | 426 | ||
420 | void kasan_module_free(void *addr) | 427 | void kasan_free_shadow(const struct vm_struct *vm) |
421 | { | 428 | { |
422 | vfree(kasan_mem_to_shadow(addr)); | 429 | if (vm->flags & VM_KASAN) |
430 | vfree(kasan_mem_to_shadow(vm->addr)); | ||
423 | } | 431 | } |
424 | 432 | ||
425 | static void register_global(struct kasan_global *global) | 433 | static void register_global(struct kasan_global *global) |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9fe07692eaad..b34ef4a32a3b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) | |||
5232 | * on for the root memcg is enough. | 5232 | * on for the root memcg is enough. |
5233 | */ | 5233 | */ |
5234 | if (cgroup_on_dfl(root_css->cgroup)) | 5234 | if (cgroup_on_dfl(root_css->cgroup)) |
5235 | mem_cgroup_from_css(root_css)->use_hierarchy = true; | 5235 | root_mem_cgroup->use_hierarchy = true; |
5236 | else | ||
5237 | root_mem_cgroup->use_hierarchy = false; | ||
5236 | } | 5238 | } |
5237 | 5239 | ||
5238 | static u64 memory_current_read(struct cgroup_subsys_state *css, | 5240 | static u64 memory_current_read(struct cgroup_subsys_state *css, |
diff --git a/mm/memory.c b/mm/memory.c index 8068893697bb..97839f5c8c30 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3035 | int last_cpupid; | 3035 | int last_cpupid; |
3036 | int target_nid; | 3036 | int target_nid; |
3037 | bool migrated = false; | 3037 | bool migrated = false; |
3038 | bool was_writable = pte_write(pte); | ||
3038 | int flags = 0; | 3039 | int flags = 0; |
3039 | 3040 | ||
3040 | /* A PROT_NONE fault should not end up here */ | 3041 | /* A PROT_NONE fault should not end up here */ |
@@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3059 | /* Make it present again */ | 3060 | /* Make it present again */ |
3060 | pte = pte_modify(pte, vma->vm_page_prot); | 3061 | pte = pte_modify(pte, vma->vm_page_prot); |
3061 | pte = pte_mkyoung(pte); | 3062 | pte = pte_mkyoung(pte); |
3063 | if (was_writable) | ||
3064 | pte = pte_mkwrite(pte); | ||
3062 | set_pte_at(mm, addr, ptep, pte); | 3065 | set_pte_at(mm, addr, ptep, pte); |
3063 | update_mmu_cache(vma, addr, ptep); | 3066 | update_mmu_cache(vma, addr, ptep); |
3064 | 3067 | ||
@@ -3069,11 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3069 | } | 3072 | } |
3070 | 3073 | ||
3071 | /* | 3074 | /* |
3072 | * Avoid grouping on DSO/COW pages in specific and RO pages | 3075 | * Avoid grouping on RO pages in general. RO pages shouldn't hurt as |
3073 | * in general, RO pages shouldn't hurt as much anyway since | 3076 | * much anyway since they can be in shared cache state. This misses |
3074 | * they can be in shared cache state. | 3077 | * the case where a mapping is writable but the process never writes |
3078 | * to it but pte_write gets cleared during protection updates and | ||
3079 | * pte_dirty has unpredictable behaviour between PTE scan updates, | ||
3080 | * background writeback, dirty balancing and application behaviour. | ||
3075 | */ | 3081 | */ |
3076 | if (!pte_write(pte)) | 3082 | if (!(vma->vm_flags & VM_WRITE)) |
3077 | flags |= TNF_NO_GROUP; | 3083 | flags |= TNF_NO_GROUP; |
3078 | 3084 | ||
3079 | /* | 3085 | /* |
@@ -3097,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3097 | if (migrated) { | 3103 | if (migrated) { |
3098 | page_nid = target_nid; | 3104 | page_nid = target_nid; |
3099 | flags |= TNF_MIGRATED; | 3105 | flags |= TNF_MIGRATED; |
3100 | } | 3106 | } else |
3107 | flags |= TNF_MIGRATE_FAIL; | ||
3101 | 3108 | ||
3102 | out: | 3109 | out: |
3103 | if (page_nid != -1) | 3110 | if (page_nid != -1) |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9fab10795bea..65842d688b7c 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) | |||
1092 | return NULL; | 1092 | return NULL; |
1093 | 1093 | ||
1094 | arch_refresh_nodedata(nid, pgdat); | 1094 | arch_refresh_nodedata(nid, pgdat); |
1095 | } else { | ||
1096 | /* Reset the nr_zones and classzone_idx to 0 before reuse */ | ||
1097 | pgdat->nr_zones = 0; | ||
1098 | pgdat->classzone_idx = 0; | ||
1095 | } | 1099 | } |
1096 | 1100 | ||
1097 | /* we can use NODE_DATA(nid) from here */ | 1101 | /* we can use NODE_DATA(nid) from here */ |
@@ -1977,15 +1981,6 @@ void try_offline_node(int nid) | |||
1977 | if (is_vmalloc_addr(zone->wait_table)) | 1981 | if (is_vmalloc_addr(zone->wait_table)) |
1978 | vfree(zone->wait_table); | 1982 | vfree(zone->wait_table); |
1979 | } | 1983 | } |
1980 | |||
1981 | /* | ||
1982 | * Since there is no way to guarentee the address of pgdat/zone is not | ||
1983 | * on stack of any kernel threads or used by other kernel objects | ||
1984 | * without reference counting or other symchronizing method, do not | ||
1985 | * reset node_data and free pgdat here. Just reset it to 0 and reuse | ||
1986 | * the memory when the node is online again. | ||
1987 | */ | ||
1988 | memset(pgdat, 0, sizeof(*pgdat)); | ||
1989 | } | 1984 | } |
1990 | EXPORT_SYMBOL(try_offline_node); | 1985 | EXPORT_SYMBOL(try_offline_node); |
1991 | 1986 | ||
diff --git a/mm/mlock.c b/mm/mlock.c index 73cf0987088c..8a54cd214925 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -26,10 +26,10 @@ | |||
26 | 26 | ||
27 | int can_do_mlock(void) | 27 | int can_do_mlock(void) |
28 | { | 28 | { |
29 | if (capable(CAP_IPC_LOCK)) | ||
30 | return 1; | ||
31 | if (rlimit(RLIMIT_MEMLOCK) != 0) | 29 | if (rlimit(RLIMIT_MEMLOCK) != 0) |
32 | return 1; | 30 | return 1; |
31 | if (capable(CAP_IPC_LOCK)) | ||
32 | return 1; | ||
33 | return 0; | 33 | return 0; |
34 | } | 34 | } |
35 | EXPORT_SYMBOL(can_do_mlock); | 35 | EXPORT_SYMBOL(can_do_mlock); |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -774,10 +774,8 @@ again: remove_next = 1 + (end > next->vm_end); | |||
774 | 774 | ||
775 | importer->anon_vma = exporter->anon_vma; | 775 | importer->anon_vma = exporter->anon_vma; |
776 | error = anon_vma_clone(importer, exporter); | 776 | error = anon_vma_clone(importer, exporter); |
777 | if (error) { | 777 | if (error) |
778 | importer->anon_vma = NULL; | ||
779 | return error; | 778 | return error; |
780 | } | ||
781 | } | 779 | } |
782 | } | 780 | } |
783 | 781 | ||
diff --git a/mm/mprotect.c b/mm/mprotect.c index 44727811bf4c..88584838e704 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
@@ -75,6 +75,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
75 | oldpte = *pte; | 75 | oldpte = *pte; |
76 | if (pte_present(oldpte)) { | 76 | if (pte_present(oldpte)) { |
77 | pte_t ptent; | 77 | pte_t ptent; |
78 | bool preserve_write = prot_numa && pte_write(oldpte); | ||
78 | 79 | ||
79 | /* | 80 | /* |
80 | * Avoid trapping faults against the zero or KSM | 81 | * Avoid trapping faults against the zero or KSM |
@@ -94,6 +95,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
94 | 95 | ||
95 | ptent = ptep_modify_prot_start(mm, addr, pte); | 96 | ptent = ptep_modify_prot_start(mm, addr, pte); |
96 | ptent = pte_modify(ptent, newprot); | 97 | ptent = pte_modify(ptent, newprot); |
98 | if (preserve_write) | ||
99 | ptent = pte_mkwrite(ptent); | ||
97 | 100 | ||
98 | /* Avoid taking write faults for known dirty pages */ | 101 | /* Avoid taking write faults for known dirty pages */ |
99 | if (dirty_accountable && pte_dirty(ptent) && | 102 | if (dirty_accountable && pte_dirty(ptent) && |
diff --git a/mm/nommu.c b/mm/nommu.c index 3e67e7538ecf..3fba2dc97c44 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -62,6 +62,7 @@ void *high_memory; | |||
62 | EXPORT_SYMBOL(high_memory); | 62 | EXPORT_SYMBOL(high_memory); |
63 | struct page *mem_map; | 63 | struct page *mem_map; |
64 | unsigned long max_mapnr; | 64 | unsigned long max_mapnr; |
65 | EXPORT_SYMBOL(max_mapnr); | ||
65 | unsigned long highest_memmap_pfn; | 66 | unsigned long highest_memmap_pfn; |
66 | struct percpu_counter vm_committed_as; | 67 | struct percpu_counter vm_committed_as; |
67 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | 68 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 45e187b2d971..644bcb665773 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, | |||
857 | * bw * elapsed + write_bandwidth * (period - elapsed) | 857 | * bw * elapsed + write_bandwidth * (period - elapsed) |
858 | * write_bandwidth = --------------------------------------------------- | 858 | * write_bandwidth = --------------------------------------------------- |
859 | * period | 859 | * period |
860 | * | ||
861 | * @written may have decreased due to account_page_redirty(). | ||
862 | * Avoid underflowing @bw calculation. | ||
860 | */ | 863 | */ |
861 | bw = written - bdi->written_stamp; | 864 | bw = written - min(written, bdi->written_stamp); |
862 | bw *= HZ; | 865 | bw *= HZ; |
863 | if (unlikely(elapsed > period)) { | 866 | if (unlikely(elapsed > period)) { |
864 | do_div(bw, elapsed); | 867 | do_div(bw, elapsed); |
@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh, | |||
922 | unsigned long now) | 925 | unsigned long now) |
923 | { | 926 | { |
924 | static DEFINE_SPINLOCK(dirty_lock); | 927 | static DEFINE_SPINLOCK(dirty_lock); |
925 | static unsigned long update_time; | 928 | static unsigned long update_time = INITIAL_JIFFIES; |
926 | 929 | ||
927 | /* | 930 | /* |
928 | * check locklessly first to optimize away locking for the most time | 931 | * check locklessly first to optimize away locking for the most time |
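The bandwidth fix above guards against `written` having moved backwards (the added comment notes account_page_redirty() can decrease it); subtracting a larger unsigned value would wrap around to a huge bogus bandwidth sample. A tiny user-space demonstration of the clamp, with arbitrary numbers:

    #include <stdio.h>

    #define min(a, b)  ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned long written = 1000, written_stamp = 1010;

            /* Naive subtraction wraps around to a huge value ... */
            unsigned long naive = written - written_stamp;
            /* ... clamping first keeps the bandwidth sample at zero. */
            unsigned long clamped = written - min(written, written_stamp);

            printf("naive=%lu clamped=%lu\n", naive, clamped);
            return 0;
    }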
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7abfa70cdc1a..40e29429e7b0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2373,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, | |||
2373 | goto out; | 2373 | goto out; |
2374 | } | 2374 | } |
2375 | /* Exhausted what can be done so it's blamo time */ | 2375 | /* Exhausted what can be done so it's blamo time */ |
2376 | if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)) | 2376 | if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) |
2377 | || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) | ||
2377 | *did_some_progress = 1; | 2378 | *did_some_progress = 1; |
2378 | out: | 2379 | out: |
2379 | oom_zonelist_unlock(ac->zonelist, gfp_mask); | 2380 | oom_zonelist_unlock(ac->zonelist, gfp_mask); |
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 72f5ac381ab3..755a42c76eb4 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c | |||
@@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) | |||
103 | 103 | ||
104 | if (!is_migrate_isolate_page(buddy)) { | 104 | if (!is_migrate_isolate_page(buddy)) { |
105 | __isolate_free_page(page, order); | 105 | __isolate_free_page(page, order); |
106 | kernel_map_pages(page, (1 << order), 1); | ||
106 | set_page_refcounted(page); | 107 | set_page_refcounted(page); |
107 | isolated_page = page; | 108 | isolated_page = page; |
108 | } | 109 | } |
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 75c1f2878519..29f2f8b853ae 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
@@ -265,8 +265,15 @@ int walk_page_range(unsigned long start, unsigned long end, | |||
265 | vma = vma->vm_next; | 265 | vma = vma->vm_next; |
266 | 266 | ||
267 | err = walk_page_test(start, next, walk); | 267 | err = walk_page_test(start, next, walk); |
268 | if (err > 0) | 268 | if (err > 0) { |
269 | /* | ||
270 | * positive return values are purely for | ||
271 | * controlling the pagewalk, so should never | ||
272 | * be passed to the callers. | ||
273 | */ | ||
274 | err = 0; | ||
269 | continue; | 275 | continue; |
276 | } | ||
270 | if (err < 0) | 277 | if (err < 0) |
271 | break; | 278 | break; |
272 | } | 279 | } |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) | |||
287 | return 0; | 287 | return 0; |
288 | 288 | ||
289 | enomem_failure: | 289 | enomem_failure: |
290 | /* | ||
291 | * dst->anon_vma is dropped here otherwise its degree can be incorrectly | ||
292 | * decremented in unlink_anon_vmas(). | ||
293 | * We can safely do this because callers of anon_vma_clone() don't care | ||
294 | * about dst->anon_vma if anon_vma_clone() failed. | ||
295 | */ | ||
296 | dst->anon_vma = NULL; | ||
290 | unlink_anon_vmas(dst); | 297 | unlink_anon_vmas(dst); |
291 | return -ENOMEM; | 298 | return -ENOMEM; |
292 | } | 299 | } |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -2449,7 +2449,8 @@ redo: | |||
2449 | do { | 2449 | do { |
2450 | tid = this_cpu_read(s->cpu_slab->tid); | 2450 | tid = this_cpu_read(s->cpu_slab->tid); |
2451 | c = raw_cpu_ptr(s->cpu_slab); | 2451 | c = raw_cpu_ptr(s->cpu_slab); |
2452 | } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); | 2452 | } while (IS_ENABLED(CONFIG_PREEMPT) && |
2453 | unlikely(tid != READ_ONCE(c->tid))); | ||
2453 | 2454 | ||
2454 | /* | 2455 | /* |
2455 | * Irqless object alloc/free algorithm used here depends on sequence | 2456 | * Irqless object alloc/free algorithm used here depends on sequence |
@@ -2718,7 +2719,8 @@ redo: | |||
2718 | do { | 2719 | do { |
2719 | tid = this_cpu_read(s->cpu_slab->tid); | 2720 | tid = this_cpu_read(s->cpu_slab->tid); |
2720 | c = raw_cpu_ptr(s->cpu_slab); | 2721 | c = raw_cpu_ptr(s->cpu_slab); |
2721 | } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); | 2722 | } while (IS_ENABLED(CONFIG_PREEMPT) && |
2723 | unlikely(tid != READ_ONCE(c->tid))); | ||
2722 | 2724 | ||
2723 | /* Same with comment on barrier() in slab_alloc_node() */ | 2725 | /* Same with comment on barrier() in slab_alloc_node() */ |
2724 | barrier(); | 2726 | barrier(); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 35b25e1340ca..49abccf29a29 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr) | |||
1418 | spin_unlock(&vmap_area_lock); | 1418 | spin_unlock(&vmap_area_lock); |
1419 | 1419 | ||
1420 | vmap_debug_free_range(va->va_start, va->va_end); | 1420 | vmap_debug_free_range(va->va_start, va->va_end); |
1421 | kasan_free_shadow(vm); | ||
1421 | free_unmap_vmap_area(va); | 1422 | free_unmap_vmap_area(va); |
1422 | vm->size -= PAGE_SIZE; | 1423 | vm->size -= PAGE_SIZE; |
1423 | 1424 | ||
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index d8e376a5f0f1..36a1a739ad68 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -658,14 +658,30 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args) | |||
658 | static void p9_virtio_remove(struct virtio_device *vdev) | 658 | static void p9_virtio_remove(struct virtio_device *vdev) |
659 | { | 659 | { |
660 | struct virtio_chan *chan = vdev->priv; | 660 | struct virtio_chan *chan = vdev->priv; |
661 | 661 | unsigned long warning_time; | |
662 | if (chan->inuse) | ||
663 | p9_virtio_close(chan->client); | ||
664 | vdev->config->del_vqs(vdev); | ||
665 | 662 | ||
666 | mutex_lock(&virtio_9p_lock); | 663 | mutex_lock(&virtio_9p_lock); |
664 | |||
665 | /* Remove self from list so we don't get new users. */ | ||
667 | list_del(&chan->chan_list); | 666 | list_del(&chan->chan_list); |
667 | warning_time = jiffies; | ||
668 | |||
669 | /* Wait for existing users to close. */ | ||
670 | while (chan->inuse) { | ||
671 | mutex_unlock(&virtio_9p_lock); | ||
672 | msleep(250); | ||
673 | if (time_after(jiffies, warning_time + 10 * HZ)) { | ||
674 | dev_emerg(&vdev->dev, | ||
675 | "p9_virtio_remove: waiting for device in use.\n"); | ||
676 | warning_time = jiffies; | ||
677 | } | ||
678 | mutex_lock(&virtio_9p_lock); | ||
679 | } | ||
680 | |||
668 | mutex_unlock(&virtio_9p_lock); | 681 | mutex_unlock(&virtio_9p_lock); |
682 | |||
683 | vdev->config->del_vqs(vdev); | ||
684 | |||
669 | sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); | 685 | sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); |
670 | kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); | 686 | kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); |
671 | kfree(chan->tag); | 687 | kfree(chan->tag); |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index b087d278c679..1849d96b3c91 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -563,6 +563,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) | |||
563 | */ | 563 | */ |
564 | del_nbp(p); | 564 | del_nbp(p); |
565 | 565 | ||
566 | dev_set_mtu(br->dev, br_min_mtu(br)); | ||
567 | |||
566 | spin_lock_bh(&br->lock); | 568 | spin_lock_bh(&br->lock); |
567 | changed_addr = br_stp_recalculate_bridge_id(br); | 569 | changed_addr = br_stp_recalculate_bridge_id(br); |
568 | spin_unlock_bh(&br->lock); | 570 | spin_unlock_bh(&br->lock); |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 769b185fefbd..a6e2da0bc718 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -281,7 +281,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
281 | int copylen; | 281 | int copylen; |
282 | 282 | ||
283 | ret = -EOPNOTSUPP; | 283 | ret = -EOPNOTSUPP; |
284 | if (m->msg_flags&MSG_OOB) | 284 | if (flags & MSG_OOB) |
285 | goto read_error; | 285 | goto read_error; |
286 | 286 | ||
287 | skb = skb_recv_datagram(sk, flags, 0 , &ret); | 287 | skb = skb_recv_datagram(sk, flags, 0 , &ret); |
diff --git a/net/can/af_can.c b/net/can/af_can.c index 66e08040ced7..32d710eaf1fc 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop) | |||
259 | goto inval_skb; | 259 | goto inval_skb; |
260 | } | 260 | } |
261 | 261 | ||
262 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
263 | |||
264 | skb_reset_mac_header(skb); | ||
262 | skb_reset_network_header(skb); | 265 | skb_reset_network_header(skb); |
263 | skb_reset_transport_header(skb); | 266 | skb_reset_transport_header(skb); |
264 | 267 | ||
diff --git a/net/compat.c b/net/compat.c index 94d3d5e97883..f7bd286a8280 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -49,6 +49,13 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg, | |||
49 | __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || | 49 | __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || |
50 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) | 50 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) |
51 | return -EFAULT; | 51 | return -EFAULT; |
52 | |||
53 | if (!uaddr) | ||
54 | kmsg->msg_namelen = 0; | ||
55 | |||
56 | if (kmsg->msg_namelen < 0) | ||
57 | return -EINVAL; | ||
58 | |||
52 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | 59 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
53 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); | 60 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); |
54 | kmsg->msg_control = compat_ptr(tmp3); | 61 | kmsg->msg_control = compat_ptr(tmp3); |
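The get_compat_msghdr() hunk adds two sanity checks ahead of the existing clamp: a NULL address pointer forces the length to zero, and a negative length is rejected outright. A user-space sketch of that validation order, with an invented sanitize_namelen() helper standing in for the kernel logic:

    #include <stdio.h>

    #define SOCKADDR_STORAGE_SIZE 128   /* sizeof(struct sockaddr_storage) on Linux */

    /* Sanitize a user-supplied address length: no address means length 0,
     * negative lengths are rejected, oversized lengths are clamped. */
    static int sanitize_namelen(const void *uaddr, int *namelen)
    {
            if (!uaddr)
                    *namelen = 0;
            if (*namelen < 0)
                    return -1;          /* -EINVAL in the kernel */
            if (*namelen > SOCKADDR_STORAGE_SIZE)
                    *namelen = SOCKADDR_STORAGE_SIZE;
            return 0;
    }

    int main(void)
    {
            int len = -5, len2 = 64;

            printf("%d\n", sanitize_namelen((void *)0x1, &len));   /* rejected */
            sanitize_namelen(NULL, &len2);                         /* len2 becomes 0 */
            printf("%d\n", len2);
            return 0;
    }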
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 25b4b5d23485..ee0608bb3bc0 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -2166,28 +2166,28 @@ replay: | |||
2166 | } | 2166 | } |
2167 | } | 2167 | } |
2168 | err = rtnl_configure_link(dev, ifm); | 2168 | err = rtnl_configure_link(dev, ifm); |
2169 | if (err < 0) { | 2169 | if (err < 0) |
2170 | if (ops->newlink) { | 2170 | goto out_unregister; |
2171 | LIST_HEAD(list_kill); | ||
2172 | |||
2173 | ops->dellink(dev, &list_kill); | ||
2174 | unregister_netdevice_many(&list_kill); | ||
2175 | } else { | ||
2176 | unregister_netdevice(dev); | ||
2177 | } | ||
2178 | goto out; | ||
2179 | } | ||
2180 | |||
2181 | if (link_net) { | 2171 | if (link_net) { |
2182 | err = dev_change_net_namespace(dev, dest_net, ifname); | 2172 | err = dev_change_net_namespace(dev, dest_net, ifname); |
2183 | if (err < 0) | 2173 | if (err < 0) |
2184 | unregister_netdevice(dev); | 2174 | goto out_unregister; |
2185 | } | 2175 | } |
2186 | out: | 2176 | out: |
2187 | if (link_net) | 2177 | if (link_net) |
2188 | put_net(link_net); | 2178 | put_net(link_net); |
2189 | put_net(dest_net); | 2179 | put_net(dest_net); |
2190 | return err; | 2180 | return err; |
2181 | out_unregister: | ||
2182 | if (ops->newlink) { | ||
2183 | LIST_HEAD(list_kill); | ||
2184 | |||
2185 | ops->dellink(dev, &list_kill); | ||
2186 | unregister_netdevice_many(&list_kill); | ||
2187 | } else { | ||
2188 | unregister_netdevice(dev); | ||
2189 | } | ||
2190 | goto out; | ||
2191 | } | 2191 | } |
2192 | } | 2192 | } |
2193 | 2193 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f80507823531..8e4ac97c8477 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3733,9 +3733,13 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, | |||
3733 | struct sock *sk, int tstype) | 3733 | struct sock *sk, int tstype) |
3734 | { | 3734 | { |
3735 | struct sk_buff *skb; | 3735 | struct sk_buff *skb; |
3736 | bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; | 3736 | bool tsonly; |
3737 | 3737 | ||
3738 | if (!sk || !skb_may_tx_timestamp(sk, tsonly)) | 3738 | if (!sk) |
3739 | return; | ||
3740 | |||
3741 | tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; | ||
3742 | if (!skb_may_tx_timestamp(sk, tsonly)) | ||
3739 | return; | 3743 | return; |
3740 | 3744 | ||
3741 | if (tsonly) | 3745 | if (tsonly) |
@@ -4173,7 +4177,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) | |||
4173 | skb->ignore_df = 0; | 4177 | skb->ignore_df = 0; |
4174 | skb_dst_drop(skb); | 4178 | skb_dst_drop(skb); |
4175 | skb->mark = 0; | 4179 | skb->mark = 0; |
4176 | skb->sender_cpu = 0; | 4180 | skb_sender_cpu_clear(skb); |
4177 | skb_init_secmark(skb); | 4181 | skb_init_secmark(skb); |
4178 | secpath_reset(skb); | 4182 | secpath_reset(skb); |
4179 | nf_reset(skb); | 4183 | nf_reset(skb); |
diff --git a/net/core/sock.c b/net/core/sock.c index 93c8b20c91e4..78e89eb7eb70 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1655,6 +1655,10 @@ void sock_rfree(struct sk_buff *skb) | |||
1655 | } | 1655 | } |
1656 | EXPORT_SYMBOL(sock_rfree); | 1656 | EXPORT_SYMBOL(sock_rfree); |
1657 | 1657 | ||
1658 | /* | ||
1659 | * Buffer destructor for skbs that are not used directly in read or write | ||
1660 | * path, e.g. for error handler skbs. Automatically called from kfree_skb. | ||
1661 | */ | ||
1658 | void sock_efree(struct sk_buff *skb) | 1662 | void sock_efree(struct sk_buff *skb) |
1659 | { | 1663 | { |
1660 | sock_put(skb->sk); | 1664 | sock_put(skb->sk); |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 433424804284..8ce351ffceb1 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -25,6 +25,8 @@ | |||
25 | static int zero = 0; | 25 | static int zero = 0; |
26 | static int one = 1; | 26 | static int one = 1; |
27 | static int ushort_max = USHRT_MAX; | 27 | static int ushort_max = USHRT_MAX; |
28 | static int min_sndbuf = SOCK_MIN_SNDBUF; | ||
29 | static int min_rcvbuf = SOCK_MIN_RCVBUF; | ||
28 | 30 | ||
29 | static int net_msg_warn; /* Unused, but still a sysctl */ | 31 | static int net_msg_warn; /* Unused, but still a sysctl */ |
30 | 32 | ||
@@ -237,7 +239,7 @@ static struct ctl_table net_core_table[] = { | |||
237 | .maxlen = sizeof(int), | 239 | .maxlen = sizeof(int), |
238 | .mode = 0644, | 240 | .mode = 0644, |
239 | .proc_handler = proc_dointvec_minmax, | 241 | .proc_handler = proc_dointvec_minmax, |
240 | .extra1 = &one, | 242 | .extra1 = &min_sndbuf, |
241 | }, | 243 | }, |
242 | { | 244 | { |
243 | .procname = "rmem_max", | 245 | .procname = "rmem_max", |
@@ -245,7 +247,7 @@ static struct ctl_table net_core_table[] = { | |||
245 | .maxlen = sizeof(int), | 247 | .maxlen = sizeof(int), |
246 | .mode = 0644, | 248 | .mode = 0644, |
247 | .proc_handler = proc_dointvec_minmax, | 249 | .proc_handler = proc_dointvec_minmax, |
248 | .extra1 = &one, | 250 | .extra1 = &min_rcvbuf, |
249 | }, | 251 | }, |
250 | { | 252 | { |
251 | .procname = "wmem_default", | 253 | .procname = "wmem_default", |
@@ -253,7 +255,7 @@ static struct ctl_table net_core_table[] = { | |||
253 | .maxlen = sizeof(int), | 255 | .maxlen = sizeof(int), |
254 | .mode = 0644, | 256 | .mode = 0644, |
255 | .proc_handler = proc_dointvec_minmax, | 257 | .proc_handler = proc_dointvec_minmax, |
256 | .extra1 = &one, | 258 | .extra1 = &min_sndbuf, |
257 | }, | 259 | }, |
258 | { | 260 | { |
259 | .procname = "rmem_default", | 261 | .procname = "rmem_default", |
@@ -261,7 +263,7 @@ static struct ctl_table net_core_table[] = { | |||
261 | .maxlen = sizeof(int), | 263 | .maxlen = sizeof(int), |
262 | .mode = 0644, | 264 | .mode = 0644, |
263 | .proc_handler = proc_dointvec_minmax, | 265 | .proc_handler = proc_dointvec_minmax, |
264 | .extra1 = &one, | 266 | .extra1 = &min_rcvbuf, |
265 | }, | 267 | }, |
266 | { | 268 | { |
267 | .procname = "dev_weight", | 269 | .procname = "dev_weight", |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 14d02ea905b6..3e44b9b0b78e 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -268,6 +268,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo) | |||
268 | release_sock(sk); | 268 | release_sock(sk); |
269 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) | 269 | if (reqsk_queue_empty(&icsk->icsk_accept_queue)) |
270 | timeo = schedule_timeout(timeo); | 270 | timeo = schedule_timeout(timeo); |
271 | sched_annotate_sleep(); | ||
271 | lock_sock(sk); | 272 | lock_sock(sk); |
272 | err = 0; | 273 | err = 0; |
273 | if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) | 274 | if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 81751f12645f..592aff37366b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler( | |||
71 | mutex_unlock(&inet_diag_table_mutex); | 71 | mutex_unlock(&inet_diag_table_mutex); |
72 | } | 72 | } |
73 | 73 | ||
74 | static size_t inet_sk_attr_size(void) | ||
75 | { | ||
76 | return nla_total_size(sizeof(struct tcp_info)) | ||
77 | + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ | ||
78 | + nla_total_size(1) /* INET_DIAG_TOS */ | ||
79 | + nla_total_size(1) /* INET_DIAG_TCLASS */ | ||
80 | + nla_total_size(sizeof(struct inet_diag_meminfo)) | ||
81 | + nla_total_size(sizeof(struct inet_diag_msg)) | ||
82 | + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) | ||
83 | + nla_total_size(TCP_CA_NAME_MAX) | ||
84 | + nla_total_size(sizeof(struct tcpvegas_info)) | ||
85 | + 64; | ||
86 | } | ||
87 | |||
74 | int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | 88 | int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, |
75 | struct sk_buff *skb, struct inet_diag_req_v2 *req, | 89 | struct sk_buff *skb, struct inet_diag_req_v2 *req, |
76 | struct user_namespace *user_ns, | 90 | struct user_namespace *user_ns, |
@@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s | |||
326 | if (err) | 340 | if (err) |
327 | goto out; | 341 | goto out; |
328 | 342 | ||
329 | rep = nlmsg_new(sizeof(struct inet_diag_msg) + | 343 | rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL); |
330 | sizeof(struct inet_diag_meminfo) + | ||
331 | sizeof(struct tcp_info) + 64, GFP_KERNEL); | ||
332 | if (!rep) { | 344 | if (!rep) { |
333 | err = -ENOMEM; | 345 | err = -ENOMEM; |
334 | goto out; | 346 | goto out; |
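inet_sk_attr_size() above sizes the reply skb by summing nla_total_size() over every attribute that might be attached, instead of the old hand-rolled estimate. The sketch below reimplements that per-attribute accounting in user space; `struct nlattr_hdr` is a stand-in for the real `struct nlattr`, and the 4-byte alignment follows the usual netlink convention.

    #include <stdio.h>

    #define NLA_ALIGNTO     4
    #define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN      ((int)NLA_ALIGN(sizeof(struct nlattr_hdr)))

    struct nlattr_hdr {                 /* stand-in for struct nlattr */
            unsigned short nla_len;
            unsigned short nla_type;
    };

    /* Space one attribute of the given payload size occupies in a message. */
    static int nla_total_size(int payload)
    {
            return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
            /* even a 1-byte attribute (e.g. INET_DIAG_SHUTDOWN) costs a
             * full aligned slot */
            printf("1-byte attr -> %d bytes\n", nla_total_size(1));
            return 0;
    }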
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 787b3c294ce6..d9bc28ac5d1b 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -67,6 +67,7 @@ static int ip_forward_finish(struct sk_buff *skb) | |||
67 | if (unlikely(opt->optlen)) | 67 | if (unlikely(opt->optlen)) |
68 | ip_forward_options(skb); | 68 | ip_forward_options(skb); |
69 | 69 | ||
70 | skb_sender_cpu_clear(skb); | ||
70 | return dst_output(skb); | 71 | return dst_output(skb); |
71 | } | 72 | } |
72 | 73 | ||
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 2c8d98e728c0..145a50c4d566 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag); | |||
659 | struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) | 659 | struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) |
660 | { | 660 | { |
661 | struct iphdr iph; | 661 | struct iphdr iph; |
662 | int netoff; | ||
662 | u32 len; | 663 | u32 len; |
663 | 664 | ||
664 | if (skb->protocol != htons(ETH_P_IP)) | 665 | if (skb->protocol != htons(ETH_P_IP)) |
665 | return skb; | 666 | return skb; |
666 | 667 | ||
667 | if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0) | 668 | netoff = skb_network_offset(skb); |
669 | |||
670 | if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0) | ||
668 | return skb; | 671 | return skb; |
669 | 672 | ||
670 | if (iph.ihl < 5 || iph.version != 4) | 673 | if (iph.ihl < 5 || iph.version != 4) |
671 | return skb; | 674 | return skb; |
672 | 675 | ||
673 | len = ntohs(iph.tot_len); | 676 | len = ntohs(iph.tot_len); |
674 | if (skb->len < len || len < (iph.ihl * 4)) | 677 | if (skb->len < netoff + len || len < (iph.ihl * 4)) |
675 | return skb; | 678 | return skb; |
676 | 679 | ||
677 | if (ip_is_fragment(&iph)) { | 680 | if (ip_is_fragment(&iph)) { |
678 | skb = skb_share_check(skb, GFP_ATOMIC); | 681 | skb = skb_share_check(skb, GFP_ATOMIC); |
679 | if (skb) { | 682 | if (skb) { |
680 | if (!pskb_may_pull(skb, iph.ihl*4)) | 683 | if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) |
681 | return skb; | 684 | return skb; |
682 | if (pskb_trim_rcsum(skb, len)) | 685 | if (pskb_trim_rcsum(skb, netoff + len)) |
683 | return skb; | 686 | return skb; |
684 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); | 687 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); |
685 | if (ip_defrag(skb, user)) | 688 | if (ip_defrag(skb, user)) |
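The ip_check_defrag() fix accounts for skb_network_offset(): when the IP header does not start at offset zero (for instance a frame that still carries its link-layer header in front of the IP header), every length comparison and pull has to include that offset. A simplified user-space version of the length checks, with hypothetical names and a 14-byte Ethernet header as the example offset:

    #include <stdbool.h>
    #include <stdio.h>

    /* The IP header starts netoff bytes into the buffer, so every length
     * check must be made relative to that offset, not to offset 0. */
    static bool frame_len_ok(unsigned int frame_len, unsigned int netoff,
                             unsigned int ip_tot_len, unsigned int ihl_bytes)
    {
            if (frame_len < netoff + ip_tot_len)    /* truncated datagram */
                    return false;
            if (ip_tot_len < ihl_bytes)             /* header longer than packet */
                    return false;
            return true;
    }

    int main(void)
    {
            /* 14-byte Ethernet header in front of a 1500-byte IP datagram */
            printf("%d\n", frame_len_ok(1514, 14, 1500, 20));
            printf("%d\n", frame_len_ok(1500, 14, 1500, 20));   /* too short */
            return 0;
    }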
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 31d8c71986b4..5cd99271d3a6 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -432,17 +432,32 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf | |||
432 | kfree_skb(skb); | 432 | kfree_skb(skb); |
433 | } | 433 | } |
434 | 434 | ||
435 | static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk, | 435 | /* IPv4 supports cmsg on all imcp errors and some timestamps |
436 | const struct sk_buff *skb, | 436 | * |
437 | int ee_origin) | 437 | * Timestamp code paths do not initialize the fields expected by cmsg: |
438 | * the PKTINFO fields in skb->cb[]. Fill those in here. | ||
439 | */ | ||
440 | static bool ipv4_datagram_support_cmsg(const struct sock *sk, | ||
441 | struct sk_buff *skb, | ||
442 | int ee_origin) | ||
438 | { | 443 | { |
439 | struct in_pktinfo *info = PKTINFO_SKB_CB(skb); | 444 | struct in_pktinfo *info; |
445 | |||
446 | if (ee_origin == SO_EE_ORIGIN_ICMP) | ||
447 | return true; | ||
440 | 448 | ||
441 | if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) || | 449 | if (ee_origin == SO_EE_ORIGIN_LOCAL) |
442 | (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || | 450 | return false; |
451 | |||
452 | /* Support IP_PKTINFO on tstamp packets if requested, to correlate | ||
453 | * timestamp with egress dev. Not possible for packets without dev | ||
454 | * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). | ||
455 | */ | ||
456 | if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || | ||
443 | (!skb->dev)) | 457 | (!skb->dev)) |
444 | return false; | 458 | return false; |
445 | 459 | ||
460 | info = PKTINFO_SKB_CB(skb); | ||
446 | info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; | 461 | info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; |
447 | info->ipi_ifindex = skb->dev->ifindex; | 462 | info->ipi_ifindex = skb->dev->ifindex; |
448 | return true; | 463 | return true; |
@@ -483,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
483 | 498 | ||
484 | serr = SKB_EXT_ERR(skb); | 499 | serr = SKB_EXT_ERR(skb); |
485 | 500 | ||
486 | if (sin && skb->len) { | 501 | if (sin && serr->port) { |
487 | sin->sin_family = AF_INET; | 502 | sin->sin_family = AF_INET; |
488 | sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + | 503 | sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + |
489 | serr->addr_offset); | 504 | serr->addr_offset); |
@@ -496,9 +511,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
496 | sin = &errhdr.offender; | 511 | sin = &errhdr.offender; |
497 | memset(sin, 0, sizeof(*sin)); | 512 | memset(sin, 0, sizeof(*sin)); |
498 | 513 | ||
499 | if (skb->len && | 514 | if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) { |
500 | (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || | ||
501 | ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) { | ||
502 | sin->sin_family = AF_INET; | 515 | sin->sin_family = AF_INET; |
503 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; | 516 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; |
504 | if (inet_sk(sk)->cmsg_flags) | 517 | if (inet_sk(sk)->cmsg_flags) |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 99e810f84671..cf5e82f39d3b 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -272,9 +272,9 @@ static void trace_packet(const struct sk_buff *skb, | |||
272 | &chainname, &comment, &rulenum) != 0) | 272 | &chainname, &comment, &rulenum) != 0) |
273 | break; | 273 | break; |
274 | 274 | ||
275 | nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo, | 275 | nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo, |
276 | "TRACE: %s:%s:%s:%u ", | 276 | "TRACE: %s:%s:%s:%u ", |
277 | tablename, chainname, comment, rulenum); | 277 | tablename, chainname, comment, rulenum); |
278 | } | 278 | } |
279 | #endif | 279 | #endif |
280 | 280 | ||
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index e9f66e1cda50..208d5439e59b 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk) | |||
259 | kgid_t low, high; | 259 | kgid_t low, high; |
260 | int ret = 0; | 260 | int ret = 0; |
261 | 261 | ||
262 | if (sk->sk_family == AF_INET6) | ||
263 | sk->sk_ipv6only = 1; | ||
264 | |||
262 | inet_get_ping_group_range_net(net, &low, &high); | 265 | inet_get_ping_group_range_net(net, &low, &high); |
263 | if (gid_lte(low, group) && gid_lte(group, high)) | 266 | if (gid_lte(low, group) && gid_lte(group, high)) |
264 | return 0; | 267 | return 0; |
@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, | |||
305 | if (addr_len < sizeof(*addr)) | 308 | if (addr_len < sizeof(*addr)) |
306 | return -EINVAL; | 309 | return -EINVAL; |
307 | 310 | ||
311 | if (addr->sin_family != AF_INET && | ||
312 | !(addr->sin_family == AF_UNSPEC && | ||
313 | addr->sin_addr.s_addr == htonl(INADDR_ANY))) | ||
314 | return -EAFNOSUPPORT; | ||
315 | |||
308 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", | 316 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", |
309 | sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); | 317 | sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); |
310 | 318 | ||
@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, | |||
330 | return -EINVAL; | 338 | return -EINVAL; |
331 | 339 | ||
332 | if (addr->sin6_family != AF_INET6) | 340 | if (addr->sin6_family != AF_INET6) |
333 | return -EINVAL; | 341 | return -EAFNOSUPPORT; |
334 | 342 | ||
335 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", | 343 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", |
336 | sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); | 344 | sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); |
@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
716 | if (msg->msg_namelen < sizeof(*usin)) | 724 | if (msg->msg_namelen < sizeof(*usin)) |
717 | return -EINVAL; | 725 | return -EINVAL; |
718 | if (usin->sin_family != AF_INET) | 726 | if (usin->sin_family != AF_INET) |
719 | return -EINVAL; | 727 | return -EAFNOSUPPORT; |
720 | daddr = usin->sin_addr.s_addr; | 728 | daddr = usin->sin_addr.s_addr; |
721 | /* no remote port */ | 729 | /* no remote port */ |
722 | } else { | 730 | } else { |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9d72a0fcd928..995a2259bcfc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -835,17 +835,13 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, | |||
835 | int large_allowed) | 835 | int large_allowed) |
836 | { | 836 | { |
837 | struct tcp_sock *tp = tcp_sk(sk); | 837 | struct tcp_sock *tp = tcp_sk(sk); |
838 | u32 new_size_goal, size_goal, hlen; | 838 | u32 new_size_goal, size_goal; |
839 | 839 | ||
840 | if (!large_allowed || !sk_can_gso(sk)) | 840 | if (!large_allowed || !sk_can_gso(sk)) |
841 | return mss_now; | 841 | return mss_now; |
842 | 842 | ||
843 | /* Maybe we should/could use sk->sk_prot->max_header here ? */ | 843 | /* Note : tcp_tso_autosize() will eventually split this later */ |
844 | hlen = inet_csk(sk)->icsk_af_ops->net_header_len + | 844 | new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; |
845 | inet_csk(sk)->icsk_ext_hdr_len + | ||
846 | tp->tcp_header_len; | ||
847 | |||
848 | new_size_goal = sk->sk_gso_max_size - 1 - hlen; | ||
849 | new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); | 845 | new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); |
850 | 846 | ||
851 | /* We try hard to avoid divides here */ | 847 | /* We try hard to avoid divides here */ |
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index d694088214cd..62856e185a93 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -378,6 +378,12 @@ EXPORT_SYMBOL_GPL(tcp_slow_start); | |||
378 | */ | 378 | */ |
379 | void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) | 379 | void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) |
380 | { | 380 | { |
381 | /* If credits accumulated at a higher w, apply them gently now. */ | ||
382 | if (tp->snd_cwnd_cnt >= w) { | ||
383 | tp->snd_cwnd_cnt = 0; | ||
384 | tp->snd_cwnd++; | ||
385 | } | ||
386 | |||
381 | tp->snd_cwnd_cnt += acked; | 387 | tp->snd_cwnd_cnt += acked; |
382 | if (tp->snd_cwnd_cnt >= w) { | 388 | if (tp->snd_cwnd_cnt >= w) { |
383 | u32 delta = tp->snd_cwnd_cnt / w; | 389 | u32 delta = tp->snd_cwnd_cnt / w; |
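
The new lines in tcp_cong_avoid_ai() flush credits that accumulated while w was larger before the freshly ACKed packets are added, so a sudden decrease of w yields at most one extra segment instead of a burst. A standalone model of the arithmetic after the change (tcp_sock is replaced by a stand-in struct, and the cwnd clamp that follows in the kernel is omitted):

#include <stdint.h>

struct cc_state {               /* stand-in for the tcp_sock fields used here */
        uint32_t snd_cwnd;
        uint32_t snd_cwnd_cnt;
};

/* Additive increase: one extra segment for every w packets ACKed. */
static void cong_avoid_ai(struct cc_state *tp, uint32_t w, uint32_t acked)
{
        /* Credits carried over from a higher w are applied gently. */
        if (tp->snd_cwnd_cnt >= w) {
                tp->snd_cwnd_cnt = 0;
                tp->snd_cwnd++;
        }

        tp->snd_cwnd_cnt += acked;
        if (tp->snd_cwnd_cnt >= w) {
                uint32_t delta = tp->snd_cwnd_cnt / w;

                tp->snd_cwnd_cnt -= delta * w;
                tp->snd_cwnd += delta;
        }
}
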
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 4b276d1ed980..06d3d665a9fd 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -306,8 +306,10 @@ tcp_friendliness: | |||
306 | } | 306 | } |
307 | } | 307 | } |
308 | 308 | ||
309 | if (ca->cnt == 0) /* cannot be zero */ | 309 | /* The maximum rate of cwnd increase CUBIC allows is 1 packet per |
310 | ca->cnt = 1; | 310 | * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT. |
311 | */ | ||
312 | ca->cnt = max(ca->cnt, 2U); | ||
311 | } | 313 | } |
312 | 314 | ||
313 | static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) | 315 | static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) |
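
ca->cnt is the number of ACKs CUBIC wants to see per one-segment cwnd increment, so clamping it to at least 2 (rather than merely avoiding zero) caps growth at one segment per two ACKs, i.e. roughly 1.5x cwnd per RTT. A one-line standalone restatement of the clamp:

/* was: if (cnt == 0) cnt = 1;  now: never faster than 1 segment / 2 ACKs */
static unsigned int clamp_cubic_cnt(unsigned int cnt)
{
        return cnt < 2 ? 2 : cnt;
}
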
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a2a796c5536b..1db253e36045 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2773,15 +2773,11 @@ void tcp_send_fin(struct sock *sk) | |||
2773 | } else { | 2773 | } else { |
2774 | /* Socket is locked, keep trying until memory is available. */ | 2774 | /* Socket is locked, keep trying until memory is available. */ |
2775 | for (;;) { | 2775 | for (;;) { |
2776 | skb = alloc_skb_fclone(MAX_TCP_HEADER, | 2776 | skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); |
2777 | sk->sk_allocation); | ||
2778 | if (skb) | 2777 | if (skb) |
2779 | break; | 2778 | break; |
2780 | yield(); | 2779 | yield(); |
2781 | } | 2780 | } |
2782 | |||
2783 | /* Reserve space for headers and prepare control bits. */ | ||
2784 | skb_reserve(skb, MAX_TCP_HEADER); | ||
2785 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ | 2781 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ |
2786 | tcp_init_nondata_skb(skb, tp->write_seq, | 2782 | tcp_init_nondata_skb(skb, tp->write_seq, |
2787 | TCPHDR_ACK | TCPHDR_FIN); | 2783 | TCPHDR_ACK | TCPHDR_FIN); |
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index d5f6bd9a210a..dab73813cb92 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -63,6 +63,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) | |||
63 | return err; | 63 | return err; |
64 | 64 | ||
65 | IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; | 65 | IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; |
66 | skb->protocol = htons(ETH_P_IP); | ||
66 | 67 | ||
67 | return x->outer_mode->output2(x, skb); | 68 | return x->outer_mode->output2(x, skb); |
68 | } | 69 | } |
@@ -71,7 +72,6 @@ EXPORT_SYMBOL(xfrm4_prepare_output); | |||
71 | int xfrm4_output_finish(struct sk_buff *skb) | 72 | int xfrm4_output_finish(struct sk_buff *skb) |
72 | { | 73 | { |
73 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); | 74 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); |
74 | skb->protocol = htons(ETH_P_IP); | ||
75 | 75 | ||
76 | #ifdef CONFIG_NETFILTER | 76 | #ifdef CONFIG_NETFILTER |
77 | IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; | 77 | IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index c215be70cac0..ace8daca5c83 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -325,14 +325,34 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) | |||
325 | kfree_skb(skb); | 325 | kfree_skb(skb); |
326 | } | 326 | } |
327 | 327 | ||
328 | static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb) | 328 | /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL. |
329 | * | ||
330 | * At one point, excluding local errors was a quick test to identify icmp/icmp6 | ||
331 | * errors. This is no longer true, but the test remained, so the v6 stack, | ||
332 | * unlike v4, also honors cmsg requests on all wifi and timestamp errors. | ||
333 | * | ||
334 | * Timestamp code paths do not initialize the fields expected by cmsg: | ||
335 | * the PKTINFO fields in skb->cb[]. Fill those in here. | ||
336 | */ | ||
337 | static bool ip6_datagram_support_cmsg(struct sk_buff *skb, | ||
338 | struct sock_exterr_skb *serr) | ||
329 | { | 339 | { |
330 | int ifindex = skb->dev ? skb->dev->ifindex : -1; | 340 | if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || |
341 | serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) | ||
342 | return true; | ||
343 | |||
344 | if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) | ||
345 | return false; | ||
346 | |||
347 | if (!skb->dev) | ||
348 | return false; | ||
331 | 349 | ||
332 | if (skb->protocol == htons(ETH_P_IPV6)) | 350 | if (skb->protocol == htons(ETH_P_IPV6)) |
333 | IP6CB(skb)->iif = ifindex; | 351 | IP6CB(skb)->iif = skb->dev->ifindex; |
334 | else | 352 | else |
335 | PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex; | 353 | PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex; |
354 | |||
355 | return true; | ||
336 | } | 356 | } |
337 | 357 | ||
338 | /* | 358 | /* |
@@ -369,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
369 | 389 | ||
370 | serr = SKB_EXT_ERR(skb); | 390 | serr = SKB_EXT_ERR(skb); |
371 | 391 | ||
372 | if (sin && skb->len) { | 392 | if (sin && serr->port) { |
373 | const unsigned char *nh = skb_network_header(skb); | 393 | const unsigned char *nh = skb_network_header(skb); |
374 | sin->sin6_family = AF_INET6; | 394 | sin->sin6_family = AF_INET6; |
375 | sin->sin6_flowinfo = 0; | 395 | sin->sin6_flowinfo = 0; |
@@ -394,14 +414,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
394 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); | 414 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); |
395 | sin = &errhdr.offender; | 415 | sin = &errhdr.offender; |
396 | memset(sin, 0, sizeof(*sin)); | 416 | memset(sin, 0, sizeof(*sin)); |
397 | if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) { | 417 | |
418 | if (ip6_datagram_support_cmsg(skb, serr)) { | ||
398 | sin->sin6_family = AF_INET6; | 419 | sin->sin6_family = AF_INET6; |
399 | if (np->rxopt.all) { | 420 | if (np->rxopt.all) |
400 | if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP && | ||
401 | serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6) | ||
402 | ip6_datagram_prepare_pktinfo_errqueue(skb); | ||
403 | ip6_datagram_recv_common_ctl(sk, msg, skb); | 421 | ip6_datagram_recv_common_ctl(sk, msg, skb); |
404 | } | ||
405 | if (skb->protocol == htons(ETH_P_IPV6)) { | 422 | if (skb->protocol == htons(ETH_P_IPV6)) { |
406 | sin->sin6_addr = ipv6_hdr(skb)->saddr; | 423 | sin->sin6_addr = ipv6_hdr(skb)->saddr; |
407 | if (np->rxopt.all) | 424 | if (np->rxopt.all) |
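
ip6_datagram_support_cmsg() centralizes the decision of whether an error-queue entry may carry ancillary data and, for the non-ICMP, non-local origins, fills in the interface index the timestamp paths leave uninitialized. A standalone model of just the decision (the enum values are stand-ins for the SO_EE_ORIGIN_* constants):

#include <stdbool.h>

enum ee_origin { EE_LOCAL, EE_ICMP, EE_ICMP6, EE_OTHER };

/* ICMP/ICMPv6 errors always support cmsg, local errors never do,
 * and anything else needs a device to resolve the ifindex from. */
static bool datagram_supports_cmsg(enum ee_origin origin, bool have_dev)
{
        if (origin == EE_ICMP || origin == EE_ICMP6)
                return true;
        if (origin == EE_LOCAL)
                return false;
        return have_dev;
}
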
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index b4d5e1d97c1b..27ca79682efb 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
104 | goto again; | 104 | goto again; |
105 | flp6->saddr = saddr; | 105 | flp6->saddr = saddr; |
106 | } | 106 | } |
107 | err = rt->dst.error; | ||
107 | goto out; | 108 | goto out; |
108 | } | 109 | } |
109 | again: | 110 | again: |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 0a04a37305d5..7e80b61b51ff 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -318,6 +318,7 @@ static int ip6_forward_proxy_check(struct sk_buff *skb) | |||
318 | 318 | ||
319 | static inline int ip6_forward_finish(struct sk_buff *skb) | 319 | static inline int ip6_forward_finish(struct sk_buff *skb) |
320 | { | 320 | { |
321 | skb_sender_cpu_clear(skb); | ||
321 | return dst_output(skb); | 322 | return dst_output(skb); |
322 | } | 323 | } |
323 | 324 | ||
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 266a264ec212..ddd94eca19b3 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -314,7 +314,7 @@ out: | |||
314 | * Create tunnel matching given parameters. | 314 | * Create tunnel matching given parameters. |
315 | * | 315 | * |
316 | * Return: | 316 | * Return: |
317 | * created tunnel or NULL | 317 | * created tunnel or error pointer |
318 | **/ | 318 | **/ |
319 | 319 | ||
320 | static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) | 320 | static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) |
@@ -322,7 +322,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) | |||
322 | struct net_device *dev; | 322 | struct net_device *dev; |
323 | struct ip6_tnl *t; | 323 | struct ip6_tnl *t; |
324 | char name[IFNAMSIZ]; | 324 | char name[IFNAMSIZ]; |
325 | int err; | 325 | int err = -ENOMEM; |
326 | 326 | ||
327 | if (p->name[0]) | 327 | if (p->name[0]) |
328 | strlcpy(name, p->name, IFNAMSIZ); | 328 | strlcpy(name, p->name, IFNAMSIZ); |
@@ -348,7 +348,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) | |||
348 | failed_free: | 348 | failed_free: |
349 | ip6_dev_free(dev); | 349 | ip6_dev_free(dev); |
350 | failed: | 350 | failed: |
351 | return NULL; | 351 | return ERR_PTR(err); |
352 | } | 352 | } |
353 | 353 | ||
354 | /** | 354 | /** |
@@ -362,7 +362,7 @@ failed: | |||
362 | * tunnel device is created and registered for use. | 362 | * tunnel device is created and registered for use. |
363 | * | 363 | * |
364 | * Return: | 364 | * Return: |
365 | * matching tunnel or NULL | 365 | * matching tunnel or error pointer |
366 | **/ | 366 | **/ |
367 | 367 | ||
368 | static struct ip6_tnl *ip6_tnl_locate(struct net *net, | 368 | static struct ip6_tnl *ip6_tnl_locate(struct net *net, |
@@ -380,13 +380,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net, | |||
380 | if (ipv6_addr_equal(local, &t->parms.laddr) && | 380 | if (ipv6_addr_equal(local, &t->parms.laddr) && |
381 | ipv6_addr_equal(remote, &t->parms.raddr)) { | 381 | ipv6_addr_equal(remote, &t->parms.raddr)) { |
382 | if (create) | 382 | if (create) |
383 | return NULL; | 383 | return ERR_PTR(-EEXIST); |
384 | 384 | ||
385 | return t; | 385 | return t; |
386 | } | 386 | } |
387 | } | 387 | } |
388 | if (!create) | 388 | if (!create) |
389 | return NULL; | 389 | return ERR_PTR(-ENODEV); |
390 | return ip6_tnl_create(net, p); | 390 | return ip6_tnl_create(net, p); |
391 | } | 391 | } |
392 | 392 | ||
@@ -1420,7 +1420,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1420 | } | 1420 | } |
1421 | ip6_tnl_parm_from_user(&p1, &p); | 1421 | ip6_tnl_parm_from_user(&p1, &p); |
1422 | t = ip6_tnl_locate(net, &p1, 0); | 1422 | t = ip6_tnl_locate(net, &p1, 0); |
1423 | if (t == NULL) | 1423 | if (IS_ERR(t)) |
1424 | t = netdev_priv(dev); | 1424 | t = netdev_priv(dev); |
1425 | } else { | 1425 | } else { |
1426 | memset(&p, 0, sizeof(p)); | 1426 | memset(&p, 0, sizeof(p)); |
@@ -1445,7 +1445,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1445 | ip6_tnl_parm_from_user(&p1, &p); | 1445 | ip6_tnl_parm_from_user(&p1, &p); |
1446 | t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); | 1446 | t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); |
1447 | if (cmd == SIOCCHGTUNNEL) { | 1447 | if (cmd == SIOCCHGTUNNEL) { |
1448 | if (t != NULL) { | 1448 | if (!IS_ERR(t)) { |
1449 | if (t->dev != dev) { | 1449 | if (t->dev != dev) { |
1450 | err = -EEXIST; | 1450 | err = -EEXIST; |
1451 | break; | 1451 | break; |
@@ -1457,14 +1457,15 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1457 | else | 1457 | else |
1458 | err = ip6_tnl_update(t, &p1); | 1458 | err = ip6_tnl_update(t, &p1); |
1459 | } | 1459 | } |
1460 | if (t) { | 1460 | if (!IS_ERR(t)) { |
1461 | err = 0; | 1461 | err = 0; |
1462 | ip6_tnl_parm_to_user(&p, &t->parms); | 1462 | ip6_tnl_parm_to_user(&p, &t->parms); |
1463 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) | 1463 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) |
1464 | err = -EFAULT; | 1464 | err = -EFAULT; |
1465 | 1465 | ||
1466 | } else | 1466 | } else { |
1467 | err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); | 1467 | err = PTR_ERR(t); |
1468 | } | ||
1468 | break; | 1469 | break; |
1469 | case SIOCDELTUNNEL: | 1470 | case SIOCDELTUNNEL: |
1470 | err = -EPERM; | 1471 | err = -EPERM; |
@@ -1478,7 +1479,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1478 | err = -ENOENT; | 1479 | err = -ENOENT; |
1479 | ip6_tnl_parm_from_user(&p1, &p); | 1480 | ip6_tnl_parm_from_user(&p1, &p); |
1480 | t = ip6_tnl_locate(net, &p1, 0); | 1481 | t = ip6_tnl_locate(net, &p1, 0); |
1481 | if (t == NULL) | 1482 | if (IS_ERR(t)) |
1482 | break; | 1483 | break; |
1483 | err = -EPERM; | 1484 | err = -EPERM; |
1484 | if (t->dev == ip6n->fb_tnl_dev) | 1485 | if (t->dev == ip6n->fb_tnl_dev) |
@@ -1672,12 +1673,13 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, | |||
1672 | struct nlattr *tb[], struct nlattr *data[]) | 1673 | struct nlattr *tb[], struct nlattr *data[]) |
1673 | { | 1674 | { |
1674 | struct net *net = dev_net(dev); | 1675 | struct net *net = dev_net(dev); |
1675 | struct ip6_tnl *nt; | 1676 | struct ip6_tnl *nt, *t; |
1676 | 1677 | ||
1677 | nt = netdev_priv(dev); | 1678 | nt = netdev_priv(dev); |
1678 | ip6_tnl_netlink_parms(data, &nt->parms); | 1679 | ip6_tnl_netlink_parms(data, &nt->parms); |
1679 | 1680 | ||
1680 | if (ip6_tnl_locate(net, &nt->parms, 0)) | 1681 | t = ip6_tnl_locate(net, &nt->parms, 0); |
1682 | if (!IS_ERR(t)) | ||
1681 | return -EEXIST; | 1683 | return -EEXIST; |
1682 | 1684 | ||
1683 | return ip6_tnl_create2(dev); | 1685 | return ip6_tnl_create2(dev); |
@@ -1697,8 +1699,7 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], | |||
1697 | ip6_tnl_netlink_parms(data, &p); | 1699 | ip6_tnl_netlink_parms(data, &p); |
1698 | 1700 | ||
1699 | t = ip6_tnl_locate(net, &p, 0); | 1701 | t = ip6_tnl_locate(net, &p, 0); |
1700 | 1702 | if (!IS_ERR(t)) { | |
1701 | if (t) { | ||
1702 | if (t->dev != dev) | 1703 | if (t->dev != dev) |
1703 | return -EEXIST; | 1704 | return -EEXIST; |
1704 | } else | 1705 | } else |
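
With ip6_tnl_locate()/ip6_tnl_create() returning ERR_PTR(-EEXIST), ERR_PTR(-ENODEV) or ERR_PTR(-ENOMEM) instead of NULL, the ioctl path can hand the precise errno back to userspace via PTR_ERR() rather than guessing between -ENOBUFS and -ENOENT. A standalone, simplified rendering of the encode/decode pattern (the real helpers live in include/linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long err)      { return (void *)err; }
static inline long  ptr_err(const void *p) { return (long)p; }
static inline int   is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *locate(int create)
{
        return create ? err_ptr(-17) /* EEXIST */ : err_ptr(-19) /* ENODEV */;
}

int main(void)
{
        void *t = locate(1);

        if (is_err(t))
                printf("locate failed: %ld\n", ptr_err(t));
        return 0;
}
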
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index e080fbbbc0e5..bb00c6f2a885 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -298,9 +298,9 @@ static void trace_packet(const struct sk_buff *skb, | |||
298 | &chainname, &comment, &rulenum) != 0) | 298 | &chainname, &comment, &rulenum) != 0) |
299 | break; | 299 | break; |
300 | 300 | ||
301 | nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo, | 301 | nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo, |
302 | "TRACE: %s:%s:%s:%u ", | 302 | "TRACE: %s:%s:%s:%u ", |
303 | tablename, chainname, comment, rulenum); | 303 | tablename, chainname, comment, rulenum); |
304 | } | 304 | } |
305 | #endif | 305 | #endif |
306 | 306 | ||
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index bd46f736f61d..a2dfff6ff227 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c | |||
@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
102 | 102 | ||
103 | if (msg->msg_name) { | 103 | if (msg->msg_name) { |
104 | DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); | 104 | DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); |
105 | if (msg->msg_namelen < sizeof(struct sockaddr_in6) || | 105 | if (msg->msg_namelen < sizeof(*u)) |
106 | u->sin6_family != AF_INET6) { | ||
107 | return -EINVAL; | 106 | return -EINVAL; |
107 | if (u->sin6_family != AF_INET6) { | ||
108 | return -EAFNOSUPPORT; | ||
108 | } | 109 | } |
109 | if (sk->sk_bound_dev_if && | 110 | if (sk->sk_bound_dev_if && |
110 | sk->sk_bound_dev_if != u->sin6_scope_id) { | 111 | sk->sk_bound_dev_if != u->sin6_scope_id) { |
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index ab889bb16b3c..be2c0ba82c85 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c | |||
@@ -112,11 +112,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, | |||
112 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); | 112 | fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); |
113 | fptr->nexthdr = nexthdr; | 113 | fptr->nexthdr = nexthdr; |
114 | fptr->reserved = 0; | 114 | fptr->reserved = 0; |
115 | if (skb_shinfo(skb)->ip6_frag_id) | 115 | if (!skb_shinfo(skb)->ip6_frag_id) |
116 | fptr->identification = skb_shinfo(skb)->ip6_frag_id; | 116 | ipv6_proxy_select_ident(skb); |
117 | else | 117 | fptr->identification = skb_shinfo(skb)->ip6_frag_id; |
118 | ipv6_select_ident(fptr, | ||
119 | (struct rt6_info *)skb_dst(skb)); | ||
120 | 118 | ||
121 | /* Fragment the skb. ipv6 header and the remaining fields of the | 119 | /* Fragment the skb. ipv6 header and the remaining fields of the |
122 | * fragment header are updated in ipv6_gso_segment() | 120 | * fragment header are updated in ipv6_gso_segment() |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index ca3f29b98ae5..010f8bd2d577 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c | |||
@@ -114,6 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) | |||
114 | return err; | 114 | return err; |
115 | 115 | ||
116 | skb->ignore_df = 1; | 116 | skb->ignore_df = 1; |
117 | skb->protocol = htons(ETH_P_IPV6); | ||
117 | 118 | ||
118 | return x->outer_mode->output2(x, skb); | 119 | return x->outer_mode->output2(x, skb); |
119 | } | 120 | } |
@@ -122,7 +123,6 @@ EXPORT_SYMBOL(xfrm6_prepare_output); | |||
122 | int xfrm6_output_finish(struct sk_buff *skb) | 123 | int xfrm6_output_finish(struct sk_buff *skb) |
123 | { | 124 | { |
124 | memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); | 125 | memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); |
125 | skb->protocol = htons(ETH_P_IPV6); | ||
126 | 126 | ||
127 | #ifdef CONFIG_NETFILTER | 127 | #ifdef CONFIG_NETFILTER |
128 | IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; | 128 | IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 48bf5a06847b..8d2d01b4800a 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -200,6 +200,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
200 | 200 | ||
201 | #if IS_ENABLED(CONFIG_IPV6_MIP6) | 201 | #if IS_ENABLED(CONFIG_IPV6_MIP6) |
202 | case IPPROTO_MH: | 202 | case IPPROTO_MH: |
203 | offset += ipv6_optlen(exthdr); | ||
203 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { | 204 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { |
204 | struct ip6_mh *mh; | 205 | struct ip6_mh *mh; |
205 | 206 | ||
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 3afe36824703..8d53d65bd2ab 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -58,13 +58,24 @@ struct ieee80211_local; | |||
58 | #define IEEE80211_UNSET_POWER_LEVEL INT_MIN | 58 | #define IEEE80211_UNSET_POWER_LEVEL INT_MIN |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * Some APs experience problems when working with U-APSD. Decrease the | 61 | * Some APs experience problems when working with U-APSD. Decreasing the |
62 | * probability of that happening by using legacy mode for all ACs but VO. | 62 | * probability of that happening by using legacy mode for all ACs but VO isn't |
63 | * The AP that caused us trouble was a Cisco 4410N. It ignores our | 63 | * enough. |
64 | * setting, and always treats non-VO ACs as legacy. | 64 | * |
65 | * Cisco 4410N originally forced us to enable VO by default only because it | ||
66 | * treated non-VO ACs as legacy. | ||
67 | * | ||
68 | * However some APs (notably Netgear R7000) silently reclassify packets to | ||
69 | * different ACs. Since u-APSD ACs require trigger frames for frame retrieval | ||
69 | * different ACs. Since u-APSD ACs require trigger frames for frame retrieval, | ||
70 | * clients would never see some frames (e.g. ARP responses) or would fetch them | ||
71 | * accidentally after a long time. | ||
72 | * | ||
73 | * It makes little sense to enable u-APSD queues by default because it needs | ||
74 | * userspace applications to be aware of it to actually take advantage of the | ||
75 | * possible additional powersavings. Implicitly depending on driver autotrigger | ||
76 | * frame support doesn't make much sense. | ||
65 | */ | 77 | */ |
66 | #define IEEE80211_DEFAULT_UAPSD_QUEUES \ | 78 | #define IEEE80211_DEFAULT_UAPSD_QUEUES 0 |
67 | IEEE80211_WMM_IE_STA_QOSINFO_AC_VO | ||
68 | 79 | ||
69 | #define IEEE80211_DEFAULT_MAX_SP_LEN \ | 80 | #define IEEE80211_DEFAULT_MAX_SP_LEN \ |
70 | IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL | 81 | IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL |
@@ -453,6 +464,7 @@ struct ieee80211_if_managed { | |||
453 | unsigned int flags; | 464 | unsigned int flags; |
454 | 465 | ||
455 | bool csa_waiting_bcn; | 466 | bool csa_waiting_bcn; |
467 | bool csa_ignored_same_chan; | ||
456 | 468 | ||
457 | bool beacon_crc_valid; | 469 | bool beacon_crc_valid; |
458 | u32 beacon_crc; | 470 | u32 beacon_crc; |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 10ac6324c1d0..142f66aece18 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1150,6 +1150,17 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1150 | return; | 1150 | return; |
1151 | } | 1151 | } |
1152 | 1152 | ||
1153 | if (cfg80211_chandef_identical(&csa_ie.chandef, | ||
1154 | &sdata->vif.bss_conf.chandef)) { | ||
1155 | if (ifmgd->csa_ignored_same_chan) | ||
1156 | return; | ||
1157 | sdata_info(sdata, | ||
1158 | "AP %pM tries to chanswitch to same channel, ignore\n", | ||
1159 | ifmgd->associated->bssid); | ||
1160 | ifmgd->csa_ignored_same_chan = true; | ||
1161 | return; | ||
1162 | } | ||
1163 | |||
1153 | mutex_lock(&local->mtx); | 1164 | mutex_lock(&local->mtx); |
1154 | mutex_lock(&local->chanctx_mtx); | 1165 | mutex_lock(&local->chanctx_mtx); |
1155 | conf = rcu_dereference_protected(sdata->vif.chanctx_conf, | 1166 | conf = rcu_dereference_protected(sdata->vif.chanctx_conf, |
@@ -1210,6 +1221,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1210 | sdata->vif.csa_active = true; | 1221 | sdata->vif.csa_active = true; |
1211 | sdata->csa_chandef = csa_ie.chandef; | 1222 | sdata->csa_chandef = csa_ie.chandef; |
1212 | sdata->csa_block_tx = csa_ie.mode; | 1223 | sdata->csa_block_tx = csa_ie.mode; |
1224 | ifmgd->csa_ignored_same_chan = false; | ||
1213 | 1225 | ||
1214 | if (sdata->csa_block_tx) | 1226 | if (sdata->csa_block_tx) |
1215 | ieee80211_stop_vif_queues(local, sdata, | 1227 | ieee80211_stop_vif_queues(local, sdata, |
@@ -2090,6 +2102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
2090 | 2102 | ||
2091 | sdata->vif.csa_active = false; | 2103 | sdata->vif.csa_active = false; |
2092 | ifmgd->csa_waiting_bcn = false; | 2104 | ifmgd->csa_waiting_bcn = false; |
2105 | ifmgd->csa_ignored_same_chan = false; | ||
2093 | if (sdata->csa_block_tx) { | 2106 | if (sdata->csa_block_tx) { |
2094 | ieee80211_wake_vif_queues(local, sdata, | 2107 | ieee80211_wake_vif_queues(local, sdata, |
2095 | IEEE80211_QUEUE_STOP_REASON_CSA); | 2108 | IEEE80211_QUEUE_STOP_REASON_CSA); |
@@ -3204,7 +3217,8 @@ static const u64 care_about_ies = | |||
3204 | (1ULL << WLAN_EID_CHANNEL_SWITCH) | | 3217 | (1ULL << WLAN_EID_CHANNEL_SWITCH) | |
3205 | (1ULL << WLAN_EID_PWR_CONSTRAINT) | | 3218 | (1ULL << WLAN_EID_PWR_CONSTRAINT) | |
3206 | (1ULL << WLAN_EID_HT_CAPABILITY) | | 3219 | (1ULL << WLAN_EID_HT_CAPABILITY) | |
3207 | (1ULL << WLAN_EID_HT_OPERATION); | 3220 | (1ULL << WLAN_EID_HT_OPERATION) | |
3221 | (1ULL << WLAN_EID_EXT_CHANSWITCH_ANN); | ||
3208 | 3222 | ||
3209 | static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | 3223 | static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, |
3210 | struct ieee80211_mgmt *mgmt, size_t len, | 3224 | struct ieee80211_mgmt *mgmt, size_t len, |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 1101563357ea..944bdc04e913 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2214,6 +2214,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
2214 | hdr = (struct ieee80211_hdr *) skb->data; | 2214 | hdr = (struct ieee80211_hdr *) skb->data; |
2215 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | 2215 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); |
2216 | 2216 | ||
2217 | if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) | ||
2218 | return RX_DROP_MONITOR; | ||
2219 | |||
2217 | /* frame is in RMC, don't forward */ | 2220 | /* frame is in RMC, don't forward */ |
2218 | if (ieee80211_is_data(hdr->frame_control) && | 2221 | if (ieee80211_is_data(hdr->frame_control) && |
2219 | is_multicast_ether_addr(hdr->addr1) && | 2222 | is_multicast_ether_addr(hdr->addr1) && |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 8428f4a95479..747bdcf72e92 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -3178,7 +3178,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, | |||
3178 | wdev_iter = &sdata_iter->wdev; | 3178 | wdev_iter = &sdata_iter->wdev; |
3179 | 3179 | ||
3180 | if (sdata_iter == sdata || | 3180 | if (sdata_iter == sdata || |
3181 | rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL || | 3181 | !ieee80211_sdata_running(sdata_iter) || |
3182 | local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) | 3182 | local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) |
3183 | continue; | 3183 | continue; |
3184 | 3184 | ||
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index c47ffd7a0a70..d93ceeb3ef04 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -896,6 +896,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, | |||
896 | IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); | 896 | IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); |
897 | return; | 897 | return; |
898 | } | 898 | } |
899 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) | ||
900 | kfree(param->pe_data); | ||
899 | } | 901 | } |
900 | 902 | ||
901 | if (opt) | 903 | if (opt) |
@@ -1169,6 +1171,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) | |||
1169 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) | 1171 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) |
1170 | ); | 1172 | ); |
1171 | #endif | 1173 | #endif |
1174 | ip_vs_pe_put(param.pe); | ||
1172 | return 0; | 1175 | return 0; |
1173 | /* Error exit */ | 1176 | /* Error exit */ |
1174 | out: | 1177 | out: |
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 0d8448f19dfe..675d12c69e32 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -212,6 +212,30 @@ void nf_log_packet(struct net *net, | |||
212 | } | 212 | } |
213 | EXPORT_SYMBOL(nf_log_packet); | 213 | EXPORT_SYMBOL(nf_log_packet); |
214 | 214 | ||
215 | void nf_log_trace(struct net *net, | ||
216 | u_int8_t pf, | ||
217 | unsigned int hooknum, | ||
218 | const struct sk_buff *skb, | ||
219 | const struct net_device *in, | ||
220 | const struct net_device *out, | ||
221 | const struct nf_loginfo *loginfo, const char *fmt, ...) | ||
222 | { | ||
223 | va_list args; | ||
224 | char prefix[NF_LOG_PREFIXLEN]; | ||
225 | const struct nf_logger *logger; | ||
226 | |||
227 | rcu_read_lock(); | ||
228 | logger = rcu_dereference(net->nf.nf_loggers[pf]); | ||
229 | if (logger) { | ||
230 | va_start(args, fmt); | ||
231 | vsnprintf(prefix, sizeof(prefix), fmt, args); | ||
232 | va_end(args); | ||
233 | logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); | ||
234 | } | ||
235 | rcu_read_unlock(); | ||
236 | } | ||
237 | EXPORT_SYMBOL(nf_log_trace); | ||
238 | |||
215 | #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) | 239 | #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) |
216 | 240 | ||
217 | struct nf_log_buf { | 241 | struct nf_log_buf { |
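
nf_log_trace() duplicates the prefix handling of nf_log_packet() so rule tracing gets its own entry point. The core pattern is: format the variadic arguments into a bounded prefix buffer once, then hand the finished string to whichever logger is registered for the protocol family. A standalone model with the RCU locking and the per-netns logger table omitted:

#include <stdarg.h>
#include <stdio.h>

#define PREFIXLEN 128

typedef void (*log_fn)(const char *prefix);

static void log_trace(log_fn logger, const char *fmt, ...)
{
        char prefix[PREFIXLEN];
        va_list args;

        if (!logger)            /* no logger registered: silently drop */
                return;

        va_start(args, fmt);
        vsnprintf(prefix, sizeof(prefix), fmt, args);
        va_end(args);

        logger(prefix);
}
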
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 199fd0f27b0e..ac1a9528dbf2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -227,7 +227,7 @@ nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) | |||
227 | 227 | ||
228 | static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) | 228 | static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) |
229 | { | 229 | { |
230 | rule->genmask = 0; | 230 | rule->genmask &= ~(1 << gencursor_next(net)); |
231 | } | 231 | } |
232 | 232 | ||
233 | static int | 233 | static int |
@@ -1225,7 +1225,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
1225 | 1225 | ||
1226 | if (nla[NFTA_CHAIN_POLICY]) { | 1226 | if (nla[NFTA_CHAIN_POLICY]) { |
1227 | if ((chain != NULL && | 1227 | if ((chain != NULL && |
1228 | !(chain->flags & NFT_BASE_CHAIN)) || | 1228 | !(chain->flags & NFT_BASE_CHAIN))) |
1229 | return -EOPNOTSUPP; | ||
1230 | |||
1231 | if (chain == NULL && | ||
1229 | nla[NFTA_CHAIN_HOOK] == NULL) | 1232 | nla[NFTA_CHAIN_HOOK] == NULL) |
1230 | return -EOPNOTSUPP; | 1233 | return -EOPNOTSUPP; |
1231 | 1234 | ||
@@ -1711,9 +1714,12 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, | |||
1711 | } | 1714 | } |
1712 | nla_nest_end(skb, list); | 1715 | nla_nest_end(skb, list); |
1713 | 1716 | ||
1714 | if (rule->ulen && | 1717 | if (rule->udata) { |
1715 | nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule))) | 1718 | struct nft_userdata *udata = nft_userdata(rule); |
1716 | goto nla_put_failure; | 1719 | if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1, |
1720 | udata->data) < 0) | ||
1721 | goto nla_put_failure; | ||
1722 | } | ||
1717 | 1723 | ||
1718 | nlmsg_end(skb, nlh); | 1724 | nlmsg_end(skb, nlh); |
1719 | return 0; | 1725 | return 0; |
@@ -1896,11 +1902,12 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1896 | struct nft_table *table; | 1902 | struct nft_table *table; |
1897 | struct nft_chain *chain; | 1903 | struct nft_chain *chain; |
1898 | struct nft_rule *rule, *old_rule = NULL; | 1904 | struct nft_rule *rule, *old_rule = NULL; |
1905 | struct nft_userdata *udata; | ||
1899 | struct nft_trans *trans = NULL; | 1906 | struct nft_trans *trans = NULL; |
1900 | struct nft_expr *expr; | 1907 | struct nft_expr *expr; |
1901 | struct nft_ctx ctx; | 1908 | struct nft_ctx ctx; |
1902 | struct nlattr *tmp; | 1909 | struct nlattr *tmp; |
1903 | unsigned int size, i, n, ulen = 0; | 1910 | unsigned int size, i, n, ulen = 0, usize = 0; |
1904 | int err, rem; | 1911 | int err, rem; |
1905 | bool create; | 1912 | bool create; |
1906 | u64 handle, pos_handle; | 1913 | u64 handle, pos_handle; |
@@ -1968,12 +1975,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1968 | n++; | 1975 | n++; |
1969 | } | 1976 | } |
1970 | } | 1977 | } |
1978 | /* Check for overflow of dlen field */ | ||
1979 | err = -EFBIG; | ||
1980 | if (size >= 1 << 12) | ||
1981 | goto err1; | ||
1971 | 1982 | ||
1972 | if (nla[NFTA_RULE_USERDATA]) | 1983 | if (nla[NFTA_RULE_USERDATA]) { |
1973 | ulen = nla_len(nla[NFTA_RULE_USERDATA]); | 1984 | ulen = nla_len(nla[NFTA_RULE_USERDATA]); |
1985 | if (ulen > 0) | ||
1986 | usize = sizeof(struct nft_userdata) + ulen; | ||
1987 | } | ||
1974 | 1988 | ||
1975 | err = -ENOMEM; | 1989 | err = -ENOMEM; |
1976 | rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL); | 1990 | rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL); |
1977 | if (rule == NULL) | 1991 | if (rule == NULL) |
1978 | goto err1; | 1992 | goto err1; |
1979 | 1993 | ||
@@ -1981,10 +1995,13 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1981 | 1995 | ||
1982 | rule->handle = handle; | 1996 | rule->handle = handle; |
1983 | rule->dlen = size; | 1997 | rule->dlen = size; |
1984 | rule->ulen = ulen; | 1998 | rule->udata = ulen ? 1 : 0; |
1985 | 1999 | ||
1986 | if (ulen) | 2000 | if (ulen) { |
1987 | nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen); | 2001 | udata = nft_userdata(rule); |
2002 | udata->len = ulen - 1; | ||
2003 | nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen); | ||
2004 | } | ||
1988 | 2005 | ||
1989 | expr = nft_expr_first(rule); | 2006 | expr = nft_expr_first(rule); |
1990 | for (i = 0; i < n; i++) { | 2007 | for (i = 0; i < n; i++) { |
@@ -2031,12 +2048,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
2031 | 2048 | ||
2032 | err3: | 2049 | err3: |
2033 | list_del_rcu(&rule->list); | 2050 | list_del_rcu(&rule->list); |
2034 | if (trans) { | ||
2035 | list_del_rcu(&nft_trans_rule(trans)->list); | ||
2036 | nft_rule_clear(net, nft_trans_rule(trans)); | ||
2037 | nft_trans_destroy(trans); | ||
2038 | chain->use++; | ||
2039 | } | ||
2040 | err2: | 2051 | err2: |
2041 | nf_tables_rule_destroy(&ctx, rule); | 2052 | nf_tables_rule_destroy(&ctx, rule); |
2042 | err1: | 2053 | err1: |
@@ -3612,12 +3623,11 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
3612 | &te->elem, | 3623 | &te->elem, |
3613 | NFT_MSG_DELSETELEM, 0); | 3624 | NFT_MSG_DELSETELEM, 0); |
3614 | te->set->ops->get(te->set, &te->elem); | 3625 | te->set->ops->get(te->set, &te->elem); |
3615 | te->set->ops->remove(te->set, &te->elem); | ||
3616 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); | 3626 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); |
3617 | if (te->elem.flags & NFT_SET_MAP) { | 3627 | if (te->set->flags & NFT_SET_MAP && |
3618 | nft_data_uninit(&te->elem.data, | 3628 | !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END)) |
3619 | te->set->dtype); | 3629 | nft_data_uninit(&te->elem.data, te->set->dtype); |
3620 | } | 3630 | te->set->ops->remove(te->set, &te->elem); |
3621 | nft_trans_destroy(trans); | 3631 | nft_trans_destroy(trans); |
3622 | break; | 3632 | break; |
3623 | } | 3633 | } |
@@ -3658,7 +3668,7 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
3658 | { | 3668 | { |
3659 | struct net *net = sock_net(skb->sk); | 3669 | struct net *net = sock_net(skb->sk); |
3660 | struct nft_trans *trans, *next; | 3670 | struct nft_trans *trans, *next; |
3661 | struct nft_set *set; | 3671 | struct nft_trans_elem *te; |
3662 | 3672 | ||
3663 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { | 3673 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { |
3664 | switch (trans->msg_type) { | 3674 | switch (trans->msg_type) { |
@@ -3719,9 +3729,13 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
3719 | break; | 3729 | break; |
3720 | case NFT_MSG_NEWSETELEM: | 3730 | case NFT_MSG_NEWSETELEM: |
3721 | nft_trans_elem_set(trans)->nelems--; | 3731 | nft_trans_elem_set(trans)->nelems--; |
3722 | set = nft_trans_elem_set(trans); | 3732 | te = (struct nft_trans_elem *)trans->data; |
3723 | set->ops->get(set, &nft_trans_elem(trans)); | 3733 | te->set->ops->get(te->set, &te->elem); |
3724 | set->ops->remove(set, &nft_trans_elem(trans)); | 3734 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); |
3735 | if (te->set->flags & NFT_SET_MAP && | ||
3736 | !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END)) | ||
3737 | nft_data_uninit(&te->elem.data, te->set->dtype); | ||
3738 | te->set->ops->remove(te->set, &te->elem); | ||
3725 | nft_trans_destroy(trans); | 3739 | nft_trans_destroy(trans); |
3726 | break; | 3740 | break; |
3727 | case NFT_MSG_DELSETELEM: | 3741 | case NFT_MSG_DELSETELEM: |
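
The rule userdata rework above stops storing a raw length in the rule itself: rule->udata becomes a single presence bit, and a small nft_userdata header (whose len field holds ulen - 1, hence the extra byte in the netlink dump) is appended after the expression area. The definitions below only follow the shape implied by the hunks; the real structs live in include/net/netfilter/nf_tables.h and are an assumption here:

#include <stdint.h>

struct nft_userdata {
        uint8_t       len;      /* stored as ulen - 1, so 1..256 bytes fit */
        unsigned char data[];   /* user-supplied bytes follow */
};

struct nft_rule_model {
        uint64_t      handle;
        unsigned int  dlen:12,  /* expression data length; hence the 1 << 12 check */
                      udata:1;  /* userdata header present after data[dlen] */
        unsigned char data[];
};

/* Accessor mirroring nft_userdata(rule): the header sits directly
 * after the expression area. */
static struct nft_userdata *rule_userdata(struct nft_rule_model *rule)
{
        return (struct nft_userdata *)&rule->data[rule->dlen];
}
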
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 3b90eb2b2c55..2d298dccb6dd 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c | |||
@@ -94,10 +94,10 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt, | |||
94 | { | 94 | { |
95 | struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); | 95 | struct net *net = dev_net(pkt->in ? pkt->in : pkt->out); |
96 | 96 | ||
97 | nf_log_packet(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, | 97 | nf_log_trace(net, pkt->xt.family, pkt->ops->hooknum, pkt->skb, pkt->in, |
98 | pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", | 98 | pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", |
99 | chain->table->name, chain->name, comments[type], | 99 | chain->table->name, chain->name, comments[type], |
100 | rulenum); | 100 | rulenum); |
101 | } | 101 | } |
102 | 102 | ||
103 | unsigned int | 103 | unsigned int |
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index a5599fc51a6f..54330fb5efaf 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c | |||
@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, | |||
77 | if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) | 77 | if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) |
78 | return -EINVAL; | 78 | return -EINVAL; |
79 | 79 | ||
80 | /* Not all fields are initialized so first zero the tuple */ | ||
81 | memset(tuple, 0, sizeof(struct nf_conntrack_tuple)); | ||
82 | |||
80 | tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); | 83 | tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); |
81 | tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); | 84 | tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); |
82 | 85 | ||
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 1279cd85663e..65f3e2b6be44 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -123,7 +123,7 @@ static void | |||
123 | nft_target_set_tgchk_param(struct xt_tgchk_param *par, | 123 | nft_target_set_tgchk_param(struct xt_tgchk_param *par, |
124 | const struct nft_ctx *ctx, | 124 | const struct nft_ctx *ctx, |
125 | struct xt_target *target, void *info, | 125 | struct xt_target *target, void *info, |
126 | union nft_entry *entry, u8 proto, bool inv) | 126 | union nft_entry *entry, u16 proto, bool inv) |
127 | { | 127 | { |
128 | par->net = ctx->net; | 128 | par->net = ctx->net; |
129 | par->table = ctx->table->name; | 129 | par->table = ctx->table->name; |
@@ -133,11 +133,14 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
133 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; | 133 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; |
134 | break; | 134 | break; |
135 | case AF_INET6: | 135 | case AF_INET6: |
136 | if (proto) | ||
137 | entry->e6.ipv6.flags |= IP6T_F_PROTO; | ||
138 | |||
136 | entry->e6.ipv6.proto = proto; | 139 | entry->e6.ipv6.proto = proto; |
137 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 140 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
138 | break; | 141 | break; |
139 | case NFPROTO_BRIDGE: | 142 | case NFPROTO_BRIDGE: |
140 | entry->ebt.ethproto = proto; | 143 | entry->ebt.ethproto = (__force __be16)proto; |
141 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; | 144 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; |
142 | break; | 145 | break; |
143 | } | 146 | } |
@@ -171,7 +174,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] | |||
171 | [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, | 174 | [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, |
172 | }; | 175 | }; |
173 | 176 | ||
174 | static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv) | 177 | static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv) |
175 | { | 178 | { |
176 | struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; | 179 | struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; |
177 | u32 flags; | 180 | u32 flags; |
@@ -203,7 +206,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
203 | struct xt_target *target = expr->ops->data; | 206 | struct xt_target *target = expr->ops->data; |
204 | struct xt_tgchk_param par; | 207 | struct xt_tgchk_param par; |
205 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); | 208 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); |
206 | u8 proto = 0; | 209 | u16 proto = 0; |
207 | bool inv = false; | 210 | bool inv = false; |
208 | union nft_entry e = {}; | 211 | union nft_entry e = {}; |
209 | int ret; | 212 | int ret; |
@@ -334,7 +337,7 @@ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = { | |||
334 | static void | 337 | static void |
335 | nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | 338 | nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, |
336 | struct xt_match *match, void *info, | 339 | struct xt_match *match, void *info, |
337 | union nft_entry *entry, u8 proto, bool inv) | 340 | union nft_entry *entry, u16 proto, bool inv) |
338 | { | 341 | { |
339 | par->net = ctx->net; | 342 | par->net = ctx->net; |
340 | par->table = ctx->table->name; | 343 | par->table = ctx->table->name; |
@@ -344,11 +347,14 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
344 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; | 347 | entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; |
345 | break; | 348 | break; |
346 | case AF_INET6: | 349 | case AF_INET6: |
350 | if (proto) | ||
351 | entry->e6.ipv6.flags |= IP6T_F_PROTO; | ||
352 | |||
347 | entry->e6.ipv6.proto = proto; | 353 | entry->e6.ipv6.proto = proto; |
348 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 354 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
349 | break; | 355 | break; |
350 | case NFPROTO_BRIDGE: | 356 | case NFPROTO_BRIDGE: |
351 | entry->ebt.ethproto = proto; | 357 | entry->ebt.ethproto = (__force __be16)proto; |
352 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; | 358 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; |
353 | break; | 359 | break; |
354 | } | 360 | } |
@@ -385,7 +391,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
385 | struct xt_match *match = expr->ops->data; | 391 | struct xt_match *match = expr->ops->data; |
386 | struct xt_mtchk_param par; | 392 | struct xt_mtchk_param par; |
387 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); | 393 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); |
388 | u8 proto = 0; | 394 | u16 proto = 0; |
389 | bool inv = false; | 395 | bool inv = false; |
390 | union nft_entry e = {}; | 396 | union nft_entry e = {}; |
391 | int ret; | 397 | int ret; |
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index c82df0a48fcd..37c15e674884 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c | |||
@@ -153,6 +153,8 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, | |||
153 | iter->err = err; | 153 | iter->err = err; |
154 | goto out; | 154 | goto out; |
155 | } | 155 | } |
156 | |||
157 | continue; | ||
156 | } | 158 | } |
157 | 159 | ||
158 | if (iter->count < iter->skip) | 160 | if (iter->count < iter->skip) |
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index ef8a926752a9..50e1e5aaf4ce 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
@@ -513,8 +513,8 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par) | |||
513 | { | 513 | { |
514 | const struct ip6t_ip6 *i = par->entryinfo; | 514 | const struct ip6t_ip6 *i = par->entryinfo; |
515 | 515 | ||
516 | if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) | 516 | if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && |
517 | && !(i->flags & IP6T_INV_PROTO)) | 517 | !(i->invflags & IP6T_INV_PROTO)) |
518 | return 0; | 518 | return 0; |
519 | 519 | ||
520 | pr_info("Can be used only in combination with " | 520 | pr_info("Can be used only in combination with " |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5bf1e968a728..f8db7064d81c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -3123,11 +3123,18 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
3123 | return 0; | 3123 | return 0; |
3124 | } | 3124 | } |
3125 | 3125 | ||
3126 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) | 3126 | static void packet_dev_mclist_delete(struct net_device *dev, |
3127 | struct packet_mclist **mlp) | ||
3127 | { | 3128 | { |
3128 | for ( ; i; i = i->next) { | 3129 | struct packet_mclist *ml; |
3129 | if (i->ifindex == dev->ifindex) | 3130 | |
3130 | packet_dev_mc(dev, i, what); | 3131 | while ((ml = *mlp) != NULL) { |
3132 | if (ml->ifindex == dev->ifindex) { | ||
3133 | packet_dev_mc(dev, ml, -1); | ||
3134 | *mlp = ml->next; | ||
3135 | kfree(ml); | ||
3136 | } else | ||
3137 | mlp = &ml->next; | ||
3131 | } | 3138 | } |
3132 | } | 3139 | } |
3133 | 3140 | ||
@@ -3204,12 +3211,11 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) | |||
3204 | packet_dev_mc(dev, ml, -1); | 3211 | packet_dev_mc(dev, ml, -1); |
3205 | kfree(ml); | 3212 | kfree(ml); |
3206 | } | 3213 | } |
3207 | rtnl_unlock(); | 3214 | break; |
3208 | return 0; | ||
3209 | } | 3215 | } |
3210 | } | 3216 | } |
3211 | rtnl_unlock(); | 3217 | rtnl_unlock(); |
3212 | return -EADDRNOTAVAIL; | 3218 | return 0; |
3213 | } | 3219 | } |
3214 | 3220 | ||
3215 | static void packet_flush_mclist(struct sock *sk) | 3221 | static void packet_flush_mclist(struct sock *sk) |
@@ -3559,7 +3565,7 @@ static int packet_notifier(struct notifier_block *this, | |||
3559 | switch (msg) { | 3565 | switch (msg) { |
3560 | case NETDEV_UNREGISTER: | 3566 | case NETDEV_UNREGISTER: |
3561 | if (po->mclist) | 3567 | if (po->mclist) |
3562 | packet_dev_mclist(dev, po->mclist, -1); | 3568 | packet_dev_mclist_delete(dev, &po->mclist); |
3563 | /* fallthrough */ | 3569 | /* fallthrough */ |
3564 | 3570 | ||
3565 | case NETDEV_DOWN: | 3571 | case NETDEV_DOWN: |
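
packet_dev_mclist_delete() replaces the read-only walk with the classic pointer-to-pointer traversal, so entries for the vanished device are unlinked and freed rather than merely passed to packet_dev_mc(). The same idiom on a generic singly linked list:

#include <stdlib.h>

struct node {
        int          ifindex;
        struct node *next;
};

/* Unlink and free every node matching ifindex; removal is a single
 * assignment through the pointer-to-pointer, no prev bookkeeping. */
static void delete_matching(struct node **head, int ifindex)
{
        struct node *n;

        while ((n = *head) != NULL) {
                if (n->ifindex == ifindex) {
                        *head = n->next;
                        free(n);
                } else {
                        head = &n->next;
                }
        }
}
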
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index a817705ce2d0..dba8d0864f18 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c | |||
@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, | |||
88 | int *unpinned); | 88 | int *unpinned); |
89 | static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); | 89 | static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); |
90 | 90 | ||
91 | static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) | 91 | static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst, |
92 | struct rds_iw_device **rds_iwdev, | ||
93 | struct rdma_cm_id **cm_id) | ||
92 | { | 94 | { |
93 | struct rds_iw_device *iwdev; | 95 | struct rds_iw_device *iwdev; |
94 | struct rds_iw_cm_id *i_cm_id; | 96 | struct rds_iw_cm_id *i_cm_id; |
@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd | |||
112 | src_addr->sin_port, | 114 | src_addr->sin_port, |
113 | dst_addr->sin_addr.s_addr, | 115 | dst_addr->sin_addr.s_addr, |
114 | dst_addr->sin_port, | 116 | dst_addr->sin_port, |
115 | rs->rs_bound_addr, | 117 | src->sin_addr.s_addr, |
116 | rs->rs_bound_port, | 118 | src->sin_port, |
117 | rs->rs_conn_addr, | 119 | dst->sin_addr.s_addr, |
118 | rs->rs_conn_port); | 120 | dst->sin_port); |
119 | #ifdef WORKING_TUPLE_DETECTION | 121 | #ifdef WORKING_TUPLE_DETECTION |
120 | if (src_addr->sin_addr.s_addr == rs->rs_bound_addr && | 122 | if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr && |
121 | src_addr->sin_port == rs->rs_bound_port && | 123 | src_addr->sin_port == src->sin_port && |
122 | dst_addr->sin_addr.s_addr == rs->rs_conn_addr && | 124 | dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr && |
123 | dst_addr->sin_port == rs->rs_conn_port) { | 125 | dst_addr->sin_port == dst->sin_port) { |
124 | #else | 126 | #else |
125 | /* FIXME - needs to compare the local and remote | 127 | /* FIXME - needs to compare the local and remote |
126 | * ipaddr/port tuple, but the ipaddr is the only | 128 | * ipaddr/port tuple, but the ipaddr is the only |
@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd | |||
128 | * zero'ed. It doesn't appear to be properly populated | 130 | * zero'ed. It doesn't appear to be properly populated |
129 | * during connection setup... | 131 | * during connection setup... |
130 | */ | 132 | */ |
131 | if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) { | 133 | if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) { |
132 | #endif | 134 | #endif |
133 | spin_unlock_irq(&iwdev->spinlock); | 135 | spin_unlock_irq(&iwdev->spinlock); |
134 | *rds_iwdev = iwdev; | 136 | *rds_iwdev = iwdev; |
@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i | |||
180 | { | 182 | { |
181 | struct sockaddr_in *src_addr, *dst_addr; | 183 | struct sockaddr_in *src_addr, *dst_addr; |
182 | struct rds_iw_device *rds_iwdev_old; | 184 | struct rds_iw_device *rds_iwdev_old; |
183 | struct rds_sock rs; | ||
184 | struct rdma_cm_id *pcm_id; | 185 | struct rdma_cm_id *pcm_id; |
185 | int rc; | 186 | int rc; |
186 | 187 | ||
187 | src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; | 188 | src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; |
188 | dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; | 189 | dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; |
189 | 190 | ||
190 | rs.rs_bound_addr = src_addr->sin_addr.s_addr; | 191 | rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id); |
191 | rs.rs_bound_port = src_addr->sin_port; | ||
192 | rs.rs_conn_addr = dst_addr->sin_addr.s_addr; | ||
193 | rs.rs_conn_port = dst_addr->sin_port; | ||
194 | |||
195 | rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id); | ||
196 | if (rc) | 192 | if (rc) |
197 | rds_iw_remove_cm_id(rds_iwdev, cm_id); | 193 | rds_iw_remove_cm_id(rds_iwdev, cm_id); |
198 | 194 | ||
@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents, | |||
598 | struct rds_iw_device *rds_iwdev; | 594 | struct rds_iw_device *rds_iwdev; |
599 | struct rds_iw_mr *ibmr = NULL; | 595 | struct rds_iw_mr *ibmr = NULL; |
600 | struct rdma_cm_id *cm_id; | 596 | struct rdma_cm_id *cm_id; |
597 | struct sockaddr_in src = { | ||
598 | .sin_addr.s_addr = rs->rs_bound_addr, | ||
599 | .sin_port = rs->rs_bound_port, | ||
600 | }; | ||
601 | struct sockaddr_in dst = { | ||
602 | .sin_addr.s_addr = rs->rs_conn_addr, | ||
603 | .sin_port = rs->rs_conn_port, | ||
604 | }; | ||
601 | int ret; | 605 | int ret; |
602 | 606 | ||
603 | ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id); | 607 | ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id); |
604 | if (ret || !cm_id) { | 608 | if (ret || !cm_id) { |
605 | ret = -ENODEV; | 609 | ret = -ENODEV; |
606 | goto out; | 610 | goto out; |
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c index 5394b6be46ec..0610efa83d72 100644 --- a/net/rxrpc/ar-error.c +++ b/net/rxrpc/ar-error.c | |||
@@ -42,7 +42,8 @@ void rxrpc_UDP_error_report(struct sock *sk) | |||
42 | _leave("UDP socket errqueue empty"); | 42 | _leave("UDP socket errqueue empty"); |
43 | return; | 43 | return; |
44 | } | 44 | } |
45 | if (!skb->len) { | 45 | serr = SKB_EXT_ERR(skb); |
46 | if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { | ||
46 | _leave("UDP empty message"); | 47 | _leave("UDP empty message"); |
47 | kfree_skb(skb); | 48 | kfree_skb(skb); |
48 | return; | 49 | return; |
@@ -50,7 +51,6 @@ void rxrpc_UDP_error_report(struct sock *sk) | |||
50 | 51 | ||
51 | rxrpc_new_skb(skb); | 52 | rxrpc_new_skb(skb); |
52 | 53 | ||
53 | serr = SKB_EXT_ERR(skb); | ||
54 | addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); | 54 | addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); |
55 | port = serr->port; | 55 | port = serr->port; |
56 | 56 | ||
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index 4575485ad1b4..19a560626dc4 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c | |||
@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
87 | if (!skb) { | 87 | if (!skb) { |
88 | /* nothing remains on the queue */ | 88 | /* nothing remains on the queue */ |
89 | if (copied && | 89 | if (copied && |
90 | (msg->msg_flags & MSG_PEEK || timeo == 0)) | 90 | (flags & MSG_PEEK || timeo == 0)) |
91 | goto out; | 91 | goto out; |
92 | 92 | ||
93 | /* wait for a message to turn up */ | 93 | /* wait for a message to turn up */ |
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 82c5d7fc1988..5f6288fa3f12 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c | |||
@@ -25,21 +25,41 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a, | |||
25 | struct tcf_result *res) | 25 | struct tcf_result *res) |
26 | { | 26 | { |
27 | struct tcf_bpf *b = a->priv; | 27 | struct tcf_bpf *b = a->priv; |
28 | int action; | 28 | int action, filter_res; |
29 | int filter_res; | ||
30 | 29 | ||
31 | spin_lock(&b->tcf_lock); | 30 | spin_lock(&b->tcf_lock); |
31 | |||
32 | b->tcf_tm.lastuse = jiffies; | 32 | b->tcf_tm.lastuse = jiffies; |
33 | bstats_update(&b->tcf_bstats, skb); | 33 | bstats_update(&b->tcf_bstats, skb); |
34 | action = b->tcf_action; | ||
35 | 34 | ||
36 | filter_res = BPF_PROG_RUN(b->filter, skb); | 35 | filter_res = BPF_PROG_RUN(b->filter, skb); |
37 | if (filter_res == 0) { | 36 | |
38 | /* Return code 0 from the BPF program | 37 | /* A BPF program may overwrite the default action opcode. |
39 | * is being interpreted as a drop here. | 38 | * Similarly as in cls_bpf, if filter_res == -1 we use the |
40 | */ | 39 | * default action specified from tc. |
41 | action = TC_ACT_SHOT; | 40 | * |
41 | * In case a different well-known TC_ACT opcode has been | ||
42 | * returned, it will overwrite the default one. | ||
43 | * | ||
44 | * For everything else that is unknown, TC_ACT_UNSPEC is | ||
45 | * returned. | ||
46 | */ | ||
47 | switch (filter_res) { | ||
48 | case TC_ACT_PIPE: | ||
49 | case TC_ACT_RECLASSIFY: | ||
50 | case TC_ACT_OK: | ||
51 | action = filter_res; | ||
52 | break; | ||
53 | case TC_ACT_SHOT: | ||
54 | action = filter_res; | ||
42 | b->tcf_qstats.drops++; | 55 | b->tcf_qstats.drops++; |
56 | break; | ||
57 | case TC_ACT_UNSPEC: | ||
58 | action = b->tcf_action; | ||
59 | break; | ||
60 | default: | ||
61 | action = TC_ACT_UNSPEC; | ||
62 | break; | ||
43 | } | 63 | } |
44 | 64 | ||
45 | spin_unlock(&b->tcf_lock); | 65 | spin_unlock(&b->tcf_lock); |
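
The rewritten tcf_bpf() treats the program's return value as a TC opcode: well-known TC_ACT_* codes are honored, TC_ACT_UNSPEC falls back to the action configured from tc, and anything unrecognized degrades to TC_ACT_UNSPEC. A sketch of the filter program's side of that contract (constant values mirror include/uapi/linux/pkt_cls.h; a real program would include that header, and the filter logic here is illustrative only):

enum {
        TC_ACT_UNSPEC     = -1, /* defer to the action configured in tc */
        TC_ACT_OK         = 0,
        TC_ACT_RECLASSIFY = 1,
        TC_ACT_SHOT       = 2,  /* drop; bumps tcf_qstats.drops above */
        TC_ACT_PIPE       = 3,
};

/* Illustrative filter: drop runt packets, defer on everything else. */
static int example_filter(unsigned int pkt_len)
{
        return pkt_len < 64 ? TC_ACT_SHOT : TC_ACT_UNSPEC;
}
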
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 09487afbfd51..95fdf4e40051 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -78,8 +78,11 @@ struct tc_u_hnode { | |||
78 | struct tc_u_common *tp_c; | 78 | struct tc_u_common *tp_c; |
79 | int refcnt; | 79 | int refcnt; |
80 | unsigned int divisor; | 80 | unsigned int divisor; |
81 | struct tc_u_knode __rcu *ht[1]; | ||
82 | struct rcu_head rcu; | 81 | struct rcu_head rcu; |
82 | /* The 'ht' field MUST be the last field in structure to allow for | ||
83 | * more entries allocated at end of structure. | ||
84 | */ | ||
85 | struct tc_u_knode __rcu *ht[1]; | ||
83 | }; | 86 | }; |
84 | 87 | ||
85 | struct tc_u_common { | 88 | struct tc_u_common { |
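
The comment added above is about allocation layout: the hash table is sized at allocation time by appending extra slots past the end of tc_u_hnode, so ht[] has to remain the last member or whatever follows it gets overwritten. A standalone sketch of the pattern (names are illustrative):

#include <stdlib.h>

struct bucket;                  /* opaque entry type for the sketch */

struct hnode {
        unsigned int   divisor;
        /* must stay last: extra slots are allocated past the struct */
        struct bucket *ht[1];
};

static struct hnode *hnode_alloc(unsigned int divisor)
{
        /* one slot is part of the struct already, add divisor more */
        return calloc(1, sizeof(struct hnode) +
                         divisor * sizeof(struct bucket *));
}
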
diff --git a/net/socket.c b/net/socket.c index bbedbfcb42c2..245330ca0015 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -1702,6 +1702,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, | |||
1702 | 1702 | ||
1703 | if (len > INT_MAX) | 1703 | if (len > INT_MAX) |
1704 | len = INT_MAX; | 1704 | len = INT_MAX; |
1705 | if (unlikely(!access_ok(VERIFY_READ, buff, len))) | ||
1706 | return -EFAULT; | ||
1705 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1707 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
1706 | if (!sock) | 1708 | if (!sock) |
1707 | goto out; | 1709 | goto out; |
@@ -1760,6 +1762,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, | |||
1760 | 1762 | ||
1761 | if (size > INT_MAX) | 1763 | if (size > INT_MAX) |
1762 | size = INT_MAX; | 1764 | size = INT_MAX; |
1765 | if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size))) | ||
1766 | return -EFAULT; | ||
1763 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1767 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
1764 | if (!sock) | 1768 | if (!sock) |
1765 | goto out; | 1769 | goto out; |
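
The sendto()/recvfrom() hunks add an up-front sanity check so an obviously unusable user buffer fails with -EFAULT before any socket lookup happens. A standalone model of that ordering, with a simple range check standing in for access_ok() and an illustrative address-space limit:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define USER_SPACE_LIMIT ((uintptr_t)1 << 47)  /* illustrative only */

static int validate_user_buffer(const void *buf, size_t len)
{
        uintptr_t start = (uintptr_t)buf;

        if (start + len < start || start + len > USER_SPACE_LIMIT)
                return -EFAULT; /* fail before looking up the socket */
        return 0;
}
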
diff --git a/net/sunrpc/xprtrdma/Makefile b/net/sunrpc/xprtrdma/Makefile index da5136fd5694..579f72bbcf4b 100644 --- a/net/sunrpc/xprtrdma/Makefile +++ b/net/sunrpc/xprtrdma/Makefile | |||
@@ -1,6 +1,7 @@ | |||
1 | obj-$(CONFIG_SUNRPC_XPRT_RDMA_CLIENT) += xprtrdma.o | 1 | obj-$(CONFIG_SUNRPC_XPRT_RDMA_CLIENT) += xprtrdma.o |
2 | 2 | ||
3 | xprtrdma-y := transport.o rpc_rdma.o verbs.o | 3 | xprtrdma-y := transport.o rpc_rdma.o verbs.o \ |
4 | fmr_ops.o frwr_ops.o physical_ops.o | ||
4 | 5 | ||
5 | obj-$(CONFIG_SUNRPC_XPRT_RDMA_SERVER) += svcrdma.o | 6 | obj-$(CONFIG_SUNRPC_XPRT_RDMA_SERVER) += svcrdma.o |
6 | 7 | ||
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c new file mode 100644 index 000000000000..a91ba2c8ef1e --- /dev/null +++ b/net/sunrpc/xprtrdma/fmr_ops.c | |||
@@ -0,0 +1,208 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2015 Oracle. All rights reserved. | ||
3 | * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. | ||
4 | */ | ||
5 | |||
6 | /* Lightweight memory registration using Fast Memory Regions (FMR). | ||
7 | * Sometimes referred to as MTHCAFMR mode. | ||
8 | * | ||
9 | * FMR uses synchronous memory registration and deregistration. | ||
10 | * FMR registration is known to be fast, but FMR deregistration | ||
11 | * can take tens of usecs to complete. | ||
12 | */ | ||
13 | |||
14 | #include "xprt_rdma.h" | ||
15 | |||
16 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
17 | # define RPCDBG_FACILITY RPCDBG_TRANS | ||
18 | #endif | ||
19 | |||
20 | /* Maximum scatter/gather per FMR */ | ||
21 | #define RPCRDMA_MAX_FMR_SGES (64) | ||
22 | |||
23 | static int | ||
24 | fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | ||
25 | struct rpcrdma_create_data_internal *cdata) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | /* FMR mode conveys up to 64 pages of payload per chunk segment. | ||
31 | */ | ||
32 | static size_t | ||
33 | fmr_op_maxpages(struct rpcrdma_xprt *r_xprt) | ||
34 | { | ||
35 | return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, | ||
36 | rpcrdma_max_segments(r_xprt) * RPCRDMA_MAX_FMR_SGES); | ||
37 | } | ||
38 | |||
39 | static int | ||
40 | fmr_op_init(struct rpcrdma_xprt *r_xprt) | ||
41 | { | ||
42 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; | ||
43 | int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ; | ||
44 | struct ib_fmr_attr fmr_attr = { | ||
45 | .max_pages = RPCRDMA_MAX_FMR_SGES, | ||
46 | .max_maps = 1, | ||
47 | .page_shift = PAGE_SHIFT | ||
48 | }; | ||
49 | struct ib_pd *pd = r_xprt->rx_ia.ri_pd; | ||
50 | struct rpcrdma_mw *r; | ||
51 | int i, rc; | ||
52 | |||
53 | INIT_LIST_HEAD(&buf->rb_mws); | ||
54 | INIT_LIST_HEAD(&buf->rb_all); | ||
55 | |||
56 | i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; | ||
57 | dprintk("RPC: %s: initializing %d FMRs\n", __func__, i); | ||
58 | |||
59 | while (i--) { | ||
60 | r = kzalloc(sizeof(*r), GFP_KERNEL); | ||
61 | if (!r) | ||
62 | return -ENOMEM; | ||
63 | |||
64 | r->r.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr); | ||
65 | if (IS_ERR(r->r.fmr)) | ||
66 | goto out_fmr_err; | ||
67 | |||
68 | list_add(&r->mw_list, &buf->rb_mws); | ||
69 | list_add(&r->mw_all, &buf->rb_all); | ||
70 | } | ||
71 | return 0; | ||
72 | |||
73 | out_fmr_err: | ||
74 | rc = PTR_ERR(r->r.fmr); | ||
75 | dprintk("RPC: %s: ib_alloc_fmr status %i\n", __func__, rc); | ||
76 | kfree(r); | ||
77 | return rc; | ||
78 | } | ||
79 | |||
80 | /* Use the ib_map_phys_fmr() verb to register a memory region | ||
81 | * for remote access via RDMA READ or RDMA WRITE. | ||
82 | */ | ||
83 | static int | ||
84 | fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, | ||
85 | int nsegs, bool writing) | ||
86 | { | ||
87 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
88 | struct ib_device *device = ia->ri_id->device; | ||
89 | enum dma_data_direction direction = rpcrdma_data_dir(writing); | ||
90 | struct rpcrdma_mr_seg *seg1 = seg; | ||
91 | struct rpcrdma_mw *mw = seg1->rl_mw; | ||
92 | u64 physaddrs[RPCRDMA_MAX_DATA_SEGS]; | ||
93 | int len, pageoff, i, rc; | ||
94 | |||
95 | pageoff = offset_in_page(seg1->mr_offset); | ||
96 | seg1->mr_offset -= pageoff; /* start of page */ | ||
97 | seg1->mr_len += pageoff; | ||
98 | len = -pageoff; | ||
99 | if (nsegs > RPCRDMA_MAX_FMR_SGES) | ||
100 | nsegs = RPCRDMA_MAX_FMR_SGES; | ||
101 | for (i = 0; i < nsegs;) { | ||
102 | rpcrdma_map_one(device, seg, direction); | ||
103 | physaddrs[i] = seg->mr_dma; | ||
104 | len += seg->mr_len; | ||
105 | ++seg; | ||
106 | ++i; | ||
107 | /* Check for holes */ | ||
108 | if ((i < nsegs && offset_in_page(seg->mr_offset)) || | ||
109 | offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) | ||
110 | break; | ||
111 | } | ||
112 | |||
113 | rc = ib_map_phys_fmr(mw->r.fmr, physaddrs, i, seg1->mr_dma); | ||
114 | if (rc) | ||
115 | goto out_maperr; | ||
116 | |||
117 | seg1->mr_rkey = mw->r.fmr->rkey; | ||
118 | seg1->mr_base = seg1->mr_dma + pageoff; | ||
119 | seg1->mr_nsegs = i; | ||
120 | seg1->mr_len = len; | ||
121 | return i; | ||
122 | |||
123 | out_maperr: | ||
124 | dprintk("RPC: %s: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n", | ||
125 | __func__, len, (unsigned long long)seg1->mr_dma, | ||
126 | pageoff, i, rc); | ||
127 | while (i--) | ||
128 | rpcrdma_unmap_one(device, --seg); | ||
129 | return rc; | ||
130 | } | ||
131 | |||
132 | /* Use the ib_unmap_fmr() verb to prevent further remote | ||
133 | * access via RDMA READ or RDMA WRITE. | ||
134 | */ | ||
135 | static int | ||
136 | fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg) | ||
137 | { | ||
138 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
139 | struct rpcrdma_mr_seg *seg1 = seg; | ||
140 | struct ib_device *device; | ||
141 | int rc, nsegs = seg->mr_nsegs; | ||
142 | LIST_HEAD(l); | ||
143 | |||
144 | list_add(&seg1->rl_mw->r.fmr->list, &l); | ||
145 | rc = ib_unmap_fmr(&l); | ||
146 | read_lock(&ia->ri_qplock); | ||
147 | device = ia->ri_id->device; | ||
148 | while (seg1->mr_nsegs--) | ||
149 | rpcrdma_unmap_one(device, seg++); | ||
150 | read_unlock(&ia->ri_qplock); | ||
151 | if (rc) | ||
152 | goto out_err; | ||
153 | return nsegs; | ||
154 | |||
155 | out_err: | ||
156 | dprintk("RPC: %s: ib_unmap_fmr status %i\n", __func__, rc); | ||
157 | return nsegs; | ||
158 | } | ||
159 | |||
160 | /* After a disconnect, unmap all FMRs. | ||
161 | * | ||
162 | * This is invoked only in the transport connect worker in order | ||
163 | * to serialize with rpcrdma_register_fmr_external(). | ||
164 | */ | ||
165 | static void | ||
166 | fmr_op_reset(struct rpcrdma_xprt *r_xprt) | ||
167 | { | ||
168 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; | ||
169 | struct rpcrdma_mw *r; | ||
170 | LIST_HEAD(list); | ||
171 | int rc; | ||
172 | |||
173 | list_for_each_entry(r, &buf->rb_all, mw_all) | ||
174 | list_add(&r->r.fmr->list, &list); | ||
175 | |||
176 | rc = ib_unmap_fmr(&list); | ||
177 | if (rc) | ||
178 | dprintk("RPC: %s: ib_unmap_fmr failed %i\n", | ||
179 | __func__, rc); | ||
180 | } | ||
181 | |||
182 | static void | ||
183 | fmr_op_destroy(struct rpcrdma_buffer *buf) | ||
184 | { | ||
185 | struct rpcrdma_mw *r; | ||
186 | int rc; | ||
187 | |||
188 | while (!list_empty(&buf->rb_all)) { | ||
189 | r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); | ||
190 | list_del(&r->mw_all); | ||
191 | rc = ib_dealloc_fmr(r->r.fmr); | ||
192 | if (rc) | ||
193 | dprintk("RPC: %s: ib_dealloc_fmr failed %i\n", | ||
194 | __func__, rc); | ||
195 | kfree(r); | ||
196 | } | ||
197 | } | ||
198 | |||
199 | const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = { | ||
200 | .ro_map = fmr_op_map, | ||
201 | .ro_unmap = fmr_op_unmap, | ||
202 | .ro_open = fmr_op_open, | ||
203 | .ro_maxpages = fmr_op_maxpages, | ||
204 | .ro_init = fmr_op_init, | ||
205 | .ro_reset = fmr_op_reset, | ||
206 | .ro_destroy = fmr_op_destroy, | ||
207 | .ro_displayname = "fmr", | ||
208 | }; | ||
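
fmr_op_map() above coalesces up to RPCRDMA_MAX_FMR_SGES segments into one FMR, but stops early at a "hole": a later segment that does not start on a page boundary, or a previous segment that does not end on one. A rough userspace sketch of that stopping rule (segment layout and the fixed PAGE_SIZE are simplifying assumptions):

#include <stddef.h>

#define PAGE_SIZE    4096UL
#define MAX_FMR_SGES 64

struct seg { unsigned long offset; unsigned long len; };

static unsigned long offset_in_page(unsigned long v)
{
    return v & (PAGE_SIZE - 1);
}

/* Count how many consecutive segments can share one FMR mapping:
 * stop once a later segment starts mid-page or the previous one
 * ends mid-page, mirroring the hole check in fmr_op_map(). */
static int coalesce(const struct seg *seg, int nsegs)
{
    int i;

    if (nsegs > MAX_FMR_SGES)
        nsegs = MAX_FMR_SGES;
    for (i = 0; i < nsegs;) {
        ++i;
        if ((i < nsegs && offset_in_page(seg[i].offset)) ||
            offset_in_page(seg[i - 1].offset + seg[i - 1].len))
            break;
    }
    return i;
}
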
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c new file mode 100644 index 000000000000..0a7b9df70133 --- /dev/null +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -0,0 +1,353 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2015 Oracle. All rights reserved. | ||
3 | * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. | ||
4 | */ | ||
5 | |||
6 | /* Lightweight memory registration using Fast Registration Work | ||
7 | * Requests (FRWR). Also sometimes referred to as FRMR mode. | ||
8 | * | ||
9 | * FRWR features ordered asynchronous registration and deregistration | ||
10 | * of arbitrarily sized memory regions. This is the fastest and safest | ||
11 | * but most complex memory registration mode. | ||
12 | */ | ||
13 | |||
14 | #include "xprt_rdma.h" | ||
15 | |||
16 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
17 | # define RPCDBG_FACILITY RPCDBG_TRANS | ||
18 | #endif | ||
19 | |||
20 | static int | ||
21 | __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device, | ||
22 | unsigned int depth) | ||
23 | { | ||
24 | struct rpcrdma_frmr *f = &r->r.frmr; | ||
25 | int rc; | ||
26 | |||
27 | f->fr_mr = ib_alloc_fast_reg_mr(pd, depth); | ||
28 | if (IS_ERR(f->fr_mr)) | ||
29 | goto out_mr_err; | ||
30 | f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth); | ||
31 | if (IS_ERR(f->fr_pgl)) | ||
32 | goto out_list_err; | ||
33 | return 0; | ||
34 | |||
35 | out_mr_err: | ||
36 | rc = PTR_ERR(f->fr_mr); | ||
37 | dprintk("RPC: %s: ib_alloc_fast_reg_mr status %i\n", | ||
38 | __func__, rc); | ||
39 | return rc; | ||
40 | |||
41 | out_list_err: | ||
42 | rc = PTR_ERR(f->fr_pgl); | ||
43 | dprintk("RPC: %s: ib_alloc_fast_reg_page_list status %i\n", | ||
44 | __func__, rc); | ||
45 | ib_dereg_mr(f->fr_mr); | ||
46 | return rc; | ||
47 | } | ||
48 | |||
49 | static void | ||
50 | __frwr_release(struct rpcrdma_mw *r) | ||
51 | { | ||
52 | int rc; | ||
53 | |||
54 | rc = ib_dereg_mr(r->r.frmr.fr_mr); | ||
55 | if (rc) | ||
56 | dprintk("RPC: %s: ib_dereg_mr status %i\n", | ||
57 | __func__, rc); | ||
58 | ib_free_fast_reg_page_list(r->r.frmr.fr_pgl); | ||
59 | } | ||
60 | |||
61 | static int | ||
62 | frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | ||
63 | struct rpcrdma_create_data_internal *cdata) | ||
64 | { | ||
65 | struct ib_device_attr *devattr = &ia->ri_devattr; | ||
66 | int depth, delta; | ||
67 | |||
68 | ia->ri_max_frmr_depth = | ||
69 | min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, | ||
70 | devattr->max_fast_reg_page_list_len); | ||
71 | dprintk("RPC: %s: device's max FR page list len = %u\n", | ||
72 | __func__, ia->ri_max_frmr_depth); | ||
73 | |||
74 | /* Add room for frmr register and invalidate WRs. | ||
75 | * 1. FRMR reg WR for head | ||
76 | * 2. FRMR invalidate WR for head | ||
77 | * 3. N FRMR reg WRs for pagelist | ||
78 | * 4. N FRMR invalidate WRs for pagelist | ||
79 | * 5. FRMR reg WR for tail | ||
80 | * 6. FRMR invalidate WR for tail | ||
81 | * 7. The RDMA_SEND WR | ||
82 | */ | ||
83 | depth = 7; | ||
84 | |||
85 | /* Calculate N if the device max FRMR depth is smaller than | ||
86 | * RPCRDMA_MAX_DATA_SEGS. | ||
87 | */ | ||
88 | if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) { | ||
89 | delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth; | ||
90 | do { | ||
91 | depth += 2; /* FRMR reg + invalidate */ | ||
92 | delta -= ia->ri_max_frmr_depth; | ||
93 | } while (delta > 0); | ||
94 | } | ||
95 | |||
96 | ep->rep_attr.cap.max_send_wr *= depth; | ||
97 | if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) { | ||
98 | cdata->max_requests = devattr->max_qp_wr / depth; | ||
99 | if (!cdata->max_requests) | ||
100 | return -EINVAL; | ||
101 | ep->rep_attr.cap.max_send_wr = cdata->max_requests * | ||
102 | depth; | ||
103 | } | ||
104 | |||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | /* FRWR mode conveys a list of pages per chunk segment. The | ||
109 | * maximum length of that list is the FRWR page list depth. | ||
110 | */ | ||
111 | static size_t | ||
112 | frwr_op_maxpages(struct rpcrdma_xprt *r_xprt) | ||
113 | { | ||
114 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
115 | |||
116 | return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, | ||
117 | rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth); | ||
118 | } | ||
119 | |||
120 | /* If FAST_REG or LOCAL_INV failed, indicate the frmr needs to be reset. */ | ||
121 | static void | ||
122 | frwr_sendcompletion(struct ib_wc *wc) | ||
123 | { | ||
124 | struct rpcrdma_mw *r; | ||
125 | |||
126 | if (likely(wc->status == IB_WC_SUCCESS)) | ||
127 | return; | ||
128 | |||
129 | /* WARNING: Only wr_id and status are reliable at this point */ | ||
130 | r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; | ||
131 | dprintk("RPC: %s: frmr %p (stale), status %d\n", | ||
132 | __func__, r, wc->status); | ||
133 | r->r.frmr.fr_state = FRMR_IS_STALE; | ||
134 | } | ||
135 | |||
136 | static int | ||
137 | frwr_op_init(struct rpcrdma_xprt *r_xprt) | ||
138 | { | ||
139 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; | ||
140 | struct ib_device *device = r_xprt->rx_ia.ri_id->device; | ||
141 | unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth; | ||
142 | struct ib_pd *pd = r_xprt->rx_ia.ri_pd; | ||
143 | int i; | ||
144 | |||
145 | INIT_LIST_HEAD(&buf->rb_mws); | ||
146 | INIT_LIST_HEAD(&buf->rb_all); | ||
147 | |||
148 | i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; | ||
149 | dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i); | ||
150 | |||
151 | while (i--) { | ||
152 | struct rpcrdma_mw *r; | ||
153 | int rc; | ||
154 | |||
155 | r = kzalloc(sizeof(*r), GFP_KERNEL); | ||
156 | if (!r) | ||
157 | return -ENOMEM; | ||
158 | |||
159 | rc = __frwr_init(r, pd, device, depth); | ||
160 | if (rc) { | ||
161 | kfree(r); | ||
162 | return rc; | ||
163 | } | ||
164 | |||
165 | list_add(&r->mw_list, &buf->rb_mws); | ||
166 | list_add(&r->mw_all, &buf->rb_all); | ||
167 | r->mw_sendcompletion = frwr_sendcompletion; | ||
168 | } | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | /* Post a FAST_REG Work Request to register a memory region | ||
174 | * for remote access via RDMA READ or RDMA WRITE. | ||
175 | */ | ||
176 | static int | ||
177 | frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, | ||
178 | int nsegs, bool writing) | ||
179 | { | ||
180 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
181 | struct ib_device *device = ia->ri_id->device; | ||
182 | enum dma_data_direction direction = rpcrdma_data_dir(writing); | ||
183 | struct rpcrdma_mr_seg *seg1 = seg; | ||
184 | struct rpcrdma_mw *mw = seg1->rl_mw; | ||
185 | struct rpcrdma_frmr *frmr = &mw->r.frmr; | ||
186 | struct ib_mr *mr = frmr->fr_mr; | ||
187 | struct ib_send_wr fastreg_wr, *bad_wr; | ||
188 | u8 key; | ||
189 | int len, pageoff; | ||
190 | int i, rc; | ||
191 | int seg_len; | ||
192 | u64 pa; | ||
193 | int page_no; | ||
194 | |||
195 | pageoff = offset_in_page(seg1->mr_offset); | ||
196 | seg1->mr_offset -= pageoff; /* start of page */ | ||
197 | seg1->mr_len += pageoff; | ||
198 | len = -pageoff; | ||
199 | if (nsegs > ia->ri_max_frmr_depth) | ||
200 | nsegs = ia->ri_max_frmr_depth; | ||
201 | for (page_no = i = 0; i < nsegs;) { | ||
202 | rpcrdma_map_one(device, seg, direction); | ||
203 | pa = seg->mr_dma; | ||
204 | for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) { | ||
205 | frmr->fr_pgl->page_list[page_no++] = pa; | ||
206 | pa += PAGE_SIZE; | ||
207 | } | ||
208 | len += seg->mr_len; | ||
209 | ++seg; | ||
210 | ++i; | ||
211 | /* Check for holes */ | ||
212 | if ((i < nsegs && offset_in_page(seg->mr_offset)) || | ||
213 | offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) | ||
214 | break; | ||
215 | } | ||
216 | dprintk("RPC: %s: Using frmr %p to map %d segments (%d bytes)\n", | ||
217 | __func__, mw, i, len); | ||
218 | |||
219 | frmr->fr_state = FRMR_IS_VALID; | ||
220 | |||
221 | memset(&fastreg_wr, 0, sizeof(fastreg_wr)); | ||
222 | fastreg_wr.wr_id = (unsigned long)(void *)mw; | ||
223 | fastreg_wr.opcode = IB_WR_FAST_REG_MR; | ||
224 | fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff; | ||
225 | fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl; | ||
226 | fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; | ||
227 | fastreg_wr.wr.fast_reg.page_list_len = page_no; | ||
228 | fastreg_wr.wr.fast_reg.length = len; | ||
229 | fastreg_wr.wr.fast_reg.access_flags = writing ? | ||
230 | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : | ||
231 | IB_ACCESS_REMOTE_READ; | ||
232 | key = (u8)(mr->rkey & 0x000000FF); | ||
233 | ib_update_fast_reg_key(mr, ++key); | ||
234 | fastreg_wr.wr.fast_reg.rkey = mr->rkey; | ||
235 | |||
236 | DECR_CQCOUNT(&r_xprt->rx_ep); | ||
237 | rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr); | ||
238 | if (rc) | ||
239 | goto out_senderr; | ||
240 | |||
241 | seg1->mr_rkey = mr->rkey; | ||
242 | seg1->mr_base = seg1->mr_dma + pageoff; | ||
243 | seg1->mr_nsegs = i; | ||
244 | seg1->mr_len = len; | ||
245 | return i; | ||
246 | |||
247 | out_senderr: | ||
248 | dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc); | ||
249 | ib_update_fast_reg_key(mr, --key); | ||
250 | frmr->fr_state = FRMR_IS_INVALID; | ||
251 | while (i--) | ||
252 | rpcrdma_unmap_one(device, --seg); | ||
253 | return rc; | ||
254 | } | ||
255 | |||
256 | /* Post a LOCAL_INV Work Request to prevent further remote access | ||
257 | * via RDMA READ or RDMA WRITE. | ||
258 | */ | ||
259 | static int | ||
260 | frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg) | ||
261 | { | ||
262 | struct rpcrdma_mr_seg *seg1 = seg; | ||
263 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
264 | struct ib_send_wr invalidate_wr, *bad_wr; | ||
265 | int rc, nsegs = seg->mr_nsegs; | ||
266 | struct ib_device *device; | ||
267 | |||
268 | seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID; | ||
269 | |||
270 | memset(&invalidate_wr, 0, sizeof(invalidate_wr)); | ||
271 | invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw; | ||
272 | invalidate_wr.opcode = IB_WR_LOCAL_INV; | ||
273 | invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey; | ||
274 | DECR_CQCOUNT(&r_xprt->rx_ep); | ||
275 | |||
276 | read_lock(&ia->ri_qplock); | ||
277 | device = ia->ri_id->device; | ||
278 | while (seg1->mr_nsegs--) | ||
279 | rpcrdma_unmap_one(device, seg++); | ||
280 | rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); | ||
281 | read_unlock(&ia->ri_qplock); | ||
282 | if (rc) | ||
283 | goto out_err; | ||
284 | return nsegs; | ||
285 | |||
286 | out_err: | ||
287 | /* Force rpcrdma_buffer_get() to retry */ | ||
288 | seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE; | ||
289 | dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc); | ||
290 | return nsegs; | ||
291 | } | ||
292 | |||
293 | /* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in | ||
294 | * an unusable state. Find FRMRs in this state and dereg / reg | ||
295 | * each. FRMRs that are VALID and attached to an rpcrdma_req are | ||
296 | * also torn down. | ||
297 | * | ||
298 | * This gives all in-use FRMRs a fresh rkey and leaves them INVALID. | ||
299 | * | ||
300 | * This is invoked only in the transport connect worker in order | ||
301 | * to serialize with rpcrdma_register_frmr_external(). | ||
302 | */ | ||
303 | static void | ||
304 | frwr_op_reset(struct rpcrdma_xprt *r_xprt) | ||
305 | { | ||
306 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; | ||
307 | struct ib_device *device = r_xprt->rx_ia.ri_id->device; | ||
308 | unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth; | ||
309 | struct ib_pd *pd = r_xprt->rx_ia.ri_pd; | ||
310 | struct rpcrdma_mw *r; | ||
311 | int rc; | ||
312 | |||
313 | list_for_each_entry(r, &buf->rb_all, mw_all) { | ||
314 | if (r->r.frmr.fr_state == FRMR_IS_INVALID) | ||
315 | continue; | ||
316 | |||
317 | __frwr_release(r); | ||
318 | rc = __frwr_init(r, pd, device, depth); | ||
319 | if (rc) { | ||
320 | dprintk("RPC: %s: mw %p left %s\n", | ||
321 | __func__, r, | ||
322 | (r->r.frmr.fr_state == FRMR_IS_STALE ? | ||
323 | "stale" : "valid")); | ||
324 | continue; | ||
325 | } | ||
326 | |||
327 | r->r.frmr.fr_state = FRMR_IS_INVALID; | ||
328 | } | ||
329 | } | ||
330 | |||
331 | static void | ||
332 | frwr_op_destroy(struct rpcrdma_buffer *buf) | ||
333 | { | ||
334 | struct rpcrdma_mw *r; | ||
335 | |||
336 | while (!list_empty(&buf->rb_all)) { | ||
337 | r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); | ||
338 | list_del(&r->mw_all); | ||
339 | __frwr_release(r); | ||
340 | kfree(r); | ||
341 | } | ||
342 | } | ||
343 | |||
344 | const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = { | ||
345 | .ro_map = frwr_op_map, | ||
346 | .ro_unmap = frwr_op_unmap, | ||
347 | .ro_open = frwr_op_open, | ||
348 | .ro_maxpages = frwr_op_maxpages, | ||
349 | .ro_init = frwr_op_init, | ||
350 | .ro_reset = frwr_op_reset, | ||
351 | .ro_destroy = frwr_op_destroy, | ||
352 | .ro_displayname = "frwr", | ||
353 | }; | ||
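
frwr_op_open() sizes the send queue by multiplying the requested credits by a per-RPC work-request "depth": 7 WRs for the base case, plus a register/invalidate pair for every extra FRMR needed when the device's page-list depth is smaller than RPCRDMA_MAX_DATA_SEGS. A small sketch of that arithmetic, with constants chosen purely for illustration:

#include <stdio.h>

/* Compute the send-queue depth per RPC, mirroring frwr_op_open():
 * start at 7 (head and tail reg+inv, pagelist reg+inv, RDMA SEND)
 * and add a reg+inv pair for each additional FRMR the pagelist
 * needs on a device with a small page-list limit. */
static int frwr_depth(unsigned int max_data_segs,
                      unsigned int max_frmr_depth)
{
    int depth = 7;

    if (max_frmr_depth < max_data_segs) {
        int delta = max_data_segs - max_frmr_depth;

        do {
            depth += 2;            /* FRMR reg + invalidate */
            delta -= max_frmr_depth;
        } while (delta > 0);
    }
    return depth;
}

int main(void)
{
    /* e.g. 64 data segments on a device limited to 16 pages/FRMR */
    printf("depth = %d\n", frwr_depth(64, 16));
    return 0;
}
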
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c new file mode 100644 index 000000000000..ba518af16787 --- /dev/null +++ b/net/sunrpc/xprtrdma/physical_ops.c | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2015 Oracle. All rights reserved. | ||
3 | * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. | ||
4 | */ | ||
5 | |||
6 | /* No-op chunk preparation. All client memory is pre-registered. | ||
7 | * Sometimes referred to as ALLPHYSICAL mode. | ||
8 | * | ||
9 | * Physical registration is simple because all client memory is | ||
10 | * pre-registered and never deregistered. This mode is good for | ||
11 | * adapter bring-up, but is not considered safe: the server is | ||
12 | * trusted not to abuse its access to client memory not involved | ||
13 | * in RDMA I/O. | ||
14 | */ | ||
15 | |||
16 | #include "xprt_rdma.h" | ||
17 | |||
18 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | ||
19 | # define RPCDBG_FACILITY RPCDBG_TRANS | ||
20 | #endif | ||
21 | |||
22 | static int | ||
23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | ||
24 | struct rpcrdma_create_data_internal *cdata) | ||
25 | { | ||
26 | return 0; | ||
27 | } | ||
28 | |||
29 | /* PHYSICAL memory registration conveys one page per chunk segment. | ||
30 | */ | ||
31 | static size_t | ||
32 | physical_op_maxpages(struct rpcrdma_xprt *r_xprt) | ||
33 | { | ||
34 | return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, | ||
35 | rpcrdma_max_segments(r_xprt)); | ||
36 | } | ||
37 | |||
38 | static int | ||
39 | physical_op_init(struct rpcrdma_xprt *r_xprt) | ||
40 | { | ||
41 | return 0; | ||
42 | } | ||
43 | |||
44 | /* The client's physical memory is already exposed for | ||
45 | * remote access via RDMA READ or RDMA WRITE. | ||
46 | */ | ||
47 | static int | ||
48 | physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, | ||
49 | int nsegs, bool writing) | ||
50 | { | ||
51 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
52 | |||
53 | rpcrdma_map_one(ia->ri_id->device, seg, | ||
54 | rpcrdma_data_dir(writing)); | ||
55 | seg->mr_rkey = ia->ri_bind_mem->rkey; | ||
56 | seg->mr_base = seg->mr_dma; | ||
57 | seg->mr_nsegs = 1; | ||
58 | return 1; | ||
59 | } | ||
60 | |||
61 | /* Unmap a memory region, but leave it registered. | ||
62 | */ | ||
63 | static int | ||
64 | physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg) | ||
65 | { | ||
66 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
67 | |||
68 | read_lock(&ia->ri_qplock); | ||
69 | rpcrdma_unmap_one(ia->ri_id->device, seg); | ||
70 | read_unlock(&ia->ri_qplock); | ||
71 | |||
72 | return 1; | ||
73 | } | ||
74 | |||
75 | static void | ||
76 | physical_op_reset(struct rpcrdma_xprt *r_xprt) | ||
77 | { | ||
78 | } | ||
79 | |||
80 | static void | ||
81 | physical_op_destroy(struct rpcrdma_buffer *buf) | ||
82 | { | ||
83 | } | ||
84 | |||
85 | const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = { | ||
86 | .ro_map = physical_op_map, | ||
87 | .ro_unmap = physical_op_unmap, | ||
88 | .ro_open = physical_op_open, | ||
89 | .ro_maxpages = physical_op_maxpages, | ||
90 | .ro_init = physical_op_init, | ||
91 | .ro_reset = physical_op_reset, | ||
92 | .ro_destroy = physical_op_destroy, | ||
93 | .ro_displayname = "physical", | ||
94 | }; | ||
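
All three new files fill in the same rpcrdma_memreg_ops table, so callers dispatch through ia->ri_ops without caring which strategy is active. A stripped-down sketch of that function-pointer dispatch, using a simplified ops structure rather than the xprt_rdma.h definition:

#include <stdio.h>
#include <stddef.h>

/* Simplified model of a per-strategy ops table. */
struct memreg_ops {
    size_t (*ro_maxpages)(void);
    const char *ro_displayname;
};

static size_t physical_maxpages(void) { return 1; }
static size_t fmr_maxpages(void)      { return 64; }

static const struct memreg_ops physical_ops = {
    .ro_maxpages    = physical_maxpages,
    .ro_displayname = "physical",
};

static const struct memreg_ops fmr_ops = {
    .ro_maxpages    = fmr_maxpages,
    .ro_displayname = "fmr",
};

int main(void)
{
    /* Callers hold one pointer and dispatch through it. */
    const struct memreg_ops *ops = &fmr_ops;

    printf("%s: %zu pages per chunk\n",
           ops->ro_displayname, ops->ro_maxpages());
    return 0;
}
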
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 91ffde82fa0c..2c53ea9e1b83 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
@@ -53,6 +53,14 @@ | |||
53 | # define RPCDBG_FACILITY RPCDBG_TRANS | 53 | # define RPCDBG_FACILITY RPCDBG_TRANS |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | enum rpcrdma_chunktype { | ||
57 | rpcrdma_noch = 0, | ||
58 | rpcrdma_readch, | ||
59 | rpcrdma_areadch, | ||
60 | rpcrdma_writech, | ||
61 | rpcrdma_replych | ||
62 | }; | ||
63 | |||
56 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 64 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
57 | static const char transfertypes[][12] = { | 65 | static const char transfertypes[][12] = { |
58 | "pure inline", /* no chunks */ | 66 | "pure inline", /* no chunks */ |
@@ -179,6 +187,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
179 | struct rpcrdma_write_array *warray = NULL; | 187 | struct rpcrdma_write_array *warray = NULL; |
180 | struct rpcrdma_write_chunk *cur_wchunk = NULL; | 188 | struct rpcrdma_write_chunk *cur_wchunk = NULL; |
181 | __be32 *iptr = headerp->rm_body.rm_chunks; | 189 | __be32 *iptr = headerp->rm_body.rm_chunks; |
190 | int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool); | ||
182 | 191 | ||
183 | if (type == rpcrdma_readch || type == rpcrdma_areadch) { | 192 | if (type == rpcrdma_readch || type == rpcrdma_areadch) { |
184 | /* a read chunk - server will RDMA Read our memory */ | 193 | /* a read chunk - server will RDMA Read our memory */ |
@@ -201,9 +210,9 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
201 | if (nsegs < 0) | 210 | if (nsegs < 0) |
202 | return nsegs; | 211 | return nsegs; |
203 | 212 | ||
213 | map = r_xprt->rx_ia.ri_ops->ro_map; | ||
204 | do { | 214 | do { |
205 | n = rpcrdma_register_external(seg, nsegs, | 215 | n = map(r_xprt, seg, nsegs, cur_wchunk != NULL); |
206 | cur_wchunk != NULL, r_xprt); | ||
207 | if (n <= 0) | 216 | if (n <= 0) |
208 | goto out; | 217 | goto out; |
209 | if (cur_rchunk) { /* read */ | 218 | if (cur_rchunk) { /* read */ |
@@ -275,34 +284,13 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
275 | return (unsigned char *)iptr - (unsigned char *)headerp; | 284 | return (unsigned char *)iptr - (unsigned char *)headerp; |
276 | 285 | ||
277 | out: | 286 | out: |
278 | if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_FRMR) { | 287 | if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR) |
279 | for (pos = 0; nchunks--;) | 288 | return n; |
280 | pos += rpcrdma_deregister_external( | ||
281 | &req->rl_segments[pos], r_xprt); | ||
282 | } | ||
283 | return n; | ||
284 | } | ||
285 | 289 | ||
286 | /* | 290 | for (pos = 0; nchunks--;) |
287 | * Marshal chunks. This routine returns the header length | 291 | pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt, |
288 | * consumed by marshaling. | 292 | &req->rl_segments[pos]); |
289 | * | 293 | return n; |
290 | * Returns positive RPC/RDMA header size, or negative errno. | ||
291 | */ | ||
292 | |||
293 | ssize_t | ||
294 | rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result) | ||
295 | { | ||
296 | struct rpcrdma_req *req = rpcr_to_rdmar(rqst); | ||
297 | struct rpcrdma_msg *headerp = rdmab_to_msg(req->rl_rdmabuf); | ||
298 | |||
299 | if (req->rl_rtype != rpcrdma_noch) | ||
300 | result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf, | ||
301 | headerp, req->rl_rtype); | ||
302 | else if (req->rl_wtype != rpcrdma_noch) | ||
303 | result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf, | ||
304 | headerp, req->rl_wtype); | ||
305 | return result; | ||
306 | } | 294 | } |
307 | 295 | ||
308 | /* | 296 | /* |
@@ -397,6 +385,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
397 | char *base; | 385 | char *base; |
398 | size_t rpclen, padlen; | 386 | size_t rpclen, padlen; |
399 | ssize_t hdrlen; | 387 | ssize_t hdrlen; |
388 | enum rpcrdma_chunktype rtype, wtype; | ||
400 | struct rpcrdma_msg *headerp; | 389 | struct rpcrdma_msg *headerp; |
401 | 390 | ||
402 | /* | 391 | /* |
@@ -433,13 +422,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
433 | * into pages; otherwise use reply chunks. | 422 | * into pages; otherwise use reply chunks. |
434 | */ | 423 | */ |
435 | if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst)) | 424 | if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst)) |
436 | req->rl_wtype = rpcrdma_noch; | 425 | wtype = rpcrdma_noch; |
437 | else if (rqst->rq_rcv_buf.page_len == 0) | 426 | else if (rqst->rq_rcv_buf.page_len == 0) |
438 | req->rl_wtype = rpcrdma_replych; | 427 | wtype = rpcrdma_replych; |
439 | else if (rqst->rq_rcv_buf.flags & XDRBUF_READ) | 428 | else if (rqst->rq_rcv_buf.flags & XDRBUF_READ) |
440 | req->rl_wtype = rpcrdma_writech; | 429 | wtype = rpcrdma_writech; |
441 | else | 430 | else |
442 | req->rl_wtype = rpcrdma_replych; | 431 | wtype = rpcrdma_replych; |
443 | 432 | ||
444 | /* | 433 | /* |
445 | * Chunks needed for arguments? | 434 | * Chunks needed for arguments? |
@@ -456,16 +445,16 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
456 | * TBD check NFSv4 setacl | 445 | * TBD check NFSv4 setacl |
457 | */ | 446 | */ |
458 | if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst)) | 447 | if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst)) |
459 | req->rl_rtype = rpcrdma_noch; | 448 | rtype = rpcrdma_noch; |
460 | else if (rqst->rq_snd_buf.page_len == 0) | 449 | else if (rqst->rq_snd_buf.page_len == 0) |
461 | req->rl_rtype = rpcrdma_areadch; | 450 | rtype = rpcrdma_areadch; |
462 | else | 451 | else |
463 | req->rl_rtype = rpcrdma_readch; | 452 | rtype = rpcrdma_readch; |
464 | 453 | ||
465 | /* The following simplification is not true forever */ | 454 | /* The following simplification is not true forever */ |
466 | if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych) | 455 | if (rtype != rpcrdma_noch && wtype == rpcrdma_replych) |
467 | req->rl_wtype = rpcrdma_noch; | 456 | wtype = rpcrdma_noch; |
468 | if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) { | 457 | if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) { |
469 | dprintk("RPC: %s: cannot marshal multiple chunk lists\n", | 458 | dprintk("RPC: %s: cannot marshal multiple chunk lists\n", |
470 | __func__); | 459 | __func__); |
471 | return -EIO; | 460 | return -EIO; |
@@ -479,7 +468,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
479 | * When padding is in use and applies to the transfer, insert | 468 | * When padding is in use and applies to the transfer, insert |
480 | * it and change the message type. | 469 | * it and change the message type. |
481 | */ | 470 | */ |
482 | if (req->rl_rtype == rpcrdma_noch) { | 471 | if (rtype == rpcrdma_noch) { |
483 | 472 | ||
484 | padlen = rpcrdma_inline_pullup(rqst, | 473 | padlen = rpcrdma_inline_pullup(rqst, |
485 | RPCRDMA_INLINE_PAD_VALUE(rqst)); | 474 | RPCRDMA_INLINE_PAD_VALUE(rqst)); |
@@ -494,7 +483,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
494 | headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero; | 483 | headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero; |
495 | headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero; | 484 | headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero; |
496 | hdrlen += 2 * sizeof(u32); /* extra words in padhdr */ | 485 | hdrlen += 2 * sizeof(u32); /* extra words in padhdr */ |
497 | if (req->rl_wtype != rpcrdma_noch) { | 486 | if (wtype != rpcrdma_noch) { |
498 | dprintk("RPC: %s: invalid chunk list\n", | 487 | dprintk("RPC: %s: invalid chunk list\n", |
499 | __func__); | 488 | __func__); |
500 | return -EIO; | 489 | return -EIO; |
@@ -515,18 +504,26 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
515 | * on receive. Therefore, we request a reply chunk | 504 | * on receive. Therefore, we request a reply chunk |
516 | * for non-writes wherever feasible and efficient. | 505 | * for non-writes wherever feasible and efficient. |
517 | */ | 506 | */ |
518 | if (req->rl_wtype == rpcrdma_noch) | 507 | if (wtype == rpcrdma_noch) |
519 | req->rl_wtype = rpcrdma_replych; | 508 | wtype = rpcrdma_replych; |
520 | } | 509 | } |
521 | } | 510 | } |
522 | 511 | ||
523 | hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen); | 512 | if (rtype != rpcrdma_noch) { |
513 | hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf, | ||
514 | headerp, rtype); | ||
515 | wtype = rtype; /* simplify dprintk */ | ||
516 | |||
517 | } else if (wtype != rpcrdma_noch) { | ||
518 | hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf, | ||
519 | headerp, wtype); | ||
520 | } | ||
524 | if (hdrlen < 0) | 521 | if (hdrlen < 0) |
525 | return hdrlen; | 522 | return hdrlen; |
526 | 523 | ||
527 | dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd" | 524 | dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd" |
528 | " headerp 0x%p base 0x%p lkey 0x%x\n", | 525 | " headerp 0x%p base 0x%p lkey 0x%x\n", |
529 | __func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen, | 526 | __func__, transfertypes[wtype], hdrlen, rpclen, padlen, |
530 | headerp, base, rdmab_lkey(req->rl_rdmabuf)); | 527 | headerp, base, rdmab_lkey(req->rl_rdmabuf)); |
531 | 528 | ||
532 | /* | 529 | /* |
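
rpcrdma_marshal_req() now keeps the read/write chunk types in local rtype/wtype variables and picks which chunk list to marshal from them. A compact sketch of that decision tree, with thresholds and buffer fields reduced to illustrative parameters:

#include <stdbool.h>
#include <stddef.h>

enum chunktype { noch, readch, areadch, writech, replych };

/* Choose the reply-side chunk type, following the marshal logic:
 * small replies stay inline, page-less replies use a reply chunk,
 * READ-flagged buffers use write chunks, everything else a reply
 * chunk. */
static enum chunktype choose_wtype(size_t rcv_buflen, size_t rcv_pagelen,
                                   bool xdrbuf_read, size_t inline_read)
{
    if (rcv_buflen <= inline_read)
        return noch;
    if (rcv_pagelen == 0)
        return replych;
    if (xdrbuf_read)
        return writech;
    return replych;
}

/* Choose the call-side chunk type the same way. */
static enum chunktype choose_rtype(size_t snd_len, size_t snd_pagelen,
                                   size_t inline_write)
{
    if (snd_len <= inline_write)
        return noch;
    if (snd_pagelen == 0)
        return areadch;
    return readch;
}
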
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 2e192baa59f3..54f23b1be986 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
@@ -157,12 +157,47 @@ static struct ctl_table sunrpc_table[] = { | |||
157 | static struct rpc_xprt_ops xprt_rdma_procs; /* forward reference */ | 157 | static struct rpc_xprt_ops xprt_rdma_procs; /* forward reference */ |
158 | 158 | ||
159 | static void | 159 | static void |
160 | xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap) | ||
161 | { | ||
162 | struct sockaddr_in *sin = (struct sockaddr_in *)sap; | ||
163 | char buf[20]; | ||
164 | |||
165 | snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); | ||
166 | xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); | ||
167 | |||
168 | xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA; | ||
169 | } | ||
170 | |||
171 | static void | ||
172 | xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap) | ||
173 | { | ||
174 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; | ||
175 | char buf[40]; | ||
176 | |||
177 | snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); | ||
178 | xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); | ||
179 | |||
180 | xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6; | ||
181 | } | ||
182 | |||
183 | static void | ||
160 | xprt_rdma_format_addresses(struct rpc_xprt *xprt) | 184 | xprt_rdma_format_addresses(struct rpc_xprt *xprt) |
161 | { | 185 | { |
162 | struct sockaddr *sap = (struct sockaddr *) | 186 | struct sockaddr *sap = (struct sockaddr *) |
163 | &rpcx_to_rdmad(xprt).addr; | 187 | &rpcx_to_rdmad(xprt).addr; |
164 | struct sockaddr_in *sin = (struct sockaddr_in *)sap; | 188 | char buf[128]; |
165 | char buf[64]; | 189 | |
190 | switch (sap->sa_family) { | ||
191 | case AF_INET: | ||
192 | xprt_rdma_format_addresses4(xprt, sap); | ||
193 | break; | ||
194 | case AF_INET6: | ||
195 | xprt_rdma_format_addresses6(xprt, sap); | ||
196 | break; | ||
197 | default: | ||
198 | pr_err("rpcrdma: Unrecognized address family\n"); | ||
199 | return; | ||
200 | } | ||
166 | 201 | ||
167 | (void)rpc_ntop(sap, buf, sizeof(buf)); | 202 | (void)rpc_ntop(sap, buf, sizeof(buf)); |
168 | xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); | 203 | xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); |
@@ -170,16 +205,10 @@ xprt_rdma_format_addresses(struct rpc_xprt *xprt) | |||
170 | snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); | 205 | snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); |
171 | xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); | 206 | xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); |
172 | 207 | ||
173 | xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; | ||
174 | |||
175 | snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); | ||
176 | xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); | ||
177 | |||
178 | snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); | 208 | snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); |
179 | xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); | 209 | xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); |
180 | 210 | ||
181 | /* netid */ | 211 | xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; |
182 | xprt->address_strings[RPC_DISPLAY_NETID] = "rdma"; | ||
183 | } | 212 | } |
184 | 213 | ||
185 | static void | 214 | static void |
@@ -377,7 +406,10 @@ xprt_setup_rdma(struct xprt_create *args) | |||
377 | xprt_rdma_connect_worker); | 406 | xprt_rdma_connect_worker); |
378 | 407 | ||
379 | xprt_rdma_format_addresses(xprt); | 408 | xprt_rdma_format_addresses(xprt); |
380 | xprt->max_payload = rpcrdma_max_payload(new_xprt); | 409 | xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt); |
410 | if (xprt->max_payload == 0) | ||
411 | goto out4; | ||
412 | xprt->max_payload <<= PAGE_SHIFT; | ||
381 | dprintk("RPC: %s: transport data payload maximum: %zu bytes\n", | 413 | dprintk("RPC: %s: transport data payload maximum: %zu bytes\n", |
382 | __func__, xprt->max_payload); | 414 | __func__, xprt->max_payload); |
383 | 415 | ||
@@ -552,8 +584,8 @@ xprt_rdma_free(void *buffer) | |||
552 | 584 | ||
553 | for (i = 0; req->rl_nchunks;) { | 585 | for (i = 0; req->rl_nchunks;) { |
554 | --req->rl_nchunks; | 586 | --req->rl_nchunks; |
555 | i += rpcrdma_deregister_external( | 587 | i += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt, |
556 | &req->rl_segments[i], r_xprt); | 588 | &req->rl_segments[i]); |
557 | } | 589 | } |
558 | 590 | ||
559 | rpcrdma_buffer_put(req); | 591 | rpcrdma_buffer_put(req); |
@@ -579,10 +611,7 @@ xprt_rdma_send_request(struct rpc_task *task) | |||
579 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); | 611 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); |
580 | int rc = 0; | 612 | int rc = 0; |
581 | 613 | ||
582 | if (req->rl_niovs == 0) | 614 | rc = rpcrdma_marshal_req(rqst); |
583 | rc = rpcrdma_marshal_req(rqst); | ||
584 | else if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_ALLPHYSICAL) | ||
585 | rc = rpcrdma_marshal_chunks(rqst, 0); | ||
586 | if (rc < 0) | 615 | if (rc < 0) |
587 | goto failed_marshal; | 616 | goto failed_marshal; |
588 | 617 | ||
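
xprt_setup_rdma() now derives the transport's maximum payload from the registration strategy: ro_maxpages() returns a page count, which is shifted into bytes. A one-function sketch of that calculation, with the PAGE_SHIFT value assumed for illustration:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12   /* assume 4 KiB pages for the example */

/* Convert a strategy's page budget into the byte payload limit,
 * as done after xprt_rdma_format_addresses(); zero pages means
 * setup fails. */
static size_t max_payload_bytes(size_t ro_maxpages)
{
    if (ro_maxpages == 0)
        return 0;       /* caller would bail out */
    return ro_maxpages << PAGE_SHIFT;
}

int main(void)
{
    printf("%zu bytes\n", max_payload_bytes(64));   /* 262144 */
    return 0;
}
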
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 124676c13780..4870d272e006 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/interrupt.h> | 50 | #include <linux/interrupt.h> |
51 | #include <linux/slab.h> | 51 | #include <linux/slab.h> |
52 | #include <linux/prefetch.h> | 52 | #include <linux/prefetch.h> |
53 | #include <linux/sunrpc/addr.h> | ||
53 | #include <asm/bitops.h> | 54 | #include <asm/bitops.h> |
54 | 55 | ||
55 | #include "xprt_rdma.h" | 56 | #include "xprt_rdma.h" |
@@ -62,9 +63,6 @@ | |||
62 | # define RPCDBG_FACILITY RPCDBG_TRANS | 63 | # define RPCDBG_FACILITY RPCDBG_TRANS |
63 | #endif | 64 | #endif |
64 | 65 | ||
65 | static void rpcrdma_reset_frmrs(struct rpcrdma_ia *); | ||
66 | static void rpcrdma_reset_fmrs(struct rpcrdma_ia *); | ||
67 | |||
68 | /* | 66 | /* |
69 | * internal functions | 67 | * internal functions |
70 | */ | 68 | */ |
@@ -188,7 +186,7 @@ static const char * const wc_status[] = { | |||
188 | "remote access error", | 186 | "remote access error", |
189 | "remote operation error", | 187 | "remote operation error", |
190 | "transport retry counter exceeded", | 188 | "transport retry counter exceeded", |
191 | "RNR retrycounter exceeded", | 189 | "RNR retry counter exceeded", |
192 | "local RDD violation error", | 190 | "local RDD violation error", |
193 | "remove invalid RD request", | 191 | "remove invalid RD request", |
194 | "operation aborted", | 192 | "operation aborted", |
@@ -206,21 +204,17 @@ static const char * const wc_status[] = { | |||
206 | static void | 204 | static void |
207 | rpcrdma_sendcq_process_wc(struct ib_wc *wc) | 205 | rpcrdma_sendcq_process_wc(struct ib_wc *wc) |
208 | { | 206 | { |
209 | if (likely(wc->status == IB_WC_SUCCESS)) | ||
210 | return; | ||
211 | |||
212 | /* WARNING: Only wr_id and status are reliable at this point */ | 207 | /* WARNING: Only wr_id and status are reliable at this point */ |
213 | if (wc->wr_id == 0ULL) { | 208 | if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) { |
214 | if (wc->status != IB_WC_WR_FLUSH_ERR) | 209 | if (wc->status != IB_WC_SUCCESS && |
210 | wc->status != IB_WC_WR_FLUSH_ERR) | ||
215 | pr_err("RPC: %s: SEND: %s\n", | 211 | pr_err("RPC: %s: SEND: %s\n", |
216 | __func__, COMPLETION_MSG(wc->status)); | 212 | __func__, COMPLETION_MSG(wc->status)); |
217 | } else { | 213 | } else { |
218 | struct rpcrdma_mw *r; | 214 | struct rpcrdma_mw *r; |
219 | 215 | ||
220 | r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; | 216 | r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; |
221 | r->r.frmr.fr_state = FRMR_IS_STALE; | 217 | r->mw_sendcompletion(wc); |
222 | pr_err("RPC: %s: frmr %p (stale): %s\n", | ||
223 | __func__, r, COMPLETION_MSG(wc->status)); | ||
224 | } | 218 | } |
225 | } | 219 | } |
226 | 220 | ||
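
The send-completion path no longer hard-codes FRMR recovery; each rpcrdma_mw now carries a mw_sendcompletion callback (frwr_sendcompletion in frwr_ops.c) that the generic handler invokes. A small sketch of that per-object dispatch, with the types reduced to the essentials:

#include <stdio.h>

enum wc_status { WC_SUCCESS = 0, WC_FLUSH_ERR = 5 };

struct wc { unsigned long wr_id; enum wc_status status; };

/* Each memory-window object carries its own completion hook. */
struct mw {
    void (*sendcompletion)(struct wc *wc);
    int stale;                  /* set when recovery is needed */
};

static void frwr_completion(struct wc *wc)
{
    struct mw *r = (struct mw *)wc->wr_id;

    if (wc->status != WC_SUCCESS)
        r->stale = 1;           /* mark for reset after reconnect */
}

static void process_send_wc(struct wc *wc)
{
    if (wc->wr_id == 0) {
        /* "ignore" sentinel: only log unexpected failures */
        if (wc->status != WC_SUCCESS && wc->status != WC_FLUSH_ERR)
            fprintf(stderr, "send failed: %d\n", wc->status);
        return;
    }
    /* otherwise dispatch to the mw's registered hook */
    ((struct mw *)wc->wr_id)->sendcompletion(wc);
}
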
@@ -424,7 +418,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event) | |||
424 | struct rpcrdma_ia *ia = &xprt->rx_ia; | 418 | struct rpcrdma_ia *ia = &xprt->rx_ia; |
425 | struct rpcrdma_ep *ep = &xprt->rx_ep; | 419 | struct rpcrdma_ep *ep = &xprt->rx_ep; |
426 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | 420 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) |
427 | struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr; | 421 | struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr; |
428 | #endif | 422 | #endif |
429 | struct ib_qp_attr *attr = &ia->ri_qp_attr; | 423 | struct ib_qp_attr *attr = &ia->ri_qp_attr; |
430 | struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr; | 424 | struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr; |
@@ -480,9 +474,8 @@ connected: | |||
480 | wake_up_all(&ep->rep_connect_wait); | 474 | wake_up_all(&ep->rep_connect_wait); |
481 | /*FALLTHROUGH*/ | 475 | /*FALLTHROUGH*/ |
482 | default: | 476 | default: |
483 | dprintk("RPC: %s: %pI4:%u (ep 0x%p): %s\n", | 477 | dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n", |
484 | __func__, &addr->sin_addr.s_addr, | 478 | __func__, sap, rpc_get_port(sap), ep, |
485 | ntohs(addr->sin_port), ep, | ||
486 | CONNECTION_MSG(event->event)); | 479 | CONNECTION_MSG(event->event)); |
487 | break; | 480 | break; |
488 | } | 481 | } |
@@ -491,19 +484,16 @@ connected: | |||
491 | if (connstate == 1) { | 484 | if (connstate == 1) { |
492 | int ird = attr->max_dest_rd_atomic; | 485 | int ird = attr->max_dest_rd_atomic; |
493 | int tird = ep->rep_remote_cma.responder_resources; | 486 | int tird = ep->rep_remote_cma.responder_resources; |
494 | printk(KERN_INFO "rpcrdma: connection to %pI4:%u " | 487 | |
495 | "on %s, memreg %d slots %d ird %d%s\n", | 488 | pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n", |
496 | &addr->sin_addr.s_addr, | 489 | sap, rpc_get_port(sap), |
497 | ntohs(addr->sin_port), | ||
498 | ia->ri_id->device->name, | 490 | ia->ri_id->device->name, |
499 | ia->ri_memreg_strategy, | 491 | ia->ri_ops->ro_displayname, |
500 | xprt->rx_buf.rb_max_requests, | 492 | xprt->rx_buf.rb_max_requests, |
501 | ird, ird < 4 && ird < tird / 2 ? " (low!)" : ""); | 493 | ird, ird < 4 && ird < tird / 2 ? " (low!)" : ""); |
502 | } else if (connstate < 0) { | 494 | } else if (connstate < 0) { |
503 | printk(KERN_INFO "rpcrdma: connection to %pI4:%u closed (%d)\n", | 495 | pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n", |
504 | &addr->sin_addr.s_addr, | 496 | sap, rpc_get_port(sap), connstate); |
505 | ntohs(addr->sin_port), | ||
506 | connstate); | ||
507 | } | 497 | } |
508 | #endif | 498 | #endif |
509 | 499 | ||
@@ -621,17 +611,13 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) | |||
621 | 611 | ||
622 | if (memreg == RPCRDMA_FRMR) { | 612 | if (memreg == RPCRDMA_FRMR) { |
623 | /* Requires both frmr reg and local dma lkey */ | 613 | /* Requires both frmr reg and local dma lkey */ |
624 | if ((devattr->device_cap_flags & | 614 | if (((devattr->device_cap_flags & |
625 | (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) != | 615 | (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) != |
626 | (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) { | 616 | (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) || |
617 | (devattr->max_fast_reg_page_list_len == 0)) { | ||
627 | dprintk("RPC: %s: FRMR registration " | 618 | dprintk("RPC: %s: FRMR registration " |
628 | "not supported by HCA\n", __func__); | 619 | "not supported by HCA\n", __func__); |
629 | memreg = RPCRDMA_MTHCAFMR; | 620 | memreg = RPCRDMA_MTHCAFMR; |
630 | } else { | ||
631 | /* Mind the ia limit on FRMR page list depth */ | ||
632 | ia->ri_max_frmr_depth = min_t(unsigned int, | ||
633 | RPCRDMA_MAX_DATA_SEGS, | ||
634 | devattr->max_fast_reg_page_list_len); | ||
635 | } | 621 | } |
636 | } | 622 | } |
637 | if (memreg == RPCRDMA_MTHCAFMR) { | 623 | if (memreg == RPCRDMA_MTHCAFMR) { |
@@ -652,13 +638,16 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) | |||
652 | */ | 638 | */ |
653 | switch (memreg) { | 639 | switch (memreg) { |
654 | case RPCRDMA_FRMR: | 640 | case RPCRDMA_FRMR: |
641 | ia->ri_ops = &rpcrdma_frwr_memreg_ops; | ||
655 | break; | 642 | break; |
656 | case RPCRDMA_ALLPHYSICAL: | 643 | case RPCRDMA_ALLPHYSICAL: |
644 | ia->ri_ops = &rpcrdma_physical_memreg_ops; | ||
657 | mem_priv = IB_ACCESS_LOCAL_WRITE | | 645 | mem_priv = IB_ACCESS_LOCAL_WRITE | |
658 | IB_ACCESS_REMOTE_WRITE | | 646 | IB_ACCESS_REMOTE_WRITE | |
659 | IB_ACCESS_REMOTE_READ; | 647 | IB_ACCESS_REMOTE_READ; |
660 | goto register_setup; | 648 | goto register_setup; |
661 | case RPCRDMA_MTHCAFMR: | 649 | case RPCRDMA_MTHCAFMR: |
650 | ia->ri_ops = &rpcrdma_fmr_memreg_ops; | ||
662 | if (ia->ri_have_dma_lkey) | 651 | if (ia->ri_have_dma_lkey) |
663 | break; | 652 | break; |
664 | mem_priv = IB_ACCESS_LOCAL_WRITE; | 653 | mem_priv = IB_ACCESS_LOCAL_WRITE; |
@@ -678,8 +667,8 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) | |||
678 | rc = -ENOMEM; | 667 | rc = -ENOMEM; |
679 | goto out3; | 668 | goto out3; |
680 | } | 669 | } |
681 | dprintk("RPC: %s: memory registration strategy is %d\n", | 670 | dprintk("RPC: %s: memory registration strategy is '%s'\n", |
682 | __func__, memreg); | 671 | __func__, ia->ri_ops->ro_displayname); |
683 | 672 | ||
684 | /* Else will do memory reg/dereg for each chunk */ | 673 | /* Else will do memory reg/dereg for each chunk */ |
685 | ia->ri_memreg_strategy = memreg; | 674 | ia->ri_memreg_strategy = memreg; |
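
rpcrdma_ia_open() now pairs each registration strategy with its ops table in the same switch that sets up access flags, so the rest of the transport only sees ia->ri_ops. A hedged sketch of that selection, with enum values and table names as simplified stand-ins:

#include <stddef.h>

enum memreg { MEMREG_FRMR, MEMREG_ALLPHYSICAL, MEMREG_MTHCAFMR };

struct memreg_ops { const char *ro_displayname; };

static const struct memreg_ops frwr_ops     = { "frwr" };
static const struct memreg_ops physical_ops = { "physical" };
static const struct memreg_ops fmr_ops      = { "fmr" };

/* Simplified strategy selection, mirroring the switch in
 * rpcrdma_ia_open(): each memreg mode picks its ops table. */
static const struct memreg_ops *select_ops(enum memreg memreg)
{
    switch (memreg) {
    case MEMREG_FRMR:        return &frwr_ops;
    case MEMREG_ALLPHYSICAL: return &physical_ops;
    case MEMREG_MTHCAFMR:    return &fmr_ops;
    }
    return NULL;
}
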
@@ -743,49 +732,11 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
743 | 732 | ||
744 | ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; | 733 | ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; |
745 | ep->rep_attr.qp_context = ep; | 734 | ep->rep_attr.qp_context = ep; |
746 | /* send_cq and recv_cq initialized below */ | ||
747 | ep->rep_attr.srq = NULL; | 735 | ep->rep_attr.srq = NULL; |
748 | ep->rep_attr.cap.max_send_wr = cdata->max_requests; | 736 | ep->rep_attr.cap.max_send_wr = cdata->max_requests; |
749 | switch (ia->ri_memreg_strategy) { | 737 | rc = ia->ri_ops->ro_open(ia, ep, cdata); |
750 | case RPCRDMA_FRMR: { | 738 | if (rc) |
751 | int depth = 7; | 739 | return rc; |
752 | |||
753 | /* Add room for frmr register and invalidate WRs. | ||
754 | * 1. FRMR reg WR for head | ||
755 | * 2. FRMR invalidate WR for head | ||
756 | * 3. N FRMR reg WRs for pagelist | ||
757 | * 4. N FRMR invalidate WRs for pagelist | ||
758 | * 5. FRMR reg WR for tail | ||
759 | * 6. FRMR invalidate WR for tail | ||
760 | * 7. The RDMA_SEND WR | ||
761 | */ | ||
762 | |||
763 | /* Calculate N if the device max FRMR depth is smaller than | ||
764 | * RPCRDMA_MAX_DATA_SEGS. | ||
765 | */ | ||
766 | if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) { | ||
767 | int delta = RPCRDMA_MAX_DATA_SEGS - | ||
768 | ia->ri_max_frmr_depth; | ||
769 | |||
770 | do { | ||
771 | depth += 2; /* FRMR reg + invalidate */ | ||
772 | delta -= ia->ri_max_frmr_depth; | ||
773 | } while (delta > 0); | ||
774 | |||
775 | } | ||
776 | ep->rep_attr.cap.max_send_wr *= depth; | ||
777 | if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) { | ||
778 | cdata->max_requests = devattr->max_qp_wr / depth; | ||
779 | if (!cdata->max_requests) | ||
780 | return -EINVAL; | ||
781 | ep->rep_attr.cap.max_send_wr = cdata->max_requests * | ||
782 | depth; | ||
783 | } | ||
784 | break; | ||
785 | } | ||
786 | default: | ||
787 | break; | ||
788 | } | ||
789 | ep->rep_attr.cap.max_recv_wr = cdata->max_requests; | 740 | ep->rep_attr.cap.max_recv_wr = cdata->max_requests; |
790 | ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2); | 741 | ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2); |
791 | ep->rep_attr.cap.max_recv_sge = 1; | 742 | ep->rep_attr.cap.max_recv_sge = 1; |
@@ -944,21 +895,9 @@ retry: | |||
944 | rpcrdma_ep_disconnect(ep, ia); | 895 | rpcrdma_ep_disconnect(ep, ia); |
945 | rpcrdma_flush_cqs(ep); | 896 | rpcrdma_flush_cqs(ep); |
946 | 897 | ||
947 | switch (ia->ri_memreg_strategy) { | ||
948 | case RPCRDMA_FRMR: | ||
949 | rpcrdma_reset_frmrs(ia); | ||
950 | break; | ||
951 | case RPCRDMA_MTHCAFMR: | ||
952 | rpcrdma_reset_fmrs(ia); | ||
953 | break; | ||
954 | case RPCRDMA_ALLPHYSICAL: | ||
955 | break; | ||
956 | default: | ||
957 | rc = -EIO; | ||
958 | goto out; | ||
959 | } | ||
960 | |||
961 | xprt = container_of(ia, struct rpcrdma_xprt, rx_ia); | 898 | xprt = container_of(ia, struct rpcrdma_xprt, rx_ia); |
899 | ia->ri_ops->ro_reset(xprt); | ||
900 | |||
962 | id = rpcrdma_create_id(xprt, ia, | 901 | id = rpcrdma_create_id(xprt, ia, |
963 | (struct sockaddr *)&xprt->rx_data.addr); | 902 | (struct sockaddr *)&xprt->rx_data.addr); |
964 | if (IS_ERR(id)) { | 903 | if (IS_ERR(id)) { |
@@ -1123,91 +1062,6 @@ out: | |||
1123 | return ERR_PTR(rc); | 1062 | return ERR_PTR(rc); |
1124 | } | 1063 | } |
1125 | 1064 | ||
1126 | static int | ||
1127 | rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf) | ||
1128 | { | ||
1129 | int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ; | ||
1130 | struct ib_fmr_attr fmr_attr = { | ||
1131 | .max_pages = RPCRDMA_MAX_DATA_SEGS, | ||
1132 | .max_maps = 1, | ||
1133 | .page_shift = PAGE_SHIFT | ||
1134 | }; | ||
1135 | struct rpcrdma_mw *r; | ||
1136 | int i, rc; | ||
1137 | |||
1138 | i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; | ||
1139 | dprintk("RPC: %s: initalizing %d FMRs\n", __func__, i); | ||
1140 | |||
1141 | while (i--) { | ||
1142 | r = kzalloc(sizeof(*r), GFP_KERNEL); | ||
1143 | if (r == NULL) | ||
1144 | return -ENOMEM; | ||
1145 | |||
1146 | r->r.fmr = ib_alloc_fmr(ia->ri_pd, mr_access_flags, &fmr_attr); | ||
1147 | if (IS_ERR(r->r.fmr)) { | ||
1148 | rc = PTR_ERR(r->r.fmr); | ||
1149 | dprintk("RPC: %s: ib_alloc_fmr failed %i\n", | ||
1150 | __func__, rc); | ||
1151 | goto out_free; | ||
1152 | } | ||
1153 | |||
1154 | list_add(&r->mw_list, &buf->rb_mws); | ||
1155 | list_add(&r->mw_all, &buf->rb_all); | ||
1156 | } | ||
1157 | return 0; | ||
1158 | |||
1159 | out_free: | ||
1160 | kfree(r); | ||
1161 | return rc; | ||
1162 | } | ||
1163 | |||
1164 | static int | ||
1165 | rpcrdma_init_frmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf) | ||
1166 | { | ||
1167 | struct rpcrdma_frmr *f; | ||
1168 | struct rpcrdma_mw *r; | ||
1169 | int i, rc; | ||
1170 | |||
1171 | i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; | ||
1172 | dprintk("RPC: %s: initalizing %d FRMRs\n", __func__, i); | ||
1173 | |||
1174 | while (i--) { | ||
1175 | r = kzalloc(sizeof(*r), GFP_KERNEL); | ||
1176 | if (r == NULL) | ||
1177 | return -ENOMEM; | ||
1178 | f = &r->r.frmr; | ||
1179 | |||
1180 | f->fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd, | ||
1181 | ia->ri_max_frmr_depth); | ||
1182 | if (IS_ERR(f->fr_mr)) { | ||
1183 | rc = PTR_ERR(f->fr_mr); | ||
1184 | dprintk("RPC: %s: ib_alloc_fast_reg_mr " | ||
1185 | "failed %i\n", __func__, rc); | ||
1186 | goto out_free; | ||
1187 | } | ||
1188 | |||
1189 | f->fr_pgl = ib_alloc_fast_reg_page_list(ia->ri_id->device, | ||
1190 | ia->ri_max_frmr_depth); | ||
1191 | if (IS_ERR(f->fr_pgl)) { | ||
1192 | rc = PTR_ERR(f->fr_pgl); | ||
1193 | dprintk("RPC: %s: ib_alloc_fast_reg_page_list " | ||
1194 | "failed %i\n", __func__, rc); | ||
1195 | |||
1196 | ib_dereg_mr(f->fr_mr); | ||
1197 | goto out_free; | ||
1198 | } | ||
1199 | |||
1200 | list_add(&r->mw_list, &buf->rb_mws); | ||
1201 | list_add(&r->mw_all, &buf->rb_all); | ||
1202 | } | ||
1203 | |||
1204 | return 0; | ||
1205 | |||
1206 | out_free: | ||
1207 | kfree(r); | ||
1208 | return rc; | ||
1209 | } | ||
1210 | |||
1211 | int | 1065 | int |
1212 | rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) | 1066 | rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) |
1213 | { | 1067 | { |
@@ -1244,22 +1098,9 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) | |||
1244 | buf->rb_recv_bufs = (struct rpcrdma_rep **) p; | 1098 | buf->rb_recv_bufs = (struct rpcrdma_rep **) p; |
1245 | p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests]; | 1099 | p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests]; |
1246 | 1100 | ||
1247 | INIT_LIST_HEAD(&buf->rb_mws); | 1101 | rc = ia->ri_ops->ro_init(r_xprt); |
1248 | INIT_LIST_HEAD(&buf->rb_all); | 1102 | if (rc) |
1249 | switch (ia->ri_memreg_strategy) { | 1103 | goto out; |
1250 | case RPCRDMA_FRMR: | ||
1251 | rc = rpcrdma_init_frmrs(ia, buf); | ||
1252 | if (rc) | ||
1253 | goto out; | ||
1254 | break; | ||
1255 | case RPCRDMA_MTHCAFMR: | ||
1256 | rc = rpcrdma_init_fmrs(ia, buf); | ||
1257 | if (rc) | ||
1258 | goto out; | ||
1259 | break; | ||
1260 | default: | ||
1261 | break; | ||
1262 | } | ||
1263 | 1104 | ||
1264 | for (i = 0; i < buf->rb_max_requests; i++) { | 1105 | for (i = 0; i < buf->rb_max_requests; i++) { |
1265 | struct rpcrdma_req *req; | 1106 | struct rpcrdma_req *req; |
@@ -1311,47 +1152,6 @@ rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req) | |||
1311 | kfree(req); | 1152 | kfree(req); |
1312 | } | 1153 | } |
1313 | 1154 | ||
1314 | static void | ||
1315 | rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf) | ||
1316 | { | ||
1317 | struct rpcrdma_mw *r; | ||
1318 | int rc; | ||
1319 | |||
1320 | while (!list_empty(&buf->rb_all)) { | ||
1321 | r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); | ||
1322 | list_del(&r->mw_all); | ||
1323 | list_del(&r->mw_list); | ||
1324 | |||
1325 | rc = ib_dealloc_fmr(r->r.fmr); | ||
1326 | if (rc) | ||
1327 | dprintk("RPC: %s: ib_dealloc_fmr failed %i\n", | ||
1328 | __func__, rc); | ||
1329 | |||
1330 | kfree(r); | ||
1331 | } | ||
1332 | } | ||
1333 | |||
1334 | static void | ||
1335 | rpcrdma_destroy_frmrs(struct rpcrdma_buffer *buf) | ||
1336 | { | ||
1337 | struct rpcrdma_mw *r; | ||
1338 | int rc; | ||
1339 | |||
1340 | while (!list_empty(&buf->rb_all)) { | ||
1341 | r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); | ||
1342 | list_del(&r->mw_all); | ||
1343 | list_del(&r->mw_list); | ||
1344 | |||
1345 | rc = ib_dereg_mr(r->r.frmr.fr_mr); | ||
1346 | if (rc) | ||
1347 | dprintk("RPC: %s: ib_dereg_mr failed %i\n", | ||
1348 | __func__, rc); | ||
1349 | ib_free_fast_reg_page_list(r->r.frmr.fr_pgl); | ||
1350 | |||
1351 | kfree(r); | ||
1352 | } | ||
1353 | } | ||
1354 | |||
1355 | void | 1155 | void |
1356 | rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) | 1156 | rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) |
1357 | { | 1157 | { |
@@ -1372,104 +1172,11 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) | |||
1372 | rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]); | 1172 | rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]); |
1373 | } | 1173 | } |
1374 | 1174 | ||
1375 | switch (ia->ri_memreg_strategy) { | 1175 | ia->ri_ops->ro_destroy(buf); |
1376 | case RPCRDMA_FRMR: | ||
1377 | rpcrdma_destroy_frmrs(buf); | ||
1378 | break; | ||
1379 | case RPCRDMA_MTHCAFMR: | ||
1380 | rpcrdma_destroy_fmrs(buf); | ||
1381 | break; | ||
1382 | default: | ||
1383 | break; | ||
1384 | } | ||
1385 | 1176 | ||
1386 | kfree(buf->rb_pool); | 1177 | kfree(buf->rb_pool); |
1387 | } | 1178 | } |
1388 | 1179 | ||
1389 | /* After a disconnect, unmap all FMRs. | ||
1390 | * | ||
1391 | * This is invoked only in the transport connect worker in order | ||
1392 | * to serialize with rpcrdma_register_fmr_external(). | ||
1393 | */ | ||
1394 | static void | ||
1395 | rpcrdma_reset_fmrs(struct rpcrdma_ia *ia) | ||
1396 | { | ||
1397 | struct rpcrdma_xprt *r_xprt = | ||
1398 | container_of(ia, struct rpcrdma_xprt, rx_ia); | ||
1399 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; | ||
1400 | struct list_head *pos; | ||
1401 | struct rpcrdma_mw *r; | ||
1402 | LIST_HEAD(l); | ||
1403 | int rc; | ||
1404 | |||
1405 | list_for_each(pos, &buf->rb_all) { | ||
1406 | r = list_entry(pos, struct rpcrdma_mw, mw_all); | ||
1407 | |||
1408 | INIT_LIST_HEAD(&l); | ||
1409 | list_add(&r->r.fmr->list, &l); | ||
1410 | rc = ib_unmap_fmr(&l); | ||
1411 | if (rc) | ||
1412 | dprintk("RPC: %s: ib_unmap_fmr failed %i\n", | ||
1413 | __func__, rc); | ||
1414 | } | ||
1415 | } | ||
1416 | |||
1417 | /* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in | ||
1418 | * an unusable state. Find FRMRs in this state and dereg / reg | ||
1419 | * each. FRMRs that are VALID and attached to an rpcrdma_req are | ||
1420 | * also torn down. | ||
1421 | * | ||
1422 | * This gives all in-use FRMRs a fresh rkey and leaves them INVALID. | ||
1423 | * | ||
1424 | * This is invoked only in the transport connect worker in order | ||
1425 | * to serialize with rpcrdma_register_frmr_external(). | ||
1426 | */ | ||
1427 | static void | ||
1428 | rpcrdma_reset_frmrs(struct rpcrdma_ia *ia) | ||
1429 | { | ||
1430 | struct rpcrdma_xprt *r_xprt = | ||
1431 | container_of(ia, struct rpcrdma_xprt, rx_ia); | ||
1432 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; | ||
1433 | struct list_head *pos; | ||
1434 | struct rpcrdma_mw *r; | ||
1435 | int rc; | ||
1436 | |||
1437 | list_for_each(pos, &buf->rb_all) { | ||
1438 | r = list_entry(pos, struct rpcrdma_mw, mw_all); | ||
1439 | |||
1440 | if (r->r.frmr.fr_state == FRMR_IS_INVALID) | ||
1441 | continue; | ||
1442 | |||
1443 | rc = ib_dereg_mr(r->r.frmr.fr_mr); | ||
1444 | if (rc) | ||
1445 | dprintk("RPC: %s: ib_dereg_mr failed %i\n", | ||
1446 | __func__, rc); | ||
1447 | ib_free_fast_reg_page_list(r->r.frmr.fr_pgl); | ||
1448 | |||
1449 | r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd, | ||
1450 | ia->ri_max_frmr_depth); | ||
1451 | if (IS_ERR(r->r.frmr.fr_mr)) { | ||
1452 | rc = PTR_ERR(r->r.frmr.fr_mr); | ||
1453 | dprintk("RPC: %s: ib_alloc_fast_reg_mr" | ||
1454 | " failed %i\n", __func__, rc); | ||
1455 | continue; | ||
1456 | } | ||
1457 | r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list( | ||
1458 | ia->ri_id->device, | ||
1459 | ia->ri_max_frmr_depth); | ||
1460 | if (IS_ERR(r->r.frmr.fr_pgl)) { | ||
1461 | rc = PTR_ERR(r->r.frmr.fr_pgl); | ||
1462 | dprintk("RPC: %s: " | ||
1463 | "ib_alloc_fast_reg_page_list " | ||
1464 | "failed %i\n", __func__, rc); | ||
1465 | |||
1466 | ib_dereg_mr(r->r.frmr.fr_mr); | ||
1467 | continue; | ||
1468 | } | ||
1469 | r->r.frmr.fr_state = FRMR_IS_INVALID; | ||
1470 | } | ||
1471 | } | ||
1472 | |||
1473 | /* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving | 1180 | /* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving |
1474 | * some req segments uninitialized. | 1181 | * some req segments uninitialized. |
1475 | */ | 1182 | */ |
@@ -1509,7 +1216,7 @@ rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf) | |||
1509 | } | 1216 | } |
1510 | } | 1217 | } |
1511 | 1218 | ||
1512 | /* rpcrdma_unmap_one() was already done by rpcrdma_deregister_frmr_external(). | 1219 | /* rpcrdma_unmap_one() was already done during deregistration. |
1513 | * Redo only the ib_post_send(). | 1220 | * Redo only the ib_post_send(). |
1514 | */ | 1221 | */ |
1515 | static void | 1222 | static void |
@@ -1729,6 +1436,14 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep) | |||
1729 | * Wrappers for internal-use kmalloc memory registration, used by buffer code. | 1436 | * Wrappers for internal-use kmalloc memory registration, used by buffer code. |
1730 | */ | 1437 | */ |
1731 | 1438 | ||
1439 | void | ||
1440 | rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg) | ||
1441 | { | ||
1442 | dprintk("RPC: map_one: offset %p iova %llx len %zu\n", | ||
1443 | seg->mr_offset, | ||
1444 | (unsigned long long)seg->mr_dma, seg->mr_dmalen); | ||
1445 | } | ||
1446 | |||
1732 | static int | 1447 | static int |
1733 | rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len, | 1448 | rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len, |
1734 | struct ib_mr **mrp, struct ib_sge *iov) | 1449 | struct ib_mr **mrp, struct ib_sge *iov) |
@@ -1854,287 +1569,6 @@ rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) | |||
1854 | } | 1569 | } |
1855 | 1570 | ||
1856 | /* | 1571 | /* |
1857 | * Wrappers for chunk registration, shared by read/write chunk code. | ||
1858 | */ | ||
1859 | |||
1860 | static void | ||
1861 | rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing) | ||
1862 | { | ||
1863 | seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | ||
1864 | seg->mr_dmalen = seg->mr_len; | ||
1865 | if (seg->mr_page) | ||
1866 | seg->mr_dma = ib_dma_map_page(ia->ri_id->device, | ||
1867 | seg->mr_page, offset_in_page(seg->mr_offset), | ||
1868 | seg->mr_dmalen, seg->mr_dir); | ||
1869 | else | ||
1870 | seg->mr_dma = ib_dma_map_single(ia->ri_id->device, | ||
1871 | seg->mr_offset, | ||
1872 | seg->mr_dmalen, seg->mr_dir); | ||
1873 | if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) { | ||
1874 | dprintk("RPC: %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n", | ||
1875 | __func__, | ||
1876 | (unsigned long long)seg->mr_dma, | ||
1877 | seg->mr_offset, seg->mr_dmalen); | ||
1878 | } | ||
1879 | } | ||
1880 | |||
1881 | static void | ||
1882 | rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg) | ||
1883 | { | ||
1884 | if (seg->mr_page) | ||
1885 | ib_dma_unmap_page(ia->ri_id->device, | ||
1886 | seg->mr_dma, seg->mr_dmalen, seg->mr_dir); | ||
1887 | else | ||
1888 | ib_dma_unmap_single(ia->ri_id->device, | ||
1889 | seg->mr_dma, seg->mr_dmalen, seg->mr_dir); | ||
1890 | } | ||
1891 | |||
1892 | static int | ||
1893 | rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg, | ||
1894 | int *nsegs, int writing, struct rpcrdma_ia *ia, | ||
1895 | struct rpcrdma_xprt *r_xprt) | ||
1896 | { | ||
1897 | struct rpcrdma_mr_seg *seg1 = seg; | ||
1898 | struct rpcrdma_mw *mw = seg1->rl_mw; | ||
1899 | struct rpcrdma_frmr *frmr = &mw->r.frmr; | ||
1900 | struct ib_mr *mr = frmr->fr_mr; | ||
1901 | struct ib_send_wr fastreg_wr, *bad_wr; | ||
1902 | u8 key; | ||
1903 | int len, pageoff; | ||
1904 | int i, rc; | ||
1905 | int seg_len; | ||
1906 | u64 pa; | ||
1907 | int page_no; | ||
1908 | |||
1909 | pageoff = offset_in_page(seg1->mr_offset); | ||
1910 | seg1->mr_offset -= pageoff; /* start of page */ | ||
1911 | seg1->mr_len += pageoff; | ||
1912 | len = -pageoff; | ||
1913 | if (*nsegs > ia->ri_max_frmr_depth) | ||
1914 | *nsegs = ia->ri_max_frmr_depth; | ||
1915 | for (page_no = i = 0; i < *nsegs;) { | ||
1916 | rpcrdma_map_one(ia, seg, writing); | ||
1917 | pa = seg->mr_dma; | ||
1918 | for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) { | ||
1919 | frmr->fr_pgl->page_list[page_no++] = pa; | ||
1920 | pa += PAGE_SIZE; | ||
1921 | } | ||
1922 | len += seg->mr_len; | ||
1923 | ++seg; | ||
1924 | ++i; | ||
1925 | /* Check for holes */ | ||
1926 | if ((i < *nsegs && offset_in_page(seg->mr_offset)) || | ||
1927 | offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) | ||
1928 | break; | ||
1929 | } | ||
1930 | dprintk("RPC: %s: Using frmr %p to map %d segments\n", | ||
1931 | __func__, mw, i); | ||
1932 | |||
1933 | frmr->fr_state = FRMR_IS_VALID; | ||
1934 | |||
1935 | memset(&fastreg_wr, 0, sizeof(fastreg_wr)); | ||
1936 | fastreg_wr.wr_id = (unsigned long)(void *)mw; | ||
1937 | fastreg_wr.opcode = IB_WR_FAST_REG_MR; | ||
1938 | fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma; | ||
1939 | fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl; | ||
1940 | fastreg_wr.wr.fast_reg.page_list_len = page_no; | ||
1941 | fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; | ||
1942 | fastreg_wr.wr.fast_reg.length = page_no << PAGE_SHIFT; | ||
1943 | if (fastreg_wr.wr.fast_reg.length < len) { | ||
1944 | rc = -EIO; | ||
1945 | goto out_err; | ||
1946 | } | ||
1947 | |||
1948 | /* Bump the key */ | ||
1949 | key = (u8)(mr->rkey & 0x000000FF); | ||
1950 | ib_update_fast_reg_key(mr, ++key); | ||
1951 | |||
1952 | fastreg_wr.wr.fast_reg.access_flags = (writing ? | ||
1953 | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : | ||
1954 | IB_ACCESS_REMOTE_READ); | ||
1955 | fastreg_wr.wr.fast_reg.rkey = mr->rkey; | ||
1956 | DECR_CQCOUNT(&r_xprt->rx_ep); | ||
1957 | |||
1958 | rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr); | ||
1959 | if (rc) { | ||
1960 | dprintk("RPC: %s: failed ib_post_send for register," | ||
1961 | " status %i\n", __func__, rc); | ||
1962 | ib_update_fast_reg_key(mr, --key); | ||
1963 | goto out_err; | ||
1964 | } else { | ||
1965 | seg1->mr_rkey = mr->rkey; | ||
1966 | seg1->mr_base = seg1->mr_dma + pageoff; | ||
1967 | seg1->mr_nsegs = i; | ||
1968 | seg1->mr_len = len; | ||
1969 | } | ||
1970 | *nsegs = i; | ||
1971 | return 0; | ||
1972 | out_err: | ||
1973 | frmr->fr_state = FRMR_IS_INVALID; | ||
1974 | while (i--) | ||
1975 | rpcrdma_unmap_one(ia, --seg); | ||
1976 | return rc; | ||
1977 | } | ||
1978 | |||
1979 | static int | ||
1980 | rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg, | ||
1981 | struct rpcrdma_ia *ia, struct rpcrdma_xprt *r_xprt) | ||
1982 | { | ||
1983 | struct rpcrdma_mr_seg *seg1 = seg; | ||
1984 | struct ib_send_wr invalidate_wr, *bad_wr; | ||
1985 | int rc; | ||
1986 | |||
1987 | seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID; | ||
1988 | |||
1989 | memset(&invalidate_wr, 0, sizeof invalidate_wr); | ||
1990 | invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw; | ||
1991 | invalidate_wr.opcode = IB_WR_LOCAL_INV; | ||
1992 | invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey; | ||
1993 | DECR_CQCOUNT(&r_xprt->rx_ep); | ||
1994 | |||
1995 | read_lock(&ia->ri_qplock); | ||
1996 | while (seg1->mr_nsegs--) | ||
1997 | rpcrdma_unmap_one(ia, seg++); | ||
1998 | rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); | ||
1999 | read_unlock(&ia->ri_qplock); | ||
2000 | if (rc) { | ||
2001 | /* Force rpcrdma_buffer_get() to retry */ | ||
2002 | seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE; | ||
2003 | dprintk("RPC: %s: failed ib_post_send for invalidate," | ||
2004 | " status %i\n", __func__, rc); | ||
2005 | } | ||
2006 | return rc; | ||
2007 | } | ||
2008 | |||
2009 | static int | ||
2010 | rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg, | ||
2011 | int *nsegs, int writing, struct rpcrdma_ia *ia) | ||
2012 | { | ||
2013 | struct rpcrdma_mr_seg *seg1 = seg; | ||
2014 | u64 physaddrs[RPCRDMA_MAX_DATA_SEGS]; | ||
2015 | int len, pageoff, i, rc; | ||
2016 | |||
2017 | pageoff = offset_in_page(seg1->mr_offset); | ||
2018 | seg1->mr_offset -= pageoff; /* start of page */ | ||
2019 | seg1->mr_len += pageoff; | ||
2020 | len = -pageoff; | ||
2021 | if (*nsegs > RPCRDMA_MAX_DATA_SEGS) | ||
2022 | *nsegs = RPCRDMA_MAX_DATA_SEGS; | ||
2023 | for (i = 0; i < *nsegs;) { | ||
2024 | rpcrdma_map_one(ia, seg, writing); | ||
2025 | physaddrs[i] = seg->mr_dma; | ||
2026 | len += seg->mr_len; | ||
2027 | ++seg; | ||
2028 | ++i; | ||
2029 | /* Check for holes */ | ||
2030 | if ((i < *nsegs && offset_in_page(seg->mr_offset)) || | ||
2031 | offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) | ||
2032 | break; | ||
2033 | } | ||
2034 | rc = ib_map_phys_fmr(seg1->rl_mw->r.fmr, physaddrs, i, seg1->mr_dma); | ||
2035 | if (rc) { | ||
2036 | dprintk("RPC: %s: failed ib_map_phys_fmr " | ||
2037 | "%u@0x%llx+%i (%d)... status %i\n", __func__, | ||
2038 | len, (unsigned long long)seg1->mr_dma, | ||
2039 | pageoff, i, rc); | ||
2040 | while (i--) | ||
2041 | rpcrdma_unmap_one(ia, --seg); | ||
2042 | } else { | ||
2043 | seg1->mr_rkey = seg1->rl_mw->r.fmr->rkey; | ||
2044 | seg1->mr_base = seg1->mr_dma + pageoff; | ||
2045 | seg1->mr_nsegs = i; | ||
2046 | seg1->mr_len = len; | ||
2047 | } | ||
2048 | *nsegs = i; | ||
2049 | return rc; | ||
2050 | } | ||
2051 | |||
2052 | static int | ||
2053 | rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg, | ||
2054 | struct rpcrdma_ia *ia) | ||
2055 | { | ||
2056 | struct rpcrdma_mr_seg *seg1 = seg; | ||
2057 | LIST_HEAD(l); | ||
2058 | int rc; | ||
2059 | |||
2060 | list_add(&seg1->rl_mw->r.fmr->list, &l); | ||
2061 | rc = ib_unmap_fmr(&l); | ||
2062 | read_lock(&ia->ri_qplock); | ||
2063 | while (seg1->mr_nsegs--) | ||
2064 | rpcrdma_unmap_one(ia, seg++); | ||
2065 | read_unlock(&ia->ri_qplock); | ||
2066 | if (rc) | ||
2067 | dprintk("RPC: %s: failed ib_unmap_fmr," | ||
2068 | " status %i\n", __func__, rc); | ||
2069 | return rc; | ||
2070 | } | ||
2071 | |||
2072 | int | ||
2073 | rpcrdma_register_external(struct rpcrdma_mr_seg *seg, | ||
2074 | int nsegs, int writing, struct rpcrdma_xprt *r_xprt) | ||
2075 | { | ||
2076 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
2077 | int rc = 0; | ||
2078 | |||
2079 | switch (ia->ri_memreg_strategy) { | ||
2080 | |||
2081 | case RPCRDMA_ALLPHYSICAL: | ||
2082 | rpcrdma_map_one(ia, seg, writing); | ||
2083 | seg->mr_rkey = ia->ri_bind_mem->rkey; | ||
2084 | seg->mr_base = seg->mr_dma; | ||
2085 | seg->mr_nsegs = 1; | ||
2086 | nsegs = 1; | ||
2087 | break; | ||
2088 | |||
2089 | /* Registration using frmr registration */ | ||
2090 | case RPCRDMA_FRMR: | ||
2091 | rc = rpcrdma_register_frmr_external(seg, &nsegs, writing, ia, r_xprt); | ||
2092 | break; | ||
2093 | |||
2094 | /* Registration using fmr memory registration */ | ||
2095 | case RPCRDMA_MTHCAFMR: | ||
2096 | rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia); | ||
2097 | break; | ||
2098 | |||
2099 | default: | ||
2100 | return -EIO; | ||
2101 | } | ||
2102 | if (rc) | ||
2103 | return rc; | ||
2104 | |||
2105 | return nsegs; | ||
2106 | } | ||
2107 | |||
2108 | int | ||
2109 | rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg, | ||
2110 | struct rpcrdma_xprt *r_xprt) | ||
2111 | { | ||
2112 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | ||
2113 | int nsegs = seg->mr_nsegs, rc; | ||
2114 | |||
2115 | switch (ia->ri_memreg_strategy) { | ||
2116 | |||
2117 | case RPCRDMA_ALLPHYSICAL: | ||
2118 | read_lock(&ia->ri_qplock); | ||
2119 | rpcrdma_unmap_one(ia, seg); | ||
2120 | read_unlock(&ia->ri_qplock); | ||
2121 | break; | ||
2122 | |||
2123 | case RPCRDMA_FRMR: | ||
2124 | rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt); | ||
2125 | break; | ||
2126 | |||
2127 | case RPCRDMA_MTHCAFMR: | ||
2128 | rc = rpcrdma_deregister_fmr_external(seg, ia); | ||
2129 | break; | ||
2130 | |||
2131 | default: | ||
2132 | break; | ||
2133 | } | ||
2134 | return nsegs; | ||
2135 | } | ||
2136 | |||
2137 | /* | ||
2138 | * Prepost any receive buffer, then post send. | 1572 | * Prepost any receive buffer, then post send. |
2139 | * | 1573 | * |
2140 | * Receive buffer is donated to hardware, reclaimed upon recv completion. | 1574 | * Receive buffer is donated to hardware, reclaimed upon recv completion. |
@@ -2156,7 +1590,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia, | |||
2156 | } | 1590 | } |
2157 | 1591 | ||
2158 | send_wr.next = NULL; | 1592 | send_wr.next = NULL; |
2159 | send_wr.wr_id = 0ULL; /* no send cookie */ | 1593 | send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION; |
2160 | send_wr.sg_list = req->rl_send_iov; | 1594 | send_wr.sg_list = req->rl_send_iov; |
2161 | send_wr.num_sge = req->rl_niovs; | 1595 | send_wr.num_sge = req->rl_niovs; |
2162 | send_wr.opcode = IB_WR_SEND; | 1596 | send_wr.opcode = IB_WR_SEND; |
@@ -2215,43 +1649,24 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia, | |||
2215 | return rc; | 1649 | return rc; |
2216 | } | 1650 | } |
2217 | 1651 | ||
2218 | /* Physical mapping means one Read/Write list entry per-page. | 1652 | /* How many chunk list items fit within our inline buffers? |
2219 | * All list entries must fit within an inline buffer | ||
2220 | * | ||
2221 | * NB: The server must return a Write list for NFS READ, | ||
2222 | * which has the same constraint. Factor in the inline | ||
2223 | * rsize as well. | ||
2224 | */ | 1653 | */ |
2225 | static size_t | 1654 | unsigned int |
2226 | rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt) | 1655 | rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt) |
2227 | { | 1656 | { |
2228 | struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; | 1657 | struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; |
2229 | unsigned int inline_size, pages; | 1658 | int bytes, segments; |
2230 | 1659 | ||
2231 | inline_size = min_t(unsigned int, | 1660 | bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize); |
2232 | cdata->inline_wsize, cdata->inline_rsize); | 1661 | bytes -= RPCRDMA_HDRLEN_MIN; |
2233 | inline_size -= RPCRDMA_HDRLEN_MIN; | 1662 | if (bytes < sizeof(struct rpcrdma_segment) * 2) { |
2234 | pages = inline_size / sizeof(struct rpcrdma_segment); | 1663 | pr_warn("RPC: %s: inline threshold too small\n", |
2235 | return pages << PAGE_SHIFT; | 1664 | __func__); |
2236 | } | 1665 | return 0; |
2237 | |||
2238 | static size_t | ||
2239 | rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt) | ||
2240 | { | ||
2241 | return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT; | ||
2242 | } | ||
2243 | |||
2244 | size_t | ||
2245 | rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt) | ||
2246 | { | ||
2247 | size_t result; | ||
2248 | |||
2249 | switch (r_xprt->rx_ia.ri_memreg_strategy) { | ||
2250 | case RPCRDMA_ALLPHYSICAL: | ||
2251 | result = rpcrdma_physical_max_payload(r_xprt); | ||
2252 | break; | ||
2253 | default: | ||
2254 | result = rpcrdma_mr_max_payload(r_xprt); | ||
2255 | } | 1666 | } |
2256 | return result; | 1667 | |
1668 | segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1); | ||
1669 | dprintk("RPC: %s: max chunk list size = %d segments\n", | ||
1670 | __func__, segments); | ||
1671 | return segments; | ||
2257 | } | 1672 | } |
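The new rpcrdma_max_segments() above rounds the number of chunk-list segments that fit in the smaller inline buffer down to a power of two. A minimal stand-alone sketch of that arithmetic, assuming placeholder sizes in main() rather than the real RPCRDMA_HDRLEN_MIN or sizeof(struct rpcrdma_segment):

#include <stdio.h>

/* like the kernel's fls(): position of the highest set bit, 1-based */
static int fls_u32(unsigned int x)
{
        int pos = 0;

        while (x) {
                pos++;
                x >>= 1;
        }
        return pos;
}

static unsigned int max_segments(unsigned int inline_wsize,
                                 unsigned int inline_rsize,
                                 unsigned int hdrlen,
                                 unsigned int seg_size)
{
        unsigned int bytes;

        bytes = inline_wsize < inline_rsize ? inline_wsize : inline_rsize;
        bytes -= hdrlen;
        if (bytes < seg_size * 2)
                return 0;               /* inline threshold too small */

        /* round down to the nearest power of two */
        return 1u << (fls_u32(bytes / seg_size) - 1);
}

int main(void)
{
        /* e.g. 1024-byte inline buffers, 28-byte header, 16-byte segments */
        printf("max segments: %u\n", max_segments(1024, 1024, 28, 16));
        return 0;
}

With these placeholder sizes the 996 usable bytes hold 62 segment descriptors, and the largest power of two not above 62 is 32.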
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 0a16fb6f0885..78e0b8beaa36 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -60,6 +60,7 @@ | |||
60 | * Interface Adapter -- one per transport instance | 60 | * Interface Adapter -- one per transport instance |
61 | */ | 61 | */ |
62 | struct rpcrdma_ia { | 62 | struct rpcrdma_ia { |
63 | const struct rpcrdma_memreg_ops *ri_ops; | ||
63 | rwlock_t ri_qplock; | 64 | rwlock_t ri_qplock; |
64 | struct rdma_cm_id *ri_id; | 65 | struct rdma_cm_id *ri_id; |
65 | struct ib_pd *ri_pd; | 66 | struct ib_pd *ri_pd; |
@@ -105,6 +106,10 @@ struct rpcrdma_ep { | |||
105 | #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit) | 106 | #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit) |
106 | #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount) | 107 | #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount) |
107 | 108 | ||
109 | /* Force completion handler to ignore the signal | ||
110 | */ | ||
111 | #define RPCRDMA_IGNORE_COMPLETION (0ULL) | ||
112 | |||
108 | /* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV | 113 | /* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV |
109 | * | 114 | * |
110 | * The below structure appears at the front of a large region of kmalloc'd | 115 | * The below structure appears at the front of a large region of kmalloc'd |
@@ -143,14 +148,6 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb) | |||
143 | return (struct rpcrdma_msg *)rb->rg_base; | 148 | return (struct rpcrdma_msg *)rb->rg_base; |
144 | } | 149 | } |
145 | 150 | ||
146 | enum rpcrdma_chunktype { | ||
147 | rpcrdma_noch = 0, | ||
148 | rpcrdma_readch, | ||
149 | rpcrdma_areadch, | ||
150 | rpcrdma_writech, | ||
151 | rpcrdma_replych | ||
152 | }; | ||
153 | |||
154 | /* | 151 | /* |
155 | * struct rpcrdma_rep -- this structure encapsulates state required to recv | 152 | * struct rpcrdma_rep -- this structure encapsulates state required to recv |
156 | * and complete a reply, asynchronously. It needs several pieces of | 153 |
@@ -213,6 +210,7 @@ struct rpcrdma_mw { | |||
213 | struct ib_fmr *fmr; | 210 | struct ib_fmr *fmr; |
214 | struct rpcrdma_frmr frmr; | 211 | struct rpcrdma_frmr frmr; |
215 | } r; | 212 | } r; |
213 | void (*mw_sendcompletion)(struct ib_wc *); | ||
216 | struct list_head mw_list; | 214 | struct list_head mw_list; |
217 | struct list_head mw_all; | 215 | struct list_head mw_all; |
218 | }; | 216 | }; |
@@ -258,7 +256,6 @@ struct rpcrdma_req { | |||
258 | unsigned int rl_niovs; /* 0, 2 or 4 */ | 256 | unsigned int rl_niovs; /* 0, 2 or 4 */ |
259 | unsigned int rl_nchunks; /* non-zero if chunks */ | 257 | unsigned int rl_nchunks; /* non-zero if chunks */ |
260 | unsigned int rl_connect_cookie; /* retry detection */ | 258 | unsigned int rl_connect_cookie; /* retry detection */ |
261 | enum rpcrdma_chunktype rl_rtype, rl_wtype; | ||
262 | struct rpcrdma_buffer *rl_buffer; /* home base for this structure */ | 259 | struct rpcrdma_buffer *rl_buffer; /* home base for this structure */ |
263 | struct rpcrdma_rep *rl_reply;/* holder for reply buffer */ | 260 | struct rpcrdma_rep *rl_reply;/* holder for reply buffer */ |
264 | struct ib_sge rl_send_iov[4]; /* for active requests */ | 261 | struct ib_sge rl_send_iov[4]; /* for active requests */ |
@@ -340,6 +337,29 @@ struct rpcrdma_stats { | |||
340 | }; | 337 | }; |
341 | 338 | ||
342 | /* | 339 | /* |
340 | * Per-registration mode operations | ||
341 | */ | ||
342 | struct rpcrdma_xprt; | ||
343 | struct rpcrdma_memreg_ops { | ||
344 | int (*ro_map)(struct rpcrdma_xprt *, | ||
345 | struct rpcrdma_mr_seg *, int, bool); | ||
346 | int (*ro_unmap)(struct rpcrdma_xprt *, | ||
347 | struct rpcrdma_mr_seg *); | ||
348 | int (*ro_open)(struct rpcrdma_ia *, | ||
349 | struct rpcrdma_ep *, | ||
350 | struct rpcrdma_create_data_internal *); | ||
351 | size_t (*ro_maxpages)(struct rpcrdma_xprt *); | ||
352 | int (*ro_init)(struct rpcrdma_xprt *); | ||
353 | void (*ro_reset)(struct rpcrdma_xprt *); | ||
354 | void (*ro_destroy)(struct rpcrdma_buffer *); | ||
355 | const char *ro_displayname; | ||
356 | }; | ||
357 | |||
358 | extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops; | ||
359 | extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops; | ||
360 | extern const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops; | ||
361 | |||
362 | /* | ||
343 | * RPCRDMA transport -- encapsulates the structures above for | 363 | * RPCRDMA transport -- encapsulates the structures above for |
344 | * integration with RPC. | 364 | * integration with RPC. |
345 | * | 365 | * |
@@ -398,16 +418,56 @@ void rpcrdma_buffer_put(struct rpcrdma_req *); | |||
398 | void rpcrdma_recv_buffer_get(struct rpcrdma_req *); | 418 | void rpcrdma_recv_buffer_get(struct rpcrdma_req *); |
399 | void rpcrdma_recv_buffer_put(struct rpcrdma_rep *); | 419 | void rpcrdma_recv_buffer_put(struct rpcrdma_rep *); |
400 | 420 | ||
401 | int rpcrdma_register_external(struct rpcrdma_mr_seg *, | ||
402 | int, int, struct rpcrdma_xprt *); | ||
403 | int rpcrdma_deregister_external(struct rpcrdma_mr_seg *, | ||
404 | struct rpcrdma_xprt *); | ||
405 | |||
406 | struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *, | 421 | struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *, |
407 | size_t, gfp_t); | 422 | size_t, gfp_t); |
408 | void rpcrdma_free_regbuf(struct rpcrdma_ia *, | 423 | void rpcrdma_free_regbuf(struct rpcrdma_ia *, |
409 | struct rpcrdma_regbuf *); | 424 | struct rpcrdma_regbuf *); |
410 | 425 | ||
426 | unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *); | ||
427 | |||
428 | /* | ||
429 | * Wrappers for chunk registration, shared by read/write chunk code. | ||
430 | */ | ||
431 | |||
432 | void rpcrdma_mapping_error(struct rpcrdma_mr_seg *); | ||
433 | |||
434 | static inline enum dma_data_direction | ||
435 | rpcrdma_data_dir(bool writing) | ||
436 | { | ||
437 | return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | ||
438 | } | ||
439 | |||
440 | static inline void | ||
441 | rpcrdma_map_one(struct ib_device *device, struct rpcrdma_mr_seg *seg, | ||
442 | enum dma_data_direction direction) | ||
443 | { | ||
444 | seg->mr_dir = direction; | ||
445 | seg->mr_dmalen = seg->mr_len; | ||
446 | |||
447 | if (seg->mr_page) | ||
448 | seg->mr_dma = ib_dma_map_page(device, | ||
449 | seg->mr_page, offset_in_page(seg->mr_offset), | ||
450 | seg->mr_dmalen, seg->mr_dir); | ||
451 | else | ||
452 | seg->mr_dma = ib_dma_map_single(device, | ||
453 | seg->mr_offset, | ||
454 | seg->mr_dmalen, seg->mr_dir); | ||
455 | |||
456 | if (ib_dma_mapping_error(device, seg->mr_dma)) | ||
457 | rpcrdma_mapping_error(seg); | ||
458 | } | ||
459 | |||
460 | static inline void | ||
461 | rpcrdma_unmap_one(struct ib_device *device, struct rpcrdma_mr_seg *seg) | ||
462 | { | ||
463 | if (seg->mr_page) | ||
464 | ib_dma_unmap_page(device, | ||
465 | seg->mr_dma, seg->mr_dmalen, seg->mr_dir); | ||
466 | else | ||
467 | ib_dma_unmap_single(device, | ||
468 | seg->mr_dma, seg->mr_dmalen, seg->mr_dir); | ||
469 | } | ||
470 | |||
411 | /* | 471 | /* |
412 | * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c | 472 | * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c |
413 | */ | 473 | */ |
@@ -418,9 +478,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *); | |||
418 | /* | 478 | /* |
419 | * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c | 479 | * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c |
420 | */ | 480 | */ |
421 | ssize_t rpcrdma_marshal_chunks(struct rpc_rqst *, ssize_t); | ||
422 | int rpcrdma_marshal_req(struct rpc_rqst *); | 481 | int rpcrdma_marshal_req(struct rpc_rqst *); |
423 | size_t rpcrdma_max_payload(struct rpcrdma_xprt *); | ||
424 | 482 | ||
425 | /* Temporary NFS request map cache. Created in svc_rdma.c */ | 483 | /* Temporary NFS request map cache. Created in svc_rdma.c */ |
426 | extern struct kmem_cache *svc_rdma_map_cachep; | 484 | extern struct kmem_cache *svc_rdma_map_cachep; |
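The rpcrdma_memreg_ops table added above replaces the per-call switch on ri_memreg_strategy with a function-pointer dispatch chosen once per transport. A stand-alone sketch of that pattern, with stub types and a stub FRWR implementation standing in for the real xprtrdma structures:

#include <stdio.h>

struct demo_xprt;                       /* stands in for rpcrdma_xprt */

struct demo_memreg_ops {
        int (*ro_init)(struct demo_xprt *);
        void (*ro_destroy)(struct demo_xprt *);
        const char *ro_displayname;
};

static int frwr_init(struct demo_xprt *xprt)
{
        (void)xprt;                     /* allocate MRs here in real code */
        return 0;
}

static void frwr_destroy(struct demo_xprt *xprt)
{
        (void)xprt;                     /* free MRs here in real code */
}

static const struct demo_memreg_ops demo_frwr_ops = {
        .ro_init        = frwr_init,
        .ro_destroy     = frwr_destroy,
        .ro_displayname = "frwr",
};

struct demo_xprt {
        const struct demo_memreg_ops *ri_ops;   /* selected at setup time */
};

int main(void)
{
        struct demo_xprt xprt = { .ri_ops = &demo_frwr_ops };
        int rc;

        /* callers no longer switch on a registration-strategy enum */
        rc = xprt.ri_ops->ro_init(&xprt);
        printf("%s init: %d\n", xprt.ri_ops->ro_displayname, rc);
        xprt.ri_ops->ro_destroy(&xprt);
        return rc;
}

Each registration mode (fmr, frwr, physical) supplies one such const ops table, so adding or removing a mode no longer touches every call site.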
diff --git a/net/tipc/link.c b/net/tipc/link.c index a4cf364316de..14f09b3cb87c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -464,10 +464,11 @@ void tipc_link_reset(struct tipc_link *l_ptr) | |||
464 | /* Clean up all queues, except inputq: */ | 464 | /* Clean up all queues, except inputq: */ |
465 | __skb_queue_purge(&l_ptr->outqueue); | 465 | __skb_queue_purge(&l_ptr->outqueue); |
466 | __skb_queue_purge(&l_ptr->deferred_queue); | 466 | __skb_queue_purge(&l_ptr->deferred_queue); |
467 | skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq); | 467 | if (!owner->inputq) |
468 | if (!skb_queue_empty(&l_ptr->inputq)) | 468 | owner->inputq = &l_ptr->inputq; |
469 | skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq); | ||
470 | if (!skb_queue_empty(owner->inputq)) | ||
469 | owner->action_flags |= TIPC_MSG_EVT; | 471 | owner->action_flags |= TIPC_MSG_EVT; |
470 | owner->inputq = &l_ptr->inputq; | ||
471 | l_ptr->next_out = NULL; | 472 | l_ptr->next_out = NULL; |
472 | l_ptr->unacked_window = 0; | 473 | l_ptr->unacked_window = 0; |
473 | l_ptr->checkpoint = 1; | 474 | l_ptr->checkpoint = 1; |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index be2501538011..b6f84f6a2a09 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -4400,6 +4400,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) | |||
4400 | if (parse_station_flags(info, dev->ieee80211_ptr->iftype, ¶ms)) | 4400 | if (parse_station_flags(info, dev->ieee80211_ptr->iftype, ¶ms)) |
4401 | return -EINVAL; | 4401 | return -EINVAL; |
4402 | 4402 | ||
4403 | /* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT | ||
4404 | * as userspace might just pass through the capabilities from the IEs | ||
4405 | * directly, rather than enforcing this restriction and returning an | ||
4406 | * error in this case. | ||
4407 | */ | ||
4408 | if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) { | ||
4409 | params.ht_capa = NULL; | ||
4410 | params.vht_capa = NULL; | ||
4411 | } | ||
4412 | |||
4403 | /* When you run into this, adjust the code below for the new flag */ | 4413 | /* When you run into this, adjust the code below for the new flag */ |
4404 | BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); | 4414 | BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); |
4405 | 4415 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index cee479bc655c..638af0655aaf 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -2269,11 +2269,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
2269 | * have the xfrm_state's. We need to wait for KM to | 2269 | * have the xfrm_state's. We need to wait for KM to |
2270 | * negotiate new SA's or bail out with error.*/ | 2270 | * negotiate new SA's or bail out with error.*/ |
2271 | if (net->xfrm.sysctl_larval_drop) { | 2271 | if (net->xfrm.sysctl_larval_drop) { |
2272 | dst_release(dst); | ||
2273 | xfrm_pols_put(pols, drop_pols); | ||
2274 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); | 2272 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); |
2275 | 2273 | err = -EREMOTE; | |
2276 | return ERR_PTR(-EREMOTE); | 2274 | goto error; |
2277 | } | 2275 | } |
2278 | 2276 | ||
2279 | err = -EAGAIN; | 2277 | err = -EAGAIN; |
@@ -2324,7 +2322,8 @@ nopol: | |||
2324 | error: | 2322 | error: |
2325 | dst_release(dst); | 2323 | dst_release(dst); |
2326 | dropdst: | 2324 | dropdst: |
2327 | dst_release(dst_orig); | 2325 | if (!(flags & XFRM_LOOKUP_KEEP_DST_REF)) |
2326 | dst_release(dst_orig); | ||
2328 | xfrm_pols_put(pols, drop_pols); | 2327 | xfrm_pols_put(pols, drop_pols); |
2329 | return ERR_PTR(err); | 2328 | return ERR_PTR(err); |
2330 | } | 2329 | } |
@@ -2338,7 +2337,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, | |||
2338 | struct sock *sk, int flags) | 2337 | struct sock *sk, int flags) |
2339 | { | 2338 | { |
2340 | struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, | 2339 | struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, |
2341 | flags | XFRM_LOOKUP_QUEUE); | 2340 | flags | XFRM_LOOKUP_QUEUE | |
2341 | XFRM_LOOKUP_KEEP_DST_REF); | ||
2342 | 2342 | ||
2343 | if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) | 2343 | if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) |
2344 | return make_blackhole(net, dst_orig->ops->family, dst_orig); | 2344 | return make_blackhole(net, dst_orig->ops->family, dst_orig); |
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 1684bcc78b34..5fde34326dcf 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, | |||
152 | goto out; | 152 | goto out; |
153 | 153 | ||
154 | /* No partial writes. */ | 154 | /* No partial writes. */ |
155 | length = EINVAL; | 155 | length = -EINVAL; |
156 | if (*ppos != 0) | 156 | if (*ppos != 0) |
157 | goto out; | 157 | goto out; |
158 | 158 | ||
diff --git a/sound/core/control.c b/sound/core/control.c index 35324a8e83c8..eeb691d1911f 100644 --- a/sound/core/control.c +++ b/sound/core/control.c | |||
@@ -1170,6 +1170,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, | |||
1170 | 1170 | ||
1171 | if (info->count < 1) | 1171 | if (info->count < 1) |
1172 | return -EINVAL; | 1172 | return -EINVAL; |
1173 | if (!*info->id.name) | ||
1174 | return -EINVAL; | ||
1175 | if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name)) | ||
1176 | return -EINVAL; | ||
1173 | access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : | 1177 | access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : |
1174 | (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| | 1178 | (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| |
1175 | SNDRV_CTL_ELEM_ACCESS_INACTIVE| | 1179 | SNDRV_CTL_ELEM_ACCESS_INACTIVE| |
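The added checks reject a user-defined control whose name is empty or not NUL-terminated within the fixed-size id.name field. A stand-alone sketch of the same validation; the 44-byte length is hard-coded here only for illustration and stands in for sizeof(info->id.name):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_NAME_LEN 44        /* illustrative stand-in for the id.name size */

static bool ctl_name_valid(const char *name)
{
        if (!*name)
                return false;   /* empty name */
        if (strnlen(name, DEMO_NAME_LEN) >= DEMO_NAME_LEN)
                return false;   /* no terminating NUL inside the buffer */
        return true;
}

int main(void)
{
        char ok[DEMO_NAME_LEN] = "PCM Playback Volume";
        char bad[DEMO_NAME_LEN];

        memset(bad, 'A', sizeof(bad));  /* unterminated name */
        printf("ok=%d bad=%d\n", ctl_name_valid(ok), ctl_name_valid(bad));
        return 0;
}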
diff --git a/sound/firewire/dice/dice-interface.h b/sound/firewire/dice/dice-interface.h index de7602bd69b5..27b044f84c81 100644 --- a/sound/firewire/dice/dice-interface.h +++ b/sound/firewire/dice/dice-interface.h | |||
@@ -299,23 +299,23 @@ | |||
299 | #define RX_ISOCHRONOUS 0x008 | 299 | #define RX_ISOCHRONOUS 0x008 |
300 | 300 | ||
301 | /* | 301 | /* |
302 | * Index of first quadlet to be interpreted; read/write. If > 0, that many | ||
303 | * quadlets at the beginning of each data block will be ignored, and all the | ||
304 | * audio and MIDI quadlets will follow. | ||
305 | */ | ||
306 | #define RX_SEQ_START 0x00c | ||
307 | |||
308 | /* | ||
302 | * The number of audio channels; read-only. There will be one quadlet per | 309 | * The number of audio channels; read-only. There will be one quadlet per |
303 | * channel. | 310 | * channel. |
304 | */ | 311 | */ |
305 | #define RX_NUMBER_AUDIO 0x00c | 312 | #define RX_NUMBER_AUDIO 0x010 |
306 | 313 | ||
307 | /* | 314 | /* |
308 | * The number of MIDI ports, 0-8; read-only. If > 0, there will be one | 315 | * The number of MIDI ports, 0-8; read-only. If > 0, there will be one |
309 | * additional quadlet in each data block, following the audio quadlets. | 316 | * additional quadlet in each data block, following the audio quadlets. |
310 | */ | 317 | */ |
311 | #define RX_NUMBER_MIDI 0x010 | 318 | #define RX_NUMBER_MIDI 0x014 |
312 | |||
313 | /* | ||
314 | * Index of first quadlet to be interpreted; read/write. If > 0, that many | ||
315 | * quadlets at the beginning of each data block will be ignored, and all the | ||
316 | * audio and MIDI quadlets will follow. | ||
317 | */ | ||
318 | #define RX_SEQ_START 0x014 | ||
319 | 319 | ||
320 | /* | 320 | /* |
321 | * Names of all audio channels; read-only. Quadlets are byte-swapped. Names | 321 | * Names of all audio channels; read-only. Quadlets are byte-swapped. Names |
diff --git a/sound/firewire/dice/dice-proc.c b/sound/firewire/dice/dice-proc.c index ecfe20fd4de5..f5c1d1bced59 100644 --- a/sound/firewire/dice/dice-proc.c +++ b/sound/firewire/dice/dice-proc.c | |||
@@ -99,9 +99,9 @@ static void dice_proc_read(struct snd_info_entry *entry, | |||
99 | } tx; | 99 | } tx; |
100 | struct { | 100 | struct { |
101 | u32 iso; | 101 | u32 iso; |
102 | u32 seq_start; | ||
102 | u32 number_audio; | 103 | u32 number_audio; |
103 | u32 number_midi; | 104 | u32 number_midi; |
104 | u32 seq_start; | ||
105 | char names[RX_NAMES_SIZE]; | 105 | char names[RX_NAMES_SIZE]; |
106 | u32 ac3_caps; | 106 | u32 ac3_caps; |
107 | u32 ac3_enable; | 107 | u32 ac3_enable; |
@@ -204,10 +204,10 @@ static void dice_proc_read(struct snd_info_entry *entry, | |||
204 | break; | 204 | break; |
205 | snd_iprintf(buffer, "rx %u:\n", stream); | 205 | snd_iprintf(buffer, "rx %u:\n", stream); |
206 | snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso); | 206 | snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso); |
207 | snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start); | ||
207 | snd_iprintf(buffer, " audio channels: %u\n", | 208 | snd_iprintf(buffer, " audio channels: %u\n", |
208 | buf.rx.number_audio); | 209 | buf.rx.number_audio); |
209 | snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi); | 210 | snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi); |
210 | snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start); | ||
211 | if (quadlets >= 68) { | 211 | if (quadlets >= 68) { |
212 | dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE); | 212 | dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE); |
213 | snd_iprintf(buffer, " names: %s\n", buf.rx.names); | 213 | snd_iprintf(buffer, " names: %s\n", buf.rx.names); |
diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c index 5f17b77ee152..f0e4d502d604 100644 --- a/sound/firewire/iso-resources.c +++ b/sound/firewire/iso-resources.c | |||
@@ -26,7 +26,7 @@ | |||
26 | int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit) | 26 | int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit) |
27 | { | 27 | { |
28 | r->channels_mask = ~0uLL; | 28 | r->channels_mask = ~0uLL; |
29 | r->unit = fw_unit_get(unit); | 29 | r->unit = unit; |
30 | mutex_init(&r->mutex); | 30 | mutex_init(&r->mutex); |
31 | r->allocated = false; | 31 | r->allocated = false; |
32 | 32 | ||
@@ -42,7 +42,6 @@ void fw_iso_resources_destroy(struct fw_iso_resources *r) | |||
42 | { | 42 | { |
43 | WARN_ON(r->allocated); | 43 | WARN_ON(r->allocated); |
44 | mutex_destroy(&r->mutex); | 44 | mutex_destroy(&r->mutex); |
45 | fw_unit_put(r->unit); | ||
46 | } | 45 | } |
47 | EXPORT_SYMBOL(fw_iso_resources_destroy); | 46 | EXPORT_SYMBOL(fw_iso_resources_destroy); |
48 | 47 | ||
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index a2ce773bdc62..17c2637d842c 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c | |||
@@ -1164,7 +1164,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, | |||
1164 | } | 1164 | } |
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | if (!bus->no_response_fallback) | 1167 | if (bus->no_response_fallback) |
1168 | return -1; | 1168 | return -1; |
1169 | 1169 | ||
1170 | if (!chip->polling_mode && chip->poll_count < 2) { | 1170 | if (!chip->polling_mode && chip->poll_count < 2) { |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index b680b4ec6331..8ec5289f8e05 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -687,12 +687,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, | |||
687 | return val; | 687 | return val; |
688 | } | 688 | } |
689 | 689 | ||
690 | /* is this a stereo widget or a stereo-to-mono mix? */ | ||
691 | static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir) | ||
692 | { | ||
693 | unsigned int wcaps = get_wcaps(codec, nid); | ||
694 | hda_nid_t conn; | ||
695 | |||
696 | if (wcaps & AC_WCAP_STEREO) | ||
697 | return true; | ||
698 | if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX) | ||
699 | return false; | ||
700 | if (snd_hda_get_num_conns(codec, nid) != 1) | ||
701 | return false; | ||
702 | if (snd_hda_get_connections(codec, nid, &conn, 1) < 0) | ||
703 | return false; | ||
704 | return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO); | ||
705 | } | ||
706 | |||
690 | /* initialize the amp value (only at the first time) */ | 707 | /* initialize the amp value (only at the first time) */ |
691 | static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) | 708 | static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) |
692 | { | 709 | { |
693 | unsigned int caps = query_amp_caps(codec, nid, dir); | 710 | unsigned int caps = query_amp_caps(codec, nid, dir); |
694 | int val = get_amp_val_to_activate(codec, nid, dir, caps, false); | 711 | int val = get_amp_val_to_activate(codec, nid, dir, caps, false); |
695 | snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); | 712 | |
713 | if (is_stereo_amps(codec, nid, dir)) | ||
714 | snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); | ||
715 | else | ||
716 | snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val); | ||
717 | } | ||
718 | |||
719 | /* update the amp, doing in stereo or mono depending on NID */ | ||
720 | static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, | ||
721 | unsigned int mask, unsigned int val) | ||
722 | { | ||
723 | if (is_stereo_amps(codec, nid, dir)) | ||
724 | return snd_hda_codec_amp_stereo(codec, nid, dir, idx, | ||
725 | mask, val); | ||
726 | else | ||
727 | return snd_hda_codec_amp_update(codec, nid, 0, dir, idx, | ||
728 | mask, val); | ||
696 | } | 729 | } |
697 | 730 | ||
698 | /* calculate amp value mask we can modify; | 731 | /* calculate amp value mask we can modify; |
@@ -732,7 +765,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir, | |||
732 | return; | 765 | return; |
733 | 766 | ||
734 | val &= mask; | 767 | val &= mask; |
735 | snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val); | 768 | update_amp(codec, nid, dir, idx, mask, val); |
736 | } | 769 | } |
737 | 770 | ||
738 | static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, | 771 | static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, |
@@ -4424,13 +4457,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix) | |||
4424 | has_amp = nid_has_mute(codec, mix, HDA_INPUT); | 4457 | has_amp = nid_has_mute(codec, mix, HDA_INPUT); |
4425 | for (i = 0; i < nums; i++) { | 4458 | for (i = 0; i < nums; i++) { |
4426 | if (has_amp) | 4459 | if (has_amp) |
4427 | snd_hda_codec_amp_stereo(codec, mix, | 4460 | update_amp(codec, mix, HDA_INPUT, i, |
4428 | HDA_INPUT, i, | 4461 | 0xff, HDA_AMP_MUTE); |
4429 | 0xff, HDA_AMP_MUTE); | ||
4430 | else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) | 4462 | else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) |
4431 | snd_hda_codec_amp_stereo(codec, conn[i], | 4463 | update_amp(codec, conn[i], HDA_OUTPUT, 0, |
4432 | HDA_OUTPUT, 0, | 4464 | 0xff, HDA_AMP_MUTE); |
4433 | 0xff, HDA_AMP_MUTE); | ||
4434 | } | 4465 | } |
4435 | } | 4466 | } |
4436 | 4467 | ||
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 4ca3d5d02436..a8a1e14272a1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = { | |||
1989 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, | 1989 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
1990 | /* Sunrise Point */ | 1990 | /* Sunrise Point */ |
1991 | { PCI_DEVICE(0x8086, 0xa170), | 1991 | { PCI_DEVICE(0x8086, 0xa170), |
1992 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, | 1992 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
1993 | /* Sunrise Point-LP */ | 1993 | /* Sunrise Point-LP */ |
1994 | { PCI_DEVICE(0x8086, 0x9d70), | 1994 | { PCI_DEVICE(0x8086, 0x9d70), |
1995 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | 1995 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c index ce5a6da83419..05e19f78b4cb 100644 --- a/sound/pci/hda/hda_proc.c +++ b/sound/pci/hda/hda_proc.c | |||
@@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer, | |||
134 | (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT); | 134 | (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT); |
135 | } | 135 | } |
136 | 136 | ||
137 | /* is this a stereo widget or a stereo-to-mono mix? */ | ||
138 | static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, | ||
139 | int dir, unsigned int wcaps, int indices) | ||
140 | { | ||
141 | hda_nid_t conn; | ||
142 | |||
143 | if (wcaps & AC_WCAP_STEREO) | ||
144 | return true; | ||
145 | /* check for a stereo-to-mono mix; it must be: | ||
146 | * only a single connection, only for input, and only a mixer widget | ||
147 | */ | ||
148 | if (indices != 1 || dir != HDA_INPUT || | ||
149 | get_wcaps_type(wcaps) != AC_WID_AUD_MIX) | ||
150 | return false; | ||
151 | |||
152 | if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0) | ||
153 | return false; | ||
154 | /* the connection source is a stereo? */ | ||
155 | wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP); | ||
156 | return !!(wcaps & AC_WCAP_STEREO); | ||
157 | } | ||
158 | |||
137 | static void print_amp_vals(struct snd_info_buffer *buffer, | 159 | static void print_amp_vals(struct snd_info_buffer *buffer, |
138 | struct hda_codec *codec, hda_nid_t nid, | 160 | struct hda_codec *codec, hda_nid_t nid, |
139 | int dir, int stereo, int indices) | 161 | int dir, unsigned int wcaps, int indices) |
140 | { | 162 | { |
141 | unsigned int val; | 163 | unsigned int val; |
164 | bool stereo; | ||
142 | int i; | 165 | int i; |
143 | 166 | ||
167 | stereo = is_stereo_amps(codec, nid, dir, wcaps, indices); | ||
168 | |||
144 | dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT; | 169 | dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT; |
145 | for (i = 0; i < indices; i++) { | 170 | for (i = 0; i < indices; i++) { |
146 | snd_iprintf(buffer, " ["); | 171 | snd_iprintf(buffer, " ["); |
@@ -757,12 +782,10 @@ static void print_codec_info(struct snd_info_entry *entry, | |||
757 | (codec->single_adc_amp && | 782 | (codec->single_adc_amp && |
758 | wid_type == AC_WID_AUD_IN)) | 783 | wid_type == AC_WID_AUD_IN)) |
759 | print_amp_vals(buffer, codec, nid, HDA_INPUT, | 784 | print_amp_vals(buffer, codec, nid, HDA_INPUT, |
760 | wid_caps & AC_WCAP_STEREO, | 785 | wid_caps, 1); |
761 | 1); | ||
762 | else | 786 | else |
763 | print_amp_vals(buffer, codec, nid, HDA_INPUT, | 787 | print_amp_vals(buffer, codec, nid, HDA_INPUT, |
764 | wid_caps & AC_WCAP_STEREO, | 788 | wid_caps, conn_len); |
765 | conn_len); | ||
766 | } | 789 | } |
767 | if (wid_caps & AC_WCAP_OUT_AMP) { | 790 | if (wid_caps & AC_WCAP_OUT_AMP) { |
768 | snd_iprintf(buffer, " Amp-Out caps: "); | 791 | snd_iprintf(buffer, " Amp-Out caps: "); |
@@ -771,11 +794,10 @@ static void print_codec_info(struct snd_info_entry *entry, | |||
771 | if (wid_type == AC_WID_PIN && | 794 | if (wid_type == AC_WID_PIN && |
772 | codec->pin_amp_workaround) | 795 | codec->pin_amp_workaround) |
773 | print_amp_vals(buffer, codec, nid, HDA_OUTPUT, | 796 | print_amp_vals(buffer, codec, nid, HDA_OUTPUT, |
774 | wid_caps & AC_WCAP_STEREO, | 797 | wid_caps, conn_len); |
775 | conn_len); | ||
776 | else | 798 | else |
777 | print_amp_vals(buffer, codec, nid, HDA_OUTPUT, | 799 | print_amp_vals(buffer, codec, nid, HDA_OUTPUT, |
778 | wid_caps & AC_WCAP_STEREO, 1); | 800 | wid_caps, 1); |
779 | } | 801 | } |
780 | 802 | ||
781 | switch (wid_type) { | 803 | switch (wid_type) { |
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 1589c9bcce3e..dd2b3d92071f 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -393,6 +393,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { | |||
393 | SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), | 393 | SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), |
394 | SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), | 394 | SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), |
395 | SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), | 395 | SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), |
396 | SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81), | ||
396 | SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), | 397 | SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), |
397 | SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), | 398 | SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), |
398 | {} /* terminator */ | 399 | {} /* terminator */ |
@@ -584,6 +585,7 @@ static int patch_cs420x(struct hda_codec *codec) | |||
584 | return -ENOMEM; | 585 | return -ENOMEM; |
585 | 586 | ||
586 | spec->gen.automute_hook = cs_automute; | 587 | spec->gen.automute_hook = cs_automute; |
588 | codec->single_adc_amp = 1; | ||
587 | 589 | ||
588 | snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, | 590 | snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, |
589 | cs420x_fixups); | 591 | cs420x_fixups); |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index fd3ed18670e9..da67ea8645a6 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -223,6 +223,7 @@ enum { | |||
223 | CXT_PINCFG_LENOVO_TP410, | 223 | CXT_PINCFG_LENOVO_TP410, |
224 | CXT_PINCFG_LEMOTE_A1004, | 224 | CXT_PINCFG_LEMOTE_A1004, |
225 | CXT_PINCFG_LEMOTE_A1205, | 225 | CXT_PINCFG_LEMOTE_A1205, |
226 | CXT_PINCFG_COMPAQ_CQ60, | ||
226 | CXT_FIXUP_STEREO_DMIC, | 227 | CXT_FIXUP_STEREO_DMIC, |
227 | CXT_FIXUP_INC_MIC_BOOST, | 228 | CXT_FIXUP_INC_MIC_BOOST, |
228 | CXT_FIXUP_HEADPHONE_MIC_PIN, | 229 | CXT_FIXUP_HEADPHONE_MIC_PIN, |
@@ -660,6 +661,15 @@ static const struct hda_fixup cxt_fixups[] = { | |||
660 | .type = HDA_FIXUP_PINS, | 661 | .type = HDA_FIXUP_PINS, |
661 | .v.pins = cxt_pincfg_lemote, | 662 | .v.pins = cxt_pincfg_lemote, |
662 | }, | 663 | }, |
664 | [CXT_PINCFG_COMPAQ_CQ60] = { | ||
665 | .type = HDA_FIXUP_PINS, | ||
666 | .v.pins = (const struct hda_pintbl[]) { | ||
667 | /* 0x17 was falsely set up as a mic, it should 0x1d */ | ||
668 | { 0x17, 0x400001f0 }, | ||
669 | { 0x1d, 0x97a70120 }, | ||
670 | { } | ||
671 | } | ||
672 | }, | ||
663 | [CXT_FIXUP_STEREO_DMIC] = { | 673 | [CXT_FIXUP_STEREO_DMIC] = { |
664 | .type = HDA_FIXUP_FUNC, | 674 | .type = HDA_FIXUP_FUNC, |
665 | .v.func = cxt_fixup_stereo_dmic, | 675 | .v.func = cxt_fixup_stereo_dmic, |
@@ -769,6 +779,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = { | |||
769 | }; | 779 | }; |
770 | 780 | ||
771 | static const struct snd_pci_quirk cxt5051_fixups[] = { | 781 | static const struct snd_pci_quirk cxt5051_fixups[] = { |
782 | SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60), | ||
772 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), | 783 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), |
773 | {} | 784 | {} |
774 | }; | 785 | }; |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 526398a4a442..74382137b9f5 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on) | |||
396 | { | 396 | { |
397 | /* We currently only handle front, HP */ | 397 | /* We currently only handle front, HP */ |
398 | static hda_nid_t pins[] = { | 398 | static hda_nid_t pins[] = { |
399 | 0x0f, 0x10, 0x14, 0x15, 0 | 399 | 0x0f, 0x10, 0x14, 0x15, 0x17, 0 |
400 | }; | 400 | }; |
401 | hda_nid_t *p; | 401 | hda_nid_t *p; |
402 | for (p = pins; *p; p++) | 402 | for (p = pins; *p; p++) |
@@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5036 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), | 5036 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), |
5037 | SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), | 5037 | SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), |
5038 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5038 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
5039 | SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), | ||
5039 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5040 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
5040 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), | 5041 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), |
5041 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), | 5042 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), |
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c index b67480f1b1aa..4373ada95648 100644 --- a/sound/soc/codecs/adav80x.c +++ b/sound/soc/codecs/adav80x.c | |||
@@ -317,7 +317,7 @@ static int adav80x_put_deemph(struct snd_kcontrol *kcontrol, | |||
317 | { | 317 | { |
318 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 318 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
319 | struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); | 319 | struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); |
320 | unsigned int deemph = ucontrol->value.enumerated.item[0]; | 320 | unsigned int deemph = ucontrol->value.integer.value[0]; |
321 | 321 | ||
322 | if (deemph > 1) | 322 | if (deemph > 1) |
323 | return -EINVAL; | 323 | return -EINVAL; |
@@ -333,7 +333,7 @@ static int adav80x_get_deemph(struct snd_kcontrol *kcontrol, | |||
333 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 333 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
334 | struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); | 334 | struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); |
335 | 335 | ||
336 | ucontrol->value.enumerated.item[0] = adav80x->deemph; | 336 | ucontrol->value.integer.value[0] = adav80x->deemph; |
337 | return 0; | 337 | return 0; |
338 | }; | 338 | }; |
339 | 339 | ||
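The deemphasis switch is a boolean control, so its get/put handlers must use value.integer.value[0]; reading or writing value.enumerated.item[0] touches the wrong member of the value union. The same change repeats below for ak4641, cs4271 and es8328. A cut-down stand-in for the union, with a shortened layout assumed purely for illustration:

#include <stdio.h>

struct demo_elem_value {                /* cut-down snd_ctl_elem_value stand-in */
        union {
                struct { long value[8]; } integer;
                struct { unsigned int item[8]; } enumerated;
        } value;
};

static int demo_put_deemph(const struct demo_elem_value *ucontrol, int *deemph)
{
        long val = ucontrol->value.integer.value[0];    /* boolean -> integer */

        if (val < 0 || val > 1)
                return -1;
        *deemph = (int)val;
        return 0;
}

static void demo_get_deemph(struct demo_elem_value *ucontrol, int deemph)
{
        ucontrol->value.integer.value[0] = deemph;
}

int main(void)
{
        struct demo_elem_value uc = { 0 };
        int deemph = 0;

        uc.value.integer.value[0] = 1;
        printf("put: %d, deemph=%d\n", demo_put_deemph(&uc, &deemph), deemph);
        demo_get_deemph(&uc, deemph);
        return 0;
}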
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c index 70861c7b1631..81b54a270bd8 100644 --- a/sound/soc/codecs/ak4641.c +++ b/sound/soc/codecs/ak4641.c | |||
@@ -76,7 +76,7 @@ static int ak4641_put_deemph(struct snd_kcontrol *kcontrol, | |||
76 | { | 76 | { |
77 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 77 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
78 | struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); | 78 | struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); |
79 | int deemph = ucontrol->value.enumerated.item[0]; | 79 | int deemph = ucontrol->value.integer.value[0]; |
80 | 80 | ||
81 | if (deemph > 1) | 81 | if (deemph > 1) |
82 | return -EINVAL; | 82 | return -EINVAL; |
@@ -92,7 +92,7 @@ static int ak4641_get_deemph(struct snd_kcontrol *kcontrol, | |||
92 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 92 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
93 | struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); | 93 | struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); |
94 | 94 | ||
95 | ucontrol->value.enumerated.item[0] = ak4641->deemph; | 95 | ucontrol->value.integer.value[0] = ak4641->deemph; |
96 | return 0; | 96 | return 0; |
97 | }; | 97 | }; |
98 | 98 | ||
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c index 632e89f793a7..2a58b1dccd2f 100644 --- a/sound/soc/codecs/ak4671.c +++ b/sound/soc/codecs/ak4671.c | |||
@@ -343,25 +343,25 @@ static const struct snd_soc_dapm_widget ak4671_dapm_widgets[] = { | |||
343 | }; | 343 | }; |
344 | 344 | ||
345 | static const struct snd_soc_dapm_route ak4671_intercon[] = { | 345 | static const struct snd_soc_dapm_route ak4671_intercon[] = { |
346 | {"DAC Left", "NULL", "PMPLL"}, | 346 | {"DAC Left", NULL, "PMPLL"}, |
347 | {"DAC Right", "NULL", "PMPLL"}, | 347 | {"DAC Right", NULL, "PMPLL"}, |
348 | {"ADC Left", "NULL", "PMPLL"}, | 348 | {"ADC Left", NULL, "PMPLL"}, |
349 | {"ADC Right", "NULL", "PMPLL"}, | 349 | {"ADC Right", NULL, "PMPLL"}, |
350 | 350 | ||
351 | /* Outputs */ | 351 | /* Outputs */ |
352 | {"LOUT1", "NULL", "LOUT1 Mixer"}, | 352 | {"LOUT1", NULL, "LOUT1 Mixer"}, |
353 | {"ROUT1", "NULL", "ROUT1 Mixer"}, | 353 | {"ROUT1", NULL, "ROUT1 Mixer"}, |
354 | {"LOUT2", "NULL", "LOUT2 Mix Amp"}, | 354 | {"LOUT2", NULL, "LOUT2 Mix Amp"}, |
355 | {"ROUT2", "NULL", "ROUT2 Mix Amp"}, | 355 | {"ROUT2", NULL, "ROUT2 Mix Amp"}, |
356 | {"LOUT3", "NULL", "LOUT3 Mixer"}, | 356 | {"LOUT3", NULL, "LOUT3 Mixer"}, |
357 | {"ROUT3", "NULL", "ROUT3 Mixer"}, | 357 | {"ROUT3", NULL, "ROUT3 Mixer"}, |
358 | 358 | ||
359 | {"LOUT1 Mixer", "DACL", "DAC Left"}, | 359 | {"LOUT1 Mixer", "DACL", "DAC Left"}, |
360 | {"ROUT1 Mixer", "DACR", "DAC Right"}, | 360 | {"ROUT1 Mixer", "DACR", "DAC Right"}, |
361 | {"LOUT2 Mixer", "DACHL", "DAC Left"}, | 361 | {"LOUT2 Mixer", "DACHL", "DAC Left"}, |
362 | {"ROUT2 Mixer", "DACHR", "DAC Right"}, | 362 | {"ROUT2 Mixer", "DACHR", "DAC Right"}, |
363 | {"LOUT2 Mix Amp", "NULL", "LOUT2 Mixer"}, | 363 | {"LOUT2 Mix Amp", NULL, "LOUT2 Mixer"}, |
364 | {"ROUT2 Mix Amp", "NULL", "ROUT2 Mixer"}, | 364 | {"ROUT2 Mix Amp", NULL, "ROUT2 Mixer"}, |
365 | {"LOUT3 Mixer", "DACSL", "DAC Left"}, | 365 | {"LOUT3 Mixer", "DACSL", "DAC Left"}, |
366 | {"ROUT3 Mixer", "DACSR", "DAC Right"}, | 366 | {"ROUT3 Mixer", "DACSR", "DAC Right"}, |
367 | 367 | ||
@@ -381,18 +381,18 @@ static const struct snd_soc_dapm_route ak4671_intercon[] = { | |||
381 | {"LIN2", NULL, "Mic Bias"}, | 381 | {"LIN2", NULL, "Mic Bias"}, |
382 | {"RIN2", NULL, "Mic Bias"}, | 382 | {"RIN2", NULL, "Mic Bias"}, |
383 | 383 | ||
384 | {"ADC Left", "NULL", "LIN MUX"}, | 384 | {"ADC Left", NULL, "LIN MUX"}, |
385 | {"ADC Right", "NULL", "RIN MUX"}, | 385 | {"ADC Right", NULL, "RIN MUX"}, |
386 | 386 | ||
387 | /* Analog Loops */ | 387 | /* Analog Loops */ |
388 | {"LIN1 Mixing Circuit", "NULL", "LIN1"}, | 388 | {"LIN1 Mixing Circuit", NULL, "LIN1"}, |
389 | {"RIN1 Mixing Circuit", "NULL", "RIN1"}, | 389 | {"RIN1 Mixing Circuit", NULL, "RIN1"}, |
390 | {"LIN2 Mixing Circuit", "NULL", "LIN2"}, | 390 | {"LIN2 Mixing Circuit", NULL, "LIN2"}, |
391 | {"RIN2 Mixing Circuit", "NULL", "RIN2"}, | 391 | {"RIN2 Mixing Circuit", NULL, "RIN2"}, |
392 | {"LIN3 Mixing Circuit", "NULL", "LIN3"}, | 392 | {"LIN3 Mixing Circuit", NULL, "LIN3"}, |
393 | {"RIN3 Mixing Circuit", "NULL", "RIN3"}, | 393 | {"RIN3 Mixing Circuit", NULL, "RIN3"}, |
394 | {"LIN4 Mixing Circuit", "NULL", "LIN4"}, | 394 | {"LIN4 Mixing Circuit", NULL, "LIN4"}, |
395 | {"RIN4 Mixing Circuit", "NULL", "RIN4"}, | 395 | {"RIN4 Mixing Circuit", NULL, "RIN4"}, |
396 | 396 | ||
397 | {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"}, | 397 | {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"}, |
398 | {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"}, | 398 | {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"}, |
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c index 79a4efcb894c..7d3a6accaf9a 100644 --- a/sound/soc/codecs/cs4271.c +++ b/sound/soc/codecs/cs4271.c | |||
@@ -286,7 +286,7 @@ static int cs4271_get_deemph(struct snd_kcontrol *kcontrol, | |||
286 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 286 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
287 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); | 287 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); |
288 | 288 | ||
289 | ucontrol->value.enumerated.item[0] = cs4271->deemph; | 289 | ucontrol->value.integer.value[0] = cs4271->deemph; |
290 | return 0; | 290 | return 0; |
291 | } | 291 | } |
292 | 292 | ||
@@ -296,7 +296,7 @@ static int cs4271_put_deemph(struct snd_kcontrol *kcontrol, | |||
296 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 296 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
297 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); | 297 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); |
298 | 298 | ||
299 | cs4271->deemph = ucontrol->value.enumerated.item[0]; | 299 | cs4271->deemph = ucontrol->value.integer.value[0]; |
300 | return cs4271_set_deemph(codec); | 300 | return cs4271_set_deemph(codec); |
301 | } | 301 | } |
302 | 302 | ||
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c index ffe96175a8a5..911c26c705fc 100644 --- a/sound/soc/codecs/da732x.c +++ b/sound/soc/codecs/da732x.c | |||
@@ -876,11 +876,11 @@ static const struct snd_soc_dapm_widget da732x_dapm_widgets[] = { | |||
876 | 876 | ||
877 | static const struct snd_soc_dapm_route da732x_dapm_routes[] = { | 877 | static const struct snd_soc_dapm_route da732x_dapm_routes[] = { |
878 | /* Inputs */ | 878 | /* Inputs */ |
879 | {"AUX1L PGA", "NULL", "AUX1L"}, | 879 | {"AUX1L PGA", NULL, "AUX1L"}, |
880 | {"AUX1R PGA", "NULL", "AUX1R"}, | 880 | {"AUX1R PGA", NULL, "AUX1R"}, |
881 | {"MIC1 PGA", NULL, "MIC1"}, | 881 | {"MIC1 PGA", NULL, "MIC1"}, |
882 | {"MIC2 PGA", "NULL", "MIC2"}, | 882 | {"MIC2 PGA", NULL, "MIC2"}, |
883 | {"MIC3 PGA", "NULL", "MIC3"}, | 883 | {"MIC3 PGA", NULL, "MIC3"}, |
884 | 884 | ||
885 | /* Capture Path */ | 885 | /* Capture Path */ |
886 | {"ADC1 Left MUX", "MIC1", "MIC1 PGA"}, | 886 | {"ADC1 Left MUX", "MIC1", "MIC1 PGA"}, |
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index f27325155ace..c5f35a07e8e4 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c | |||
@@ -120,7 +120,7 @@ static int es8328_get_deemph(struct snd_kcontrol *kcontrol, | |||
120 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 120 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
121 | struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); | 121 | struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); |
122 | 122 | ||
123 | ucontrol->value.enumerated.item[0] = es8328->deemph; | 123 | ucontrol->value.integer.value[0] = es8328->deemph; |
124 | return 0; | 124 | return 0; |
125 | } | 125 | } |
126 | 126 | ||
@@ -129,7 +129,7 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol, | |||
129 | { | 129 | { |
130 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 130 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
131 | struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); | 131 | struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); |
132 | int deemph = ucontrol->value.enumerated.item[0]; | 132 | int deemph = ucontrol->value.integer.value[0]; |
133 | int ret; | 133 | int ret; |
134 | 134 | ||
135 | if (deemph > 1) | 135 | if (deemph > 1) |
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c index a722a023c262..477e13d30971 100644 --- a/sound/soc/codecs/pcm1681.c +++ b/sound/soc/codecs/pcm1681.c | |||
@@ -118,7 +118,7 @@ static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol, | |||
118 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 118 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
119 | struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); | 119 | struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); |
120 | 120 | ||
121 | ucontrol->value.enumerated.item[0] = priv->deemph; | 121 | ucontrol->value.integer.value[0] = priv->deemph; |
122 | 122 | ||
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
@@ -129,7 +129,7 @@ static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol, | |||
129 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 129 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
130 | struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); | 130 | struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); |
131 | 131 | ||
132 | priv->deemph = ucontrol->value.enumerated.item[0]; | 132 | priv->deemph = ucontrol->value.integer.value[0]; |
133 | 133 | ||
134 | return pcm1681_set_deemph(codec); | 134 | return pcm1681_set_deemph(codec); |
135 | } | 135 | } |
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index f374840a5a7c..9b541e52da8c 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c | |||
@@ -1198,7 +1198,7 @@ static struct dmi_system_id dmi_dell_dino[] = { | |||
1198 | .ident = "Dell Dino", | 1198 | .ident = "Dell Dino", |
1199 | .matches = { | 1199 | .matches = { |
1200 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | 1200 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
1201 | DMI_MATCH(DMI_BOARD_NAME, "0144P8") | 1201 | DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343") |
1202 | } | 1202 | } |
1203 | }, | 1203 | }, |
1204 | { } | 1204 | { } |
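The rt286 quirk entry now keys on the DMI product name ("XPS 13 9343") instead of the board name, which is less likely to vary between board revisions of the same machine. A sketch of how such a dmi_system_id table is typically consulted at probe time; whether rt286 applies it exactly this way is not visible in this hunk:

	if (dmi_check_system(dmi_dell_dino)) {
		/* apply the Dell "Dino" (XPS 13 9343) specific quirks */
	}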
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index e182e6569bbd..3593a1496056 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c | |||
@@ -1151,13 +1151,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec) | |||
1151 | /* Enable VDDC charge pump */ | 1151 | /* Enable VDDC charge pump */ |
1152 | ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP; | 1152 | ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP; |
1153 | } else if (vddio >= 3100 && vdda >= 3100) { | 1153 | } else if (vddio >= 3100 && vdda >= 3100) { |
1154 | /* | 1154 | ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP; |
1155 | * if vddio and vddd > 3.1v, | ||
1156 | * charge pump should be clean before set ana_pwr | ||
1157 | */ | ||
1158 | snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, | ||
1159 | SGTL5000_VDDC_CHRGPMP_POWERUP, 0); | ||
1160 | |||
1161 | /* VDDC use VDDIO rail */ | 1155 | /* VDDC use VDDIO rail */ |
1162 | lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; | 1156 | lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; |
1163 | lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << | 1157 | lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << |
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c index 47b257e41809..82095d6cd070 100644 --- a/sound/soc/codecs/sn95031.c +++ b/sound/soc/codecs/sn95031.c | |||
@@ -538,8 +538,8 @@ static const struct snd_soc_dapm_route sn95031_audio_map[] = { | |||
538 | /* speaker map */ | 538 | /* speaker map */ |
539 | { "IHFOUTL", NULL, "Speaker Rail"}, | 539 | { "IHFOUTL", NULL, "Speaker Rail"}, |
540 | { "IHFOUTR", NULL, "Speaker Rail"}, | 540 | { "IHFOUTR", NULL, "Speaker Rail"}, |
541 | { "IHFOUTL", "NULL", "Speaker Left Playback"}, | 541 | { "IHFOUTL", NULL, "Speaker Left Playback"}, |
542 | { "IHFOUTR", "NULL", "Speaker Right Playback"}, | 542 | { "IHFOUTR", NULL, "Speaker Right Playback"}, |
543 | { "Speaker Left Playback", NULL, "Speaker Left Filter"}, | 543 | { "Speaker Left Playback", NULL, "Speaker Left Filter"}, |
544 | { "Speaker Right Playback", NULL, "Speaker Right Filter"}, | 544 | { "Speaker Right Playback", NULL, "Speaker Right Filter"}, |
545 | { "Speaker Left Filter", NULL, "IHFDAC Left"}, | 545 | { "Speaker Left Filter", NULL, "IHFDAC Left"}, |
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c index 249ef5c4c762..32942bed34b1 100644 --- a/sound/soc/codecs/tas5086.c +++ b/sound/soc/codecs/tas5086.c | |||
@@ -281,7 +281,7 @@ static int tas5086_get_deemph(struct snd_kcontrol *kcontrol, | |||
281 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 281 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
282 | struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); | 282 | struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); |
283 | 283 | ||
284 | ucontrol->value.enumerated.item[0] = priv->deemph; | 284 | ucontrol->value.integer.value[0] = priv->deemph; |
285 | 285 | ||
286 | return 0; | 286 | return 0; |
287 | } | 287 | } |
@@ -292,7 +292,7 @@ static int tas5086_put_deemph(struct snd_kcontrol *kcontrol, | |||
292 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 292 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
293 | struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); | 293 | struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); |
294 | 294 | ||
295 | priv->deemph = ucontrol->value.enumerated.item[0]; | 295 | priv->deemph = ucontrol->value.integer.value[0]; |
296 | 296 | ||
297 | return tas5086_set_deemph(codec); | 297 | return tas5086_set_deemph(codec); |
298 | } | 298 | } |
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index 8d9de49a5052..21d5402e343f 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c | |||
@@ -610,7 +610,7 @@ static int wm2000_anc_mode_get(struct snd_kcontrol *kcontrol, | |||
610 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 610 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
611 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); | 611 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); |
612 | 612 | ||
613 | ucontrol->value.enumerated.item[0] = wm2000->anc_active; | 613 | ucontrol->value.integer.value[0] = wm2000->anc_active; |
614 | 614 | ||
615 | return 0; | 615 | return 0; |
616 | } | 616 | } |
@@ -620,7 +620,7 @@ static int wm2000_anc_mode_put(struct snd_kcontrol *kcontrol, | |||
620 | { | 620 | { |
621 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 621 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
622 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); | 622 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); |
623 | int anc_active = ucontrol->value.enumerated.item[0]; | 623 | int anc_active = ucontrol->value.integer.value[0]; |
624 | int ret; | 624 | int ret; |
625 | 625 | ||
626 | if (anc_active > 1) | 626 | if (anc_active > 1) |
@@ -643,7 +643,7 @@ static int wm2000_speaker_get(struct snd_kcontrol *kcontrol, | |||
643 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 643 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
644 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); | 644 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); |
645 | 645 | ||
646 | ucontrol->value.enumerated.item[0] = wm2000->spk_ena; | 646 | ucontrol->value.integer.value[0] = wm2000->spk_ena; |
647 | 647 | ||
648 | return 0; | 648 | return 0; |
649 | } | 649 | } |
@@ -653,7 +653,7 @@ static int wm2000_speaker_put(struct snd_kcontrol *kcontrol, | |||
653 | { | 653 | { |
654 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 654 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
655 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); | 655 | struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); |
656 | int val = ucontrol->value.enumerated.item[0]; | 656 | int val = ucontrol->value.integer.value[0]; |
657 | int ret; | 657 | int ret; |
658 | 658 | ||
659 | if (val > 1) | 659 | if (val > 1) |
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c index 098c143f44d6..c6d10533e2bd 100644 --- a/sound/soc/codecs/wm8731.c +++ b/sound/soc/codecs/wm8731.c | |||
@@ -125,7 +125,7 @@ static int wm8731_get_deemph(struct snd_kcontrol *kcontrol, | |||
125 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 125 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
126 | struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); | 126 | struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); |
127 | 127 | ||
128 | ucontrol->value.enumerated.item[0] = wm8731->deemph; | 128 | ucontrol->value.integer.value[0] = wm8731->deemph; |
129 | 129 | ||
130 | return 0; | 130 | return 0; |
131 | } | 131 | } |
@@ -135,7 +135,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol, | |||
135 | { | 135 | { |
136 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 136 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
137 | struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); | 137 | struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); |
138 | int deemph = ucontrol->value.enumerated.item[0]; | 138 | int deemph = ucontrol->value.integer.value[0]; |
139 | int ret = 0; | 139 | int ret = 0; |
140 | 140 | ||
141 | if (deemph > 1) | 141 | if (deemph > 1) |
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index dde462c082be..04b04f8e147c 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c | |||
@@ -442,7 +442,7 @@ static int wm8903_get_deemph(struct snd_kcontrol *kcontrol, | |||
442 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 442 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
443 | struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); | 443 | struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); |
444 | 444 | ||
445 | ucontrol->value.enumerated.item[0] = wm8903->deemph; | 445 | ucontrol->value.integer.value[0] = wm8903->deemph; |
446 | 446 | ||
447 | return 0; | 447 | return 0; |
448 | } | 448 | } |
@@ -452,7 +452,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol, | |||
452 | { | 452 | { |
453 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 453 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
454 | struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); | 454 | struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); |
455 | int deemph = ucontrol->value.enumerated.item[0]; | 455 | int deemph = ucontrol->value.integer.value[0]; |
456 | int ret = 0; | 456 | int ret = 0; |
457 | 457 | ||
458 | if (deemph > 1) | 458 | if (deemph > 1) |
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index d3b3f57668cc..215e93c1ddf0 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c | |||
@@ -525,7 +525,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol, | |||
525 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 525 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
526 | struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); | 526 | struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); |
527 | 527 | ||
528 | ucontrol->value.enumerated.item[0] = wm8904->deemph; | 528 | ucontrol->value.integer.value[0] = wm8904->deemph; |
529 | return 0; | 529 | return 0; |
530 | } | 530 | } |
531 | 531 | ||
@@ -534,7 +534,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol, | |||
534 | { | 534 | { |
535 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 535 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
536 | struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); | 536 | struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); |
537 | int deemph = ucontrol->value.enumerated.item[0]; | 537 | int deemph = ucontrol->value.integer.value[0]; |
538 | 538 | ||
539 | if (deemph > 1) | 539 | if (deemph > 1) |
540 | return -EINVAL; | 540 | return -EINVAL; |
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c index 1ab2d462afad..00bec915d652 100644 --- a/sound/soc/codecs/wm8955.c +++ b/sound/soc/codecs/wm8955.c | |||
@@ -393,7 +393,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol, | |||
393 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 393 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
394 | struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); | 394 | struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); |
395 | 395 | ||
396 | ucontrol->value.enumerated.item[0] = wm8955->deemph; | 396 | ucontrol->value.integer.value[0] = wm8955->deemph; |
397 | return 0; | 397 | return 0; |
398 | } | 398 | } |
399 | 399 | ||
@@ -402,7 +402,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol, | |||
402 | { | 402 | { |
403 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 403 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
404 | struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); | 404 | struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); |
405 | int deemph = ucontrol->value.enumerated.item[0]; | 405 | int deemph = ucontrol->value.integer.value[0]; |
406 | 406 | ||
407 | if (deemph > 1) | 407 | if (deemph > 1) |
408 | return -EINVAL; | 408 | return -EINVAL; |
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index cf8fecf97f2c..3035d9856415 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c | |||
@@ -184,7 +184,7 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol, | |||
184 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 184 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
185 | struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); | 185 | struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); |
186 | 186 | ||
187 | ucontrol->value.enumerated.item[0] = wm8960->deemph; | 187 | ucontrol->value.integer.value[0] = wm8960->deemph; |
188 | return 0; | 188 | return 0; |
189 | } | 189 | } |
190 | 190 | ||
@@ -193,7 +193,7 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol, | |||
193 | { | 193 | { |
194 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); | 194 | struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); |
195 | struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); | 195 | struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); |
196 | int deemph = ucontrol->value.enumerated.item[0]; | 196 | int deemph = ucontrol->value.integer.value[0]; |
197 | 197 | ||
198 | if (deemph > 1) | 198 | if (deemph > 1) |
199 | return -EINVAL; | 199 | return -EINVAL; |
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index 9517571e820d..98c9525bd751 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c | |||
@@ -180,7 +180,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol, | |||
180 | struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); | 180 | struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); |
181 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); | 181 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); |
182 | struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); | 182 | struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); |
183 | unsigned int val = ucontrol->value.enumerated.item[0]; | 183 | unsigned int val = ucontrol->value.integer.value[0]; |
184 | struct soc_mixer_control *mc = | 184 | struct soc_mixer_control *mc = |
185 | (struct soc_mixer_control *)kcontrol->private_value; | 185 | (struct soc_mixer_control *)kcontrol->private_value; |
186 | unsigned int mixer, mask, shift, old; | 186 | unsigned int mixer, mask, shift, old; |
@@ -193,7 +193,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol, | |||
193 | 193 | ||
194 | mutex_lock(&wm9712->lock); | 194 | mutex_lock(&wm9712->lock); |
195 | old = wm9712->hp_mixer[mixer]; | 195 | old = wm9712->hp_mixer[mixer]; |
196 | if (ucontrol->value.enumerated.item[0]) | 196 | if (ucontrol->value.integer.value[0]) |
197 | wm9712->hp_mixer[mixer] |= mask; | 197 | wm9712->hp_mixer[mixer] |= mask; |
198 | else | 198 | else |
199 | wm9712->hp_mixer[mixer] &= ~mask; | 199 | wm9712->hp_mixer[mixer] &= ~mask; |
@@ -231,7 +231,7 @@ static int wm9712_hp_mixer_get(struct snd_kcontrol *kcontrol, | |||
231 | mixer = mc->shift >> 8; | 231 | mixer = mc->shift >> 8; |
232 | shift = mc->shift & 0xff; | 232 | shift = mc->shift & 0xff; |
233 | 233 | ||
234 | ucontrol->value.enumerated.item[0] = | 234 | ucontrol->value.integer.value[0] = |
235 | (wm9712->hp_mixer[mixer] >> shift) & 1; | 235 | (wm9712->hp_mixer[mixer] >> shift) & 1; |
236 | 236 | ||
237 | return 0; | 237 | return 0; |
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c index 68222917b396..79552953e1bd 100644 --- a/sound/soc/codecs/wm9713.c +++ b/sound/soc/codecs/wm9713.c | |||
@@ -255,7 +255,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol, | |||
255 | struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); | 255 | struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); |
256 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); | 256 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); |
257 | struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); | 257 | struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); |
258 | unsigned int val = ucontrol->value.enumerated.item[0]; | 258 | unsigned int val = ucontrol->value.integer.value[0]; |
259 | struct soc_mixer_control *mc = | 259 | struct soc_mixer_control *mc = |
260 | (struct soc_mixer_control *)kcontrol->private_value; | 260 | (struct soc_mixer_control *)kcontrol->private_value; |
261 | unsigned int mixer, mask, shift, old; | 261 | unsigned int mixer, mask, shift, old; |
@@ -268,7 +268,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol, | |||
268 | 268 | ||
269 | mutex_lock(&wm9713->lock); | 269 | mutex_lock(&wm9713->lock); |
270 | old = wm9713->hp_mixer[mixer]; | 270 | old = wm9713->hp_mixer[mixer]; |
271 | if (ucontrol->value.enumerated.item[0]) | 271 | if (ucontrol->value.integer.value[0]) |
272 | wm9713->hp_mixer[mixer] |= mask; | 272 | wm9713->hp_mixer[mixer] |= mask; |
273 | else | 273 | else |
274 | wm9713->hp_mixer[mixer] &= ~mask; | 274 | wm9713->hp_mixer[mixer] &= ~mask; |
@@ -306,7 +306,7 @@ static int wm9713_hp_mixer_get(struct snd_kcontrol *kcontrol, | |||
306 | mixer = mc->shift >> 8; | 306 | mixer = mc->shift >> 8; |
307 | shift = mc->shift & 0xff; | 307 | shift = mc->shift & 0xff; |
308 | 308 | ||
309 | ucontrol->value.enumerated.item[0] = | 309 | ucontrol->value.integer.value[0] = |
310 | (wm9713->hp_mixer[mixer] >> shift) & 1; | 310 | (wm9713->hp_mixer[mixer] >> shift) & 1; |
311 | 311 | ||
312 | return 0; | 312 | return 0; |
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c index 75870c0ea2c9..91eb3aef7f02 100644 --- a/sound/soc/fsl/fsl_spdif.c +++ b/sound/soc/fsl/fsl_spdif.c | |||
@@ -1049,7 +1049,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv, | |||
1049 | enum spdif_txrate index, bool round) | 1049 | enum spdif_txrate index, bool round) |
1050 | { | 1050 | { |
1051 | const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; | 1051 | const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; |
1052 | bool is_sysclk = clk == spdif_priv->sysclk; | 1052 | bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk); |
1053 | u64 rate_ideal, rate_actual, sub; | 1053 | u64 rate_ideal, rate_actual, sub; |
1054 | u32 sysclk_dfmin, sysclk_dfmax; | 1054 | u32 sysclk_dfmin, sysclk_dfmax; |
1055 | u32 txclk_df, sysclk_df, arate; | 1055 | u32 txclk_df, sysclk_df, arate; |
@@ -1143,7 +1143,7 @@ static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv, | |||
1143 | spdif_priv->txclk_src[index], rate[index]); | 1143 | spdif_priv->txclk_src[index], rate[index]); |
1144 | dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n", | 1144 | dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n", |
1145 | spdif_priv->txclk_df[index], rate[index]); | 1145 | spdif_priv->txclk_df[index], rate[index]); |
1146 | if (spdif_priv->txclk[index] == spdif_priv->sysclk) | 1146 | if (clk_is_match(spdif_priv->txclk[index], spdif_priv->sysclk)) |
1147 | dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n", | 1147 | dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n", |
1148 | spdif_priv->sysclk_df[index], rate[index]); | 1148 | spdif_priv->sysclk_df[index], rate[index]); |
1149 | dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n", | 1149 | dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n", |
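Both fsl_spdif comparisons (and the kirkwood-i2s one further down) stop comparing struct clk pointers directly and use clk_is_match() instead: with per-user clock handles, two clk_get() calls for the same hardware clock may return distinct pointers, so pointer equality is no longer meaningful. A small sketch, with hypothetical consumer clock names:

	struct clk *a = devm_clk_get(dev, "core");    /* hypothetical names */
	struct clk *b = devm_clk_get(dev, "rxtx0");

	if (!IS_ERR(a) && !IS_ERR(b) && clk_is_match(a, b)) {
		/* both handles refer to the same underlying hardware clock */
	}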
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index b9fabbf69db6..6b0c8f717ec2 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c | |||
@@ -603,7 +603,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, | |||
603 | factor = (div2 + 1) * (7 * psr + 1) * 2; | 603 | factor = (div2 + 1) * (7 * psr + 1) * 2; |
604 | 604 | ||
605 | for (i = 0; i < 255; i++) { | 605 | for (i = 0; i < 255; i++) { |
606 | tmprate = freq * factor * (i + 2); | 606 | tmprate = freq * factor * (i + 1); |
607 | 607 | ||
608 | if (baudclk_is_used) | 608 | if (baudclk_is_used) |
609 | clkrate = clk_get_rate(ssi_private->baudclk); | 609 | clkrate = clk_get_rate(ssi_private->baudclk); |
@@ -1227,7 +1227,7 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev, | |||
1227 | ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0; | 1227 | ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0; |
1228 | ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0; | 1228 | ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0; |
1229 | 1229 | ||
1230 | ret = !of_property_read_u32_array(np, "dmas", dmas, 4); | 1230 | ret = of_property_read_u32_array(np, "dmas", dmas, 4); |
1231 | if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) { | 1231 | if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) { |
1232 | ssi_private->use_dual_fifo = true; | 1232 | ssi_private->use_dual_fifo = true; |
1233 | /* When using dual fifo mode, we need to keep watermark | 1233 | /* When using dual fifo mode, we need to keep watermark |
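Two separate fixes in fsl_ssi: the bit-clock search now scales by (i + 1) so the divider candidates start from the intended value, and the "dmas" lookup no longer negates the return of of_property_read_u32_array(), which returns 0 on success, so the old code had the success test inverted. Sketch of the corrected lookup; use_dma and use_dual_fifo stand in for the driver's private fields:

	u32 dmas[4];
	int ret = of_property_read_u32_array(np, "dmas", dmas, 4);

	/* 0 means all four cells were read successfully */
	if (use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL)
		use_dual_fifo = true;   /* one DMA channel drives both FIFOs */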
diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c index c42ffae5fe9f..402b728c0a06 100644 --- a/sound/soc/intel/sst-haswell-dsp.c +++ b/sound/soc/intel/sst-haswell-dsp.c | |||
@@ -207,9 +207,6 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw) | |||
207 | module = (void *)module + sizeof(*module) + module->mod_size; | 207 | module = (void *)module + sizeof(*module) + module->mod_size; |
208 | } | 208 | } |
209 | 209 | ||
210 | /* allocate scratch mem regions */ | ||
211 | sst_block_alloc_scratch(dsp); | ||
212 | |||
213 | return 0; | 210 | return 0; |
214 | } | 211 | } |
215 | 212 | ||
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c index 394af5684c05..863a9ca34b8e 100644 --- a/sound/soc/intel/sst-haswell-ipc.c +++ b/sound/soc/intel/sst-haswell-ipc.c | |||
@@ -1732,6 +1732,7 @@ static void sst_hsw_drop_all(struct sst_hsw *hsw) | |||
1732 | int sst_hsw_dsp_load(struct sst_hsw *hsw) | 1732 | int sst_hsw_dsp_load(struct sst_hsw *hsw) |
1733 | { | 1733 | { |
1734 | struct sst_dsp *dsp = hsw->dsp; | 1734 | struct sst_dsp *dsp = hsw->dsp; |
1735 | struct sst_fw *sst_fw, *t; | ||
1735 | int ret; | 1736 | int ret; |
1736 | 1737 | ||
1737 | dev_dbg(hsw->dev, "loading audio DSP...."); | 1738 | dev_dbg(hsw->dev, "loading audio DSP...."); |
@@ -1748,12 +1749,17 @@ int sst_hsw_dsp_load(struct sst_hsw *hsw) | |||
1748 | return ret; | 1749 | return ret; |
1749 | } | 1750 | } |
1750 | 1751 | ||
1751 | ret = sst_fw_reload(hsw->sst_fw); | 1752 | list_for_each_entry_safe_reverse(sst_fw, t, &dsp->fw_list, list) { |
1752 | if (ret < 0) { | 1753 | ret = sst_fw_reload(sst_fw); |
1753 | dev_err(hsw->dev, "error: SST FW reload failed\n"); | 1754 | if (ret < 0) { |
1754 | sst_dsp_dma_put_channel(dsp); | 1755 | dev_err(hsw->dev, "error: SST FW reload failed\n"); |
1755 | return -ENOMEM; | 1756 | sst_dsp_dma_put_channel(dsp); |
1757 | return -ENOMEM; | ||
1758 | } | ||
1756 | } | 1759 | } |
1760 | ret = sst_block_alloc_scratch(hsw->dsp); | ||
1761 | if (ret < 0) | ||
1762 | return -EINVAL; | ||
1757 | 1763 | ||
1758 | sst_dsp_dma_put_channel(dsp); | 1764 | sst_dsp_dma_put_channel(dsp); |
1759 | return 0; | 1765 | return 0; |
@@ -1809,12 +1815,17 @@ int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw) | |||
1809 | 1815 | ||
1810 | int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw) | 1816 | int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw) |
1811 | { | 1817 | { |
1812 | sst_fw_unload(hsw->sst_fw); | 1818 | struct sst_fw *sst_fw, *t; |
1813 | sst_block_free_scratch(hsw->dsp); | 1819 | struct sst_dsp *dsp = hsw->dsp; |
1820 | |||
1821 | list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) { | ||
1822 | sst_fw_unload(sst_fw); | ||
1823 | } | ||
1824 | sst_block_free_scratch(dsp); | ||
1814 | 1825 | ||
1815 | hsw->boot_complete = false; | 1826 | hsw->boot_complete = false; |
1816 | 1827 | ||
1817 | sst_dsp_sleep(hsw->dsp); | 1828 | sst_dsp_sleep(dsp); |
1818 | 1829 | ||
1819 | return 0; | 1830 | return 0; |
1820 | } | 1831 | } |
@@ -1943,6 +1954,11 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata) | |||
1943 | goto fw_err; | 1954 | goto fw_err; |
1944 | } | 1955 | } |
1945 | 1956 | ||
1957 | /* allocate scratch mem regions */ | ||
1958 | ret = sst_block_alloc_scratch(hsw->dsp); | ||
1959 | if (ret < 0) | ||
1960 | goto boot_err; | ||
1961 | |||
1946 | /* wait for DSP boot completion */ | 1962 | /* wait for DSP boot completion */ |
1947 | sst_dsp_boot(hsw->dsp); | 1963 | sst_dsp_boot(hsw->dsp); |
1948 | ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete, | 1964 | ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete, |
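The Haswell IPC changes reload every firmware image kept on the DSP's fw_list (not just hsw->sst_fw), and the scratch-block allocation moves out of the image parser so it runs once after firmware load/reload, with its return value checked. Illustrative idiom only, reusing the names visible in the hunk:

	struct sst_fw *sst_fw, *tmp;

	/* "safe" variant: entries may be re-linked while walking; the reverse
	 * walk restores images in the opposite order to the unload path */
	list_for_each_entry_safe_reverse(sst_fw, tmp, &dsp->fw_list, list) {
		if (sst_fw_reload(sst_fw) < 0)
			return -ENOMEM;
	}

	if (sst_block_alloc_scratch(dsp) < 0)   /* now checked, previously ignored */
		return -EINVAL;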
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index def7d8260c4e..d19483081f9b 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c | |||
@@ -579,7 +579,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev) | |||
579 | if (PTR_ERR(priv->extclk) == -EPROBE_DEFER) | 579 | if (PTR_ERR(priv->extclk) == -EPROBE_DEFER) |
580 | return -EPROBE_DEFER; | 580 | return -EPROBE_DEFER; |
581 | } else { | 581 | } else { |
582 | if (priv->extclk == priv->clk) { | 582 | if (clk_is_match(priv->extclk, priv->clk)) { |
583 | devm_clk_put(&pdev->dev, priv->extclk); | 583 | devm_clk_put(&pdev->dev, priv->extclk); |
584 | priv->extclk = ERR_PTR(-EINVAL); | 584 | priv->extclk = ERR_PTR(-EINVAL); |
585 | } else { | 585 | } else { |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 30579ca5bacb..e5c990889dcc 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -347,6 +347,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf, | |||
347 | if (!buf) | 347 | if (!buf) |
348 | return -ENOMEM; | 348 | return -ENOMEM; |
349 | 349 | ||
350 | mutex_lock(&client_mutex); | ||
351 | |||
350 | list_for_each_entry(codec, &codec_list, list) { | 352 | list_for_each_entry(codec, &codec_list, list) { |
351 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", | 353 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", |
352 | codec->component.name); | 354 | codec->component.name); |
@@ -358,6 +360,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf, | |||
358 | } | 360 | } |
359 | } | 361 | } |
360 | 362 | ||
363 | mutex_unlock(&client_mutex); | ||
364 | |||
361 | if (ret >= 0) | 365 | if (ret >= 0) |
362 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); | 366 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); |
363 | 367 | ||
@@ -382,6 +386,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf, | |||
382 | if (!buf) | 386 | if (!buf) |
383 | return -ENOMEM; | 387 | return -ENOMEM; |
384 | 388 | ||
389 | mutex_lock(&client_mutex); | ||
390 | |||
385 | list_for_each_entry(component, &component_list, list) { | 391 | list_for_each_entry(component, &component_list, list) { |
386 | list_for_each_entry(dai, &component->dai_list, list) { | 392 | list_for_each_entry(dai, &component->dai_list, list) { |
387 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", | 393 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", |
@@ -395,6 +401,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf, | |||
395 | } | 401 | } |
396 | } | 402 | } |
397 | 403 | ||
404 | mutex_unlock(&client_mutex); | ||
405 | |||
398 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); | 406 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); |
399 | 407 | ||
400 | kfree(buf); | 408 | kfree(buf); |
@@ -418,6 +426,8 @@ static ssize_t platform_list_read_file(struct file *file, | |||
418 | if (!buf) | 426 | if (!buf) |
419 | return -ENOMEM; | 427 | return -ENOMEM; |
420 | 428 | ||
429 | mutex_lock(&client_mutex); | ||
430 | |||
421 | list_for_each_entry(platform, &platform_list, list) { | 431 | list_for_each_entry(platform, &platform_list, list) { |
422 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", | 432 | len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", |
423 | platform->component.name); | 433 | platform->component.name); |
@@ -429,6 +439,8 @@ static ssize_t platform_list_read_file(struct file *file, | |||
429 | } | 439 | } |
430 | } | 440 | } |
431 | 441 | ||
442 | mutex_unlock(&client_mutex); | ||
443 | |||
432 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); | 444 | ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); |
433 | 445 | ||
434 | kfree(buf); | 446 | kfree(buf); |
@@ -836,6 +848,8 @@ static struct snd_soc_component *soc_find_component( | |||
836 | { | 848 | { |
837 | struct snd_soc_component *component; | 849 | struct snd_soc_component *component; |
838 | 850 | ||
851 | lockdep_assert_held(&client_mutex); | ||
852 | |||
839 | list_for_each_entry(component, &component_list, list) { | 853 | list_for_each_entry(component, &component_list, list) { |
840 | if (of_node) { | 854 | if (of_node) { |
841 | if (component->dev->of_node == of_node) | 855 | if (component->dev->of_node == of_node) |
@@ -854,6 +868,8 @@ static struct snd_soc_dai *snd_soc_find_dai( | |||
854 | struct snd_soc_component *component; | 868 | struct snd_soc_component *component; |
855 | struct snd_soc_dai *dai; | 869 | struct snd_soc_dai *dai; |
856 | 870 | ||
871 | lockdep_assert_held(&client_mutex); | ||
872 | |||
857 | /* Find CPU DAI from registered DAIs*/ | 873 | /* Find CPU DAI from registered DAIs*/ |
858 | list_for_each_entry(component, &component_list, list) { | 874 | list_for_each_entry(component, &component_list, list) { |
859 | if (dlc->of_node && component->dev->of_node != dlc->of_node) | 875 | if (dlc->of_node && component->dev->of_node != dlc->of_node) |
@@ -1508,6 +1524,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card) | |||
1508 | struct snd_soc_codec *codec; | 1524 | struct snd_soc_codec *codec; |
1509 | int ret, i, order; | 1525 | int ret, i, order; |
1510 | 1526 | ||
1527 | mutex_lock(&client_mutex); | ||
1511 | mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT); | 1528 | mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT); |
1512 | 1529 | ||
1513 | /* bind DAIs */ | 1530 | /* bind DAIs */ |
@@ -1662,6 +1679,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card) | |||
1662 | card->instantiated = 1; | 1679 | card->instantiated = 1; |
1663 | snd_soc_dapm_sync(&card->dapm); | 1680 | snd_soc_dapm_sync(&card->dapm); |
1664 | mutex_unlock(&card->mutex); | 1681 | mutex_unlock(&card->mutex); |
1682 | mutex_unlock(&client_mutex); | ||
1665 | 1683 | ||
1666 | return 0; | 1684 | return 0; |
1667 | 1685 | ||
@@ -1680,6 +1698,7 @@ card_probe_error: | |||
1680 | 1698 | ||
1681 | base_error: | 1699 | base_error: |
1682 | mutex_unlock(&card->mutex); | 1700 | mutex_unlock(&card->mutex); |
1701 | mutex_unlock(&client_mutex); | ||
1683 | 1702 | ||
1684 | return ret; | 1703 | return ret; |
1685 | } | 1704 | } |
@@ -2713,13 +2732,6 @@ static void snd_soc_component_del_unlocked(struct snd_soc_component *component) | |||
2713 | list_del(&component->list); | 2732 | list_del(&component->list); |
2714 | } | 2733 | } |
2715 | 2734 | ||
2716 | static void snd_soc_component_del(struct snd_soc_component *component) | ||
2717 | { | ||
2718 | mutex_lock(&client_mutex); | ||
2719 | snd_soc_component_del_unlocked(component); | ||
2720 | mutex_unlock(&client_mutex); | ||
2721 | } | ||
2722 | |||
2723 | int snd_soc_register_component(struct device *dev, | 2735 | int snd_soc_register_component(struct device *dev, |
2724 | const struct snd_soc_component_driver *cmpnt_drv, | 2736 | const struct snd_soc_component_driver *cmpnt_drv, |
2725 | struct snd_soc_dai_driver *dai_drv, | 2737 | struct snd_soc_dai_driver *dai_drv, |
@@ -2767,14 +2779,17 @@ void snd_soc_unregister_component(struct device *dev) | |||
2767 | { | 2779 | { |
2768 | struct snd_soc_component *cmpnt; | 2780 | struct snd_soc_component *cmpnt; |
2769 | 2781 | ||
2782 | mutex_lock(&client_mutex); | ||
2770 | list_for_each_entry(cmpnt, &component_list, list) { | 2783 | list_for_each_entry(cmpnt, &component_list, list) { |
2771 | if (dev == cmpnt->dev && cmpnt->registered_as_component) | 2784 | if (dev == cmpnt->dev && cmpnt->registered_as_component) |
2772 | goto found; | 2785 | goto found; |
2773 | } | 2786 | } |
2787 | mutex_unlock(&client_mutex); | ||
2774 | return; | 2788 | return; |
2775 | 2789 | ||
2776 | found: | 2790 | found: |
2777 | snd_soc_component_del(cmpnt); | 2791 | snd_soc_component_del_unlocked(cmpnt); |
2792 | mutex_unlock(&client_mutex); | ||
2778 | snd_soc_component_cleanup(cmpnt); | 2793 | snd_soc_component_cleanup(cmpnt); |
2779 | kfree(cmpnt); | 2794 | kfree(cmpnt); |
2780 | } | 2795 | } |
@@ -2882,10 +2897,14 @@ struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev) | |||
2882 | { | 2897 | { |
2883 | struct snd_soc_platform *platform; | 2898 | struct snd_soc_platform *platform; |
2884 | 2899 | ||
2900 | mutex_lock(&client_mutex); | ||
2885 | list_for_each_entry(platform, &platform_list, list) { | 2901 | list_for_each_entry(platform, &platform_list, list) { |
2886 | if (dev == platform->dev) | 2902 | if (dev == platform->dev) { |
2903 | mutex_unlock(&client_mutex); | ||
2887 | return platform; | 2904 | return platform; |
2905 | } | ||
2888 | } | 2906 | } |
2907 | mutex_unlock(&client_mutex); | ||
2889 | 2908 | ||
2890 | return NULL; | 2909 | return NULL; |
2891 | } | 2910 | } |
@@ -3090,15 +3109,15 @@ void snd_soc_unregister_codec(struct device *dev) | |||
3090 | { | 3109 | { |
3091 | struct snd_soc_codec *codec; | 3110 | struct snd_soc_codec *codec; |
3092 | 3111 | ||
3112 | mutex_lock(&client_mutex); | ||
3093 | list_for_each_entry(codec, &codec_list, list) { | 3113 | list_for_each_entry(codec, &codec_list, list) { |
3094 | if (dev == codec->dev) | 3114 | if (dev == codec->dev) |
3095 | goto found; | 3115 | goto found; |
3096 | } | 3116 | } |
3117 | mutex_unlock(&client_mutex); | ||
3097 | return; | 3118 | return; |
3098 | 3119 | ||
3099 | found: | 3120 | found: |
3100 | |||
3101 | mutex_lock(&client_mutex); | ||
3102 | list_del(&codec->list); | 3121 | list_del(&codec->list); |
3103 | snd_soc_component_del_unlocked(&codec->component); | 3122 | snd_soc_component_del_unlocked(&codec->component); |
3104 | mutex_unlock(&client_mutex); | 3123 | mutex_unlock(&client_mutex); |
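The soc-core hunks make the driver lists (codec_list, component_list, platform_list) consistently walked under client_mutex: the debugfs dumpers and the lookup/unregister paths take the mutex around their list walks, snd_soc_instantiate_card() takes it ahead of card->mutex so the find helpers it calls are covered, and those helpers now document the requirement with lockdep_assert_held(). A sketch of the resulting pattern; find_component_example() is a made-up helper, not a function in the file:

static struct snd_soc_component *find_component_example(const char *name)
{
	struct snd_soc_component *c;

	lockdep_assert_held(&client_mutex);   /* caller must hold the lock */

	list_for_each_entry(c, &component_list, list)
		if (!strcmp(c->name, name))
			return c;
	return NULL;
}

/* caller side:
 *	mutex_lock(&client_mutex);
 *	cmpnt = find_component_example("codec.0");
 *	mutex_unlock(&client_mutex);
 */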
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 67d476548dcf..07f984d5f516 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h | |||
@@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"), | |||
1773 | } | 1773 | } |
1774 | } | 1774 | } |
1775 | }, | 1775 | }, |
1776 | { | ||
1777 | USB_DEVICE(0x0582, 0x0159), | ||
1778 | .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { | ||
1779 | /* .vendor_name = "Roland", */ | ||
1780 | /* .product_name = "UA-22", */ | ||
1781 | .ifnum = QUIRK_ANY_INTERFACE, | ||
1782 | .type = QUIRK_COMPOSITE, | ||
1783 | .data = (const struct snd_usb_audio_quirk[]) { | ||
1784 | { | ||
1785 | .ifnum = 0, | ||
1786 | .type = QUIRK_AUDIO_STANDARD_INTERFACE | ||
1787 | }, | ||
1788 | { | ||
1789 | .ifnum = 1, | ||
1790 | .type = QUIRK_AUDIO_STANDARD_INTERFACE | ||
1791 | }, | ||
1792 | { | ||
1793 | .ifnum = 2, | ||
1794 | .type = QUIRK_MIDI_FIXED_ENDPOINT, | ||
1795 | .data = & (const struct snd_usb_midi_endpoint_info) { | ||
1796 | .out_cables = 0x0001, | ||
1797 | .in_cables = 0x0001 | ||
1798 | } | ||
1799 | }, | ||
1800 | { | ||
1801 | .ifnum = -1 | ||
1802 | } | ||
1803 | } | ||
1804 | } | ||
1805 | }, | ||
1776 | /* this catches most recent vendor-specific Roland devices */ | 1806 | /* this catches most recent vendor-specific Roland devices */ |
1777 | { | 1807 | { |
1778 | .match_flags = USB_DEVICE_ID_MATCH_VENDOR | | 1808 | .match_flags = USB_DEVICE_ID_MATCH_VENDOR | |
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 61bf9128e1f2..9d9db3b296dd 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c | |||
@@ -30,6 +30,8 @@ static int disasm_line__parse(char *line, char **namep, char **rawp); | |||
30 | 30 | ||
31 | static void ins__delete(struct ins_operands *ops) | 31 | static void ins__delete(struct ins_operands *ops) |
32 | { | 32 | { |
33 | if (ops == NULL) | ||
34 | return; | ||
33 | zfree(&ops->source.raw); | 35 | zfree(&ops->source.raw); |
34 | zfree(&ops->source.name); | 36 | zfree(&ops->source.name); |
35 | zfree(&ops->target.raw); | 37 | zfree(&ops->target.raw); |
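The perf change makes ins__delete() tolerate a NULL argument, matching the usual free()-style contract, so callers no longer need their own guard:

	/* without the guard this would dereference NULL inside ins__delete() */
	struct ins_operands *ops = NULL;
	ins__delete(ops);   /* now a harmless no-op */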
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index 3ed7c0476d48..2e2ba2efa0d9 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile | |||
@@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c | |||
209 | 209 | ||
210 | $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ) | 210 | $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ) |
211 | $(ECHO) " CC " $@ | 211 | $(ECHO) " CC " $@ |
212 | $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@ | 212 | $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@ |
213 | $(QUIET) $(STRIPCMD) $@ | 213 | $(QUIET) $(STRIPCMD) $@ |
214 | 214 | ||
215 | $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC) | 215 | $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC) |
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 4e511221a0c1..0db571340edb 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
@@ -22,6 +22,14 @@ TARGETS += vm | |||
22 | TARGETS_HOTPLUG = cpu-hotplug | 22 | TARGETS_HOTPLUG = cpu-hotplug |
23 | TARGETS_HOTPLUG += memory-hotplug | 23 | TARGETS_HOTPLUG += memory-hotplug |
24 | 24 | ||
25 | # Clear LDFLAGS and MAKEFLAGS if called from main | ||
26 | # Makefile to avoid test build failures when test | ||
27 | # Makefile doesn't have explicit build rules. | ||
28 | ifeq (1,$(MAKELEVEL)) | ||
29 | undefine LDFLAGS | ||
30 | override MAKEFLAGS = | ||
31 | endif | ||
32 | |||
25 | all: | 33 | all: |
26 | for TARGET in $(TARGETS); do \ | 34 | for TARGET in $(TARGETS); do \ |
27 | make -C $$TARGET; \ | 35 | make -C $$TARGET; \ |
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c index e238c9559caf..8d5d1d2ee7c1 100644 --- a/tools/testing/selftests/exec/execveat.c +++ b/tools/testing/selftests/exec/execveat.c | |||
@@ -30,7 +30,7 @@ static int execveat_(int fd, const char *path, char **argv, char **envp, | |||
30 | #ifdef __NR_execveat | 30 | #ifdef __NR_execveat |
31 | return syscall(__NR_execveat, fd, path, argv, envp, flags); | 31 | return syscall(__NR_execveat, fd, path, argv, envp, flags); |
32 | #else | 32 | #else |
33 | errno = -ENOSYS; | 33 | errno = ENOSYS; |
34 | return -1; | 34 | return -1; |
35 | #endif | 35 | #endif |
36 | } | 36 | } |
@@ -234,6 +234,14 @@ static int run_tests(void) | |||
234 | int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC); | 234 | int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC); |
235 | int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC); | 235 | int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC); |
236 | 236 | ||
237 | /* Check if we have execveat at all, and bail early if not */ | ||
238 | errno = 0; | ||
239 | execveat_(-1, NULL, NULL, NULL, 0); | ||
240 | if (errno == ENOSYS) { | ||
241 | printf("[FAIL] ENOSYS calling execveat - no kernel support?\n"); | ||
242 | return 1; | ||
243 | } | ||
244 | |||
237 | /* Change file position to confirm it doesn't affect anything */ | 245 | /* Change file position to confirm it doesn't affect anything */ |
238 | lseek(fd, 10, SEEK_SET); | 246 | lseek(fd, 10, SEEK_SET); |
239 | 247 | ||
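The execveat selftest fix is the sign of the stored errno: errno takes positive error codes, so the fallback stub for kernels without __NR_execveat must set ENOSYS, not -ENOSYS, for the new early-bail check (errno == ENOSYS) to fire. Minimal sketch of such a fallback; execveat_fallback() is a hypothetical name:

static int execveat_fallback(void)
{
	errno = ENOSYS;   /* positive errno value, as callers test for */
	return -1;        /* mimic a failing syscall */
}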
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c index a0a7b5d1a070..f9b9c7c51372 100644 --- a/virt/kvm/arm/vgic-v2.c +++ b/virt/kvm/arm/vgic-v2.c | |||
@@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, | |||
72 | { | 72 | { |
73 | if (!(lr_desc.state & LR_STATE_MASK)) | 73 | if (!(lr_desc.state & LR_STATE_MASK)) |
74 | vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); | 74 | vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); |
75 | else | ||
76 | vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr); | ||
75 | } | 77 | } |
76 | 78 | ||
77 | static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) | 79 | static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) |
@@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) | |||
84 | return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; | 86 | return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; |
85 | } | 87 | } |
86 | 88 | ||
89 | static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu) | ||
90 | { | ||
91 | vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0; | ||
92 | } | ||
93 | |||
87 | static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) | 94 | static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) |
88 | { | 95 | { |
89 | u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; | 96 | u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; |
@@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = { | |||
148 | .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, | 155 | .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, |
149 | .get_elrsr = vgic_v2_get_elrsr, | 156 | .get_elrsr = vgic_v2_get_elrsr, |
150 | .get_eisr = vgic_v2_get_eisr, | 157 | .get_eisr = vgic_v2_get_eisr, |
158 | .clear_eisr = vgic_v2_clear_eisr, | ||
151 | .get_interrupt_status = vgic_v2_get_interrupt_status, | 159 | .get_interrupt_status = vgic_v2_get_interrupt_status, |
152 | .enable_underflow = vgic_v2_enable_underflow, | 160 | .enable_underflow = vgic_v2_enable_underflow, |
153 | .disable_underflow = vgic_v2_disable_underflow, | 161 | .disable_underflow = vgic_v2_disable_underflow, |
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 3a62d8a9a2c6..dff06021e748 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c | |||
@@ -104,6 +104,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, | |||
104 | { | 104 | { |
105 | if (!(lr_desc.state & LR_STATE_MASK)) | 105 | if (!(lr_desc.state & LR_STATE_MASK)) |
106 | vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); | 106 | vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); |
107 | else | ||
108 | vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr); | ||
107 | } | 109 | } |
108 | 110 | ||
109 | static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) | 111 | static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) |
@@ -116,6 +118,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu) | |||
116 | return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; | 118 | return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; |
117 | } | 119 | } |
118 | 120 | ||
121 | static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu) | ||
122 | { | ||
123 | vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0; | ||
124 | } | ||
125 | |||
119 | static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) | 126 | static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) |
120 | { | 127 | { |
121 | u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; | 128 | u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; |
@@ -192,6 +199,7 @@ static const struct vgic_ops vgic_v3_ops = { | |||
192 | .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, | 199 | .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, |
193 | .get_elrsr = vgic_v3_get_elrsr, | 200 | .get_elrsr = vgic_v3_get_elrsr, |
194 | .get_eisr = vgic_v3_get_eisr, | 201 | .get_eisr = vgic_v3_get_eisr, |
202 | .clear_eisr = vgic_v3_clear_eisr, | ||
195 | .get_interrupt_status = vgic_v3_get_interrupt_status, | 203 | .get_interrupt_status = vgic_v3_get_interrupt_status, |
196 | .enable_underflow = vgic_v3_enable_underflow, | 204 | .enable_underflow = vgic_v3_enable_underflow, |
197 | .disable_underflow = vgic_v3_disable_underflow, | 205 | .disable_underflow = vgic_v3_disable_underflow, |
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 0cc6ab6005a0..c9f60f524588 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
@@ -883,6 +883,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) | |||
883 | return vgic_ops->get_eisr(vcpu); | 883 | return vgic_ops->get_eisr(vcpu); |
884 | } | 884 | } |
885 | 885 | ||
886 | static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu) | ||
887 | { | ||
888 | vgic_ops->clear_eisr(vcpu); | ||
889 | } | ||
890 | |||
886 | static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) | 891 | static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) |
887 | { | 892 | { |
888 | return vgic_ops->get_interrupt_status(vcpu); | 893 | return vgic_ops->get_interrupt_status(vcpu); |
@@ -922,6 +927,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) | |||
922 | vgic_set_lr(vcpu, lr_nr, vlr); | 927 | vgic_set_lr(vcpu, lr_nr, vlr); |
923 | clear_bit(lr_nr, vgic_cpu->lr_used); | 928 | clear_bit(lr_nr, vgic_cpu->lr_used); |
924 | vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; | 929 | vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; |
930 | vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); | ||
925 | } | 931 | } |
926 | 932 | ||
927 | /* | 933 | /* |
@@ -978,6 +984,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
978 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); | 984 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); |
979 | vlr.state |= LR_STATE_PENDING; | 985 | vlr.state |= LR_STATE_PENDING; |
980 | vgic_set_lr(vcpu, lr, vlr); | 986 | vgic_set_lr(vcpu, lr, vlr); |
987 | vgic_sync_lr_elrsr(vcpu, lr, vlr); | ||
981 | return true; | 988 | return true; |
982 | } | 989 | } |
983 | } | 990 | } |
@@ -999,6 +1006,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
999 | vlr.state |= LR_EOI_INT; | 1006 | vlr.state |= LR_EOI_INT; |
1000 | 1007 | ||
1001 | vgic_set_lr(vcpu, lr, vlr); | 1008 | vgic_set_lr(vcpu, lr, vlr); |
1009 | vgic_sync_lr_elrsr(vcpu, lr, vlr); | ||
1002 | 1010 | ||
1003 | return true; | 1011 | return true; |
1004 | } | 1012 | } |
@@ -1136,6 +1144,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1136 | if (status & INT_STATUS_UNDERFLOW) | 1144 | if (status & INT_STATUS_UNDERFLOW) |
1137 | vgic_disable_underflow(vcpu); | 1145 | vgic_disable_underflow(vcpu); |
1138 | 1146 | ||
1147 | /* | ||
1148 | * In the next iterations of the vcpu loop, if we sync the vgic state | ||
1149 | * after flushing it, but before entering the guest (this happens for | ||
1150 | * pending signals and vmid rollovers), then make sure we don't pick | ||
1151 | * up any old maintenance interrupts here. | ||
1152 | */ | ||
1153 | vgic_clear_eisr(vcpu); | ||
1154 | |||
1139 | return level_pending; | 1155 | return level_pending; |
1140 | } | 1156 | } |
1141 | 1157 | ||
@@ -1583,8 +1599,10 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) | |||
1583 | * emulation. So check this here again. KVM_CREATE_DEVICE does | 1599 | * emulation. So check this here again. KVM_CREATE_DEVICE does |
1584 | * the proper checks already. | 1600 | * the proper checks already. |
1585 | */ | 1601 | */ |
1586 | if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) | 1602 | if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) { |
1587 | return -ENODEV; | 1603 | ret = -ENODEV; |
1604 | goto out; | ||
1605 | } | ||
1588 | 1606 | ||
1589 | /* | 1607 | /* |
1590 | * Any time a vcpu is run, vcpu_load is called which tries to grab the | 1608 | * Any time a vcpu is run, vcpu_load is called which tries to grab the |
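The vgic changes keep two pieces of cached distributor state honest: vgic_elrsr now tracks a list register both when it empties (set the bit) and when it is put back in use (clear the bit), vgic_sync_lr_elrsr() is called every time an LR is rewritten, and the new clear_eisr hook wipes end-of-interrupt status after maintenance processing so a sync that runs without an intervening guest entry does not act on stale EISR bits. The bookkeeping rule, with elrsr standing in for the per-vcpu vgic_elrsr field:

	if (!(lr_desc.state & LR_STATE_MASK))
		elrsr |=  (1ULL << lr);   /* LR empty  -> bit set   */
	else
		elrsr &= ~(1ULL << lr);   /* LR in use -> bit clear */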
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a1093700f3a4..cc6a25d95fbf 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -471,7 +471,7 @@ static struct kvm *kvm_create_vm(unsigned long type) | |||
471 | BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); | 471 | BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); |
472 | 472 | ||
473 | r = -ENOMEM; | 473 | r = -ENOMEM; |
474 | kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); | 474 | kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots)); |
475 | if (!kvm->memslots) | 475 | if (!kvm->memslots) |
476 | goto out_err_no_srcu; | 476 | goto out_err_no_srcu; |
477 | 477 | ||
@@ -522,7 +522,7 @@ out_err_no_srcu: | |||
522 | out_err_no_disable: | 522 | out_err_no_disable: |
523 | for (i = 0; i < KVM_NR_BUSES; i++) | 523 | for (i = 0; i < KVM_NR_BUSES; i++) |
524 | kfree(kvm->buses[i]); | 524 | kfree(kvm->buses[i]); |
525 | kfree(kvm->memslots); | 525 | kvfree(kvm->memslots); |
526 | kvm_arch_free_vm(kvm); | 526 | kvm_arch_free_vm(kvm); |
527 | return ERR_PTR(r); | 527 | return ERR_PTR(r); |
528 | } | 528 | } |
@@ -578,7 +578,7 @@ static void kvm_free_physmem(struct kvm *kvm) | |||
578 | kvm_for_each_memslot(memslot, slots) | 578 | kvm_for_each_memslot(memslot, slots) |
579 | kvm_free_physmem_slot(kvm, memslot, NULL); | 579 | kvm_free_physmem_slot(kvm, memslot, NULL); |
580 | 580 | ||
581 | kfree(kvm->memslots); | 581 | kvfree(kvm->memslots); |
582 | } | 582 | } |
583 | 583 | ||
584 | static void kvm_destroy_devices(struct kvm *kvm) | 584 | static void kvm_destroy_devices(struct kvm *kvm) |
@@ -871,10 +871,10 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
871 | goto out_free; | 871 | goto out_free; |
872 | } | 872 | } |
873 | 873 | ||
874 | slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), | 874 | slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); |
875 | GFP_KERNEL); | ||
876 | if (!slots) | 875 | if (!slots) |
877 | goto out_free; | 876 | goto out_free; |
877 | memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); | ||
878 | 878 | ||
879 | if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { | 879 | if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { |
880 | slot = id_to_memslot(slots, mem->slot); | 880 | slot = id_to_memslot(slots, mem->slot); |
@@ -917,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
917 | kvm_arch_commit_memory_region(kvm, mem, &old, change); | 917 | kvm_arch_commit_memory_region(kvm, mem, &old, change); |
918 | 918 | ||
919 | kvm_free_physmem_slot(kvm, &old, &new); | 919 | kvm_free_physmem_slot(kvm, &old, &new); |
920 | kfree(old_memslots); | 920 | kvfree(old_memslots); |
921 | 921 | ||
922 | /* | 922 | /* |
923 | * IOMMU mapping: New slots need to be mapped. Old slots need to be | 923 | * IOMMU mapping: New slots need to be mapped. Old slots need to be |
@@ -936,7 +936,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
936 | return 0; | 936 | return 0; |
937 | 937 | ||
938 | out_slots: | 938 | out_slots: |
939 | kfree(slots); | 939 | kvfree(slots); |
940 | out_free: | 940 | out_free: |
941 | kvm_free_physmem_slot(kvm, &new, &old); | 941 | kvm_free_physmem_slot(kvm, &new, &old); |
942 | out: | 942 | out: |
@@ -2492,6 +2492,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) | |||
2492 | case KVM_CAP_SIGNAL_MSI: | 2492 | case KVM_CAP_SIGNAL_MSI: |
2493 | #endif | 2493 | #endif |
2494 | #ifdef CONFIG_HAVE_KVM_IRQFD | 2494 | #ifdef CONFIG_HAVE_KVM_IRQFD |
2495 | case KVM_CAP_IRQFD: | ||
2495 | case KVM_CAP_IRQFD_RESAMPLE: | 2496 | case KVM_CAP_IRQFD_RESAMPLE: |
2496 | #endif | 2497 | #endif |
2497 | case KVM_CAP_CHECK_EXTENSION_VM: | 2498 | case KVM_CAP_CHECK_EXTENSION_VM: |
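The kvm_main hunks move struct kvm_memslots allocation from kzalloc()/kmemdup() to kvm_kvzalloc() plus an explicit memcpy(), and every matching release becomes kvfree(): the structure can be large enough that a physically contiguous kmalloc may fail, so the helper falls back to vmalloc and the free path has to cope with either origin. Rough shape of such a helper, not the exact kernel code:

static void *kvzalloc_sketch(unsigned long size)
{
	if (size > PAGE_SIZE)
		return vzalloc(size);          /* large: virtually contiguous */
	return kzalloc(size, GFP_KERNEL);      /* small: ordinary kmalloc     */
}
/* kvfree() inspects the address and calls vfree() or kfree() accordingly */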