commit 1e8d6421cff2c24fe0b345711e7a21af02e8bcf5
tree 773b30106efb9b48055bc93958e5a94ac53768ce
parent f7b12606b5de323a2bb5ca1696558efde8f25441
parent 960dfc4eb23a28495276b02604d7458e0e1a1ed8
author David S. Miller <davem@davemloft.net> 2014-02-19 01:24:22 -0500
committer David S. Miller <davem@davemloft.net> 2014-02-19 01:24:22 -0500

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/bonding/bond_3ad.h
	drivers/net/bonding/bond_main.c

Two minor conflicts in bonding, both of which were overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>

540 files changed, 5378 insertions(+), 3558 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-tty b/Documentation/ABI/testing/sysfs-tty
index ad22fb0ee765..a2ccec35ffce 100644
--- a/Documentation/ABI/testing/sysfs-tty
+++ b/Documentation/ABI/testing/sysfs-tty
@@ -3,7 +3,8 @@ Date: Nov 2010
3 | Contact: Kay Sievers <kay.sievers@vrfy.org> | 3 | Contact: Kay Sievers <kay.sievers@vrfy.org> |
4 | Description: | 4 | Description: |
5 | Shows the list of currently configured | 5 | Shows the list of currently configured |
6 | console devices, like 'tty1 ttyS0'. | 6 | tty devices used for the console, |
7 | like 'tty1 ttyS0'. | ||
7 | The last entry in the file is the active | 8 | The last entry in the file is the active |
8 | device connected to /dev/console. | 9 | device connected to /dev/console. |
9 | The file supports poll() to detect virtual | 10 | The file supports poll() to detect virtual |
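The poll() support mentioned above follows the usual sysfs notification
pattern: read the attribute once, wait for POLLPRI, then rewind and re-read.
A minimal user-space sketch (illustrative only, not part of this patch):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64];
		ssize_t n;
		int fd = open("/sys/class/tty/console/active", O_RDONLY);

		if (fd < 0)
			return 1;

		for (;;) {
			n = read(fd, buf, sizeof(buf) - 1);	/* current value */
			if (n > 0) {
				buf[n] = '\0';
				printf("console: %s", buf);
			}
			struct pollfd pfd = { .fd = fd, .events = POLLPRI };
			poll(&pfd, 1, -1);		/* blocks until the attribute changes */
			lseek(fd, 0, SEEK_SET);		/* rewind before re-reading */
		}
	}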
diff --git a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
index 0a85c70cd30a..07ad02075a93 100644
--- a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
+++ b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
@@ -13,6 +13,9 @@ Required properties:
13 | - #address-cells: should be one. The cell is the slot id. | 13 | - #address-cells: should be one. The cell is the slot id. |
14 | - #size-cells: should be zero. | 14 | - #size-cells: should be zero. |
15 | - at least one slot node | 15 | - at least one slot node |
16 | - clock-names: tuple listing input clock names. | ||
17 | Required elements: "mci_clk" | ||
18 | - clocks: phandles to input clocks. | ||
16 | 19 | ||
17 | The node contains child nodes for each slot that the platform uses | 20 | The node contains child nodes for each slot that the platform uses |
18 | 21 | ||
@@ -24,6 +27,8 @@ mmc0: mmc@f0008000 {
24 | interrupts = <12 4>; | 27 | interrupts = <12 4>; |
25 | #address-cells = <1>; | 28 | #address-cells = <1>; |
26 | #size-cells = <0>; | 29 | #size-cells = <0>; |
30 | clock-names = "mci_clk"; | ||
31 | clocks = <&mci0_clk>; | ||
27 | 32 | ||
28 | [ child node definitions...] | 33 | [ child node definitions...] |
29 | }; | 34 | }; |
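For context (not part of the patch): on the driver side the clock named in
clock-names is looked up and enabled roughly as below; the helper name is
illustrative.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* Sketch: consume the "mci_clk" input clock declared in the binding. */
	static int atmci_clk_setup(struct platform_device *pdev)
	{
		struct clk *mci_clk;

		mci_clk = devm_clk_get(&pdev->dev, "mci_clk");
		if (IS_ERR(mci_clk))
			return PTR_ERR(mci_clk);

		return clk_prepare_enable(mci_clk);
	}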
diff --git a/Documentation/devicetree/bindings/net/sti-dwmac.txt b/Documentation/devicetree/bindings/net/sti-dwmac.txt
new file mode 100644
index 000000000000..3dd3d0bf112f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/sti-dwmac.txt
@@ -0,0 +1,58 @@
1 | STMicroelectronics SoC DWMAC glue layer controller | ||
2 | |||
3 | The device node has following properties. | ||
4 | |||
5 | Required properties: | ||
6 | - compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or | ||
7 | "st,stid127-dwmac". | ||
8 | - reg : Offset of the glue configuration register map in system | ||
9 | configuration regmap pointed by st,syscon property and size. | ||
10 | |||
11 | - reg-names : Should be "sti-ethconf". | ||
12 | |||
13 | - st,syscon : Should be phandle to system configuration node which | ||
14 | encompases this glue registers. | ||
15 | |||
16 | - st,tx-retime-src: On STi Parts for Giga bit speeds, 125Mhz clocks can be | ||
17 | wired up in from different sources. One via TXCLK pin and other via CLK_125 | ||
18 | pin. This wiring is totally board dependent. However the retiming glue | ||
19 | logic should be configured accordingly. Possible values for this property | ||
20 | |||
21 | "txclk" - if 125Mhz clock is wired up via txclk line. | ||
22 | "clk_125" - if 125Mhz clock is wired up via clk_125 line. | ||
23 | |||
24 | This property is only valid for Giga bit setup( GMII, RGMII), and it is | ||
25 | un-used for non-giga bit (MII and RMII) setups. Also note that internal | ||
26 | clockgen can not generate stable 125Mhz clock. | ||
27 | |||
28 | - st,ext-phyclk: This boolean property indicates who is generating the clock | ||
29 | for tx and rx. This property is only valid for RMII case where the clock can | ||
30 | be generated from the MAC or PHY. | ||
31 | |||
32 | - clock-names: should be "sti-ethclk". | ||
33 | - clocks: Should point to ethernet clockgen which can generate phyclk. | ||
34 | |||
35 | |||
36 | Example: | ||
37 | |||
38 | ethernet0: dwmac@fe810000 { | ||
39 | device_type = "network"; | ||
40 | compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710"; | ||
41 | reg = <0xfe810000 0x8000>, <0x8bc 0x4>; | ||
42 | reg-names = "stmmaceth", "sti-ethconf"; | ||
43 | interrupts = <0 133 0>, <0 134 0>, <0 135 0>; | ||
44 | interrupt-names = "macirq", "eth_wake_irq", "eth_lpi"; | ||
45 | phy-mode = "mii"; | ||
46 | |||
47 | st,syscon = <&syscfg_rear>; | ||
48 | |||
49 | snps,pbl = <32>; | ||
50 | snps,mixed-burst; | ||
51 | |||
52 | resets = <&softreset STIH416_ETH0_SOFTRESET>; | ||
53 | reset-names = "stmmaceth"; | ||
54 | pinctrl-0 = <&pinctrl_mii0>; | ||
55 | pinctrl-names = "default"; | ||
56 | clocks = <&CLK_S_GMAC0_PHY>; | ||
57 | clock-names = "stmmaceth"; | ||
58 | }; | ||
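For context (not part of the patch): a glue driver typically resolves the
st,syscon phandle and the named clock described above roughly as follows;
the function name is illustrative.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/of.h>
	#include <linux/regmap.h>

	/* Sketch: look up the sti-ethconf regmap and the PHY clock. */
	static int sti_dwmac_parse_dt(struct device *dev, struct device_node *np)
	{
		struct regmap *regmap;
		struct clk *ethclk;

		/* "st,syscon" points at the system configuration regmap that
		 * contains the sti-ethconf glue register. */
		regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		/* "sti-ethclk" is the clockgen clock that can feed the PHY. */
		ethclk = devm_clk_get(dev, "sti-ethclk");
		if (IS_ERR(ethclk))
			return PTR_ERR(ethclk);

		return 0;
	}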
diff --git a/Documentation/devicetree/bindings/power/bq2415x.txt b/Documentation/devicetree/bindings/power/bq2415x.txt
new file mode 100644
index 000000000000..d0327f0b59ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/bq2415x.txt
@@ -0,0 +1,47 @@
1 | Binding for TI bq2415x Li-Ion Charger | ||
2 | |||
3 | Required properties: | ||
4 | - compatible: Should contain one of the following: | ||
5 | * "ti,bq24150" | ||
6 | * "ti,bq24150" | ||
7 | * "ti,bq24150a" | ||
8 | * "ti,bq24151" | ||
9 | * "ti,bq24151a" | ||
10 | * "ti,bq24152" | ||
11 | * "ti,bq24153" | ||
12 | * "ti,bq24153a" | ||
13 | * "ti,bq24155" | ||
14 | * "ti,bq24156" | ||
15 | * "ti,bq24156a" | ||
16 | * "ti,bq24158" | ||
17 | - reg: integer, i2c address of the device. | ||
18 | - ti,current-limit: integer, initial maximum current charger can pull | ||
19 | from power supply in mA. | ||
20 | - ti,weak-battery-voltage: integer, weak battery voltage threshold in mV. | ||
21 | The chip will use slow precharge if battery voltage | ||
22 | is below this value. | ||
23 | - ti,battery-regulation-voltage: integer, maximum charging voltage in mV. | ||
24 | - ti,charge-current: integer, maximum charging current in mA. | ||
25 | - ti,termination-current: integer, charge will be terminated when current in | ||
26 | constant-voltage phase drops below this value (in mA). | ||
27 | - ti,resistor-sense: integer, value of sensing resistor in milliohm. | ||
28 | |||
29 | Optional properties: | ||
30 | - ti,usb-charger-detection: phandle to usb charger detection device. | ||
31 | (required for auto mode) | ||
32 | |||
33 | Example from Nokia N900: | ||
34 | |||
35 | bq24150a { | ||
36 | compatible = "ti,bq24150a"; | ||
37 | reg = <0x6b>; | ||
38 | |||
39 | ti,current-limit = <100>; | ||
40 | ti,weak-battery-voltage = <3400>; | ||
41 | ti,battery-regulation-voltage = <4200>; | ||
42 | ti,charge-current = <650>; | ||
43 | ti,termination-current = <100>; | ||
44 | ti,resistor-sense = <68>; | ||
45 | |||
46 | ti,usb-charger-detection = <&isp1704>; | ||
47 | }; | ||
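For context (not part of the patch): the integer properties listed above are
read with of_property_read_u32(); the function name below is illustrative.

	#include <linux/of.h>
	#include <linux/types.h>

	/* Sketch: pull two of the charger limits out of the node above. */
	static int bq2415x_parse_dt(struct device_node *np)
	{
		u32 current_limit, charge_current;
		int ret;

		ret = of_property_read_u32(np, "ti,current-limit", &current_limit);
		if (ret)
			return ret;

		ret = of_property_read_u32(np, "ti,charge-current", &charge_current);
		if (ret)
			return ret;

		/* ... the remaining ti,* properties are read the same way ... */
		return 0;
	}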
diff --git a/Documentation/devicetree/bindings/spi/spi_atmel.txt b/Documentation/devicetree/bindings/spi/spi_atmel.txt
index 07e04cdc0c9e..4f8184d069cb 100644
--- a/Documentation/devicetree/bindings/spi/spi_atmel.txt
+++ b/Documentation/devicetree/bindings/spi/spi_atmel.txt
@@ -5,6 +5,9 @@ Required properties:
5 | - reg: Address and length of the register set for the device | 5 | - reg: Address and length of the register set for the device |
6 | - interrupts: Should contain spi interrupt | 6 | - interrupts: Should contain spi interrupt |
7 | - cs-gpios: chipselects | 7 | - cs-gpios: chipselects |
8 | - clock-names: tuple listing input clock names. | ||
9 | Required elements: "spi_clk" | ||
10 | - clocks: phandles to input clocks. | ||
8 | 11 | ||
9 | Example: | 12 | Example: |
10 | 13 | ||
@@ -14,6 +17,8 @@ spi1: spi@fffcc000 {
14 | interrupts = <13 4 5>; | 17 | interrupts = <13 4 5>; |
15 | #address-cells = <1>; | 18 | #address-cells = <1>; |
16 | #size-cells = <0>; | 19 | #size-cells = <0>; |
20 | clocks = <&spi1_clk>; | ||
21 | clock-names = "spi_clk"; | ||
17 | cs-gpios = <&pioB 3 0>; | 22 | cs-gpios = <&pioB 3 0>; |
18 | status = "okay"; | 23 | status = "okay"; |
19 | 24 | ||
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 3f900cd51bf0..40ce2df0e0e9 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,6 +8,7 @@ ad Avionic Design GmbH
8 | adi Analog Devices, Inc. | 8 | adi Analog Devices, Inc. |
9 | aeroflexgaisler Aeroflex Gaisler AB | 9 | aeroflexgaisler Aeroflex Gaisler AB |
10 | ak Asahi Kasei Corp. | 10 | ak Asahi Kasei Corp. |
11 | allwinner Allwinner Technology Co., Ltd. | ||
11 | altr Altera Corp. | 12 | altr Altera Corp. |
12 | amcc Applied Micro Circuits Corporation (APM, formally AMCC) | 13 | amcc Applied Micro Circuits Corporation (APM, formally AMCC) |
13 | amstaos AMS-Taos Inc. | 14 | amstaos AMS-Taos Inc. |
@@ -40,6 +41,7 @@ gmt Global Mixed-mode Technology, Inc.
40 | gumstix Gumstix, Inc. | 41 | gumstix Gumstix, Inc. |
41 | haoyu Haoyu Microelectronic Co. Ltd. | 42 | haoyu Haoyu Microelectronic Co. Ltd. |
42 | hisilicon Hisilicon Limited. | 43 | hisilicon Hisilicon Limited. |
44 | honeywell Honeywell | ||
43 | hp Hewlett Packard | 45 | hp Hewlett Packard |
44 | ibm International Business Machines (IBM) | 46 | ibm International Business Machines (IBM) |
45 | idt Integrated Device Technologies, Inc. | 47 | idt Integrated Device Technologies, Inc. |
@@ -55,6 +57,7 @@ maxim Maxim Integrated Products
55 | microchip Microchip Technology Inc. | 57 | microchip Microchip Technology Inc. |
56 | mosaixtech Mosaix Technologies, Inc. | 58 | mosaixtech Mosaix Technologies, Inc. |
57 | national National Semiconductor | 59 | national National Semiconductor |
60 | neonode Neonode Inc. | ||
58 | nintendo Nintendo | 61 | nintendo Nintendo |
59 | nvidia NVIDIA | 62 | nvidia NVIDIA |
60 | nxp NXP Semiconductors | 63 | nxp NXP Semiconductors |
@@ -64,7 +67,7 @@ phytec PHYTEC Messtechnik GmbH
64 | picochip Picochip Ltd | 67 | picochip Picochip Ltd |
65 | powervr PowerVR (deprecated, use img) | 68 | powervr PowerVR (deprecated, use img) |
66 | qca Qualcomm Atheros, Inc. | 69 | qca Qualcomm Atheros, Inc. |
67 | qcom Qualcomm, Inc. | 70 | qcom Qualcomm Technologies, Inc |
68 | ralink Mediatek/Ralink Technology Corp. | 71 | ralink Mediatek/Ralink Technology Corp. |
69 | ramtron Ramtron International | 72 | ramtron Ramtron International |
70 | realtek Realtek Semiconductor Corp. | 73 | realtek Realtek Semiconductor Corp. |
@@ -78,6 +81,7 @@ silabs Silicon Laboratories
78 | simtek | 81 | simtek |
79 | sirf SiRF Technology, Inc. | 82 | sirf SiRF Technology, Inc. |
80 | snps Synopsys, Inc. | 83 | snps Synopsys, Inc. |
84 | spansion Spansion Inc. | ||
81 | st STMicroelectronics | 85 | st STMicroelectronics |
82 | ste ST-Ericsson | 86 | ste ST-Ericsson |
83 | stericsson ST-Ericsson | 87 | stericsson ST-Ericsson |
diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices
index c70e7a7638d1..0d85ac1935b7 100644
--- a/Documentation/i2c/instantiating-devices
+++ b/Documentation/i2c/instantiating-devices
@@ -8,8 +8,8 @@ reason, the kernel code must instantiate I2C devices explicitly. There are
8 | several ways to achieve this, depending on the context and requirements. | 8 | several ways to achieve this, depending on the context and requirements. |
9 | 9 | ||
10 | 10 | ||
11 | Method 1: Declare the I2C devices by bus number | 11 | Method 1a: Declare the I2C devices by bus number |
12 | ----------------------------------------------- | 12 | ------------------------------------------------ |
13 | 13 | ||
14 | This method is appropriate when the I2C bus is a system bus as is the case | 14 | This method is appropriate when the I2C bus is a system bus as is the case |
15 | for many embedded systems. On such systems, each I2C bus has a number | 15 | for many embedded systems. On such systems, each I2C bus has a number |
@@ -51,6 +51,43 @@ The devices will be automatically unbound and destroyed when the I2C bus
51 | they sit on goes away (if ever.) | 51 | they sit on goes away (if ever.) |
52 | 52 | ||
53 | 53 | ||
54 | Method 1b: Declare the I2C devices via devicetree | ||
55 | ------------------------------------------------- | ||
56 | |||
57 | This method has the same implications as method 1a. The declaration of I2C | ||
58 | devices is here done via devicetree as subnodes of the master controller. | ||
59 | |||
60 | Example: | ||
61 | |||
62 | i2c1: i2c@400a0000 { | ||
63 | /* ... master properties skipped ... */ | ||
64 | clock-frequency = <100000>; | ||
65 | |||
66 | flash@50 { | ||
67 | compatible = "atmel,24c256"; | ||
68 | reg = <0x50>; | ||
69 | }; | ||
70 | |||
71 | pca9532: gpio@60 { | ||
72 | compatible = "nxp,pca9532"; | ||
73 | gpio-controller; | ||
74 | #gpio-cells = <2>; | ||
75 | reg = <0x60>; | ||
76 | }; | ||
77 | }; | ||
78 | |||
79 | Here, two devices are attached to the bus using a speed of 100kHz. For | ||
80 | additional properties which might be needed to set up the device, please refer | ||
81 | to its devicetree documentation in Documentation/devicetree/bindings/. | ||
82 | |||
83 | |||
84 | Method 1c: Declare the I2C devices via ACPI | ||
85 | ------------------------------------------- | ||
86 | |||
87 | ACPI can also describe I2C devices. There is special documentation for this | ||
88 | which is currently located at Documentation/acpi/enumeration.txt. | ||
89 | |||
90 | |||
54 | Method 2: Instantiate the devices explicitly | 91 | Method 2: Instantiate the devices explicitly |
55 | -------------------------------------------- | 92 | -------------------------------------------- |
56 | 93 | ||
diff --git a/Documentation/networking/3c505.txt b/Documentation/networking/3c505.txt
deleted file mode 100644
index 72f38b13101d..000000000000
--- a/Documentation/networking/3c505.txt
+++ /dev/null
@@ -1,45 +0,0 @@
1 | The 3Com Etherlink Plus (3c505) driver. | ||
2 | |||
3 | This driver now uses DMA. There is currently no support for PIO operation. | ||
4 | The default DMA channel is 6; this is _not_ autoprobed, so you must | ||
5 | make sure you configure it correctly. If loading the driver as a | ||
6 | module, you can do this with "modprobe 3c505 dma=n". If the driver is | ||
7 | linked statically into the kernel, you must either use an "ether=" | ||
8 | statement on the command line, or change the definition of ELP_DMA in 3c505.h. | ||
9 | |||
10 | The driver will warn you if it has to fall back on the compiled in | ||
11 | default DMA channel. | ||
12 | |||
13 | If no base address is given at boot time, the driver will autoprobe | ||
14 | ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver | ||
15 | will try to probe for it. | ||
16 | |||
17 | The driver can be used as a loadable module. | ||
18 | |||
19 | Theoretically, one instance of the driver can now run multiple cards, | ||
20 | in the standard way (when loading a module, say "modprobe 3c505 | ||
21 | io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested | ||
22 | this, though. | ||
23 | |||
24 | The driver may now support revision 2 hardware; the dependency on | ||
25 | being able to read the host control register has been removed. This | ||
26 | is also untested, since I don't have a suitable card. | ||
27 | |||
28 | Known problems: | ||
29 | I still see "DMA upload timed out" messages from time to time. These | ||
30 | seem to be fairly non-fatal though. | ||
31 | The card is old and slow. | ||
32 | |||
33 | To do: | ||
34 | Improve probe/setup code | ||
35 | Test multicast and promiscuous operation | ||
36 | |||
37 | Authors: | ||
38 | The driver is mainly written by Craig Southeren, email | ||
39 | <craigs@ineluki.apana.org.au>. | ||
40 | Parts of the driver (adapting the driver to 1.1.4+ kernels, | ||
41 | IRQ/address detection, some changes) and this README by | ||
42 | Juha Laiho <jlaiho@ichaos.nullnet.fi>. | ||
43 | DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk> | ||
44 | Multicard support, Software configurable DMA, etc., by | ||
45 | Christopher Collins <ccollins@pcug.org.au> | ||
diff --git a/Documentation/phy.txt b/Documentation/phy.txt
index 0103e4b15b0e..ebff6ee52441 100644
--- a/Documentation/phy.txt
+++ b/Documentation/phy.txt
@@ -75,14 +75,26 @@ Before the controller can make use of the PHY, it has to get a reference to
75 | it. This framework provides the following APIs to get a reference to the PHY. | 75 | it. This framework provides the following APIs to get a reference to the PHY. |
76 | 76 | ||
77 | struct phy *phy_get(struct device *dev, const char *string); | 77 | struct phy *phy_get(struct device *dev, const char *string); |
78 | struct phy *phy_optional_get(struct device *dev, const char *string); | ||
78 | struct phy *devm_phy_get(struct device *dev, const char *string); | 79 | struct phy *devm_phy_get(struct device *dev, const char *string); |
79 | 80 | struct phy *devm_phy_optional_get(struct device *dev, const char *string); | |
80 | phy_get and devm_phy_get can be used to get the PHY. In the case of dt boot, | 81 | |
81 | the string arguments should contain the phy name as given in the dt data and | 82 | phy_get, phy_optional_get, devm_phy_get and devm_phy_optional_get can |
82 | in the case of non-dt boot, it should contain the label of the PHY. | 83 | be used to get the PHY. In the case of dt boot, the string arguments |
83 | The only difference between the two APIs is that devm_phy_get associates the | 84 | should contain the phy name as given in the dt data and in the case of |
84 | device with the PHY using devres on successful PHY get. On driver detach, | 85 | non-dt boot, it should contain the label of the PHY. The two |
85 | release function is invoked on the the devres data and devres data is freed. | 86 | devm_phy_get associates the device with the PHY using devres on |
87 | successful PHY get. On driver detach, release function is invoked on | ||
88 | the the devres data and devres data is freed. phy_optional_get and | ||
89 | devm_phy_optional_get should be used when the phy is optional. These | ||
90 | two functions will never return -ENODEV, but instead returns NULL when | ||
91 | the phy cannot be found. | ||
92 | |||
93 | It should be noted that NULL is a valid phy reference. All phy | ||
94 | consumer calls on the NULL phy become NOPs. That is the release calls, | ||
95 | the phy_init() and phy_exit() calls, and phy_power_on() and | ||
96 | phy_power_off() calls are all NOP when applied to a NULL phy. The NULL | ||
97 | phy is useful in devices for handling optional phy devices. | ||
86 | 98 | ||
87 | 5. Releasing a reference to the PHY | 99 | 5. Releasing a reference to the PHY |
88 | 100 | ||
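For context (not part of the patch): a consumer using the optional-get API
added above might look like this; the device and PHY names are hypothetical.

	#include <linux/err.h>
	#include <linux/phy/phy.h>
	#include <linux/platform_device.h>

	/* Sketch: an absent PHY yields NULL and the phy_*() calls become NOPs. */
	static int example_phy_attach(struct platform_device *pdev)
	{
		struct phy *phy;
		int ret;

		phy = devm_phy_optional_get(&pdev->dev, "usb-phy");
		if (IS_ERR(phy))
			return PTR_ERR(phy);	/* a real error, e.g. -EPROBE_DEFER */

		ret = phy_init(phy);
		if (ret)
			return ret;

		return phy_power_on(phy);
	}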
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index f72e0d1e0da8..7982bcc4d151 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -543,7 +543,22 @@ SPI MASTER METHODS
543 | queuing transfers that arrive in the meantime. When the driver is | 543 | queuing transfers that arrive in the meantime. When the driver is |
544 | finished with this message, it must call | 544 | finished with this message, it must call |
545 | spi_finalize_current_message() so the subsystem can issue the next | 545 | spi_finalize_current_message() so the subsystem can issue the next |
546 | transfer. This may sleep. | 546 | message. This may sleep. |
547 | |||
548 | master->transfer_one(struct spi_master *master, struct spi_device *spi, | ||
549 | struct spi_transfer *transfer) | ||
550 | The subsystem calls the driver to transfer a single transfer while | ||
551 | queuing transfers that arrive in the meantime. When the driver is | ||
552 | finished with this transfer, it must call | ||
553 | spi_finalize_current_transfer() so the subsystem can issue the next | ||
554 | transfer. This may sleep. Note: transfer_one and transfer_one_message | ||
555 | are mutually exclusive; when both are set, the generic subsystem does | ||
556 | not call your transfer_one callback. | ||
557 | |||
558 | Return values: | ||
559 | negative errno: error | ||
560 | 0: transfer is finished | ||
561 | 1: transfer is still in progress | ||
547 | 562 | ||
548 | DEPRECATED METHODS | 563 | DEPRECATED METHODS |
549 | 564 | ||
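For context (not part of the patch): a transfer_one() implementation that
returns 1 and finishes from its interrupt handler might be sketched as
below; names are illustrative.

	#include <linux/interrupt.h>
	#include <linux/spi/spi.h>

	static int example_transfer_one(struct spi_master *master,
					struct spi_device *spi,
					struct spi_transfer *xfer)
	{
		/* kick off the hardware here (FIFO fill, DMA setup, ...) */
		return 1;	/* still in progress; the IRQ handler finalizes it */
	}

	/* Later, from the controller's completion interrupt: */
	static irqreturn_t example_irq(int irq, void *dev_id)
	{
		struct spi_master *master = dev_id;

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}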
diff --git a/Documentation/zh_CN/arm64/booting.txt b/Documentation/zh_CN/arm64/booting.txt
index 28fa325b7461..6f6d956ac1c9 100644
--- a/Documentation/zh_CN/arm64/booting.txt
+++ b/Documentation/zh_CN/arm64/booting.txt
@@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated
7 | or if there is a problem with the translation. | 7 | or if there is a problem with the translation. |
8 | 8 | ||
9 | Maintainer: Will Deacon <will.deacon@arm.com> | 9 | Maintainer: Will Deacon <will.deacon@arm.com> |
10 | Chinese maintainer: Fu Wei <tekkamanninja@gmail.com> | 10 | Chinese maintainer: Fu Wei <wefu@redhat.com> |
11 | --------------------------------------------------------------------- | 11 | --------------------------------------------------------------------- |
12 | Documentation/arm64/booting.txt çš„ä¸æ–‡ç¿»è¯‘ | 12 | Documentation/arm64/booting.txt çš„ä¸æ–‡ç¿»è¯‘ |
13 | 13 | ||
@@ -16,9 +16,9 @@ Documentation/arm64/booting.txt çš„ä¸æ–‡ç¿»è¯‘
16 | 译å˜åœ¨é—®é¢˜ï¼Œè¯·è”ç³»ä¸æ–‡ç‰ˆç»´æŠ¤è€…。 | 16 | 译å˜åœ¨é—®é¢˜ï¼Œè¯·è”ç³»ä¸æ–‡ç‰ˆç»´æŠ¤è€…。 |
17 | 17 | ||
18 | 英文版维护者: Will Deacon <will.deacon@arm.com> | 18 | 英文版维护者: Will Deacon <will.deacon@arm.com> |
19 | ä¸æ–‡ç‰ˆç»´æŠ¤è€…: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> | 19 | ä¸æ–‡ç‰ˆç»´æŠ¤è€…: å‚…ç‚œ Fu Wei <wefu@redhat.com> |
20 | ä¸æ–‡ç‰ˆç¿»è¯‘者: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> | 20 | ä¸æ–‡ç‰ˆç¿»è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> |
21 | ä¸æ–‡ç‰ˆæ ¡è¯‘者: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> | 21 | ä¸æ–‡ç‰ˆæ ¡è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> |
22 | 22 | ||
23 | 以下为æ£æ–‡ | 23 | 以下为æ£æ–‡ |
24 | --------------------------------------------------------------------- | 24 | --------------------------------------------------------------------- |
@@ -64,8 +64,8 @@ RAM,或å¯èƒ½ä½¿ç”¨å¯¹è¿™ä¸ªè®¾å¤‡å·²çŸ¥çš„ RAM ä¿¡æ¯ï¼Œè¿˜å¯èƒ½ä½¿ç”¨ä»»ä½•
64 | 64 | ||
65 | å¿…è¦æ€§: 强制 | 65 | å¿…è¦æ€§: 强制 |
66 | 66 | ||
67 | è®¾å¤‡æ ‘æ•°æ®å—(dtb)大å°å¿…é¡»ä¸å¤§äºŽ 2 MB,且ä½äºŽä»Žå†…æ ¸æ˜ åƒèµ·å§‹ç®—起第一个 | 67 | è®¾å¤‡æ ‘æ•°æ®å—(dtb)必须 8 å—节对é½ï¼Œå¹¶ä½äºŽä»Žå†…æ ¸æ˜ åƒèµ·å§‹ç®—起第一个 512MB |
68 | 512MB 内的 2MB è¾¹ç•Œä¸Šã€‚è¿™ä½¿å¾—å†…æ ¸å¯ä»¥é€šè¿‡åˆå§‹é¡µè¡¨ä¸çš„å•ä¸ªèŠ‚æè¿°ç¬¦æ¥ | 68 | 内,且ä¸å¾—跨越 2MB 对é½è¾¹ç•Œã€‚è¿™ä½¿å¾—å†…æ ¸å¯ä»¥é€šè¿‡åˆå§‹é¡µè¡¨ä¸çš„å•ä¸ªèŠ‚æè¿°ç¬¦æ¥ |
69 | æ˜ å°„æ¤æ•°æ®å—。 | 69 | æ˜ å°„æ¤æ•°æ®å—。 |
70 | 70 | ||
71 | 71 | ||
@@ -84,13 +84,23 @@ AArch64 å†…æ ¸å½“å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ¤å¦‚果使用了压缩内
84 | 84 | ||
85 | å¿…è¦æ€§: 强制 | 85 | å¿…è¦æ€§: 强制 |
86 | 86 | ||
87 | å·²è§£åŽ‹çš„å†…æ ¸æ˜ åƒåŒ…å«ä¸€ä¸ª 32 å—节的头,内容如下: | 87 | å·²è§£åŽ‹çš„å†…æ ¸æ˜ åƒåŒ…å«ä¸€ä¸ª 64 å—节的头,内容如下: |
88 | 88 | ||
89 | u32 magic = 0x14000008; /* 跳转到 stext, å°ç«¯ */ | 89 | u32 code0; /* å¯æ‰§è¡Œä»£ç */ |
90 | u32 res0 = 0; /* ä¿ç•™ */ | 90 | u32 code1; /* å¯æ‰§è¡Œä»£ç */ |
91 | u64 text_offset; /* æ˜ åƒè£…è½½å移 */ | 91 | u64 text_offset; /* æ˜ åƒè£…è½½å移 */ |
92 | u64 res0 = 0; /* ä¿ç•™ */ | ||
92 | u64 res1 = 0; /* ä¿ç•™ */ | 93 | u64 res1 = 0; /* ä¿ç•™ */ |
93 | u64 res2 = 0; /* ä¿ç•™ */ | 94 | u64 res2 = 0; /* ä¿ç•™ */ |
95 | u64 res3 = 0; /* ä¿ç•™ */ | ||
96 | u64 res4 = 0; /* ä¿ç•™ */ | ||
97 | u32 magic = 0x644d5241; /* é”æ•°, å°ç«¯, "ARM\x64" */ | ||
98 | u32 res5 = 0; /* ä¿ç•™ */ | ||
99 | |||
100 | |||
101 | æ˜ åƒå¤´æ³¨é‡Šï¼š | ||
102 | |||
103 | - code0/code1 负责跳转到 stext. | ||
94 | 104 | ||
95 | æ˜ åƒå¿…é¡»ä½äºŽç³»ç»Ÿ RAM 起始处的特定å移(当å‰æ˜¯ 0x80000)。系统 RAM | 105 | æ˜ åƒå¿…é¡»ä½äºŽç³»ç»Ÿ RAM 起始处的特定å移(当å‰æ˜¯ 0x80000)。系统 RAM |
96 | 的起始地å€å¿…须是以 2MB 对é½çš„。 | 106 | 的起始地å€å¿…须是以 2MB 对é½çš„。 |
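For reference (not part of the patch): the 64-byte header listed above maps
onto the following layout; the struct name is hypothetical.

	#include <linux/types.h>

	struct arm64_image_header {
		u32 code0;		/* executable code, branches to stext */
		u32 code1;		/* executable code */
		u64 text_offset;	/* image load offset */
		u64 res0;		/* reserved */
		u64 res1;		/* reserved */
		u64 res2;		/* reserved */
		u64 res3;		/* reserved */
		u64 res4;		/* reserved */
		u32 magic;		/* 0x644d5241, little endian, "ARM\x64" */
		u32 res5;		/* reserved */
	};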
@@ -118,9 +128,9 @@ AArch64 å†…æ ¸å½“å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ¤å¦‚果使用了压缩内
118 | 外部高速缓å˜ï¼ˆå¦‚æžœå˜åœ¨ï¼‰å¿…é¡»é…置并ç¦ç”¨ã€‚ | 128 | 外部高速缓å˜ï¼ˆå¦‚æžœå˜åœ¨ï¼‰å¿…é¡»é…置并ç¦ç”¨ã€‚ |
119 | 129 | ||
120 | - 架构计时器 | 130 | - 架构计时器 |
121 | CNTFRQ 必须设定为计时器的频率。 | 131 | CNTFRQ 必须设定为计时器的频率,且 CNTVOFF 必须设定为对所有 CPU |
122 | 如果在 EL1 模å¼ä¸‹è¿›å…¥å†…æ ¸ï¼Œåˆ™ CNTHCTL_EL2 ä¸çš„ EL1PCTEN (bit 0) | 132 | 都一致的值。如果在 EL1 模å¼ä¸‹è¿›å…¥å†…æ ¸ï¼Œåˆ™ CNTHCTL_EL2 ä¸çš„ |
123 | 必须置ä½ã€‚ | 133 | EL1PCTEN (bit 0) 必须置ä½ã€‚ |
124 | 134 | ||
125 | - 一致性 | 135 | - 一致性 |
126 | é€šè¿‡å†…æ ¸å¯åŠ¨çš„所有 CPU åœ¨å†…æ ¸å…¥å£åœ°å€ä¸Šå¿…须处于相åŒçš„一致性域ä¸ã€‚ | 136 | é€šè¿‡å†…æ ¸å¯åŠ¨çš„所有 CPU åœ¨å†…æ ¸å…¥å£åœ°å€ä¸Šå¿…须处于相åŒçš„一致性域ä¸ã€‚ |
@@ -131,23 +141,40 @@ AArch64 å†…æ ¸å½“å‰æ²¡æœ‰æ供自解压代ç ï¼Œå› æ¤å¦‚果使用了压缩内
131 | åœ¨è¿›å…¥å†…æ ¸æ˜ åƒçš„异常级ä¸ï¼Œæ‰€æœ‰æž„架ä¸å¯å†™çš„系统寄å˜å™¨å¿…须通过软件 | 141 | åœ¨è¿›å…¥å†…æ ¸æ˜ åƒçš„异常级ä¸ï¼Œæ‰€æœ‰æž„架ä¸å¯å†™çš„系统寄å˜å™¨å¿…须通过软件 |
132 | 在一个更高的异常级别下åˆå§‹åŒ–,以防æ¢åœ¨ 未知 状æ€ä¸‹è¿è¡Œã€‚ | 142 | 在一个更高的异常级别下åˆå§‹åŒ–,以防æ¢åœ¨ 未知 状æ€ä¸‹è¿è¡Œã€‚ |
133 | 143 | ||
144 | 以上对于 CPU 模å¼ã€é«˜é€Ÿç¼“å˜ã€MMUã€æž¶æž„计时器ã€ä¸€è‡´æ€§ã€ç³»ç»Ÿå¯„å˜å™¨çš„ | ||
145 | å¿…è¦æ¡ä»¶æ述适用于所有 CPU。所有 CPU 必须在åŒä¸€å¼‚å¸¸çº§åˆ«è·³å…¥å†…æ ¸ã€‚ | ||
146 | |||
134 | 引导装载程åºå¿…须在æ¯ä¸ª CPU 处于以下状æ€æ—¶è·³å…¥å†…æ ¸å…¥å£ï¼š | 147 | 引导装载程åºå¿…须在æ¯ä¸ª CPU 处于以下状æ€æ—¶è·³å…¥å†…æ ¸å…¥å£ï¼š |
135 | 148 | ||
136 | - 主 CPU å¿…é¡»ç›´æŽ¥è·³å…¥å†…æ ¸æ˜ åƒçš„第一æ¡æŒ‡ä»¤ã€‚é€šè¿‡æ¤ CPU ä¼ é€’çš„è®¾å¤‡æ ‘ | 149 | - 主 CPU å¿…é¡»ç›´æŽ¥è·³å…¥å†…æ ¸æ˜ åƒçš„第一æ¡æŒ‡ä»¤ã€‚é€šè¿‡æ¤ CPU ä¼ é€’çš„è®¾å¤‡æ ‘ |
137 | æ•°æ®å—必须在æ¯ä¸ª CPU 节点ä¸åŒ…å«ä»¥ä¸‹å†…容: | 150 | æ•°æ®å—必须在æ¯ä¸ª CPU 节点ä¸åŒ…å«ä¸€ä¸ª ‘enable-method’ 属性,所 |
138 | 151 | 支æŒçš„ enable-method 请è§ä¸‹æ–‡ã€‚ | |
139 | 1ã€â€˜enable-method’属性。目å‰ï¼Œæ¤å—段支æŒçš„值仅为å—符串“spin-tableâ€ã€‚ | ||
140 | |||
141 | 2ã€â€˜cpu-release-addrâ€™æ ‡è¯†ä¸€ä¸ª 64-bitã€åˆå§‹åŒ–为零的内å˜ä½ç½®ã€‚ | ||
142 | 152 | ||
143 | 引导装载程åºå¿…须生æˆè¿™äº›è®¾å¤‡æ ‘å±žæ€§ï¼Œå¹¶åœ¨è·³å…¥å†…æ ¸å…¥å£ä¹‹å‰å°†å…¶æ’å…¥ | 153 | 引导装载程åºå¿…须生æˆè¿™äº›è®¾å¤‡æ ‘å±žæ€§ï¼Œå¹¶åœ¨è·³å…¥å†…æ ¸å…¥å£ä¹‹å‰å°†å…¶æ’å…¥ |
144 | æ•°æ®å—。 | 154 | æ•°æ®å—。 |
145 | 155 | ||
146 | - 任何辅助 CPU 必须在内å˜ä¿ç•™åŒºï¼ˆé€šè¿‡è®¾å¤‡æ ‘ä¸çš„ /memreserve/ åŸŸä¼ é€’ | 156 | - enable-method 为 “spin-table†的 CPU 必须在它们的 CPU |
157 | 节点ä¸åŒ…å«ä¸€ä¸ª ‘cpu-release-addr’ å±žæ€§ã€‚è¿™ä¸ªå±žæ€§æ ‡è¯†äº†ä¸€ä¸ª | ||
158 | 64 ä½è‡ªç„¶å¯¹é½ä¸”åˆå§‹åŒ–为零的内å˜ä½ç½®ã€‚ | ||
159 | |||
160 | 这些 CPU 必须在内å˜ä¿ç•™åŒºï¼ˆé€šè¿‡è®¾å¤‡æ ‘ä¸çš„ /memreserve/ åŸŸä¼ é€’ | ||
147 | ç»™å†…æ ¸ï¼‰ä¸è‡ªæ—‹äºŽå†…æ ¸ä¹‹å¤–ï¼Œè½®è¯¢å®ƒä»¬çš„ cpu-release-addr ä½ç½®ï¼ˆå¿…é¡» | 161 | ç»™å†…æ ¸ï¼‰ä¸è‡ªæ—‹äºŽå†…æ ¸ä¹‹å¤–ï¼Œè½®è¯¢å®ƒä»¬çš„ cpu-release-addr ä½ç½®ï¼ˆå¿…é¡» |
148 | 包å«åœ¨ä¿ç•™åŒºä¸ï¼‰ã€‚å¯é€šè¿‡æ’å…¥ wfe 指令æ¥é™ä½Žå¿™å¾ªçŽ¯å¼€é”€ï¼Œè€Œä¸» CPU å°† | 162 | 包å«åœ¨ä¿ç•™åŒºä¸ï¼‰ã€‚å¯é€šè¿‡æ’å…¥ wfe 指令æ¥é™ä½Žå¿™å¾ªçŽ¯å¼€é”€ï¼Œè€Œä¸» CPU å°† |
149 | å‘出 sev 指令。当对 cpu-release-addr 所指ä½ç½®çš„读å–æ“作返回éžé›¶å€¼ | 163 | å‘出 sev 指令。当对 cpu-release-addr 所指ä½ç½®çš„读å–æ“作返回éžé›¶å€¼ |
150 | 时,CPU 必须直接跳入æ¤å€¼æ‰€æŒ‡å‘的地å€ã€‚ | 164 | 时,CPU 必须跳入æ¤å€¼æ‰€æŒ‡å‘的地å€ã€‚æ¤å€¼ä¸ºä¸€ä¸ªå•ç‹¬çš„ 64 ä½å°ç«¯å€¼ï¼Œ |
165 | å› æ¤ CPU 须在跳转å‰å°†æ‰€è¯»å–的值转æ¢ä¸ºå…¶æœ¬èº«çš„端模å¼ã€‚ | ||
166 | |||
167 | - enable-method 为 “psci†的 CPU ä¿æŒåœ¨å†…æ ¸å¤–ï¼ˆæ¯”å¦‚ï¼Œåœ¨ | ||
168 | memory 节点ä¸æè¿°ä¸ºå†…æ ¸ç©ºé—´çš„å†…å˜åŒºå¤–ï¼Œæˆ–åœ¨é€šè¿‡è®¾å¤‡æ ‘ /memreserve/ | ||
169 | 域ä¸æè¿°ä¸ºå†…æ ¸ä¿ç•™åŒºçš„空间ä¸ï¼‰ã€‚å†…æ ¸å°†ä¼šå‘起在 ARM æ–‡æ¡£ï¼ˆç¼–å· | ||
170 | ARM DEN 0022A:用于 ARM 上的电æºçŠ¶æ€å调接å£ç³»ç»Ÿè½¯ä»¶ï¼‰ä¸æè¿°çš„ | ||
171 | CPU_ON 调用æ¥å°† CPU å¸¦å…¥å†…æ ¸ã€‚ | ||
172 | |||
173 | *译者注:到文档翻译时,æ¤æ–‡æ¡£å·²æ›´æ–°ä¸º ARM DEN 0022B。 | ||
174 | |||
175 | è®¾å¤‡æ ‘å¿…é¡»åŒ…å«ä¸€ä¸ª ‘psci’ 节点,请å‚考以下文档: | ||
176 | Documentation/devicetree/bindings/arm/psci.txt | ||
177 | |||
151 | 178 | ||
152 | - 辅助 CPU 通用寄å˜å™¨è®¾ç½® | 179 | - 辅助 CPU 通用寄å˜å™¨è®¾ç½® |
153 | x0 = 0 (ä¿ç•™ï¼Œå°†æ¥å¯èƒ½ä½¿ç”¨) | 180 | x0 = 0 (ä¿ç•™ï¼Œå°†æ¥å¯èƒ½ä½¿ç”¨) |
diff --git a/Documentation/zh_CN/arm64/memory.txt b/Documentation/zh_CN/arm64/memory.txt
index a5f6283829f9..a782704c1cb5 100644
--- a/Documentation/zh_CN/arm64/memory.txt
+++ b/Documentation/zh_CN/arm64/memory.txt
@@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated
7 | or if there is a problem with the translation. | 7 | or if there is a problem with the translation. |
8 | 8 | ||
9 | Maintainer: Catalin Marinas <catalin.marinas@arm.com> | 9 | Maintainer: Catalin Marinas <catalin.marinas@arm.com> |
10 | Chinese maintainer: Fu Wei <tekkamanninja@gmail.com> | 10 | Chinese maintainer: Fu Wei <wefu@redhat.com> |
11 | --------------------------------------------------------------------- | 11 | --------------------------------------------------------------------- |
12 | Documentation/arm64/memory.txt çš„ä¸æ–‡ç¿»è¯‘ | 12 | Documentation/arm64/memory.txt çš„ä¸æ–‡ç¿»è¯‘ |
13 | 13 | ||
@@ -16,9 +16,9 @@ Documentation/arm64/memory.txt çš„ä¸æ–‡ç¿»è¯‘
16 | 译å˜åœ¨é—®é¢˜ï¼Œè¯·è”ç³»ä¸æ–‡ç‰ˆç»´æŠ¤è€…。 | 16 | 译å˜åœ¨é—®é¢˜ï¼Œè¯·è”ç³»ä¸æ–‡ç‰ˆç»´æŠ¤è€…。 |
17 | 17 | ||
18 | 英文版维护者: Catalin Marinas <catalin.marinas@arm.com> | 18 | 英文版维护者: Catalin Marinas <catalin.marinas@arm.com> |
19 | ä¸æ–‡ç‰ˆç»´æŠ¤è€…: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> | 19 | ä¸æ–‡ç‰ˆç»´æŠ¤è€…: å‚…ç‚œ Fu Wei <wefu@redhat.com> |
20 | ä¸æ–‡ç‰ˆç¿»è¯‘者: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> | 20 | ä¸æ–‡ç‰ˆç¿»è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> |
21 | ä¸æ–‡ç‰ˆæ ¡è¯‘者: å‚…ç‚œ Fu Wei <tekkamanninja@gmail.com> | 21 | ä¸æ–‡ç‰ˆæ ¡è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> |
22 | 22 | ||
23 | 以下为æ£æ–‡ | 23 | 以下为æ£æ–‡ |
24 | --------------------------------------------------------------------- | 24 | --------------------------------------------------------------------- |
@@ -41,7 +41,7 @@ AArch64 Linux 使用页大å°ä¸º 4KB çš„ 3 级转æ¢è¡¨é…置,对于用户和å
41 | TTBR1 ä¸ï¼Œä¸”从ä¸å†™å…¥ TTBR0。 | 41 | TTBR1 ä¸ï¼Œä¸”从ä¸å†™å…¥ TTBR0。 |
42 | 42 | ||
43 | 43 | ||
44 | AArch64 Linux 内å˜å¸ƒå±€ï¼š | 44 | AArch64 Linux 在页大å°ä¸º 4KB 时的内å˜å¸ƒå±€ï¼š |
45 | 45 | ||
46 | èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€” | 46 | èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€” |
47 | ----------------------------------------------------------------------- | 47 | ----------------------------------------------------------------------- |
@@ -55,15 +55,42 @@ ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
55 | 55 | ||
56 | ffffffbe00000000 ffffffbffbbfffff ~8GB [防护页,未æ¥ç”¨äºŽ vmmemap] | 56 | ffffffbe00000000 ffffffbffbbfffff ~8GB [防护页,未æ¥ç”¨äºŽ vmmemap] |
57 | 57 | ||
58 | ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk 设备 | ||
59 | |||
58 | ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O 空间 | 60 | ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O 空间 |
59 | 61 | ||
60 | ffffffbbffff0000 ffffffbcffffffff ~2MB [防护页] | 62 | ffffffbffbe10000 ffffffbcffffffff ~2MB [防护页] |
61 | 63 | ||
62 | ffffffbffc000000 ffffffbfffffffff 64MB æ¨¡å— | 64 | ffffffbffc000000 ffffffbfffffffff 64MB æ¨¡å— |
63 | 65 | ||
64 | ffffffc000000000 ffffffffffffffff 256GB å†…æ ¸é€»è¾‘å†…å˜æ˜ å°„ | 66 | ffffffc000000000 ffffffffffffffff 256GB å†…æ ¸é€»è¾‘å†…å˜æ˜ å°„ |
65 | 67 | ||
66 | 68 | ||
69 | AArch64 Linux 在页大å°ä¸º 64KB 时的内å˜å¸ƒå±€ï¼š | ||
70 | |||
71 | èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€” | ||
72 | ----------------------------------------------------------------------- | ||
73 | 0000000000000000 000003ffffffffff 4TB 用户空间 | ||
74 | |||
75 | fffffc0000000000 fffffdfbfffeffff ~2TB vmalloc | ||
76 | |||
77 | fffffdfbffff0000 fffffdfbffffffff 64KB [防护页] | ||
78 | |||
79 | fffffdfc00000000 fffffdfdffffffff 8GB vmemmap | ||
80 | |||
81 | fffffdfe00000000 fffffdfffbbfffff ~8GB [防护页,未æ¥ç”¨äºŽ vmmemap] | ||
82 | |||
83 | fffffdfffbc00000 fffffdfffbdfffff 2MB earlyprintk 设备 | ||
84 | |||
85 | fffffdfffbe00000 fffffdfffbe0ffff 64KB PCI I/O 空间 | ||
86 | |||
87 | fffffdfffbe10000 fffffdfffbffffff ~2MB [防护页] | ||
88 | |||
89 | fffffdfffc000000 fffffdffffffffff 64MB æ¨¡å— | ||
90 | |||
91 | fffffe0000000000 ffffffffffffffff 2TB å†…æ ¸é€»è¾‘å†…å˜æ˜ å°„ | ||
92 | |||
93 | |||
67 | 4KB 页大å°çš„转æ¢è¡¨æŸ¥æ‰¾ï¼š | 94 | 4KB 页大å°çš„转æ¢è¡¨æŸ¥æ‰¾ï¼š |
68 | 95 | ||
69 | +--------+--------+--------+--------+--------+--------+--------+--------+ | 96 | +--------+--------+--------+--------+--------+--------+--------+--------+ |
@@ -91,3 +118,10 @@ ffffffc000000000 ffffffffffffffff 256GB å†…æ ¸é€»è¾‘å†…å˜æ˜ å°„
91 | | | +--------------------------> [41:29] L2 索引 (仅使用 38:29 ) | 118 | | | +--------------------------> [41:29] L2 索引 (仅使用 38:29 ) |
92 | | +-------------------------------> [47:42] L1 索引 (未使用) | 119 | | +-------------------------------> [47:42] L1 索引 (未使用) |
93 | +-------------------------------------------------> [63] TTBR0/1 | 120 | +-------------------------------------------------> [63] TTBR0/1 |
121 | |||
122 | 当使用 KVM æ—¶, 管ç†ç¨‹åºï¼ˆhypervisor)在 EL2 ä¸é€šè¿‡ç›¸å¯¹å†…æ ¸è™šæ‹Ÿåœ°å€çš„ | ||
123 | 一个固定å移æ¥æ˜ å°„å†…æ ¸é¡µï¼ˆå†…æ ¸è™šæ‹Ÿåœ°å€çš„高 24 ä½è®¾ä¸ºé›¶ï¼‰: | ||
124 | |||
125 | èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€” | ||
126 | ----------------------------------------------------------------------- | ||
127 | 0000004000000000 0000007fffffffff 256GB 在 HYP ä¸æ˜ å°„çš„å†…æ ¸å¯¹è±¡ | ||
diff --git a/Documentation/zh_CN/arm64/tagged-pointers.txt b/Documentation/zh_CN/arm64/tagged-pointers.txt
new file mode 100644
index 000000000000..2664d1bd5a1c
--- /dev/null
+++ b/Documentation/zh_CN/arm64/tagged-pointers.txt
@@ -0,0 +1,52 @@
1 | Chinese translated version of Documentation/arm64/tagged-pointers.txt | ||
2 | |||
3 | If you have any comment or update to the content, please contact the | ||
4 | original document maintainer directly. However, if you have a problem | ||
5 | communicating in English you can also ask the Chinese maintainer for | ||
6 | help. Contact the Chinese maintainer if this translation is outdated | ||
7 | or if there is a problem with the translation. | ||
8 | |||
9 | Maintainer: Will Deacon <will.deacon@arm.com> | ||
10 | Chinese maintainer: Fu Wei <wefu@redhat.com> | ||
11 | --------------------------------------------------------------------- | ||
12 | Documentation/arm64/tagged-pointers.txt çš„ä¸æ–‡ç¿»è¯‘ | ||
13 | |||
14 | 如果想评论或更新本文的内容,请直接è”ç³»åŽŸæ–‡æ¡£çš„ç»´æŠ¤è€…ã€‚å¦‚æžœä½ ä½¿ç”¨è‹±æ–‡ | ||
15 | 交æµæœ‰å›°éš¾çš„è¯ï¼Œä¹Ÿå¯ä»¥å‘ä¸æ–‡ç‰ˆç»´æŠ¤è€…求助。如果本翻译更新ä¸åŠæ—¶æˆ–者翻 | ||
16 | 译å˜åœ¨é—®é¢˜ï¼Œè¯·è”ç³»ä¸æ–‡ç‰ˆç»´æŠ¤è€…。 | ||
17 | |||
18 | 英文版维护者: Will Deacon <will.deacon@arm.com> | ||
19 | ä¸æ–‡ç‰ˆç»´æŠ¤è€…: å‚…ç‚œ Fu Wei <wefu@redhat.com> | ||
20 | ä¸æ–‡ç‰ˆç¿»è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> | ||
21 | ä¸æ–‡ç‰ˆæ ¡è¯‘者: å‚…ç‚œ Fu Wei <wefu@redhat.com> | ||
22 | |||
23 | 以下为æ£æ–‡ | ||
24 | --------------------------------------------------------------------- | ||
25 | Linux 在 AArch64 ä¸å¸¦æ ‡è®°çš„è™šæ‹Ÿåœ°å€ | ||
26 | ================================= | ||
27 | |||
28 | 作者: Will Deacon <will.deacon@arm.com> | ||
29 | 日期: 2013 年 06 月 12 日 | ||
30 | |||
31 | 本文档简述了在 AArch64 地å€è½¬æ¢ç³»ç»Ÿä¸æä¾›çš„å¸¦æ ‡è®°çš„è™šæ‹Ÿåœ°å€åŠå…¶åœ¨ | ||
32 | AArch64 Linux ä¸çš„潜在用途。 | ||
33 | |||
34 | å†…æ ¸æ供的地å€è½¬æ¢è¡¨é…置使通过 TTBR0 完æˆçš„虚拟地å€è½¬æ¢ï¼ˆå³ç”¨æˆ·ç©ºé—´ | ||
35 | æ˜ å°„ï¼‰ï¼Œå…¶è™šæ‹Ÿåœ°å€çš„最高 8 ä½ï¼ˆ63:56)会被转æ¢ç¡¬ä»¶æ‰€å¿½ç•¥ã€‚è¿™ç§æœºåˆ¶ | ||
36 | 让这些ä½å¯ä¾›åº”用程åºè‡ªç”±ä½¿ç”¨ï¼Œå…¶æ³¨æ„事项如下: | ||
37 | |||
38 | (1) å†…æ ¸è¦æ±‚æ‰€æœ‰ä¼ é€’åˆ° EL1 的用户空间地å€å¸¦æœ‰ 0x00 æ ‡è®°ã€‚ | ||
39 | è¿™æ„味ç€ä»»ä½•æºå¸¦ç”¨æˆ·ç©ºé—´è™šæ‹Ÿåœ°å€çš„系统调用(syscall) | ||
40 | å‚æ•° *å¿…é¡»* åœ¨é™·å…¥å†…æ ¸å‰ä½¿å®ƒä»¬çš„最高å—节被清零。 | ||
41 | |||
42 | (2) éžé›¶æ ‡è®°åœ¨ä¼ 递信å·æ—¶ä¸è¢«ä¿å˜ã€‚è¿™æ„味ç€åœ¨åº”用程åºä¸åˆ©ç”¨äº† | ||
43 | æ ‡è®°çš„ä¿¡å·å¤„ç†å‡½æ•°æ— 法ä¾èµ– siginfo_t 的用户空间虚拟 | ||
44 | 地å€æ‰€æºå¸¦çš„包å«å…¶å†…部域信æ¯çš„æ ‡è®°ã€‚æ¤è§„则的一个例外是 | ||
45 | 当信å·æ˜¯åœ¨è°ƒè¯•è§‚察点的异常处ç†ç¨‹åºä¸äº§ç”Ÿçš„,æ¤æ—¶æ ‡è®°çš„ | ||
46 | ä¿¡æ¯å°†è¢«ä¿å˜ã€‚ | ||
47 | |||
48 | (3) å½“ä½¿ç”¨å¸¦æ ‡è®°çš„æŒ‡é’ˆæ—¶éœ€ç‰¹åˆ«ç•™å¿ƒï¼Œå› ä¸ºä»…å¯¹ä¸¤ä¸ªè™šæ‹Ÿåœ°å€ | ||
49 | 的高å—节,C 编译器很å¯èƒ½æ— 法判æ–它们是ä¸åŒçš„。 | ||
50 | |||
51 | æ¤æž„架会阻æ¢å¯¹å¸¦æ ‡è®°çš„ PC æŒ‡é’ˆçš„åˆ©ç”¨ï¼Œå› æ¤åœ¨å¼‚常返回时,其高å—节 | ||
52 | 将被设置æˆä¸€ä¸ªä¸º “55†的扩展符。 | ||
diff --git a/MAINTAINERS b/MAINTAINERS
index 5a7b3ecba94c..db8bb0d2379e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2373,7 +2373,7 @@ F: include/linux/cpufreq.h
2373 | 2373 | ||
2374 | CPU FREQUENCY DRIVERS - ARM BIG LITTLE | 2374 | CPU FREQUENCY DRIVERS - ARM BIG LITTLE |
2375 | M: Viresh Kumar <viresh.kumar@linaro.org> | 2375 | M: Viresh Kumar <viresh.kumar@linaro.org> |
2376 | M: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> | 2376 | M: Sudeep Holla <sudeep.holla@arm.com> |
2377 | L: cpufreq@vger.kernel.org | 2377 | L: cpufreq@vger.kernel.org |
2378 | L: linux-pm@vger.kernel.org | 2378 | L: linux-pm@vger.kernel.org |
2379 | W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php | 2379 | W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php |
@@ -2863,7 +2863,7 @@ M: Jani Nikula <jani.nikula@linux.intel.com>
2863 | L: intel-gfx@lists.freedesktop.org | 2863 | L: intel-gfx@lists.freedesktop.org |
2864 | L: dri-devel@lists.freedesktop.org | 2864 | L: dri-devel@lists.freedesktop.org |
2865 | Q: http://patchwork.freedesktop.org/project/intel-gfx/ | 2865 | Q: http://patchwork.freedesktop.org/project/intel-gfx/ |
2866 | T: git git://people.freedesktop.org/~danvet/drm-intel | 2866 | T: git git://anongit.freedesktop.org/drm-intel |
2867 | S: Supported | 2867 | S: Supported |
2868 | F: drivers/gpu/drm/i915/ | 2868 | F: drivers/gpu/drm/i915/ |
2869 | F: include/drm/i915* | 2869 | F: include/drm/i915* |
@@ -3330,6 +3330,17 @@ S: Maintained
3330 | F: include/linux/netfilter_bridge/ | 3330 | F: include/linux/netfilter_bridge/ |
3331 | F: net/bridge/ | 3331 | F: net/bridge/ |
3332 | 3332 | ||
3333 | ETHERNET PHY LIBRARY | ||
3334 | M: Florian Fainelli <f.fainelli@gmail.com> | ||
3335 | L: netdev@vger.kernel.org | ||
3336 | S: Maintained | ||
3337 | F: include/linux/phy.h | ||
3338 | F: include/linux/phy_fixed.h | ||
3339 | F: drivers/net/phy/ | ||
3340 | F: Documentation/networking/phy.txt | ||
3341 | F: drivers/of/of_mdio.c | ||
3342 | F: drivers/of/of_net.c | ||
3343 | |||
3333 | EXT2 FILE SYSTEM | 3344 | EXT2 FILE SYSTEM |
3334 | M: Jan Kara <jack@suse.cz> | 3345 | M: Jan Kara <jack@suse.cz> |
3335 | L: linux-ext4@vger.kernel.org | 3346 | L: linux-ext4@vger.kernel.org |
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 14 | 2 | PATCHLEVEL = 14 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc2 | 4 | EXTRAVERSION = -rc3 |
5 | NAME = Shuffling Zombie Juror | 5 | NAME = Shuffling Zombie Juror |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index b9d6a8b485e0..6d1e43d46187 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -38,6 +38,7 @@ dtb-$(CONFIG_ARCH_AT91) += at91sam9g35ek.dtb
38 | dtb-$(CONFIG_ARCH_AT91) += at91sam9x25ek.dtb | 38 | dtb-$(CONFIG_ARCH_AT91) += at91sam9x25ek.dtb |
39 | dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb | 39 | dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb |
40 | # sama5d3 | 40 | # sama5d3 |
41 | dtb-$(CONFIG_ARCH_AT91) += at91-sama5d3_xplained.dtb | ||
41 | dtb-$(CONFIG_ARCH_AT91) += sama5d31ek.dtb | 42 | dtb-$(CONFIG_ARCH_AT91) += sama5d31ek.dtb |
42 | dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb | 43 | dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb |
43 | dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb | 44 | dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb |
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
new file mode 100644
index 000000000000..ce1375595e5f
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -0,0 +1,229 @@
1 | /* | ||
2 | * at91-sama5d3_xplained.dts - Device Tree file for the SAMA5D3 Xplained board | ||
3 | * | ||
4 | * Copyright (C) 2014 Atmel, | ||
5 | * 2014 Nicolas Ferre <nicolas.ferre@atmel.com> | ||
6 | * | ||
7 | * Licensed under GPLv2 or later. | ||
8 | */ | ||
9 | /dts-v1/; | ||
10 | #include "sama5d36.dtsi" | ||
11 | |||
12 | / { | ||
13 | model = "SAMA5D3 Xplained"; | ||
14 | compatible = "atmel,sama5d3-xplained", "atmel,sama5d3", "atmel,sama5"; | ||
15 | |||
16 | chosen { | ||
17 | bootargs = "console=ttyS0,115200"; | ||
18 | }; | ||
19 | |||
20 | memory { | ||
21 | reg = <0x20000000 0x10000000>; | ||
22 | }; | ||
23 | |||
24 | ahb { | ||
25 | apb { | ||
26 | mmc0: mmc@f0000000 { | ||
27 | pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_dat4_7 &pinctrl_mmc0_cd>; | ||
28 | status = "okay"; | ||
29 | slot@0 { | ||
30 | reg = <0>; | ||
31 | bus-width = <8>; | ||
32 | cd-gpios = <&pioE 0 GPIO_ACTIVE_LOW>; | ||
33 | }; | ||
34 | }; | ||
35 | |||
36 | spi0: spi@f0004000 { | ||
37 | cs-gpios = <&pioD 13 0>; | ||
38 | status = "okay"; | ||
39 | }; | ||
40 | |||
41 | can0: can@f000c000 { | ||
42 | status = "okay"; | ||
43 | }; | ||
44 | |||
45 | i2c0: i2c@f0014000 { | ||
46 | status = "okay"; | ||
47 | }; | ||
48 | |||
49 | i2c1: i2c@f0018000 { | ||
50 | status = "okay"; | ||
51 | }; | ||
52 | |||
53 | macb0: ethernet@f0028000 { | ||
54 | phy-mode = "rgmii"; | ||
55 | status = "okay"; | ||
56 | }; | ||
57 | |||
58 | usart0: serial@f001c000 { | ||
59 | status = "okay"; | ||
60 | }; | ||
61 | |||
62 | usart1: serial@f0020000 { | ||
63 | pinctrl-0 = <&pinctrl_usart1 &pinctrl_usart1_rts_cts>; | ||
64 | status = "okay"; | ||
65 | }; | ||
66 | |||
67 | uart0: serial@f0024000 { | ||
68 | status = "okay"; | ||
69 | }; | ||
70 | |||
71 | mmc1: mmc@f8000000 { | ||
72 | pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3 &pinctrl_mmc1_cd>; | ||
73 | status = "okay"; | ||
74 | slot@0 { | ||
75 | reg = <0>; | ||
76 | bus-width = <4>; | ||
77 | cd-gpios = <&pioE 1 GPIO_ACTIVE_HIGH>; | ||
78 | }; | ||
79 | }; | ||
80 | |||
81 | spi1: spi@f8008000 { | ||
82 | cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>; | ||
83 | status = "okay"; | ||
84 | }; | ||
85 | |||
86 | adc0: adc@f8018000 { | ||
87 | pinctrl-0 = < | ||
88 | &pinctrl_adc0_adtrg | ||
89 | &pinctrl_adc0_ad0 | ||
90 | &pinctrl_adc0_ad1 | ||
91 | &pinctrl_adc0_ad2 | ||
92 | &pinctrl_adc0_ad3 | ||
93 | &pinctrl_adc0_ad4 | ||
94 | &pinctrl_adc0_ad5 | ||
95 | &pinctrl_adc0_ad6 | ||
96 | &pinctrl_adc0_ad7 | ||
97 | &pinctrl_adc0_ad8 | ||
98 | &pinctrl_adc0_ad9 | ||
99 | >; | ||
100 | status = "okay"; | ||
101 | }; | ||
102 | |||
103 | i2c2: i2c@f801c000 { | ||
104 | dmas = <0>, <0>; /* Do not use DMA for i2c2 */ | ||
105 | status = "okay"; | ||
106 | }; | ||
107 | |||
108 | macb1: ethernet@f802c000 { | ||
109 | phy-mode = "rmii"; | ||
110 | status = "okay"; | ||
111 | }; | ||
112 | |||
113 | dbgu: serial@ffffee00 { | ||
114 | status = "okay"; | ||
115 | }; | ||
116 | |||
117 | pinctrl@fffff200 { | ||
118 | board { | ||
119 | pinctrl_mmc0_cd: mmc0_cd { | ||
120 | atmel,pins = | ||
121 | <AT91_PIOE 0 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; | ||
122 | }; | ||
123 | |||
124 | pinctrl_mmc1_cd: mmc1_cd { | ||
125 | atmel,pins = | ||
126 | <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; | ||
127 | }; | ||
128 | |||
129 | pinctrl_usba_vbus: usba_vbus { | ||
130 | atmel,pins = | ||
131 | <AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PE9, conflicts with A9 */ | ||
132 | }; | ||
133 | }; | ||
134 | }; | ||
135 | |||
136 | pmc: pmc@fffffc00 { | ||
137 | main: mainck { | ||
138 | clock-frequency = <12000000>; | ||
139 | }; | ||
140 | }; | ||
141 | }; | ||
142 | |||
143 | nand0: nand@60000000 { | ||
144 | nand-bus-width = <8>; | ||
145 | nand-ecc-mode = "hw"; | ||
146 | atmel,has-pmecc; | ||
147 | atmel,pmecc-cap = <4>; | ||
148 | atmel,pmecc-sector-size = <512>; | ||
149 | nand-on-flash-bbt; | ||
150 | status = "okay"; | ||
151 | |||
152 | at91bootstrap@0 { | ||
153 | label = "at91bootstrap"; | ||
154 | reg = <0x0 0x40000>; | ||
155 | }; | ||
156 | |||
157 | bootloader@40000 { | ||
158 | label = "bootloader"; | ||
159 | reg = <0x40000 0x80000>; | ||
160 | }; | ||
161 | |||
162 | bootloaderenv@c0000 { | ||
163 | label = "bootloader env"; | ||
164 | reg = <0xc0000 0xc0000>; | ||
165 | }; | ||
166 | |||
167 | dtb@180000 { | ||
168 | label = "device tree"; | ||
169 | reg = <0x180000 0x80000>; | ||
170 | }; | ||
171 | |||
172 | kernel@200000 { | ||
173 | label = "kernel"; | ||
174 | reg = <0x200000 0x600000>; | ||
175 | }; | ||
176 | |||
177 | rootfs@800000 { | ||
178 | label = "rootfs"; | ||
179 | reg = <0x800000 0x0f800000>; | ||
180 | }; | ||
181 | }; | ||
182 | |||
183 | usb0: gadget@00500000 { | ||
184 | atmel,vbus-gpio = <&pioE 9 GPIO_ACTIVE_HIGH>; /* PE9, conflicts with A9 */ | ||
185 | pinctrl-names = "default"; | ||
186 | pinctrl-0 = <&pinctrl_usba_vbus>; | ||
187 | status = "okay"; | ||
188 | }; | ||
189 | |||
190 | usb1: ohci@00600000 { | ||
191 | num-ports = <3>; | ||
192 | atmel,vbus-gpio = <0 | ||
193 | &pioE 3 GPIO_ACTIVE_LOW | ||
194 | &pioE 4 GPIO_ACTIVE_LOW | ||
195 | >; | ||
196 | status = "okay"; | ||
197 | }; | ||
198 | |||
199 | usb2: ehci@00700000 { | ||
200 | status = "okay"; | ||
201 | }; | ||
202 | }; | ||
203 | |||
204 | gpio_keys { | ||
205 | compatible = "gpio-keys"; | ||
206 | |||
207 | bp3 { | ||
208 | label = "PB_USER"; | ||
209 | gpios = <&pioE 29 GPIO_ACTIVE_LOW>; | ||
210 | linux,code = <0x104>; | ||
211 | gpio-key,wakeup; | ||
212 | }; | ||
213 | }; | ||
214 | |||
215 | leds { | ||
216 | compatible = "gpio-leds"; | ||
217 | |||
218 | d2 { | ||
219 | label = "d2"; | ||
220 | gpios = <&pioE 23 GPIO_ACTIVE_LOW>; /* PE23, conflicts with A23, CTS2 */ | ||
221 | linux,default-trigger = "heartbeat"; | ||
222 | }; | ||
223 | |||
224 | d3 { | ||
225 | label = "d3"; | ||
226 | gpios = <&pioE 24 GPIO_ACTIVE_HIGH>; | ||
227 | }; | ||
228 | }; | ||
229 | }; | ||
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 0042f73068b0..fece8665fb63 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -523,7 +523,7 @@
523 | }; | 523 | }; |
524 | 524 | ||
525 | i2c0: i2c@fff88000 { | 525 | i2c0: i2c@fff88000 { |
526 | compatible = "atmel,at91sam9263-i2c"; | 526 | compatible = "atmel,at91sam9260-i2c"; |
527 | reg = <0xfff88000 0x100>; | 527 | reg = <0xfff88000 0x100>; |
528 | interrupts = <13 IRQ_TYPE_LEVEL_HIGH 6>; | 528 | interrupts = <13 IRQ_TYPE_LEVEL_HIGH 6>; |
529 | #address-cells = <1>; | 529 | #address-cells = <1>; |
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index e9487f6f0166..924a6a6ffd0f 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -124,6 +124,10 @@
124 | nand-on-flash-bbt; | 124 | nand-on-flash-bbt; |
125 | status = "okay"; | 125 | status = "okay"; |
126 | }; | 126 | }; |
127 | |||
128 | usb0: ohci@00500000 { | ||
129 | status = "okay"; | ||
130 | }; | ||
127 | }; | 131 | }; |
128 | 132 | ||
129 | leds { | 133 | leds { |
diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
index fd8fc7cd53f3..5bfae54fb780 100644
--- a/arch/arm/boot/dts/imx6dl-hummingboard.dts
+++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts
@@ -52,12 +52,6 @@
52 | }; | 52 | }; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | codec: spdif-transmitter { | ||
56 | compatible = "linux,spdif-dit"; | ||
57 | pinctrl-names = "default"; | ||
58 | pinctrl-0 = <&pinctrl_hummingboard_spdif>; | ||
59 | }; | ||
60 | |||
61 | sound-spdif { | 55 | sound-spdif { |
62 | compatible = "fsl,imx-audio-spdif"; | 56 | compatible = "fsl,imx-audio-spdif"; |
63 | model = "imx-spdif"; | 57 | model = "imx-spdif"; |
@@ -111,7 +105,7 @@
111 | }; | 105 | }; |
112 | 106 | ||
113 | pinctrl_hummingboard_spdif: hummingboard-spdif { | 107 | pinctrl_hummingboard_spdif: hummingboard-spdif { |
114 | fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; | 108 | fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; |
115 | }; | 109 | }; |
116 | 110 | ||
117 | pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus { | 111 | pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus { |
@@ -142,6 +136,8 @@
142 | }; | 136 | }; |
143 | 137 | ||
144 | &spdif { | 138 | &spdif { |
139 | pinctrl-names = "default"; | ||
140 | pinctrl-0 = <&pinctrl_hummingboard_spdif>; | ||
145 | status = "okay"; | 141 | status = "okay"; |
146 | }; | 142 | }; |
147 | 143 | ||
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 64daa3b311f6..c2a24888a276 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -46,12 +46,6 @@
46 | }; | 46 | }; |
47 | }; | 47 | }; |
48 | 48 | ||
49 | codec: spdif-transmitter { | ||
50 | compatible = "linux,spdif-dit"; | ||
51 | pinctrl-names = "default"; | ||
52 | pinctrl-0 = <&pinctrl_cubox_i_spdif>; | ||
53 | }; | ||
54 | |||
55 | sound-spdif { | 49 | sound-spdif { |
56 | compatible = "fsl,imx-audio-spdif"; | 50 | compatible = "fsl,imx-audio-spdif"; |
57 | model = "imx-spdif"; | 51 | model = "imx-spdif"; |
@@ -89,7 +83,7 @@
89 | }; | 83 | }; |
90 | 84 | ||
91 | pinctrl_cubox_i_spdif: cubox-i-spdif { | 85 | pinctrl_cubox_i_spdif: cubox-i-spdif { |
92 | fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; | 86 | fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; |
93 | }; | 87 | }; |
94 | 88 | ||
95 | pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus { | 89 | pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus { |
@@ -121,6 +115,8 @@
121 | }; | 115 | }; |
122 | 116 | ||
123 | &spdif { | 117 | &spdif { |
118 | pinctrl-names = "default"; | ||
119 | pinctrl-0 = <&pinctrl_cubox_i_spdif>; | ||
124 | status = "okay"; | 120 | status = "okay"; |
125 | }; | 121 | }; |
126 | 122 | ||
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 52447c17537a..3d5faf85f51b 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1228,7 +1228,7 @@
1228 | compatible = "atmel,at91rm9200-ohci", "usb-ohci"; | 1228 | compatible = "atmel,at91rm9200-ohci", "usb-ohci"; |
1229 | reg = <0x00600000 0x100000>; | 1229 | reg = <0x00600000 0x100000>; |
1230 | interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; | 1230 | interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; |
1231 | clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, | 1231 | clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, |
1232 | <&uhpck>; | 1232 | <&uhpck>; |
1233 | clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; | 1233 | clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; |
1234 | status = "disabled"; | 1234 | status = "disabled"; |
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 0c1e8d871ed1..6cb9b68e2188 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -188,7 +188,6 @@
188 | msp2: msp@80117000 { | 188 | msp2: msp@80117000 { |
189 | pinctrl-names = "default"; | 189 | pinctrl-names = "default"; |
190 | pinctrl-0 = <&msp2_default_mode>; | 190 | pinctrl-0 = <&msp2_default_mode>; |
191 | status = "okay"; | ||
192 | }; | 191 | }; |
193 | 192 | ||
194 | msp3: msp@80125000 { | 193 | msp3: msp@80125000 { |
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 845bc745706b..ee6982976d66 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -29,6 +29,7 @@ CONFIG_ARCH_OMAP3=y
29 | CONFIG_ARCH_OMAP4=y | 29 | CONFIG_ARCH_OMAP4=y |
30 | CONFIG_SOC_OMAP5=y | 30 | CONFIG_SOC_OMAP5=y |
31 | CONFIG_SOC_AM33XX=y | 31 | CONFIG_SOC_AM33XX=y |
32 | CONFIG_SOC_DRA7XX=y | ||
32 | CONFIG_SOC_AM43XX=y | 33 | CONFIG_SOC_AM43XX=y |
33 | CONFIG_ARCH_ROCKCHIP=y | 34 | CONFIG_ARCH_ROCKCHIP=y |
34 | CONFIG_ARCH_SOCFPGA=y | 35 | CONFIG_ARCH_SOCFPGA=y |
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e9a49fe0284e..8b8b61685a34 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
212 | static inline void __flush_icache_all(void) | 212 | static inline void __flush_icache_all(void) |
213 | { | 213 | { |
214 | __flush_icache_preferred(); | 214 | __flush_icache_preferred(); |
215 | dsb(); | ||
215 | } | 216 | } |
216 | 217 | ||
217 | /* | 218 | /* |
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 03243f7eeddf..85c60adc8b60 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -120,13 +120,16 @@
120 | /* | 120 | /* |
121 | * 2nd stage PTE definitions for LPAE. | 121 | * 2nd stage PTE definitions for LPAE. |
122 | */ | 122 | */ |
123 | #define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ | 123 | #define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */ |
124 | #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ | 124 | #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */ |
125 | #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ | 125 | #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */ |
126 | #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ | 126 | #define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */ |
127 | #define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ | 127 | #define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2) |
128 | 128 | ||
129 | #define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ | 129 | #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ |
130 | #define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ | ||
131 | |||
132 | #define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ | ||
130 | 133 | ||
131 | /* | 134 | /* |
132 | * Hyp-mode PL2 PTE definitions for LPAE. | 135 | * Hyp-mode PL2 PTE definitions for LPAE. |
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ef3c6072aa45..ac4bfae26702 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,18 +37,9 @@
37 | 37 | ||
38 | static inline void dsb_sev(void) | 38 | static inline void dsb_sev(void) |
39 | { | 39 | { |
40 | #if __LINUX_ARM_ARCH__ >= 7 | 40 | |
41 | __asm__ __volatile__ ( | 41 | dsb(ishst); |
42 | "dsb ishst\n" | 42 | __asm__(SEV); |
43 | SEV | ||
44 | ); | ||
45 | #else | ||
46 | __asm__ __volatile__ ( | ||
47 | "mcr p15, 0, %0, c7, c10, 4\n" | ||
48 | SEV | ||
49 | : : "r" (0) | ||
50 | ); | ||
51 | #endif | ||
52 | } | 43 | } |
53 | 44 | ||
54 | /* | 45 | /* |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b0df9761de6d..1e8b030dbefd 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
731 | kernel_data.end = virt_to_phys(_end - 1); | 731 | kernel_data.end = virt_to_phys(_end - 1); |
732 | 732 | ||
733 | for_each_memblock(memory, region) { | 733 | for_each_memblock(memory, region) { |
734 | res = memblock_virt_alloc_low(sizeof(*res), 0); | 734 | res = memblock_virt_alloc(sizeof(*res), 0); |
735 | res->name = "System RAM"; | 735 | res->name = "System RAM"; |
736 | res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); | 736 | res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); |
737 | res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; | 737 | res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; |
diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig
index 8f4649b301b2..1abae5f6a418 100644
--- a/arch/arm/mach-hisi/Kconfig
+++ b/arch/arm/mach-hisi/Kconfig
@@ -8,7 +8,7 @@ config ARCH_HI3xxx
8 | select CLKSRC_OF | 8 | select CLKSRC_OF |
9 | select GENERIC_CLOCKEVENTS | 9 | select GENERIC_CLOCKEVENTS |
10 | select HAVE_ARM_SCU | 10 | select HAVE_ARM_SCU |
11 | select HAVE_ARM_TWD | 11 | select HAVE_ARM_TWD if SMP |
12 | select HAVE_SMP | 12 | select HAVE_SMP |
13 | select PINCTRL | 13 | select PINCTRL |
14 | select PINCTRL_SINGLE | 14 | select PINCTRL_SINGLE |
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index af2e582d2b74..4d677f442539 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c | |||
@@ -482,6 +482,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) | |||
482 | if (IS_ENABLED(CONFIG_PCI_IMX6)) | 482 | if (IS_ENABLED(CONFIG_PCI_IMX6)) |
483 | clk_set_parent(clk[lvds1_sel], clk[sata_ref]); | 483 | clk_set_parent(clk[lvds1_sel], clk[sata_ref]); |
484 | 484 | ||
485 | /* Set initial power mode */ | ||
486 | imx6q_set_lpm(WAIT_CLOCKED); | ||
487 | |||
485 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); | 488 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); |
486 | base = of_iomap(np, 0); | 489 | base = of_iomap(np, 0); |
487 | WARN_ON(!base); | 490 | WARN_ON(!base); |
diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c index 3781a1853998..4c86f3035205 100644 --- a/arch/arm/mach-imx/clk-imx6sl.c +++ b/arch/arm/mach-imx/clk-imx6sl.c | |||
@@ -266,6 +266,9 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node) | |||
266 | /* Audio-related clocks configuration */ | 266 | /* Audio-related clocks configuration */ |
267 | clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]); | 267 | clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]); |
268 | 268 | ||
269 | /* Set initial power mode */ | ||
270 | imx6q_set_lpm(WAIT_CLOCKED); | ||
271 | |||
269 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt"); | 272 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt"); |
270 | base = of_iomap(np, 0); | 273 | base = of_iomap(np, 0); |
271 | WARN_ON(!base); | 274 | WARN_ON(!base); |
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c index 9d47adc078aa..7a9b98589db7 100644 --- a/arch/arm/mach-imx/pm-imx6q.c +++ b/arch/arm/mach-imx/pm-imx6q.c | |||
@@ -236,8 +236,6 @@ void __init imx6q_pm_init(void) | |||
236 | regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT, | 236 | regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT, |
237 | IMX6Q_GPR1_GINT); | 237 | IMX6Q_GPR1_GINT); |
238 | 238 | ||
239 | /* Set initial power mode */ | ||
240 | imx6q_set_lpm(WAIT_CLOCKED); | ||
241 | 239 | ||
242 | suspend_set_ops(&imx6q_pm_ops); | 240 | suspend_set_ops(&imx6q_pm_ops); |
243 | } | 241 | } |
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig index ba470d64493b..3795ae28a613 100644 --- a/arch/arm/mach-moxart/Kconfig +++ b/arch/arm/mach-moxart/Kconfig | |||
@@ -2,7 +2,6 @@ config ARCH_MOXART | |||
2 | bool "MOXA ART SoC" if ARCH_MULTI_V4T | 2 | bool "MOXA ART SoC" if ARCH_MULTI_V4T |
3 | select CPU_FA526 | 3 | select CPU_FA526 |
4 | select ARM_DMA_MEM_BUFFERABLE | 4 | select ARM_DMA_MEM_BUFFERABLE |
5 | select DMA_OF | ||
6 | select USE_OF | 5 | select USE_OF |
7 | select CLKSRC_OF | 6 | select CLKSRC_OF |
8 | select CLKSRC_MMIO | 7 | select CLKSRC_MMIO |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 653b489479e0..e2ce4f8366a7 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -54,7 +54,7 @@ config SOC_OMAP5 | |||
54 | select ARM_GIC | 54 | select ARM_GIC |
55 | select CPU_V7 | 55 | select CPU_V7 |
56 | select HAVE_ARM_SCU if SMP | 56 | select HAVE_ARM_SCU if SMP |
57 | select HAVE_ARM_TWD if LOCAL_TIMERS | 57 | select HAVE_ARM_TWD if SMP |
58 | select HAVE_SMP | 58 | select HAVE_SMP |
59 | select HAVE_ARM_ARCH_TIMER | 59 | select HAVE_ARM_ARCH_TIMER |
60 | select ARM_ERRATA_798181 if SMP | 60 | select ARM_ERRATA_798181 if SMP |
diff --git a/arch/arm/mach-pxa/am300epd.c b/arch/arm/mach-pxa/am300epd.c index c9f309ae88c5..8b90c4f2d430 100644 --- a/arch/arm/mach-pxa/am300epd.c +++ b/arch/arm/mach-pxa/am300epd.c | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #include <mach/gumstix.h> | 31 | #include <mach/gumstix.h> |
32 | #include <mach/mfp-pxa25x.h> | 32 | #include <mach/mfp-pxa25x.h> |
33 | #include <mach/irqs.h> | ||
33 | #include <linux/platform_data/video-pxafb.h> | 34 | #include <linux/platform_data/video-pxafb.h> |
34 | 35 | ||
35 | #include "generic.h" | 36 | #include "generic.h" |
diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/include/mach/balloon3.h index 954641e6c8b1..1b0825911e62 100644 --- a/arch/arm/mach-pxa/include/mach/balloon3.h +++ b/arch/arm/mach-pxa/include/mach/balloon3.h | |||
@@ -14,6 +14,8 @@ | |||
14 | #ifndef ASM_ARCH_BALLOON3_H | 14 | #ifndef ASM_ARCH_BALLOON3_H |
15 | #define ASM_ARCH_BALLOON3_H | 15 | #define ASM_ARCH_BALLOON3_H |
16 | 16 | ||
17 | #include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ | ||
18 | |||
17 | enum balloon3_features { | 19 | enum balloon3_features { |
18 | BALLOON3_FEATURE_OHCI, | 20 | BALLOON3_FEATURE_OHCI, |
19 | BALLOON3_FEATURE_MMC, | 21 | BALLOON3_FEATURE_MMC, |
diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/include/mach/corgi.h index f3c3493b468d..c030d955bbd7 100644 --- a/arch/arm/mach-pxa/include/mach/corgi.h +++ b/arch/arm/mach-pxa/include/mach/corgi.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #ifndef __ASM_ARCH_CORGI_H | 13 | #ifndef __ASM_ARCH_CORGI_H |
14 | #define __ASM_ARCH_CORGI_H 1 | 14 | #define __ASM_ARCH_CORGI_H 1 |
15 | 15 | ||
16 | #include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * Corgi (Non Standard) GPIO Definitions | 19 | * Corgi (Non Standard) GPIO Definitions |
diff --git a/arch/arm/mach-pxa/include/mach/csb726.h b/arch/arm/mach-pxa/include/mach/csb726.h index 2628e7b72116..00cfbbbf73f7 100644 --- a/arch/arm/mach-pxa/include/mach/csb726.h +++ b/arch/arm/mach-pxa/include/mach/csb726.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #ifndef CSB726_H | 11 | #ifndef CSB726_H |
12 | #define CSB726_H | 12 | #define CSB726_H |
13 | 13 | ||
14 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
15 | |||
14 | #define CSB726_GPIO_IRQ_LAN 52 | 16 | #define CSB726_GPIO_IRQ_LAN 52 |
15 | #define CSB726_GPIO_IRQ_SM501 53 | 17 | #define CSB726_GPIO_IRQ_SM501 53 |
16 | #define CSB726_GPIO_MMC_DETECT 100 | 18 | #define CSB726_GPIO_MMC_DETECT 100 |
diff --git a/arch/arm/mach-pxa/include/mach/gumstix.h b/arch/arm/mach-pxa/include/mach/gumstix.h index dba14b6503ad..f7df27bbb42e 100644 --- a/arch/arm/mach-pxa/include/mach/gumstix.h +++ b/arch/arm/mach-pxa/include/mach/gumstix.h | |||
@@ -6,6 +6,7 @@ | |||
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
9 | 10 | ||
10 | /* BTRESET - Reset line to Bluetooth module, active low signal. */ | 11 | /* BTRESET - Reset line to Bluetooth module, active low signal. */ |
11 | #define GPIO_GUMSTIX_BTRESET 7 | 12 | #define GPIO_GUMSTIX_BTRESET 7 |
diff --git a/arch/arm/mach-pxa/include/mach/idp.h b/arch/arm/mach-pxa/include/mach/idp.h index 22a96f87232b..7e63f4680271 100644 --- a/arch/arm/mach-pxa/include/mach/idp.h +++ b/arch/arm/mach-pxa/include/mach/idp.h | |||
@@ -23,6 +23,7 @@ | |||
23 | * IDP hardware. | 23 | * IDP hardware. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
26 | 27 | ||
27 | #define IDP_FLASH_PHYS (PXA_CS0_PHYS) | 28 | #define IDP_FLASH_PHYS (PXA_CS0_PHYS) |
28 | #define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS) | 29 | #define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS) |
diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/include/mach/palmld.h index 2c4471336570..b184f296023b 100644 --- a/arch/arm/mach-pxa/include/mach/palmld.h +++ b/arch/arm/mach-pxa/include/mach/palmld.h | |||
@@ -13,6 +13,8 @@ | |||
13 | #ifndef _INCLUDE_PALMLD_H_ | 13 | #ifndef _INCLUDE_PALMLD_H_ |
14 | #define _INCLUDE_PALMLD_H_ | 14 | #define _INCLUDE_PALMLD_H_ |
15 | 15 | ||
16 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
17 | |||
16 | /** HERE ARE GPIOs **/ | 18 | /** HERE ARE GPIOs **/ |
17 | 19 | ||
18 | /* GPIOs */ | 20 | /* GPIOs */ |
diff --git a/arch/arm/mach-pxa/include/mach/palmt5.h b/arch/arm/mach-pxa/include/mach/palmt5.h index 0bd4f036c72f..e342c5921405 100644 --- a/arch/arm/mach-pxa/include/mach/palmt5.h +++ b/arch/arm/mach-pxa/include/mach/palmt5.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #ifndef _INCLUDE_PALMT5_H_ | 15 | #ifndef _INCLUDE_PALMT5_H_ |
16 | #define _INCLUDE_PALMT5_H_ | 16 | #define _INCLUDE_PALMT5_H_ |
17 | 17 | ||
18 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
19 | |||
18 | /** HERE ARE GPIOs **/ | 20 | /** HERE ARE GPIOs **/ |
19 | 21 | ||
20 | /* GPIOs */ | 22 | /* GPIOs */ |
diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h index c383a21680b6..81c727b3cfd2 100644 --- a/arch/arm/mach-pxa/include/mach/palmtc.h +++ b/arch/arm/mach-pxa/include/mach/palmtc.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #ifndef _INCLUDE_PALMTC_H_ | 16 | #ifndef _INCLUDE_PALMTC_H_ |
17 | #define _INCLUDE_PALMTC_H_ | 17 | #define _INCLUDE_PALMTC_H_ |
18 | 18 | ||
19 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
20 | |||
19 | /** HERE ARE GPIOs **/ | 21 | /** HERE ARE GPIOs **/ |
20 | 22 | ||
21 | /* GPIOs */ | 23 | /* GPIOs */ |
diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h index f2e530380253..92bc1f05300d 100644 --- a/arch/arm/mach-pxa/include/mach/palmtx.h +++ b/arch/arm/mach-pxa/include/mach/palmtx.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #ifndef _INCLUDE_PALMTX_H_ | 16 | #ifndef _INCLUDE_PALMTX_H_ |
17 | #define _INCLUDE_PALMTX_H_ | 17 | #define _INCLUDE_PALMTX_H_ |
18 | 18 | ||
19 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
20 | |||
19 | /** HERE ARE GPIOs **/ | 21 | /** HERE ARE GPIOs **/ |
20 | 22 | ||
21 | /* GPIOs */ | 23 | /* GPIOs */ |
diff --git a/arch/arm/mach-pxa/include/mach/pcm027.h b/arch/arm/mach-pxa/include/mach/pcm027.h index 6bf28de228bd..86ebd7b6c960 100644 --- a/arch/arm/mach-pxa/include/mach/pcm027.h +++ b/arch/arm/mach-pxa/include/mach/pcm027.h | |||
@@ -23,6 +23,8 @@ | |||
23 | * Definitions of CPU card resources only | 23 | * Definitions of CPU card resources only |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
27 | |||
26 | /* phyCORE-PXA270 (PCM027) Interrupts */ | 28 | /* phyCORE-PXA270 (PCM027) Interrupts */ |
27 | #define PCM027_IRQ(x) (IRQ_BOARD_START + (x)) | 29 | #define PCM027_IRQ(x) (IRQ_BOARD_START + (x)) |
28 | #define PCM027_BTDET_IRQ PCM027_IRQ(0) | 30 | #define PCM027_BTDET_IRQ PCM027_IRQ(0) |
diff --git a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h index 0260aaa2fc17..7e544c14967e 100644 --- a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h +++ b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <mach/pcm027.h> | 22 | #include <mach/pcm027.h> |
23 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
23 | 24 | ||
24 | /* | 25 | /* |
25 | * definitions relevant only when the PCM-990 | 26 | * definitions relevant only when the PCM-990 |
diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/include/mach/poodle.h index f32ff75dcca8..b56b19351a03 100644 --- a/arch/arm/mach-pxa/include/mach/poodle.h +++ b/arch/arm/mach-pxa/include/mach/poodle.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #ifndef __ASM_ARCH_POODLE_H | 15 | #ifndef __ASM_ARCH_POODLE_H |
16 | #define __ASM_ARCH_POODLE_H 1 | 16 | #define __ASM_ARCH_POODLE_H 1 |
17 | 17 | ||
18 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
19 | |||
18 | /* | 20 | /* |
19 | * GPIOs | 21 | * GPIOs |
20 | */ | 22 | */ |
diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h index 0bfe6507c95d..25c9f62e46aa 100644 --- a/arch/arm/mach-pxa/include/mach/spitz.h +++ b/arch/arm/mach-pxa/include/mach/spitz.h | |||
@@ -15,8 +15,8 @@ | |||
15 | #define __ASM_ARCH_SPITZ_H 1 | 15 | #define __ASM_ARCH_SPITZ_H 1 |
16 | #endif | 16 | #endif |
17 | 17 | ||
18 | #include "irqs.h" /* PXA_NR_BUILTIN_GPIO, PXA_GPIO_TO_IRQ */ | ||
18 | #include <linux/fb.h> | 19 | #include <linux/fb.h> |
19 | #include <linux/gpio.h> | ||
20 | 20 | ||
21 | /* Spitz/Akita GPIOs */ | 21 | /* Spitz/Akita GPIOs */ |
22 | 22 | ||
diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h index 2bb0e862598c..0497d95cef25 100644 --- a/arch/arm/mach-pxa/include/mach/tosa.h +++ b/arch/arm/mach-pxa/include/mach/tosa.h | |||
@@ -13,6 +13,8 @@ | |||
13 | #ifndef _ASM_ARCH_TOSA_H_ | 13 | #ifndef _ASM_ARCH_TOSA_H_ |
14 | #define _ASM_ARCH_TOSA_H_ 1 | 14 | #define _ASM_ARCH_TOSA_H_ 1 |
15 | 15 | ||
16 | #include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ | ||
17 | |||
16 | /* TOSA Chip selects */ | 18 | /* TOSA Chip selects */ |
17 | #define TOSA_LCDC_PHYS PXA_CS4_PHYS | 19 | #define TOSA_LCDC_PHYS PXA_CS4_PHYS |
18 | /* Internel Scoop */ | 20 | /* Internel Scoop */ |
diff --git a/arch/arm/mach-pxa/include/mach/trizeps4.h b/arch/arm/mach-pxa/include/mach/trizeps4.h index d2ca01053f69..ae3ca013afab 100644 --- a/arch/arm/mach-pxa/include/mach/trizeps4.h +++ b/arch/arm/mach-pxa/include/mach/trizeps4.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #ifndef _TRIPEPS4_H_ | 10 | #ifndef _TRIPEPS4_H_ |
11 | #define _TRIPEPS4_H_ | 11 | #define _TRIPEPS4_H_ |
12 | 12 | ||
13 | #include "irqs.h" /* PXA_GPIO_TO_IRQ */ | ||
14 | |||
13 | /* physical memory regions */ | 15 | /* physical memory regions */ |
14 | #define TRIZEPS4_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */ | 16 | #define TRIZEPS4_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */ |
15 | #define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */ | 17 | #define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */ |
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index 338640631e08..05fa505df585 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig | |||
@@ -8,7 +8,7 @@ config ARCH_SHMOBILE_MULTI | |||
8 | select CPU_V7 | 8 | select CPU_V7 |
9 | select GENERIC_CLOCKEVENTS | 9 | select GENERIC_CLOCKEVENTS |
10 | select HAVE_ARM_SCU if SMP | 10 | select HAVE_ARM_SCU if SMP |
11 | select HAVE_ARM_TWD if LOCAL_TIMERS | 11 | select HAVE_ARM_TWD if SMP |
12 | select HAVE_SMP | 12 | select HAVE_SMP |
13 | select ARM_GIC | 13 | select ARM_GIC |
14 | select MIGHT_HAVE_CACHE_L2X0 | 14 | select MIGHT_HAVE_CACHE_L2X0 |
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 1db2a5ca9ab8..8c09a8393fb6 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/of_irq.h> | 25 | #include <linux/of_irq.h> |
26 | #include <linux/of_platform.h> | 26 | #include <linux/of_platform.h> |
27 | #include <linux/of.h> | 27 | #include <linux/of.h> |
28 | #include <linux/memblock.h> | ||
28 | #include <linux/irqchip.h> | 29 | #include <linux/irqchip.h> |
29 | #include <linux/irqchip/arm-gic.h> | 30 | #include <linux/irqchip/arm-gic.h> |
30 | 31 | ||
@@ -41,6 +42,18 @@ | |||
41 | 42 | ||
42 | void __iomem *zynq_scu_base; | 43 | void __iomem *zynq_scu_base; |
43 | 44 | ||
45 | /** | ||
46 | * zynq_memory_init - Initialize special memory | ||
47 | * | ||
48 | * We need to stop things allocating the low memory as DMA can't work in | ||
49 | * the 1st 512K of memory. | ||
50 | */ | ||
51 | static void __init zynq_memory_init(void) | ||
52 | { | ||
53 | if (!__pa(PAGE_OFFSET)) | ||
54 | memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir)); | ||
55 | } | ||
56 | |||
44 | static struct platform_device zynq_cpuidle_device = { | 57 | static struct platform_device zynq_cpuidle_device = { |
45 | .name = "cpuidle-zynq", | 58 | .name = "cpuidle-zynq", |
46 | }; | 59 | }; |
@@ -117,5 +130,6 @@ DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") | |||
117 | .init_machine = zynq_init_machine, | 130 | .init_machine = zynq_init_machine, |
118 | .init_time = zynq_timer_init, | 131 | .init_time = zynq_timer_init, |
119 | .dt_compat = zynq_dt_match, | 132 | .dt_compat = zynq_dt_match, |
133 | .reserve = zynq_memory_init, | ||
120 | .restart = zynq_system_reset, | 134 | .restart = zynq_system_reset, |
121 | MACHINE_END | 135 | MACHINE_END |
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index d5a982d15a88..7ea641b7aa7d 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt) | |||
38 | 38 | ||
39 | struct mem_type { | 39 | struct mem_type { |
40 | pteval_t prot_pte; | 40 | pteval_t prot_pte; |
41 | pteval_t prot_pte_s2; | ||
41 | pmdval_t prot_l1; | 42 | pmdval_t prot_l1; |
42 | pmdval_t prot_sect; | 43 | pmdval_t prot_sect; |
43 | unsigned int domain; | 44 | unsigned int domain; |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4f08c133cc25..a623cb3ad012 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -232,12 +232,16 @@ __setup("noalign", noalign_setup); | |||
232 | #endif /* ifdef CONFIG_CPU_CP15 / else */ | 232 | #endif /* ifdef CONFIG_CPU_CP15 / else */ |
233 | 233 | ||
234 | #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN | 234 | #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN |
235 | #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE | ||
235 | #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE | 236 | #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE |
236 | 237 | ||
237 | static struct mem_type mem_types[] = { | 238 | static struct mem_type mem_types[] = { |
238 | [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ | 239 | [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ |
239 | .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | | 240 | .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | |
240 | L_PTE_SHARED, | 241 | L_PTE_SHARED, |
242 | .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) | | ||
243 | s2_policy(L_PTE_S2_MT_DEV_SHARED) | | ||
244 | L_PTE_SHARED, | ||
241 | .prot_l1 = PMD_TYPE_TABLE, | 245 | .prot_l1 = PMD_TYPE_TABLE, |
242 | .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, | 246 | .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, |
243 | .domain = DOMAIN_IO, | 247 | .domain = DOMAIN_IO, |
@@ -508,7 +512,8 @@ static void __init build_mem_type_table(void) | |||
508 | cp = &cache_policies[cachepolicy]; | 512 | cp = &cache_policies[cachepolicy]; |
509 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; | 513 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; |
510 | s2_pgprot = cp->pte_s2; | 514 | s2_pgprot = cp->pte_s2; |
511 | hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; | 515 | hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; |
516 | s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; | ||
512 | 517 | ||
513 | /* | 518 | /* |
514 | * ARMv6 and above have extended page tables. | 519 | * ARMv6 and above have extended page tables. |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 45dc29f85d56..32b3558321c4 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -208,7 +208,6 @@ __v6_setup: | |||
208 | mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache | 208 | mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache |
209 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | 209 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache |
210 | mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache | 210 | mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache |
211 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | ||
212 | #ifdef CONFIG_MMU | 211 | #ifdef CONFIG_MMU |
213 | mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs | 212 | mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs |
214 | mcr p15, 0, r0, c2, c0, 2 @ TTB control register | 213 | mcr p15, 0, r0, c2, c0, 2 @ TTB control register |
@@ -218,6 +217,8 @@ __v6_setup: | |||
218 | ALT_UP(orr r8, r8, #TTB_FLAGS_UP) | 217 | ALT_UP(orr r8, r8, #TTB_FLAGS_UP) |
219 | mcr p15, 0, r8, c2, c0, 1 @ load TTB1 | 218 | mcr p15, 0, r8, c2, c0, 1 @ load TTB1 |
220 | #endif /* CONFIG_MMU */ | 219 | #endif /* CONFIG_MMU */ |
220 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and | ||
221 | @ complete invalidations | ||
221 | adr r5, v6_crval | 222 | adr r5, v6_crval |
222 | ldmia r5, {r5, r6} | 223 | ldmia r5, {r5, r6} |
223 | ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables | 224 | ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index bd1781979a39..74f6033e76dd 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -351,7 +351,6 @@ __v7_setup: | |||
351 | 351 | ||
352 | 4: mov r10, #0 | 352 | 4: mov r10, #0 |
353 | mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate | 353 | mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate |
354 | dsb | ||
355 | #ifdef CONFIG_MMU | 354 | #ifdef CONFIG_MMU |
356 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs | 355 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs |
357 | v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup | 356 | v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup |
@@ -360,6 +359,7 @@ __v7_setup: | |||
360 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR | 359 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR |
361 | mcr p15, 0, r6, c10, c2, 1 @ write NMRR | 360 | mcr p15, 0, r6, c10, c2, 1 @ write NMRR |
362 | #endif | 361 | #endif |
362 | dsb @ Complete invalidations | ||
363 | #ifndef CONFIG_ARM_THUMBEE | 363 | #ifndef CONFIG_ARM_THUMBEE |
364 | mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE | 364 | mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE |
365 | and r0, r0, #(0xf << 12) @ ThumbEE enabled field | 365 | and r0, r0, #(0xf << 12) @ ThumbEE enabled field |
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 495ab6f84a61..eaf54a30bedc 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -148,6 +148,15 @@ struct kvm_arch_memory_slot { | |||
148 | #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) | 148 | #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) |
149 | #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) | 149 | #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) |
150 | 150 | ||
151 | /* Device Control API: ARM VGIC */ | ||
152 | #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 | ||
153 | #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 | ||
154 | #define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 | ||
155 | #define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 | ||
156 | #define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | ||
157 | #define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 | ||
158 | #define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) | ||
159 | |||
151 | /* KVM_IRQ_LINE irq field index values */ | 160 | /* KVM_IRQ_LINE irq field index values */ |
152 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | 161 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 |
153 | #define KVM_ARM_IRQ_TYPE_MASK 0xff | 162 | #define KVM_ARM_IRQ_TYPE_MASK 0xff |
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile index 22fb66590dcd..dba48a5d5bb9 100644 --- a/arch/avr32/Makefile +++ b/arch/avr32/Makefile | |||
@@ -11,7 +11,7 @@ all: uImage vmlinux.elf | |||
11 | 11 | ||
12 | KBUILD_DEFCONFIG := atstk1002_defconfig | 12 | KBUILD_DEFCONFIG := atstk1002_defconfig |
13 | 13 | ||
14 | KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic | 14 | KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__ |
15 | KBUILD_AFLAGS += -mrelax -mno-pic | 15 | KBUILD_AFLAGS += -mrelax -mno-pic |
16 | KBUILD_CFLAGS_MODULE += -mno-relax | 16 | KBUILD_CFLAGS_MODULE += -mno-relax |
17 | LDFLAGS_vmlinux += --relax | 17 | LDFLAGS_vmlinux += --relax |
diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c index 9764a1a1073e..c1466a872b9c 100644 --- a/arch/avr32/boards/mimc200/fram.c +++ b/arch/avr32/boards/mimc200/fram.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #define FRAM_VERSION "1.0" | 11 | #define FRAM_VERSION "1.0" |
12 | 12 | ||
13 | #include <linux/miscdevice.h> | 13 | #include <linux/miscdevice.h> |
14 | #include <linux/module.h> | ||
14 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
15 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
16 | #include <linux/io.h> | 17 | #include <linux/io.h> |
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index cfb9fe1b8df9..c7c64a63c29f 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild | |||
@@ -17,5 +17,6 @@ generic-y += scatterlist.h | |||
17 | generic-y += sections.h | 17 | generic-y += sections.h |
18 | generic-y += topology.h | 18 | generic-y += topology.h |
19 | generic-y += trace_clock.h | 19 | generic-y += trace_clock.h |
20 | generic-y += vga.h | ||
20 | generic-y += xor.h | 21 | generic-y += xor.h |
21 | generic-y += hash.h | 22 | generic-y += hash.h |
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h index fc6483f83ccc..4f5ec2bb7172 100644 --- a/arch/avr32/include/asm/io.h +++ b/arch/avr32/include/asm/io.h | |||
@@ -295,6 +295,8 @@ extern void __iounmap(void __iomem *addr); | |||
295 | #define iounmap(addr) \ | 295 | #define iounmap(addr) \ |
296 | __iounmap(addr) | 296 | __iounmap(addr) |
297 | 297 | ||
298 | #define ioremap_wc ioremap_nocache | ||
299 | |||
298 | #define cached(addr) P1SEGADDR(addr) | 300 | #define cached(addr) P1SEGADDR(addr) |
299 | #define uncached(addr) P2SEGADDR(addr) | 301 | #define uncached(addr) P2SEGADDR(addr) |
300 | 302 | ||
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index e27e9ad6818e..150866b2a3fe 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
@@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | extern int dma_set_mask(struct device *dev, u64 dma_mask); | 136 | extern int dma_set_mask(struct device *dev, u64 dma_mask); |
137 | extern int __dma_set_mask(struct device *dev, u64 dma_mask); | ||
137 | 138 | ||
138 | #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) | 139 | #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) |
139 | 140 | ||
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 9e39ceb1d19f..d4dd41fb951b 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h | |||
@@ -172,10 +172,20 @@ struct eeh_ops { | |||
172 | }; | 172 | }; |
173 | 173 | ||
174 | extern struct eeh_ops *eeh_ops; | 174 | extern struct eeh_ops *eeh_ops; |
175 | extern int eeh_subsystem_enabled; | 175 | extern bool eeh_subsystem_enabled; |
176 | extern raw_spinlock_t confirm_error_lock; | 176 | extern raw_spinlock_t confirm_error_lock; |
177 | extern int eeh_probe_mode; | 177 | extern int eeh_probe_mode; |
178 | 178 | ||
179 | static inline bool eeh_enabled(void) | ||
180 | { | ||
181 | return eeh_subsystem_enabled; | ||
182 | } | ||
183 | |||
184 | static inline void eeh_set_enable(bool mode) | ||
185 | { | ||
186 | eeh_subsystem_enabled = mode; | ||
187 | } | ||
188 | |||
179 | #define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ | 189 | #define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ |
180 | #define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ | 190 | #define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ |
181 | 191 | ||
@@ -246,7 +256,7 @@ void eeh_remove_device(struct pci_dev *); | |||
246 | * If this macro yields TRUE, the caller relays to eeh_check_failure() | 256 | * If this macro yields TRUE, the caller relays to eeh_check_failure() |
247 | * which does further tests out of line. | 257 | * which does further tests out of line. |
248 | */ | 258 | */ |
249 | #define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled) | 259 | #define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled()) |
250 | 260 | ||
251 | /* | 261 | /* |
252 | * Reads from a device which has been isolated by EEH will return | 262 | * Reads from a device which has been isolated by EEH will return |
@@ -257,6 +267,13 @@ void eeh_remove_device(struct pci_dev *); | |||
257 | 267 | ||
258 | #else /* !CONFIG_EEH */ | 268 | #else /* !CONFIG_EEH */ |
259 | 269 | ||
270 | static inline bool eeh_enabled(void) | ||
271 | { | ||
272 | return false; | ||
273 | } | ||
274 | |||
275 | static inline void eeh_set_enable(bool mode) { } | ||
276 | |||
260 | static inline int eeh_init(void) | 277 | static inline int eeh_init(void) |
261 | { | 278 | { |
262 | return 0; | 279 | return 0; |
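A minimal usage sketch of the new accessors, assuming only the declarations added above; the caller and its name are hypothetical, not taken from this merge:

#include <linux/pci.h>
#include <asm/eeh.h>

/* Hypothetical caller: bail out early when EEH is not active. */
static void example_pci_error_path(struct pci_dev *pdev)
{
	if (!eeh_enabled())
		return;			/* EEH disabled or never probed */

	/* EEH-aware recovery would go here ... */
}

/* A shutdown-style path can mask further EEH processing with:
 *	eeh_set_enable(false);
 */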
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index d750336b171d..623f2971ce0e 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h | |||
@@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | |||
127 | unsigned long addr, pte_t *ptep) | 127 | unsigned long addr, pte_t *ptep) |
128 | { | 128 | { |
129 | #ifdef CONFIG_PPC64 | 129 | #ifdef CONFIG_PPC64 |
130 | return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); | 130 | return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); |
131 | #else | 131 | #else |
132 | return __pte(pte_update(ptep, ~0UL, 0)); | 132 | return __pte(pte_update(ptep, ~0UL, 0)); |
133 | #endif | 133 | #endif |
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index f7a8036579b5..42632c7a2a4e 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h | |||
@@ -77,6 +77,7 @@ struct iommu_table { | |||
77 | #ifdef CONFIG_IOMMU_API | 77 | #ifdef CONFIG_IOMMU_API |
78 | struct iommu_group *it_group; | 78 | struct iommu_group *it_group; |
79 | #endif | 79 | #endif |
80 | void (*set_bypass)(struct iommu_table *tbl, bool enable); | ||
80 | }; | 81 | }; |
81 | 82 | ||
82 | /* Pure 2^n version of get_order */ | 83 | /* Pure 2^n version of get_order */ |
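A hedged sketch of how a platform IOMMU driver might wire up the new set_bypass hook so the generic ownership helpers (changed later in this merge) can toggle the DMA bypass window; every name below is illustrative, not from the patch:

#include <asm/iommu.h>

/* Hypothetical callback: turn the hardware bypass window on or off. */
static void example_set_bypass(struct iommu_table *tbl, bool enable)
{
	/* program the PHB's bypass window here according to 'enable' */
}

static void example_setup_table(struct iommu_table *tbl)
{
	/* picked up by iommu_take_ownership()/iommu_release_ownership() */
	tbl->set_bypass = example_set_bypass;
}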
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index bc141c950b1e..eb9261024f51 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h | |||
@@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
195 | static inline unsigned long pte_update(struct mm_struct *mm, | 195 | static inline unsigned long pte_update(struct mm_struct *mm, |
196 | unsigned long addr, | 196 | unsigned long addr, |
197 | pte_t *ptep, unsigned long clr, | 197 | pte_t *ptep, unsigned long clr, |
198 | unsigned long set, | ||
198 | int huge) | 199 | int huge) |
199 | { | 200 | { |
200 | #ifdef PTE_ATOMIC_UPDATES | 201 | #ifdef PTE_ATOMIC_UPDATES |
@@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm, | |||
205 | andi. %1,%0,%6\n\ | 206 | andi. %1,%0,%6\n\ |
206 | bne- 1b \n\ | 207 | bne- 1b \n\ |
207 | andc %1,%0,%4 \n\ | 208 | andc %1,%0,%4 \n\ |
209 | or %1,%1,%7\n\ | ||
208 | stdcx. %1,0,%3 \n\ | 210 | stdcx. %1,0,%3 \n\ |
209 | bne- 1b" | 211 | bne- 1b" |
210 | : "=&r" (old), "=&r" (tmp), "=m" (*ptep) | 212 | : "=&r" (old), "=&r" (tmp), "=m" (*ptep) |
211 | : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) | 213 | : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) |
212 | : "cc" ); | 214 | : "cc" ); |
213 | #else | 215 | #else |
214 | unsigned long old = pte_val(*ptep); | 216 | unsigned long old = pte_val(*ptep); |
215 | *ptep = __pte(old & ~clr); | 217 | *ptep = __pte((old & ~clr) | set); |
216 | #endif | 218 | #endif |
217 | /* huge pages use the old page table lock */ | 219 | /* huge pages use the old page table lock */ |
218 | if (!huge) | 220 | if (!huge) |
@@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, | |||
231 | { | 233 | { |
232 | unsigned long old; | 234 | unsigned long old; |
233 | 235 | ||
234 | if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) | 236 | if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) |
235 | return 0; | 237 | return 0; |
236 | old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); | 238 | old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); |
237 | return (old & _PAGE_ACCESSED) != 0; | 239 | return (old & _PAGE_ACCESSED) != 0; |
238 | } | 240 | } |
239 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 241 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
@@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
252 | if ((pte_val(*ptep) & _PAGE_RW) == 0) | 254 | if ((pte_val(*ptep) & _PAGE_RW) == 0) |
253 | return; | 255 | return; |
254 | 256 | ||
255 | pte_update(mm, addr, ptep, _PAGE_RW, 0); | 257 | pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); |
256 | } | 258 | } |
257 | 259 | ||
258 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | 260 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, |
@@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | |||
261 | if ((pte_val(*ptep) & _PAGE_RW) == 0) | 263 | if ((pte_val(*ptep) & _PAGE_RW) == 0) |
262 | return; | 264 | return; |
263 | 265 | ||
264 | pte_update(mm, addr, ptep, _PAGE_RW, 1); | 266 | pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); |
265 | } | 267 | } |
266 | 268 | ||
267 | /* | 269 | /* |
@@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | |||
284 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | 286 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, |
285 | unsigned long addr, pte_t *ptep) | 287 | unsigned long addr, pte_t *ptep) |
286 | { | 288 | { |
287 | unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); | 289 | unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); |
288 | return __pte(old); | 290 | return __pte(old); |
289 | } | 291 | } |
290 | 292 | ||
291 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | 293 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, |
292 | pte_t * ptep) | 294 | pte_t * ptep) |
293 | { | 295 | { |
294 | pte_update(mm, addr, ptep, ~0UL, 0); | 296 | pte_update(mm, addr, ptep, ~0UL, 0, 0); |
295 | } | 297 | } |
296 | 298 | ||
297 | 299 | ||
@@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma, | |||
506 | 508 | ||
507 | extern unsigned long pmd_hugepage_update(struct mm_struct *mm, | 509 | extern unsigned long pmd_hugepage_update(struct mm_struct *mm, |
508 | unsigned long addr, | 510 | unsigned long addr, |
509 | pmd_t *pmdp, unsigned long clr); | 511 | pmd_t *pmdp, |
512 | unsigned long clr, | ||
513 | unsigned long set); | ||
510 | 514 | ||
511 | static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, | 515 | static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, |
512 | unsigned long addr, pmd_t *pmdp) | 516 | unsigned long addr, pmd_t *pmdp) |
@@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, | |||
515 | 519 | ||
516 | if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) | 520 | if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) |
517 | return 0; | 521 | return 0; |
518 | old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); | 522 | old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); |
519 | return ((old & _PAGE_ACCESSED) != 0); | 523 | return ((old & _PAGE_ACCESSED) != 0); |
520 | } | 524 | } |
521 | 525 | ||
@@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
542 | if ((pmd_val(*pmdp) & _PAGE_RW) == 0) | 546 | if ((pmd_val(*pmdp) & _PAGE_RW) == 0) |
543 | return; | 547 | return; |
544 | 548 | ||
545 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); | 549 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); |
546 | } | 550 | } |
547 | 551 | ||
548 | #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH | 552 | #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH |
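Because the diff shows the new pte_update() signature but few call sites, a short annotated sketch of the argument order may help; it mirrors the ptep_set_numa() user added in the pgtable.h hunk below, and the wrapper name is illustrative:

#include <asm/pgtable.h>

/*
 * Illustrative wrapper: the changed primitive is
 *	pte_update(mm, addr, ptep, clr, set, huge)
 * i.e. clear one set of PTE bits and set another in a single atomic update.
 */
static inline void example_make_pte_numa(struct mm_struct *mm,
					 unsigned long addr, pte_t *ptep)
{
	/* drop _PAGE_PRESENT, set _PAGE_NUMA, on a regular (non-huge) page */
	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
}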
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index f83b6f3e1b39..3ebb188c3ff5 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h | |||
@@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte) | |||
75 | return pte; | 75 | return pte; |
76 | } | 76 | } |
77 | 77 | ||
78 | #define ptep_set_numa ptep_set_numa | ||
79 | static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, | ||
80 | pte_t *ptep) | ||
81 | { | ||
82 | if ((pte_val(*ptep) & _PAGE_PRESENT) == 0) | ||
83 | VM_BUG_ON(1); | ||
84 | |||
85 | pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0); | ||
86 | return; | ||
87 | } | ||
88 | |||
78 | #define pmd_numa pmd_numa | 89 | #define pmd_numa pmd_numa |
79 | static inline int pmd_numa(pmd_t pmd) | 90 | static inline int pmd_numa(pmd_t pmd) |
80 | { | 91 | { |
81 | return pte_numa(pmd_pte(pmd)); | 92 | return pte_numa(pmd_pte(pmd)); |
82 | } | 93 | } |
83 | 94 | ||
95 | #define pmdp_set_numa pmdp_set_numa | ||
96 | static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, | ||
97 | pmd_t *pmdp) | ||
98 | { | ||
99 | if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0) | ||
100 | VM_BUG_ON(1); | ||
101 | |||
102 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA); | ||
103 | return; | ||
104 | } | ||
105 | |||
84 | #define pmd_mknonnuma pmd_mknonnuma | 106 | #define pmd_mknonnuma pmd_mknonnuma |
85 | static inline pmd_t pmd_mknonnuma(pmd_t pmd) | 107 | static inline pmd_t pmd_mknonnuma(pmd_t pmd) |
86 | { | 108 | { |
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 4ee06fe15de4..d0e784e0ff48 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #ifdef __powerpc64__ | 9 | #ifdef __powerpc64__ |
10 | 10 | ||
11 | extern char __start_interrupts[]; | ||
11 | extern char __end_interrupts[]; | 12 | extern char __end_interrupts[]; |
12 | 13 | ||
13 | extern char __prom_init_toc_start[]; | 14 | extern char __prom_init_toc_start[]; |
@@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr) | |||
21 | return 0; | 22 | return 0; |
22 | } | 23 | } |
23 | 24 | ||
25 | static inline int overlaps_interrupt_vector_text(unsigned long start, | ||
26 | unsigned long end) | ||
27 | { | ||
28 | unsigned long real_start, real_end; | ||
29 | real_start = __start_interrupts - _stext; | ||
30 | real_end = __end_interrupts - _stext; | ||
31 | |||
32 | return start < (unsigned long)__va(real_end) && | ||
33 | (unsigned long)__va(real_start) < end; | ||
34 | } | ||
35 | |||
24 | static inline int overlaps_kernel_text(unsigned long start, unsigned long end) | 36 | static inline int overlaps_kernel_text(unsigned long start, unsigned long end) |
25 | { | 37 | { |
26 | return start < (unsigned long)__init_end && | 38 | return start < (unsigned long)__init_end && |
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 0d9cecddf8a4..c53f5f6d1761 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h | |||
@@ -4,11 +4,11 @@ | |||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | /* Default link addresses for the vDSOs */ | 6 | /* Default link addresses for the vDSOs */ |
7 | #define VDSO32_LBASE 0x100000 | 7 | #define VDSO32_LBASE 0x0 |
8 | #define VDSO64_LBASE 0x100000 | 8 | #define VDSO64_LBASE 0x0 |
9 | 9 | ||
10 | /* Default map addresses for 32bit vDSO */ | 10 | /* Default map addresses for 32bit vDSO */ |
11 | #define VDSO32_MBASE VDSO32_LBASE | 11 | #define VDSO32_MBASE 0x100000 |
12 | 12 | ||
13 | #define VDSO_VERSION_STRING LINUX_2.6.15 | 13 | #define VDSO_VERSION_STRING LINUX_2.6.15 |
14 | 14 | ||
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 8032b97ccdcb..ee78f6e49d64 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c | |||
@@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops); | |||
191 | 191 | ||
192 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | 192 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) |
193 | 193 | ||
194 | int dma_set_mask(struct device *dev, u64 dma_mask) | 194 | int __dma_set_mask(struct device *dev, u64 dma_mask) |
195 | { | 195 | { |
196 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | 196 | struct dma_map_ops *dma_ops = get_dma_ops(dev); |
197 | 197 | ||
198 | if (ppc_md.dma_set_mask) | ||
199 | return ppc_md.dma_set_mask(dev, dma_mask); | ||
200 | if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) | 198 | if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) |
201 | return dma_ops->set_dma_mask(dev, dma_mask); | 199 | return dma_ops->set_dma_mask(dev, dma_mask); |
202 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | 200 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) |
@@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask) | |||
204 | *dev->dma_mask = dma_mask; | 202 | *dev->dma_mask = dma_mask; |
205 | return 0; | 203 | return 0; |
206 | } | 204 | } |
205 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
206 | { | ||
207 | if (ppc_md.dma_set_mask) | ||
208 | return ppc_md.dma_set_mask(dev, dma_mask); | ||
209 | return __dma_set_mask(dev, dma_mask); | ||
210 | } | ||
207 | EXPORT_SYMBOL(dma_set_mask); | 211 | EXPORT_SYMBOL(dma_set_mask); |
208 | 212 | ||
209 | u64 dma_get_required_mask(struct device *dev) | 213 | u64 dma_get_required_mask(struct device *dev) |
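A sketch of the intended use of the split above: platform code that installs its own ppc_md.dma_set_mask hook can still fall back to the generic path by calling __dma_set_mask() directly, without re-entering the hook. The hook body here is hypothetical:

#include <asm/dma-mapping.h>
#include <asm/machdep.h>

/* Hypothetical platform hook registered as ppc_md.dma_set_mask. */
static int example_platform_dma_set_mask(struct device *dev, u64 dma_mask)
{
	/* platform-specific handling (e.g. choose a bypass window) ... */

	/* ... then defer to the generic implementation; unlike the old
	 * dma_set_mask() it cannot recurse back into ppc_md.dma_set_mask. */
	return __dma_set_mask(dev, dma_mask);
}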
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 148db72a8c43..e7b76a6bf150 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
29 | #include <linux/proc_fs.h> | 29 | #include <linux/proc_fs.h> |
30 | #include <linux/rbtree.h> | 30 | #include <linux/rbtree.h> |
31 | #include <linux/reboot.h> | ||
31 | #include <linux/seq_file.h> | 32 | #include <linux/seq_file.h> |
32 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
33 | #include <linux/export.h> | 34 | #include <linux/export.h> |
@@ -89,7 +90,7 @@ | |||
89 | /* Platform dependent EEH operations */ | 90 | /* Platform dependent EEH operations */ |
90 | struct eeh_ops *eeh_ops = NULL; | 91 | struct eeh_ops *eeh_ops = NULL; |
91 | 92 | ||
92 | int eeh_subsystem_enabled; | 93 | bool eeh_subsystem_enabled = false; |
93 | EXPORT_SYMBOL(eeh_subsystem_enabled); | 94 | EXPORT_SYMBOL(eeh_subsystem_enabled); |
94 | 95 | ||
95 | /* | 96 | /* |
@@ -364,7 +365,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) | |||
364 | 365 | ||
365 | eeh_stats.total_mmio_ffs++; | 366 | eeh_stats.total_mmio_ffs++; |
366 | 367 | ||
367 | if (!eeh_subsystem_enabled) | 368 | if (!eeh_enabled()) |
368 | return 0; | 369 | return 0; |
369 | 370 | ||
370 | if (!edev) { | 371 | if (!edev) { |
@@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name) | |||
747 | return -EEXIST; | 748 | return -EEXIST; |
748 | } | 749 | } |
749 | 750 | ||
751 | static int eeh_reboot_notifier(struct notifier_block *nb, | ||
752 | unsigned long action, void *unused) | ||
753 | { | ||
754 | eeh_set_enable(false); | ||
755 | return NOTIFY_DONE; | ||
756 | } | ||
757 | |||
758 | static struct notifier_block eeh_reboot_nb = { | ||
759 | .notifier_call = eeh_reboot_notifier, | ||
760 | }; | ||
761 | |||
750 | /** | 762 | /** |
751 | * eeh_init - EEH initialization | 763 | * eeh_init - EEH initialization |
752 | * | 764 | * |
@@ -778,6 +790,14 @@ int eeh_init(void) | |||
778 | if (machine_is(powernv) && cnt++ <= 0) | 790 | if (machine_is(powernv) && cnt++ <= 0) |
779 | return ret; | 791 | return ret; |
780 | 792 | ||
793 | /* Register reboot notifier */ | ||
794 | ret = register_reboot_notifier(&eeh_reboot_nb); | ||
795 | if (ret) { | ||
796 | pr_warn("%s: Failed to register notifier (%d)\n", | ||
797 | __func__, ret); | ||
798 | return ret; | ||
799 | } | ||
800 | |||
781 | /* call platform initialization function */ | 801 | /* call platform initialization function */ |
782 | if (!eeh_ops) { | 802 | if (!eeh_ops) { |
783 | pr_warning("%s: Platform EEH operation not found\n", | 803 | pr_warning("%s: Platform EEH operation not found\n", |
@@ -822,7 +842,7 @@ int eeh_init(void) | |||
822 | return ret; | 842 | return ret; |
823 | } | 843 | } |
824 | 844 | ||
825 | if (eeh_subsystem_enabled) | 845 | if (eeh_enabled()) |
826 | pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); | 846 | pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); |
827 | else | 847 | else |
828 | pr_warning("EEH: No capable adapters found\n"); | 848 | pr_warning("EEH: No capable adapters found\n"); |
@@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev) | |||
897 | struct device_node *dn; | 917 | struct device_node *dn; |
898 | struct eeh_dev *edev; | 918 | struct eeh_dev *edev; |
899 | 919 | ||
900 | if (!dev || !eeh_subsystem_enabled) | 920 | if (!dev || !eeh_enabled()) |
901 | return; | 921 | return; |
902 | 922 | ||
903 | pr_debug("EEH: Adding device %s\n", pci_name(dev)); | 923 | pr_debug("EEH: Adding device %s\n", pci_name(dev)); |
@@ -1005,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev) | |||
1005 | { | 1025 | { |
1006 | struct eeh_dev *edev; | 1026 | struct eeh_dev *edev; |
1007 | 1027 | ||
1008 | if (!dev || !eeh_subsystem_enabled) | 1028 | if (!dev || !eeh_enabled()) |
1009 | return; | 1029 | return; |
1010 | edev = pci_dev_to_eeh_dev(dev); | 1030 | edev = pci_dev_to_eeh_dev(dev); |
1011 | 1031 | ||
@@ -1045,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev) | |||
1045 | 1065 | ||
1046 | static int proc_eeh_show(struct seq_file *m, void *v) | 1066 | static int proc_eeh_show(struct seq_file *m, void *v) |
1047 | { | 1067 | { |
1048 | if (0 == eeh_subsystem_enabled) { | 1068 | if (!eeh_enabled()) { |
1049 | seq_printf(m, "EEH Subsystem is globally disabled\n"); | 1069 | seq_printf(m, "EEH Subsystem is globally disabled\n"); |
1050 | seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs); | 1070 | seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs); |
1051 | } else { | 1071 | } else { |
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 7bb30dca4e19..fdc679d309ec 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
@@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata) | |||
362 | */ | 362 | */ |
363 | if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) | 363 | if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) |
364 | return NULL; | 364 | return NULL; |
365 | |||
365 | driver = eeh_pcid_get(dev); | 366 | driver = eeh_pcid_get(dev); |
366 | if (driver && driver->err_handler) | 367 | if (driver) { |
367 | return NULL; | 368 | eeh_pcid_put(dev); |
369 | if (driver->err_handler) | ||
370 | return NULL; | ||
371 | } | ||
368 | 372 | ||
369 | /* Remove it from PCI subsystem */ | 373 | /* Remove it from PCI subsystem */ |
370 | pr_debug("EEH: Removing %s without EEH sensitive driver\n", | 374 | pr_debug("EEH: Removing %s without EEH sensitive driver\n", |
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index d773dd440a45..88e3ec6e1d96 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl) | |||
1088 | memset(tbl->it_map, 0xff, sz); | 1088 | memset(tbl->it_map, 0xff, sz); |
1089 | iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); | 1089 | iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); |
1090 | 1090 | ||
1091 | /* | ||
1092 | * Disable iommu bypass, otherwise the user can DMA to all of | ||
1093 | * our physical memory via the bypass window instead of just | ||
1094 | * the pages that have been explicitly mapped into the iommu | ||

1095 | */ | ||
1096 | if (tbl->set_bypass) | ||
1097 | tbl->set_bypass(tbl, false); | ||
1098 | |||
1091 | return 0; | 1099 | return 0; |
1092 | } | 1100 | } |
1093 | EXPORT_SYMBOL_GPL(iommu_take_ownership); | 1101 | EXPORT_SYMBOL_GPL(iommu_take_ownership); |
@@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl) | |||
1102 | /* Restore bit#0 set by iommu_init_table() */ | 1110 | /* Restore bit#0 set by iommu_init_table() */ |
1103 | if (tbl->it_offset == 0) | 1111 | if (tbl->it_offset == 0) |
1104 | set_bit(0, tbl->it_map); | 1112 | set_bit(0, tbl->it_map); |
1113 | |||
1114 | /* The kernel owns the device now, we can restore the iommu bypass */ | ||
1115 | if (tbl->set_bypass) | ||
1116 | tbl->set_bypass(tbl, true); | ||
1105 | } | 1117 | } |
1106 | EXPORT_SYMBOL_GPL(iommu_release_ownership); | 1118 | EXPORT_SYMBOL_GPL(iommu_release_ownership); |
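For context, a minimal sketch of the ownership flow that now drives the bypass toggling; the caller is hypothetical (a VFIO-style user), while the two exported helpers are the ones modified above:

#include <asm/iommu.h>

static int example_claim_table(struct iommu_table *tbl)
{
	int ret;

	/* also disables bypass when tbl->set_bypass is wired up */
	ret = iommu_take_ownership(tbl);
	if (ret)
		return ret;

	/* ... DMA now goes only through explicitly created IOMMU mappings ... */

	/* restores bypass for the kernel on the way out */
	iommu_release_ownership(tbl);
	return 0;
}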
1107 | 1119 | ||
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 9729b23bfb0a..1d0848bba049 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void) | |||
559 | #ifdef CONFIG_PPC64 | 559 | #ifdef CONFIG_PPC64 |
560 | cpu_nr = i; | 560 | cpu_nr = i; |
561 | #else | 561 | #else |
562 | #ifdef CONFIG_SMP | ||
562 | cpu_nr = get_hard_smp_processor_id(i); | 563 | cpu_nr = get_hard_smp_processor_id(i); |
564 | #else | ||
565 | cpu_nr = 0; | ||
563 | #endif | 566 | #endif |
567 | #endif | ||
568 | |||
564 | memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); | 569 | memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); |
565 | tp = critirq_ctx[cpu_nr]; | 570 | tp = critirq_ctx[cpu_nr]; |
566 | tp->cpu = cpu_nr; | 571 | tp->cpu = cpu_nr; |
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 75d4f7340da8..015ae55c1868 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size) | |||
196 | 196 | ||
197 | /* Values we need to export to the second kernel via the device tree. */ | 197 | /* Values we need to export to the second kernel via the device tree. */ |
198 | static phys_addr_t kernel_end; | 198 | static phys_addr_t kernel_end; |
199 | static phys_addr_t crashk_base; | ||
199 | static phys_addr_t crashk_size; | 200 | static phys_addr_t crashk_size; |
201 | static unsigned long long mem_limit; | ||
200 | 202 | ||
201 | static struct property kernel_end_prop = { | 203 | static struct property kernel_end_prop = { |
202 | .name = "linux,kernel-end", | 204 | .name = "linux,kernel-end", |
@@ -207,7 +209,7 @@ static struct property kernel_end_prop = { | |||
207 | static struct property crashk_base_prop = { | 209 | static struct property crashk_base_prop = { |
208 | .name = "linux,crashkernel-base", | 210 | .name = "linux,crashkernel-base", |
209 | .length = sizeof(phys_addr_t), | 211 | .length = sizeof(phys_addr_t), |
210 | .value = &crashk_res.start, | 212 | .value = &crashk_base |
211 | }; | 213 | }; |
212 | 214 | ||
213 | static struct property crashk_size_prop = { | 215 | static struct property crashk_size_prop = { |
@@ -219,9 +221,11 @@ static struct property crashk_size_prop = { | |||
219 | static struct property memory_limit_prop = { | 221 | static struct property memory_limit_prop = { |
220 | .name = "linux,memory-limit", | 222 | .name = "linux,memory-limit", |
221 | .length = sizeof(unsigned long long), | 223 | .length = sizeof(unsigned long long), |
222 | .value = &memory_limit, | 224 | .value = &mem_limit, |
223 | }; | 225 | }; |
224 | 226 | ||
227 | #define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG) | ||
228 | |||
225 | static void __init export_crashk_values(struct device_node *node) | 229 | static void __init export_crashk_values(struct device_node *node) |
226 | { | 230 | { |
227 | struct property *prop; | 231 | struct property *prop; |
@@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node) | |||
237 | of_remove_property(node, prop); | 241 | of_remove_property(node, prop); |
238 | 242 | ||
239 | if (crashk_res.start != 0) { | 243 | if (crashk_res.start != 0) { |
244 | crashk_base = cpu_to_be_ulong(crashk_res.start), | ||
240 | of_add_property(node, &crashk_base_prop); | 245 | of_add_property(node, &crashk_base_prop); |
241 | crashk_size = resource_size(&crashk_res); | 246 | crashk_size = cpu_to_be_ulong(resource_size(&crashk_res)); |
242 | of_add_property(node, &crashk_size_prop); | 247 | of_add_property(node, &crashk_size_prop); |
243 | } | 248 | } |
244 | 249 | ||
@@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node) | |||
246 | * memory_limit is required by the kexec-tools to limit the | 251 | * memory_limit is required by the kexec-tools to limit the |
247 | * crash regions to the actual memory used. | 252 | * crash regions to the actual memory used. |
248 | */ | 253 | */ |
254 | mem_limit = cpu_to_be_ulong(memory_limit); | ||
249 | of_update_property(node, &memory_limit_prop); | 255 | of_update_property(node, &memory_limit_prop); |
250 | } | 256 | } |
251 | 257 | ||
@@ -264,7 +270,7 @@ static int __init kexec_setup(void) | |||
264 | of_remove_property(node, prop); | 270 | of_remove_property(node, prop); |
265 | 271 | ||
266 | /* information needed by userspace when using default_machine_kexec */ | 272 | /* information needed by userspace when using default_machine_kexec */ |
267 | kernel_end = __pa(_end); | 273 | kernel_end = cpu_to_be_ulong(__pa(_end)); |
268 | of_add_property(node, &kernel_end_prop); | 274 | of_add_property(node, &kernel_end_prop); |
269 | 275 | ||
270 | export_crashk_values(node); | 276 | export_crashk_values(node); |
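The cpu_to_be_ulong() helper introduced above pastes BITS_PER_LONG onto cpu_to_be via __PASTE, so the exported device-tree values are stored big-endian regardless of word size; a small sketch of the expansion (illustrative function name):

/*
 * Expansion, assuming the standard __PASTE token-pasting macro:
 *   BITS_PER_LONG == 64:  cpu_to_be_ulong(x) -> cpu_to_be64(x)
 *   BITS_PER_LONG == 32:  cpu_to_be_ulong(x) -> cpu_to_be32(x)
 */
static unsigned long long example_export_value(unsigned long long v)
{
	return cpu_to_be_ulong(v);	/* big-endian on either word size */
}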
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index be4e6d648f60..59d229a2a3e0 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c | |||
@@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image) | |||
369 | 369 | ||
370 | /* Values we need to export to the second kernel via the device tree. */ | 370 | /* Values we need to export to the second kernel via the device tree. */ |
371 | static unsigned long htab_base; | 371 | static unsigned long htab_base; |
372 | static unsigned long htab_size; | ||
372 | 373 | ||
373 | static struct property htab_base_prop = { | 374 | static struct property htab_base_prop = { |
374 | .name = "linux,htab-base", | 375 | .name = "linux,htab-base", |
@@ -379,7 +380,7 @@ static struct property htab_base_prop = { | |||
379 | static struct property htab_size_prop = { | 380 | static struct property htab_size_prop = { |
380 | .name = "linux,htab-size", | 381 | .name = "linux,htab-size", |
381 | .length = sizeof(unsigned long), | 382 | .length = sizeof(unsigned long), |
382 | .value = &htab_size_bytes, | 383 | .value = &htab_size, |
383 | }; | 384 | }; |
384 | 385 | ||
385 | static int __init export_htab_values(void) | 386 | static int __init export_htab_values(void) |
@@ -403,8 +404,9 @@ static int __init export_htab_values(void) | |||
403 | if (prop) | 404 | if (prop) |
404 | of_remove_property(node, prop); | 405 | of_remove_property(node, prop); |
405 | 406 | ||
406 | htab_base = __pa(htab_address); | 407 | htab_base = cpu_to_be64(__pa(htab_address)); |
407 | of_add_property(node, &htab_base_prop); | 408 | of_add_property(node, &htab_base_prop); |
409 | htab_size = cpu_to_be64(htab_size_bytes); | ||
408 | of_add_property(node, &htab_size_prop); | 410 | of_add_property(node, &htab_size_prop); |
409 | 411 | ||
410 | of_node_put(node); | 412 | of_node_put(node); |
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 879f09620f83..7c6bb4b17b49 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S | |||
@@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq) | |||
57 | mtlr r0 | 57 | mtlr r0 |
58 | blr | 58 | blr |
59 | 59 | ||
60 | /* | ||
61 | * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); | ||
62 | */ | ||
60 | _GLOBAL(call_do_irq) | 63 | _GLOBAL(call_do_irq) |
61 | mflr r0 | 64 | mflr r0 |
62 | stw r0,4(r1) | 65 | stw r0,4(r1) |
63 | lwz r10,THREAD+KSP_LIMIT(r2) | 66 | lwz r10,THREAD+KSP_LIMIT(r2) |
64 | addi r11,r3,THREAD_INFO_GAP | 67 | addi r11,r4,THREAD_INFO_GAP |
65 | stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) | 68 | stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) |
66 | mr r1,r4 | 69 | mr r1,r4 |
67 | stw r10,8(r1) | 70 | stw r10,8(r1) |
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S index b47a0e1ab001..1482327cfeba 100644 --- a/arch/powerpc/kernel/reloc_64.S +++ b/arch/powerpc/kernel/reloc_64.S | |||
@@ -69,8 +69,8 @@ _GLOBAL(relocate) | |||
69 | * R_PPC64_RELATIVE ones. | 69 | * R_PPC64_RELATIVE ones. |
70 | */ | 70 | */ |
71 | mtctr r8 | 71 | mtctr r8 |
72 | 5: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */ | 72 | 5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */ |
73 | cmpwi r0,R_PPC64_RELATIVE | 73 | cmpdi r0,R_PPC64_RELATIVE |
74 | bne 6f | 74 | bne 6f |
75 | ld r6,0(r9) /* reloc->r_offset */ | 75 | ld r6,0(r9) /* reloc->r_offset */ |
76 | ld r0,16(r9) /* reloc->r_addend */ | 76 | ld r0,16(r9) /* reloc->r_addend */ |
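For readers of the relocation fix above: an Elf64_Rela entry places r_offset at byte 0, r_info at byte 8 and r_addend at byte 16, and ELF64_R_TYPE() is the low 32 bits of r_info. Loading only the word at offset 12 picks up those low bits on big-endian but not on little-endian; loading the whole r_info doubleword at offset 8 and comparing it as a 64-bit value works on both, because R_PPC64_RELATIVE entries carry no symbol index, so r_info equals the bare relocation type. A hedged userspace-style C sketch of the same check:

#include <elf.h>
#include <stdbool.h>

/* Illustration only: C equivalent of the fixed assembly test. */
static bool is_ppc64_relative(const Elf64_Rela *rela)
{
	/* ELF64_R_SYM(r_info) == 0 for these entries, so comparing the
	 * full 64-bit r_info against the type is safe on either endian. */
	return rela->r_info == R_PPC64_RELATIVE;
}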
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 2b0da27eaee4..04cc4fcca78b 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void) | |||
247 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 | 247 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 |
248 | * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ | 248 | * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ |
249 | for_each_possible_cpu(i) { | 249 | for_each_possible_cpu(i) { |
250 | #ifdef CONFIG_SMP | ||
250 | hw_cpu = get_hard_smp_processor_id(i); | 251 | hw_cpu = get_hard_smp_processor_id(i); |
252 | #else | ||
253 | hw_cpu = 0; | ||
254 | #endif | ||
255 | |||
251 | critirq_ctx[hw_cpu] = (struct thread_info *) | 256 | critirq_ctx[hw_cpu] = (struct thread_info *) |
252 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); | 257 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); |
253 | #ifdef CONFIG_BOOKE | 258 | #ifdef CONFIG_BOOKE |
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S index 79683d0393f5..6ac107ac402a 100644 --- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S +++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S | |||
@@ -6,7 +6,7 @@ | |||
6 | .globl vdso32_start, vdso32_end | 6 | .globl vdso32_start, vdso32_end |
7 | .balign PAGE_SIZE | 7 | .balign PAGE_SIZE |
8 | vdso32_start: | 8 | vdso32_start: |
9 | .incbin "arch/powerpc/kernel/vdso32/vdso32.so" | 9 | .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg" |
10 | .balign PAGE_SIZE | 10 | .balign PAGE_SIZE |
11 | vdso32_end: | 11 | vdso32_end: |
12 | 12 | ||
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S index 8df9e2463007..df60fca6a13d 100644 --- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S +++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S | |||
@@ -6,7 +6,7 @@ | |||
6 | .globl vdso64_start, vdso64_end | 6 | .globl vdso64_start, vdso64_end |
7 | .balign PAGE_SIZE | 7 | .balign PAGE_SIZE |
8 | vdso64_start: | 8 | vdso64_start: |
9 | .incbin "arch/powerpc/kernel/vdso64/vdso64.so" | 9 | .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg" |
10 | .balign PAGE_SIZE | 10 | .balign PAGE_SIZE |
11 | vdso64_end: | 11 | vdso64_end: |
12 | 12 | ||
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index de6881259aef..d766d6ee33fe 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | |||
207 | if (overlaps_kernel_text(vaddr, vaddr + step)) | 207 | if (overlaps_kernel_text(vaddr, vaddr + step)) |
208 | tprot &= ~HPTE_R_N; | 208 | tprot &= ~HPTE_R_N; |
209 | 209 | ||
210 | /* | ||
211 | * If relocatable, check if it overlaps interrupt vectors that | ||
212 | * are copied down to real 0. For relocatable kernel | ||
213 | * (e.g. kdump case) we copy interrupt vectors down to real | ||
214 | * address 0. Mark that region as executable. This is | ||
215 | * because on p8 system with relocation on exception feature | ||
216 | * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence | ||
217 | * in order to execute the interrupt handlers in virtual | ||
218 | * mode the vector region needs to be marked as executable. ||
219 | */ | ||
220 | if ((PHYSICAL_START > MEMORY_START) && | ||
221 | overlaps_interrupt_vector_text(vaddr, vaddr + step)) | ||
222 | tprot &= ~HPTE_R_N; | ||
223 | |||
210 | hash = hpt_hash(vpn, shift, ssize); | 224 | hash = hpt_hash(vpn, shift, ssize); |
211 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); | 225 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); |
212 | 226 | ||
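The new check only clears HPTE_R_N when the mapped range overlaps the interrupt-vector text copied down to real address 0. The overlap test itself is the usual half-open interval comparison; a tiny sketch with hypothetical bounds:

    /*
     * Sketch: half-open interval overlap test, the same predicate shape
     * as overlaps_interrupt_vector_text(). Bounds are hypothetical.
     */
    #include <stdbool.h>

    static bool ranges_overlap(unsigned long start, unsigned long end,
                               unsigned long rstart, unsigned long rend)
    {
        /* [start, end) overlaps [rstart, rend) iff each range starts
         * before the other one ends. */
        return start < rend && rstart < end;
    }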
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 65b7b65e8708..62bf5e8e78da 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c | |||
@@ -510,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, | |||
510 | } | 510 | } |
511 | 511 | ||
512 | unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, | 512 | unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, |
513 | pmd_t *pmdp, unsigned long clr) | 513 | pmd_t *pmdp, unsigned long clr, |
514 | unsigned long set) | ||
514 | { | 515 | { |
515 | 516 | ||
516 | unsigned long old, tmp; | 517 | unsigned long old, tmp; |
@@ -526,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, | |||
526 | andi. %1,%0,%6\n\ | 527 | andi. %1,%0,%6\n\ |
527 | bne- 1b \n\ | 528 | bne- 1b \n\ |
528 | andc %1,%0,%4 \n\ | 529 | andc %1,%0,%4 \n\ |
530 | or %1,%1,%7\n\ | ||
529 | stdcx. %1,0,%3 \n\ | 531 | stdcx. %1,0,%3 \n\ |
530 | bne- 1b" | 532 | bne- 1b" |
531 | : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) | 533 | : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) |
532 | : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY) | 534 | : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set) |
533 | : "cc" ); | 535 | : "cc" ); |
534 | #else | 536 | #else |
535 | old = pmd_val(*pmdp); | 537 | old = pmd_val(*pmdp); |
536 | *pmdp = __pmd(old & ~clr); | 538 | *pmdp = __pmd((old & ~clr) | set); |
537 | #endif | 539 | #endif |
538 | if (old & _PAGE_HASHPTE) | 540 | if (old & _PAGE_HASHPTE) |
539 | hpte_do_hugepage_flush(mm, addr, pmdp); | 541 | hpte_do_hugepage_flush(mm, addr, pmdp); |
@@ -708,7 +710,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, | |||
708 | void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, | 710 | void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, |
709 | pmd_t *pmdp) | 711 | pmd_t *pmdp) |
710 | { | 712 | { |
711 | pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT); | 713 | pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); |
712 | } | 714 | } |
713 | 715 | ||
714 | /* | 716 | /* |
@@ -835,7 +837,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm, | |||
835 | unsigned long old; | 837 | unsigned long old; |
836 | pgtable_t *pgtable_slot; | 838 | pgtable_t *pgtable_slot; |
837 | 839 | ||
838 | old = pmd_hugepage_update(mm, addr, pmdp, ~0UL); | 840 | old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); |
839 | old_pmd = __pmd(old); | 841 | old_pmd = __pmd(old); |
840 | /* | 842 | /* |
841 | * We have pmd == none and we are holding page_table_lock. | 843 | * We have pmd == none and we are holding page_table_lock. |
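pmd_hugepage_update() now takes both a clear mask and a set mask and applies (old & ~clr) | set inside one ldarx/stdcx. loop. The same shape in portable C11 atomics, as a sketch only — it omits the _PAGE_BUSY wait the kernel performs before the update:

    /*
     * Sketch: atomically update a word to (old & ~clr) | set, the same
     * shape as the ldarx/stdcx. loop above.
     */
    #include <stdatomic.h>
    #include <stdint.h>

    static uint64_t clear_and_set(_Atomic uint64_t *word,
                                  uint64_t clr, uint64_t set)
    {
        uint64_t old = atomic_load(word);

        /* Retry until no other CPU raced with our read-modify-write. */
        while (!atomic_compare_exchange_weak(word, &old,
                                             (old & ~clr) | set))
            ;
        return old;     /* callers want the previous value, as above */
    }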
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index a770df2dae70..6c0b1f5f8d2c 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c | |||
@@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, | |||
78 | pte = pte_offset_map_lock(mm, pmd, addr, &ptl); | 78 | pte = pte_offset_map_lock(mm, pmd, addr, &ptl); |
79 | arch_enter_lazy_mmu_mode(); | 79 | arch_enter_lazy_mmu_mode(); |
80 | for (; npages > 0; --npages) { | 80 | for (; npages > 0; --npages) { |
81 | pte_update(mm, addr, pte, 0, 0); | 81 | pte_update(mm, addr, pte, 0, 0, 0); |
82 | addr += PAGE_SIZE; | 82 | addr += PAGE_SIZE; |
83 | ++pte; | 83 | ++pte; |
84 | } | 84 | } |
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 29b89e863d7c..67cf22083f4c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu) | |||
1147 | mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); | 1147 | mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); |
1148 | 1148 | ||
1149 | mb(); | 1149 | mb(); |
1150 | if (cpuhw->bhrb_users) | ||
1151 | ppmu->config_bhrb(cpuhw->bhrb_filter); | ||
1152 | |||
1150 | write_mmcr0(cpuhw, mmcr0); | 1153 | write_mmcr0(cpuhw, mmcr0); |
1151 | 1154 | ||
1152 | /* | 1155 | /* |
@@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu) | |||
1158 | } | 1161 | } |
1159 | 1162 | ||
1160 | out: | 1163 | out: |
1161 | if (cpuhw->bhrb_users) | ||
1162 | ppmu->config_bhrb(cpuhw->bhrb_filter); | ||
1163 | 1164 | ||
1164 | local_irq_restore(flags); | 1165 | local_irq_restore(flags); |
1165 | } | 1166 | } |
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index a3f7abd2f13f..96cee20dcd34 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c | |||
@@ -25,6 +25,37 @@ | |||
25 | #define PM_BRU_FIN 0x10068 | 25 | #define PM_BRU_FIN 0x10068 |
26 | #define PM_BR_MPRED_CMPL 0x400f6 | 26 | #define PM_BR_MPRED_CMPL 0x400f6 |
27 | 27 | ||
28 | /* All L1 D cache load references counted at finish, gated by reject */ | ||
29 | #define PM_LD_REF_L1 0x100ee | ||
30 | /* Load Missed L1 */ | ||
31 | #define PM_LD_MISS_L1 0x3e054 | ||
32 | /* Store Missed L1 */ | ||
33 | #define PM_ST_MISS_L1 0x300f0 | ||
34 | /* L1 cache data prefetches */ | ||
35 | #define PM_L1_PREF 0x0d8b8 | ||
36 | /* Instruction fetches from L1 */ | ||
37 | #define PM_INST_FROM_L1 0x04080 | ||
38 | /* Demand iCache Miss */ | ||
39 | #define PM_L1_ICACHE_MISS 0x200fd | ||
40 | /* Instruction Demand sectors written into IL1 */ ||
41 | #define PM_L1_DEMAND_WRITE 0x0408c | ||
42 | /* Instruction prefetch written into IL1 */ | ||
43 | #define PM_IC_PREF_WRITE 0x0408e | ||
44 | /* The data cache was reloaded from local core's L3 due to a demand load */ | ||
45 | #define PM_DATA_FROM_L3 0x4c042 | ||
46 | /* Demand LD - L3 Miss (not L2 hit and not L3 hit) */ | ||
47 | #define PM_DATA_FROM_L3MISS 0x300fe | ||
48 | /* All successful D-side store dispatches for this thread */ | ||
49 | #define PM_L2_ST 0x17080 | ||
50 | /* All successful D-side store dispatches for this thread that were L2 Miss */ | ||
51 | #define PM_L2_ST_MISS 0x17082 | ||
52 | /* Total HW L3 prefetches(Load+store) */ | ||
53 | #define PM_L3_PREF_ALL 0x4e052 | ||
54 | /* Data PTEG reload */ | ||
55 | #define PM_DTLB_MISS 0x300fc | ||
56 | /* ITLB Reloaded */ | ||
57 | #define PM_ITLB_MISS 0x400fc | ||
58 | |||
28 | 59 | ||
29 | /* | 60 | /* |
30 | * Raw event encoding for POWER8: | 61 | * Raw event encoding for POWER8: |
@@ -557,6 +588,8 @@ static int power8_generic_events[] = { | |||
557 | [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, | 588 | [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, |
558 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN, | 589 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN, |
559 | [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, | 590 | [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, |
591 | [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, | ||
592 | [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1, | ||
560 | }; | 593 | }; |
561 | 594 | ||
562 | static u64 power8_bhrb_filter_map(u64 branch_sample_type) | 595 | static u64 power8_bhrb_filter_map(u64 branch_sample_type) |
@@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter) | |||
596 | mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); | 629 | mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); |
597 | } | 630 | } |
598 | 631 | ||
632 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
633 | |||
634 | /* | ||
635 | * Table of generalized cache-related events. | ||
636 | * 0 means not supported, -1 means nonsensical, other values | ||
637 | * are event codes. | ||
638 | */ | ||
639 | static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | ||
640 | [ C(L1D) ] = { | ||
641 | [ C(OP_READ) ] = { | ||
642 | [ C(RESULT_ACCESS) ] = PM_LD_REF_L1, | ||
643 | [ C(RESULT_MISS) ] = PM_LD_MISS_L1, | ||
644 | }, | ||
645 | [ C(OP_WRITE) ] = { | ||
646 | [ C(RESULT_ACCESS) ] = 0, | ||
647 | [ C(RESULT_MISS) ] = PM_ST_MISS_L1, | ||
648 | }, | ||
649 | [ C(OP_PREFETCH) ] = { | ||
650 | [ C(RESULT_ACCESS) ] = PM_L1_PREF, | ||
651 | [ C(RESULT_MISS) ] = 0, | ||
652 | }, | ||
653 | }, | ||
654 | [ C(L1I) ] = { | ||
655 | [ C(OP_READ) ] = { | ||
656 | [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1, | ||
657 | [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS, | ||
658 | }, | ||
659 | [ C(OP_WRITE) ] = { | ||
660 | [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE, | ||
661 | [ C(RESULT_MISS) ] = -1, | ||
662 | }, | ||
663 | [ C(OP_PREFETCH) ] = { | ||
664 | [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE, | ||
665 | [ C(RESULT_MISS) ] = 0, | ||
666 | }, | ||
667 | }, | ||
668 | [ C(LL) ] = { | ||
669 | [ C(OP_READ) ] = { | ||
670 | [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3, | ||
671 | [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS, | ||
672 | }, | ||
673 | [ C(OP_WRITE) ] = { | ||
674 | [ C(RESULT_ACCESS) ] = PM_L2_ST, | ||
675 | [ C(RESULT_MISS) ] = PM_L2_ST_MISS, | ||
676 | }, | ||
677 | [ C(OP_PREFETCH) ] = { | ||
678 | [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL, | ||
679 | [ C(RESULT_MISS) ] = 0, | ||
680 | }, | ||
681 | }, | ||
682 | [ C(DTLB) ] = { | ||
683 | [ C(OP_READ) ] = { | ||
684 | [ C(RESULT_ACCESS) ] = 0, | ||
685 | [ C(RESULT_MISS) ] = PM_DTLB_MISS, | ||
686 | }, | ||
687 | [ C(OP_WRITE) ] = { | ||
688 | [ C(RESULT_ACCESS) ] = -1, | ||
689 | [ C(RESULT_MISS) ] = -1, | ||
690 | }, | ||
691 | [ C(OP_PREFETCH) ] = { | ||
692 | [ C(RESULT_ACCESS) ] = -1, | ||
693 | [ C(RESULT_MISS) ] = -1, | ||
694 | }, | ||
695 | }, | ||
696 | [ C(ITLB) ] = { | ||
697 | [ C(OP_READ) ] = { | ||
698 | [ C(RESULT_ACCESS) ] = 0, | ||
699 | [ C(RESULT_MISS) ] = PM_ITLB_MISS, | ||
700 | }, | ||
701 | [ C(OP_WRITE) ] = { | ||
702 | [ C(RESULT_ACCESS) ] = -1, | ||
703 | [ C(RESULT_MISS) ] = -1, | ||
704 | }, | ||
705 | [ C(OP_PREFETCH) ] = { | ||
706 | [ C(RESULT_ACCESS) ] = -1, | ||
707 | [ C(RESULT_MISS) ] = -1, | ||
708 | }, | ||
709 | }, | ||
710 | [ C(BPU) ] = { | ||
711 | [ C(OP_READ) ] = { | ||
712 | [ C(RESULT_ACCESS) ] = PM_BRU_FIN, | ||
713 | [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL, | ||
714 | }, | ||
715 | [ C(OP_WRITE) ] = { | ||
716 | [ C(RESULT_ACCESS) ] = -1, | ||
717 | [ C(RESULT_MISS) ] = -1, | ||
718 | }, | ||
719 | [ C(OP_PREFETCH) ] = { | ||
720 | [ C(RESULT_ACCESS) ] = -1, | ||
721 | [ C(RESULT_MISS) ] = -1, | ||
722 | }, | ||
723 | }, | ||
724 | [ C(NODE) ] = { | ||
725 | [ C(OP_READ) ] = { | ||
726 | [ C(RESULT_ACCESS) ] = -1, | ||
727 | [ C(RESULT_MISS) ] = -1, | ||
728 | }, | ||
729 | [ C(OP_WRITE) ] = { | ||
730 | [ C(RESULT_ACCESS) ] = -1, | ||
731 | [ C(RESULT_MISS) ] = -1, | ||
732 | }, | ||
733 | [ C(OP_PREFETCH) ] = { | ||
734 | [ C(RESULT_ACCESS) ] = -1, | ||
735 | [ C(RESULT_MISS) ] = -1, | ||
736 | }, | ||
737 | }, | ||
738 | }; | ||
739 | |||
740 | #undef C | ||
741 | |||
599 | static struct power_pmu power8_pmu = { | 742 | static struct power_pmu power8_pmu = { |
600 | .name = "POWER8", | 743 | .name = "POWER8", |
601 | .n_counter = 6, | 744 | .n_counter = 6, |
@@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = { | |||
611 | .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, | 754 | .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, |
612 | .n_generic = ARRAY_SIZE(power8_generic_events), | 755 | .n_generic = ARRAY_SIZE(power8_generic_events), |
613 | .generic_events = power8_generic_events, | 756 | .generic_events = power8_generic_events, |
757 | .cache_events = &power8_cache_events, | ||
614 | .attr_groups = power8_pmu_attr_groups, | 758 | .attr_groups = power8_pmu_attr_groups, |
615 | .bhrb_nr = 32, | 759 | .bhrb_nr = 32, |
616 | }; | 760 | }; |
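The new power8_cache_events table backs the generic PERF_TYPE_HW_CACHE space, which userspace addresses as (cache id) | (op << 8) | (result << 16). A hedged Linux-only sketch that counts L1D read misses through that interface — on POWER8 this should resolve to PM_LD_MISS_L1 via the table above; error handling is kept minimal:

    /*
     * Sketch: count L1D read misses through the generic cache-event
     * encoding.
     */
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        struct perf_event_attr attr;
        uint64_t count = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HW_CACHE;
        attr.config = PERF_COUNT_HW_CACHE_L1D |
                      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... the workload being measured would run here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("L1D read misses: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
    }

On other hardware the same encoding maps to that CPU's own cache-events table, so the program is not POWER8-specific.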
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index e1e71618b70c..f51474336460 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c | |||
@@ -44,7 +44,8 @@ static int ioda_eeh_event(struct notifier_block *nb, | |||
44 | 44 | ||
45 | /* We simply send special EEH event */ | 45 | /* We simply send special EEH event */ |
46 | if ((changed_evts & OPAL_EVENT_PCI_ERROR) && | 46 | if ((changed_evts & OPAL_EVENT_PCI_ERROR) && |
47 | (events & OPAL_EVENT_PCI_ERROR)) | 47 | (events & OPAL_EVENT_PCI_ERROR) && |
48 | eeh_enabled()) | ||
48 | eeh_send_failure_event(NULL); | 49 | eeh_send_failure_event(NULL); |
49 | 50 | ||
50 | return 0; | 51 | return 0; |
@@ -489,8 +490,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose, | |||
489 | static int ioda_eeh_reset(struct eeh_pe *pe, int option) | 490 | static int ioda_eeh_reset(struct eeh_pe *pe, int option) |
490 | { | 491 | { |
491 | struct pci_controller *hose = pe->phb; | 492 | struct pci_controller *hose = pe->phb; |
492 | struct eeh_dev *edev; | 493 | struct pci_bus *bus; |
493 | struct pci_dev *dev; | ||
494 | int ret; | 494 | int ret; |
495 | 495 | ||
496 | /* | 496 | /* |
@@ -519,31 +519,11 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option) | |||
519 | if (pe->type & EEH_PE_PHB) { | 519 | if (pe->type & EEH_PE_PHB) { |
520 | ret = ioda_eeh_phb_reset(hose, option); | 520 | ret = ioda_eeh_phb_reset(hose, option); |
521 | } else { | 521 | } else { |
522 | if (pe->type & EEH_PE_DEVICE) { | 522 | bus = eeh_pe_bus_get(pe); |
523 | /* | 523 | if (pci_is_root_bus(bus)) |
524 | * If it's device PE, we didn't refer to the parent | ||
525 | * PCI bus yet. So we have to figure it out indirectly. | ||
526 | */ | ||
527 | edev = list_first_entry(&pe->edevs, | ||
528 | struct eeh_dev, list); | ||
529 | dev = eeh_dev_to_pci_dev(edev); | ||
530 | dev = dev->bus->self; | ||
531 | } else { | ||
532 | /* | ||
533 | * If it's bus PE, the parent PCI bus is already there | ||
534 | * and just pick it up. | ||
535 | */ | ||
536 | dev = pe->bus->self; | ||
537 | } | ||
538 | |||
539 | /* | ||
540 | * Do reset based on the fact that the direct upstream bridge | ||
541 | * is root bridge (port) or not. | ||
542 | */ | ||
543 | if (dev->bus->number == 0) | ||
544 | ret = ioda_eeh_root_reset(hose, option); | 524 | ret = ioda_eeh_root_reset(hose, option); |
545 | else | 525 | else |
546 | ret = ioda_eeh_bridge_reset(hose, dev, option); | 526 | ret = ioda_eeh_bridge_reset(hose, bus->self, option); |
547 | } | 527 | } |
548 | 528 | ||
549 | return ret; | 529 | return ret; |
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index a79fddc5e74e..a59788e83b8b 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c | |||
@@ -145,7 +145,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) | |||
145 | * Enable EEH explicitly so that we will do EEH check | 145 | * Enable EEH explicitly so that we will do EEH check |
146 | * while accessing I/O stuff | 146 | * while accessing I/O stuff |
147 | */ | 147 | */ |
148 | eeh_subsystem_enabled = 1; | 148 | eeh_set_enable(true); |
149 | 149 | ||
150 | /* Save memory bars */ | 150 | /* Save memory bars */ |
151 | eeh_save_bars(edev); | 151 | eeh_save_bars(edev); |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7d6dcc6d5fa9..3b2b4fb3585b 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/msi.h> | 23 | #include <linux/msi.h> |
24 | #include <linux/memblock.h> | ||
24 | 25 | ||
25 | #include <asm/sections.h> | 26 | #include <asm/sections.h> |
26 | #include <asm/io.h> | 27 | #include <asm/io.h> |
@@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev | |||
460 | return; | 461 | return; |
461 | 462 | ||
462 | pe = &phb->ioda.pe_array[pdn->pe_number]; | 463 | pe = &phb->ioda.pe_array[pdn->pe_number]; |
464 | WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); | ||
463 | set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); | 465 | set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); |
464 | } | 466 | } |
465 | 467 | ||
468 | static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, | ||
469 | struct pci_dev *pdev, u64 dma_mask) | ||
470 | { | ||
471 | struct pci_dn *pdn = pci_get_pdn(pdev); | ||
472 | struct pnv_ioda_pe *pe; | ||
473 | uint64_t top; | ||
474 | bool bypass = false; | ||
475 | |||
476 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) | ||
477 | return -ENODEV; ||
478 | |||
479 | pe = &phb->ioda.pe_array[pdn->pe_number]; | ||
480 | if (pe->tce_bypass_enabled) { | ||
481 | top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; | ||
482 | bypass = (dma_mask >= top); | ||
483 | } | ||
484 | |||
485 | if (bypass) { | ||
486 | dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n"); | ||
487 | set_dma_ops(&pdev->dev, &dma_direct_ops); | ||
488 | set_dma_offset(&pdev->dev, pe->tce_bypass_base); | ||
489 | } else { | ||
490 | dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n"); | ||
491 | set_dma_ops(&pdev->dev, &dma_iommu_ops); | ||
492 | set_iommu_table_base(&pdev->dev, &pe->tce32_table); | ||
493 | } | ||
494 | return 0; | ||
495 | } | ||
496 | |||
466 | static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) | 497 | static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) |
467 | { | 498 | { |
468 | struct pci_dev *dev; | 499 | struct pci_dev *dev; |
@@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, | |||
657 | __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); | 688 | __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); |
658 | } | 689 | } |
659 | 690 | ||
691 | static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable) | ||
692 | { | ||
693 | struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe, | ||
694 | tce32_table); | ||
695 | uint16_t window_id = (pe->pe_number << 1 ) + 1; | ||
696 | int64_t rc; | ||
697 | |||
698 | pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); | ||
699 | if (enable) { | ||
700 | phys_addr_t top = memblock_end_of_DRAM(); | ||
701 | |||
702 | top = roundup_pow_of_two(top); | ||
703 | rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, | ||
704 | pe->pe_number, | ||
705 | window_id, | ||
706 | pe->tce_bypass_base, | ||
707 | top); | ||
708 | } else { | ||
709 | rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, | ||
710 | pe->pe_number, | ||
711 | window_id, | ||
712 | pe->tce_bypass_base, | ||
713 | 0); | ||
714 | |||
715 | /* | ||
716 | * We might want to reset the DMA ops of all devices on | ||
717 | * this PE. However in theory, that shouldn't be necessary | ||
718 | * as this is used for VFIO/KVM pass-through and the device | ||
719 | * hasn't yet been returned to its kernel driver | ||
720 | */ | ||
721 | } | ||
722 | if (rc) | ||
723 | pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); | ||
724 | else | ||
725 | pe->tce_bypass_enabled = enable; | ||
726 | } | ||
727 | |||
728 | static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb, | ||
729 | struct pnv_ioda_pe *pe) | ||
730 | { | ||
731 | /* TVE #1 is selected by PCI address bit 59 */ | ||
732 | pe->tce_bypass_base = 1ull << 59; | ||
733 | |||
734 | /* Install set_bypass callback for VFIO */ | ||
735 | pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass; | ||
736 | |||
737 | /* Enable bypass by default */ | ||
738 | pnv_pci_ioda2_set_bypass(&pe->tce32_table, true); | ||
739 | } | ||
740 | |||
660 | static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, | 741 | static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, |
661 | struct pnv_ioda_pe *pe) | 742 | struct pnv_ioda_pe *pe) |
662 | { | 743 | { |
@@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, | |||
727 | else | 808 | else |
728 | pnv_ioda_setup_bus_dma(pe, pe->pbus); | 809 | pnv_ioda_setup_bus_dma(pe, pe->pbus); |
729 | 810 | ||
811 | /* Also create a bypass window */ | ||
812 | pnv_pci_ioda2_setup_bypass_pe(phb, pe); | ||
730 | return; | 813 | return; |
731 | fail: | 814 | fail: |
732 | if (pe->tce32_seg >= 0) | 815 | if (pe->tce32_seg >= 0) |
@@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
1286 | 1369 | ||
1287 | /* Setup TCEs */ | 1370 | /* Setup TCEs */ |
1288 | phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; | 1371 | phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; |
1372 | phb->dma_set_mask = pnv_pci_ioda_dma_set_mask; | ||
1289 | 1373 | ||
1290 | /* Setup shutdown function for kexec */ | 1374 | /* Setup shutdown function for kexec */ |
1291 | phb->shutdown = pnv_pci_ioda_shutdown; | 1375 | phb->shutdown = pnv_pci_ioda_shutdown; |
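pnv_pci_ioda_dma_set_mask() enables the 64-bit bypass window only when the device's DMA mask can reach the bypass base plus the whole of DRAM; otherwise the device stays on the 32-bit TCE table. A standalone sketch of just that decision, with hypothetical constants standing in for the platform values:

    /*
     * Sketch: the bypass-vs-iommu decision made by
     * pnv_pci_ioda_dma_set_mask(). Constants are stand-ins.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t tce_bypass_base = 1ULL << 59;   /* TVE#1: PCI address bit 59 */
        uint64_t end_of_dram     = 64ULL << 30;  /* pretend 64 GiB of RAM */
        uint64_t dma_mask        = ~0ULL;        /* e.g. DMA_BIT_MASK(64) */

        /* Bypass is only usable if the mask covers base + all of DRAM. */
        uint64_t top = tce_bypass_base + end_of_dram - 1;
        bool bypass  = dma_mask >= top;

        printf("using %s\n", bypass ? "64-bit direct DMA (iommu bypass)"
                                    : "32-bit DMA via the TCE table");
        return 0;
    }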
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index b555ebc57ef5..95633d79ef5d 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -634,6 +634,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) | |||
634 | pnv_pci_dma_fallback_setup(hose, pdev); | 634 | pnv_pci_dma_fallback_setup(hose, pdev); |
635 | } | 635 | } |
636 | 636 | ||
637 | int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) | ||
638 | { | ||
639 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); | ||
640 | struct pnv_phb *phb = hose->private_data; | ||
641 | |||
642 | if (phb && phb->dma_set_mask) | ||
643 | return phb->dma_set_mask(phb, pdev, dma_mask); | ||
644 | return __dma_set_mask(&pdev->dev, dma_mask); | ||
645 | } | ||
646 | |||
637 | void pnv_pci_shutdown(void) | 647 | void pnv_pci_shutdown(void) |
638 | { | 648 | { |
639 | struct pci_controller *hose; | 649 | struct pci_controller *hose; |
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 13f1942a9a5f..cde169442775 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -54,7 +54,9 @@ struct pnv_ioda_pe { | |||
54 | struct iommu_table tce32_table; | 54 | struct iommu_table tce32_table; |
55 | phys_addr_t tce_inval_reg_phys; | 55 | phys_addr_t tce_inval_reg_phys; |
56 | 56 | ||
57 | /* XXX TODO: Add support for additional 64-bit iommus */ | 57 | /* 64-bit TCE bypass region */ |
58 | bool tce_bypass_enabled; | ||
59 | uint64_t tce_bypass_base; | ||
58 | 60 | ||
59 | /* MSIs. MVE index is identical for for 32 and 64 bit MSI | 61 | /* MSIs. MVE index is identical for for 32 and 64 bit MSI |
60 | * and -1 if not supported. (It's actually identical to the | 62 | * and -1 if not supported. (It's actually identical to the |
@@ -113,6 +115,8 @@ struct pnv_phb { | |||
113 | unsigned int hwirq, unsigned int virq, | 115 | unsigned int hwirq, unsigned int virq, |
114 | unsigned int is_64, struct msi_msg *msg); | 116 | unsigned int is_64, struct msi_msg *msg); |
115 | void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); | 117 | void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); |
118 | int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev, | ||
119 | u64 dma_mask); | ||
116 | void (*fixup_phb)(struct pci_controller *hose); | 120 | void (*fixup_phb)(struct pci_controller *hose); |
117 | u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); | 121 | u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); |
118 | void (*shutdown)(struct pnv_phb *phb); | 122 | void (*shutdown)(struct pnv_phb *phb); |
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index de6819be1f95..0051e108ef0f 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h | |||
@@ -7,12 +7,20 @@ extern void pnv_smp_init(void); | |||
7 | static inline void pnv_smp_init(void) { } | 7 | static inline void pnv_smp_init(void) { } |
8 | #endif | 8 | #endif |
9 | 9 | ||
10 | struct pci_dev; | ||
11 | |||
10 | #ifdef CONFIG_PCI | 12 | #ifdef CONFIG_PCI |
11 | extern void pnv_pci_init(void); | 13 | extern void pnv_pci_init(void); |
12 | extern void pnv_pci_shutdown(void); | 14 | extern void pnv_pci_shutdown(void); |
15 | extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask); | ||
13 | #else | 16 | #else |
14 | static inline void pnv_pci_init(void) { } | 17 | static inline void pnv_pci_init(void) { } |
15 | static inline void pnv_pci_shutdown(void) { } | 18 | static inline void pnv_pci_shutdown(void) { } |
19 | |||
20 | static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) | ||
21 | { | ||
22 | return -ENODEV; | ||
23 | } | ||
16 | #endif | 24 | #endif |
17 | 25 | ||
18 | extern void pnv_lpc_init(void); | 26 | extern void pnv_lpc_init(void); |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 21166f65c97c..110f4fbd319f 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/bug.h> | 28 | #include <linux/bug.h> |
29 | #include <linux/cpuidle.h> | 29 | #include <linux/cpuidle.h> |
30 | #include <linux/pci.h> | ||
30 | 31 | ||
31 | #include <asm/machdep.h> | 32 | #include <asm/machdep.h> |
32 | #include <asm/firmware.h> | 33 | #include <asm/firmware.h> |
@@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex) | |||
141 | { | 142 | { |
142 | } | 143 | } |
143 | 144 | ||
145 | static int pnv_dma_set_mask(struct device *dev, u64 dma_mask) | ||
146 | { | ||
147 | if (dev_is_pci(dev)) | ||
148 | return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask); | ||
149 | return __dma_set_mask(dev, dma_mask); | ||
150 | } | ||
151 | |||
144 | static void pnv_shutdown(void) | 152 | static void pnv_shutdown(void) |
145 | { | 153 | { |
146 | /* Let the PCI code clear up IODA tables */ | 154 | /* Let the PCI code clear up IODA tables */ |
@@ -238,6 +246,7 @@ define_machine(powernv) { | |||
238 | .machine_shutdown = pnv_shutdown, | 246 | .machine_shutdown = pnv_shutdown, |
239 | .power_save = powernv_idle, | 247 | .power_save = powernv_idle, |
240 | .calibrate_decr = generic_calibrate_decr, | 248 | .calibrate_decr = generic_calibrate_decr, |
249 | .dma_set_mask = pnv_dma_set_mask, | ||
241 | #ifdef CONFIG_KEXEC | 250 | #ifdef CONFIG_KEXEC |
242 | .kexec_cpu_down = pnv_kexec_cpu_down, | 251 | .kexec_cpu_down = pnv_kexec_cpu_down, |
243 | #endif | 252 | #endif |
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 37300f6ee244..80b1d57c306a 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
@@ -20,6 +20,7 @@ config PPC_PSERIES | |||
20 | select PPC_DOORBELL | 20 | select PPC_DOORBELL |
21 | select HAVE_CONTEXT_TRACKING | 21 | select HAVE_CONTEXT_TRACKING |
22 | select HOTPLUG_CPU if SMP | 22 | select HOTPLUG_CPU if SMP |
23 | select ARCH_RANDOM | ||
23 | default y | 24 | default y |
24 | 25 | ||
25 | config PPC_SPLPAR | 26 | config PPC_SPLPAR |
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 9ef3cc8ebc11..8a8f0472d98f 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c | |||
@@ -265,7 +265,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) | |||
265 | enable = 1; | 265 | enable = 1; |
266 | 266 | ||
267 | if (enable) { | 267 | if (enable) { |
268 | eeh_subsystem_enabled = 1; | 268 | eeh_set_enable(true); |
269 | eeh_add_to_parent_pe(edev); | 269 | eeh_add_to_parent_pe(edev); |
270 | 270 | ||
271 | pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", | 271 | pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", |
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 70670a2d9cf2..c413ec158ff5 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c | |||
@@ -113,7 +113,8 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
113 | { | 113 | { |
114 | struct device_node *dn, *pdn; | 114 | struct device_node *dn, *pdn; |
115 | struct pci_bus *bus; | 115 | struct pci_bus *bus; |
116 | const __be32 *pcie_link_speed_stats; | 116 | u32 pcie_link_speed_stats[2]; |
117 | int rc; | ||
117 | 118 | ||
118 | bus = bridge->bus; | 119 | bus = bridge->bus; |
119 | 120 | ||
@@ -122,38 +123,45 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
122 | return 0; | 123 | return 0; |
123 | 124 | ||
124 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { | 125 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { |
125 | pcie_link_speed_stats = of_get_property(pdn, | 126 | rc = of_property_read_u32_array(pdn, |
126 | "ibm,pcie-link-speed-stats", NULL); | 127 | "ibm,pcie-link-speed-stats", |
127 | if (pcie_link_speed_stats) | 128 | &pcie_link_speed_stats[0], 2); |
129 | if (!rc) | ||
128 | break; | 130 | break; |
129 | } | 131 | } |
130 | 132 | ||
131 | of_node_put(pdn); | 133 | of_node_put(pdn); |
132 | 134 | ||
133 | if (!pcie_link_speed_stats) { | 135 | if (rc) { |
134 | pr_err("no ibm,pcie-link-speed-stats property\n"); | 136 | pr_err("no ibm,pcie-link-speed-stats property\n"); |
135 | return 0; | 137 | return 0; |
136 | } | 138 | } |
137 | 139 | ||
138 | switch (be32_to_cpup(pcie_link_speed_stats)) { | 140 | switch (pcie_link_speed_stats[0]) { |
139 | case 0x01: | 141 | case 0x01: |
140 | bus->max_bus_speed = PCIE_SPEED_2_5GT; | 142 | bus->max_bus_speed = PCIE_SPEED_2_5GT; |
141 | break; | 143 | break; |
142 | case 0x02: | 144 | case 0x02: |
143 | bus->max_bus_speed = PCIE_SPEED_5_0GT; | 145 | bus->max_bus_speed = PCIE_SPEED_5_0GT; |
144 | break; | 146 | break; |
147 | case 0x04: | ||
148 | bus->max_bus_speed = PCIE_SPEED_8_0GT; | ||
149 | break; | ||
145 | default: | 150 | default: |
146 | bus->max_bus_speed = PCI_SPEED_UNKNOWN; | 151 | bus->max_bus_speed = PCI_SPEED_UNKNOWN; |
147 | break; | 152 | break; |
148 | } | 153 | } |
149 | 154 | ||
150 | switch (be32_to_cpup(pcie_link_speed_stats)) { | 155 | switch (pcie_link_speed_stats[1]) { |
151 | case 0x01: | 156 | case 0x01: |
152 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; | 157 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; |
153 | break; | 158 | break; |
154 | case 0x02: | 159 | case 0x02: |
155 | bus->cur_bus_speed = PCIE_SPEED_5_0GT; | 160 | bus->cur_bus_speed = PCIE_SPEED_5_0GT; |
156 | break; | 161 | break; |
162 | case 0x04: | ||
163 | bus->cur_bus_speed = PCIE_SPEED_8_0GT; | ||
164 | break; | ||
157 | default: | 165 | default: |
158 | bus->cur_bus_speed = PCI_SPEED_UNKNOWN; | 166 | bus->cur_bus_speed = PCI_SPEED_UNKNOWN; |
159 | break; | 167 | break; |
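The pseries change reads ibm,pcie-link-speed-stats with of_property_read_u32_array(), which already returns CPU-endian values (hence the dropped be32_to_cpup() calls), and adds the 0x04 case for 8 GT/s. A small sketch of the same code-to-speed mapping; the descriptive strings are illustrative only:

    /*
     * Sketch: map the ibm,pcie-link-speed-stats codes handled above to
     * link speeds.
     */
    #include <stdio.h>

    static const char *pcie_speed_name(unsigned int code)
    {
        switch (code) {
        case 0x01: return "2.5 GT/s";
        case 0x02: return "5.0 GT/s";
        case 0x04: return "8.0 GT/s";
        default:   return "unknown";
        }
    }

    int main(void)
    {
        unsigned int stats[2] = { 0x04, 0x02 };  /* hypothetical property */

        printf("max: %s, current: %s\n",
               pcie_speed_name(stats[0]), pcie_speed_name(stats[1]));
        return 0;
    }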
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 8e639d7cbda7..972df0ffd4dc 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image) | |||
430 | { | 430 | { |
431 | long rc; | 431 | long rc; |
432 | 432 | ||
433 | if (firmware_has_feature(FW_FEATURE_SET_MODE) && | 433 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { |
434 | (image->type != KEXEC_TYPE_CRASH)) { | ||
435 | rc = pSeries_disable_reloc_on_exc(); | 434 | rc = pSeries_disable_reloc_on_exc(); |
436 | if (rc != H_SUCCESS) | 435 | if (rc != H_SUCCESS) |
437 | pr_warning("Warning: Failed to disable relocation on " | 436 | pr_warning("Warning: Failed to disable relocation on " |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 0e166ed4cd16..8209744b2829 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
886 | 886 | ||
887 | /* Default: read HW settings */ | 887 | /* Default: read HW settings */ |
888 | if (flow_type == IRQ_TYPE_DEFAULT) { | 888 | if (flow_type == IRQ_TYPE_DEFAULT) { |
889 | switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | | 889 | int vold_ps; |
890 | MPIC_INFO(VECPRI_SENSE_MASK))) { | 890 | |
891 | case MPIC_INFO(VECPRI_SENSE_EDGE) | | 891 | vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | |
892 | MPIC_INFO(VECPRI_POLARITY_POSITIVE): | 892 | MPIC_INFO(VECPRI_SENSE_MASK)); |
893 | flow_type = IRQ_TYPE_EDGE_RISING; | 893 | |
894 | break; | 894 | if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | |
895 | case MPIC_INFO(VECPRI_SENSE_EDGE) | | 895 | MPIC_INFO(VECPRI_POLARITY_POSITIVE))) |
896 | MPIC_INFO(VECPRI_POLARITY_NEGATIVE): | 896 | flow_type = IRQ_TYPE_EDGE_RISING; |
897 | flow_type = IRQ_TYPE_EDGE_FALLING; | 897 | else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | |
898 | break; | 898 | MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) |
899 | case MPIC_INFO(VECPRI_SENSE_LEVEL) | | 899 | flow_type = IRQ_TYPE_EDGE_FALLING; |
900 | MPIC_INFO(VECPRI_POLARITY_POSITIVE): | 900 | else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | |
901 | flow_type = IRQ_TYPE_LEVEL_HIGH; | 901 | MPIC_INFO(VECPRI_POLARITY_POSITIVE))) |
902 | break; | 902 | flow_type = IRQ_TYPE_LEVEL_HIGH; |
903 | case MPIC_INFO(VECPRI_SENSE_LEVEL) | | 903 | else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | |
904 | MPIC_INFO(VECPRI_POLARITY_NEGATIVE): | 904 | MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) |
905 | flow_type = IRQ_TYPE_LEVEL_LOW; | 905 | flow_type = IRQ_TYPE_LEVEL_LOW; |
906 | break; | 906 | else |
907 | } | 907 | WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold); |
908 | } | 908 | } |
909 | 909 | ||
910 | /* Apply to irq desc */ | 910 | /* Apply to irq desc */ |
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index a90731b3d44a..b07909850f77 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c | |||
@@ -309,16 +309,23 @@ static void get_output_lock(void) | |||
309 | 309 | ||
310 | if (xmon_speaker == me) | 310 | if (xmon_speaker == me) |
311 | return; | 311 | return; |
312 | |||
312 | for (;;) { | 313 | for (;;) { |
313 | if (xmon_speaker == 0) { | 314 | last_speaker = cmpxchg(&xmon_speaker, 0, me); |
314 | last_speaker = cmpxchg(&xmon_speaker, 0, me); | 315 | if (last_speaker == 0) |
315 | if (last_speaker == 0) | 316 | return; |
316 | return; | 317 | |
317 | } | 318 | /* |
318 | timeout = 10000000; | 319 | * Wait a full second for the lock, we might be on a slow |
320 | * console, but check every 100us. | ||
321 | */ | ||
322 | timeout = 10000; | ||
319 | while (xmon_speaker == last_speaker) { | 323 | while (xmon_speaker == last_speaker) { |
320 | if (--timeout > 0) | 324 | if (--timeout > 0) { |
325 | udelay(100); | ||
321 | continue; | 326 | continue; |
327 | } | ||
328 | |||
322 | /* hostile takeover */ | 329 | /* hostile takeover */ |
323 | prev = cmpxchg(&xmon_speaker, last_speaker, me); | 330 | prev = cmpxchg(&xmon_speaker, last_speaker, me); |
324 | if (prev == last_speaker) | 331 | if (prev == last_speaker) |
@@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi) | |||
397 | } | 404 | } |
398 | 405 | ||
399 | xmon_fault_jmp[cpu] = recurse_jmp; | 406 | xmon_fault_jmp[cpu] = recurse_jmp; |
400 | cpumask_set_cpu(cpu, &cpus_in_xmon); | ||
401 | 407 | ||
402 | bp = NULL; | 408 | bp = NULL; |
403 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) | 409 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) |
@@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi) | |||
419 | release_output_lock(); | 425 | release_output_lock(); |
420 | } | 426 | } |
421 | 427 | ||
428 | cpumask_set_cpu(cpu, &cpus_in_xmon); | ||
429 | |||
422 | waiting: | 430 | waiting: |
423 | secondary = 1; | 431 | secondary = 1; |
424 | while (secondary && !xmon_gate) { | 432 | while (secondary && !xmon_gate) { |
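get_output_lock() now always attempts the cmpxchg() and bounds the busy wait to roughly one second (10000 iterations of 100us) before the hostile takeover. A userspace sketch of the same pattern, with C11 atomics and usleep() standing in for the kernel's cmpxchg() and udelay(); the owner ids are stand-ins:

    /*
     * Sketch: bounded wait for an ownership word, then hostile takeover,
     * mirroring the xmon_speaker logic.
     */
    #include <stdatomic.h>
    #include <unistd.h>

    static _Atomic unsigned long speaker;   /* 0 means unowned */

    static void get_output_lock(unsigned long me)
    {
        unsigned long expected, last;

        for (;;) {
            expected = 0;
            if (atomic_compare_exchange_strong(&speaker, &expected, me))
                return;                     /* took the free lock */
            last = expected;                /* remember the current owner */

            /* Wait up to ~1s (10000 * 100us), then take over anyway. */
            for (int timeout = 10000; atomic_load(&speaker) == last; ) {
                if (--timeout > 0) {
                    usleep(100);
                    continue;
                }
                expected = last;
                if (atomic_compare_exchange_strong(&speaker, &expected, me))
                    return;                 /* hostile takeover */
                break;                      /* owner changed; start over */
            }
        }
    }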
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 3b978c472d08..3d6b9f81cc68 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -132,6 +132,8 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md); | |||
132 | extern void efi_sync_low_kernel_mappings(void); | 132 | extern void efi_sync_low_kernel_mappings(void); |
133 | extern void efi_setup_page_tables(void); | 133 | extern void efi_setup_page_tables(void); |
134 | extern void __init old_map_region(efi_memory_desc_t *md); | 134 | extern void __init old_map_region(efi_memory_desc_t *md); |
135 | extern void __init runtime_code_page_mkexec(void); | ||
136 | extern void __init efi_runtime_mkexec(void); | ||
135 | 137 | ||
136 | struct efi_setup_data { | 138 | struct efi_setup_data { |
137 | u64 fw_vendor; | 139 | u64 fw_vendor; |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 24b6fd10625a..8e28bf2fc3ef 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) | |||
284 | raw_local_save_flags(eflags); | 284 | raw_local_save_flags(eflags); |
285 | BUG_ON(eflags & X86_EFLAGS_AC); | 285 | BUG_ON(eflags & X86_EFLAGS_AC); |
286 | 286 | ||
287 | if (cpu_has(c, X86_FEATURE_SMAP)) | 287 | if (cpu_has(c, X86_FEATURE_SMAP)) { |
288 | #ifdef CONFIG_X86_SMAP | ||
288 | set_in_cr4(X86_CR4_SMAP); | 289 | set_in_cr4(X86_CR4_SMAP); |
290 | #else | ||
291 | clear_in_cr4(X86_CR4_SMAP); | ||
292 | #endif | ||
293 | } | ||
289 | } | 294 | } |
290 | 295 | ||
291 | /* | 296 | /* |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index d4bdd253fea7..e6253195a301 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end) | |||
77 | return addr >= start && addr < end; | 77 | return addr >= start && addr < end; |
78 | } | 78 | } |
79 | 79 | ||
80 | static int | 80 | static unsigned long text_ip_addr(unsigned long ip) |
81 | do_ftrace_mod_code(unsigned long ip, const void *new_code) | ||
82 | { | 81 | { |
83 | /* | 82 | /* |
84 | * On x86_64, kernel text mappings are mapped read-only with | 83 | * On x86_64, kernel text mappings are mapped read-only with |
@@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code) | |||
91 | if (within(ip, (unsigned long)_text, (unsigned long)_etext)) | 90 | if (within(ip, (unsigned long)_text, (unsigned long)_etext)) |
92 | ip = (unsigned long)__va(__pa_symbol(ip)); | 91 | ip = (unsigned long)__va(__pa_symbol(ip)); |
93 | 92 | ||
94 | return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE); | 93 | return ip; |
95 | } | 94 | } |
96 | 95 | ||
97 | static const unsigned char *ftrace_nop_replace(void) | 96 | static const unsigned char *ftrace_nop_replace(void) |
@@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code, | |||
123 | if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) | 122 | if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) |
124 | return -EINVAL; | 123 | return -EINVAL; |
125 | 124 | ||
125 | ip = text_ip_addr(ip); | ||
126 | |||
126 | /* replace the text with the new text */ | 127 | /* replace the text with the new text */ |
127 | if (do_ftrace_mod_code(ip, new_code)) | 128 | if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) |
128 | return -EPERM; | 129 | return -EPERM; |
129 | 130 | ||
130 | sync_core(); | 131 | sync_core(); |
@@ -221,37 +222,51 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, | |||
221 | return -EINVAL; | 222 | return -EINVAL; |
222 | } | 223 | } |
223 | 224 | ||
224 | int ftrace_update_ftrace_func(ftrace_func_t func) | 225 | static unsigned long ftrace_update_func; |
226 | |||
227 | static int update_ftrace_func(unsigned long ip, void *new) | ||
225 | { | 228 | { |
226 | unsigned long ip = (unsigned long)(&ftrace_call); | 229 | unsigned char old[MCOUNT_INSN_SIZE]; |
227 | unsigned char old[MCOUNT_INSN_SIZE], *new; | ||
228 | int ret; | 230 | int ret; |
229 | 231 | ||
230 | memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); | 232 | memcpy(old, (void *)ip, MCOUNT_INSN_SIZE); |
231 | new = ftrace_call_replace(ip, (unsigned long)func); | 233 | |
234 | ftrace_update_func = ip; | ||
235 | /* Make sure the breakpoints see the ftrace_update_func update */ | ||
236 | smp_wmb(); | ||
232 | 237 | ||
233 | /* See comment above by declaration of modifying_ftrace_code */ | 238 | /* See comment above by declaration of modifying_ftrace_code */ |
234 | atomic_inc(&modifying_ftrace_code); | 239 | atomic_inc(&modifying_ftrace_code); |
235 | 240 | ||
236 | ret = ftrace_modify_code(ip, old, new); | 241 | ret = ftrace_modify_code(ip, old, new); |
237 | 242 | ||
243 | atomic_dec(&modifying_ftrace_code); | ||
244 | |||
245 | return ret; | ||
246 | } | ||
247 | |||
248 | int ftrace_update_ftrace_func(ftrace_func_t func) | ||
249 | { | ||
250 | unsigned long ip = (unsigned long)(&ftrace_call); | ||
251 | unsigned char *new; | ||
252 | int ret; | ||
253 | |||
254 | new = ftrace_call_replace(ip, (unsigned long)func); | ||
255 | ret = update_ftrace_func(ip, new); | ||
256 | |||
238 | /* Also update the regs callback function */ | 257 | /* Also update the regs callback function */ |
239 | if (!ret) { | 258 | if (!ret) { |
240 | ip = (unsigned long)(&ftrace_regs_call); | 259 | ip = (unsigned long)(&ftrace_regs_call); |
241 | memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE); | ||
242 | new = ftrace_call_replace(ip, (unsigned long)func); | 260 | new = ftrace_call_replace(ip, (unsigned long)func); |
243 | ret = ftrace_modify_code(ip, old, new); | 261 | ret = update_ftrace_func(ip, new); |
244 | } | 262 | } |
245 | 263 | ||
246 | atomic_dec(&modifying_ftrace_code); | ||
247 | |||
248 | return ret; | 264 | return ret; |
249 | } | 265 | } |
250 | 266 | ||
251 | static int is_ftrace_caller(unsigned long ip) | 267 | static int is_ftrace_caller(unsigned long ip) |
252 | { | 268 | { |
253 | if (ip == (unsigned long)(&ftrace_call) || | 269 | if (ip == ftrace_update_func) |
254 | ip == (unsigned long)(&ftrace_regs_call)) | ||
255 | return 1; | 270 | return 1; |
256 | 271 | ||
257 | return 0; | 272 | return 0; |
@@ -677,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data) | |||
677 | #ifdef CONFIG_DYNAMIC_FTRACE | 692 | #ifdef CONFIG_DYNAMIC_FTRACE |
678 | extern void ftrace_graph_call(void); | 693 | extern void ftrace_graph_call(void); |
679 | 694 | ||
680 | static int ftrace_mod_jmp(unsigned long ip, | 695 | static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) |
681 | int old_offset, int new_offset) | ||
682 | { | 696 | { |
683 | unsigned char code[MCOUNT_INSN_SIZE]; | 697 | static union ftrace_code_union calc; |
684 | 698 | ||
685 | if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) | 699 | /* Jmp not a call (ignore the .e8) */ |
686 | return -EFAULT; | 700 | calc.e8 = 0xe9; |
701 | calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); | ||
687 | 702 | ||
688 | if (code[0] != 0xe9 || old_offset != *(int *)(&code[1])) | 703 | /* |
689 | return -EINVAL; | 704 | * ftrace external locks synchronize the access to the static variable. |
705 | */ | ||
706 | return calc.code; | ||
707 | } | ||
690 | 708 | ||
691 | *(int *)(&code[1]) = new_offset; | 709 | static int ftrace_mod_jmp(unsigned long ip, void *func) |
710 | { | ||
711 | unsigned char *new; | ||
692 | 712 | ||
693 | if (do_ftrace_mod_code(ip, &code)) | 713 | new = ftrace_jmp_replace(ip, (unsigned long)func); |
694 | return -EPERM; | ||
695 | 714 | ||
696 | return 0; | 715 | return update_ftrace_func(ip, new); |
697 | } | 716 | } |
698 | 717 | ||
699 | int ftrace_enable_ftrace_graph_caller(void) | 718 | int ftrace_enable_ftrace_graph_caller(void) |
700 | { | 719 | { |
701 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | 720 | unsigned long ip = (unsigned long)(&ftrace_graph_call); |
702 | int old_offset, new_offset; | ||
703 | 721 | ||
704 | old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); | 722 | return ftrace_mod_jmp(ip, &ftrace_graph_caller); |
705 | new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); | ||
706 | |||
707 | return ftrace_mod_jmp(ip, old_offset, new_offset); | ||
708 | } | 723 | } |
709 | 724 | ||
710 | int ftrace_disable_ftrace_graph_caller(void) | 725 | int ftrace_disable_ftrace_graph_caller(void) |
711 | { | 726 | { |
712 | unsigned long ip = (unsigned long)(&ftrace_graph_call); | 727 | unsigned long ip = (unsigned long)(&ftrace_graph_call); |
713 | int old_offset, new_offset; | ||
714 | |||
715 | old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); | ||
716 | new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); | ||
717 | 728 | ||
718 | return ftrace_mod_jmp(ip, old_offset, new_offset); | 729 | return ftrace_mod_jmp(ip, &ftrace_stub); |
719 | } | 730 | } |
720 | 731 | ||
721 | #endif /* !CONFIG_DYNAMIC_FTRACE */ | 732 | #endif /* !CONFIG_DYNAMIC_FTRACE */ |
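ftrace_jmp_replace() builds a 5-byte x86 near jump: opcode 0xe9 followed by a 32-bit displacement measured from the end of the instruction (ip + MCOUNT_INSN_SIZE) to the target. A sketch of just that encoding step; the hand-rolled buffer stands in for the kernel's ftrace_code_union:

    /*
     * Sketch: encode the 5-byte "jmp rel32" that ftrace_jmp_replace()
     * produces: 0xe9 plus (target - (ip + 5)). x86 is little-endian,
     * so memcpy() of the int32 gives the right byte order.
     */
    #include <stdint.h>
    #include <string.h>

    #define JMP_INSN_SIZE 5             /* matches MCOUNT_INSN_SIZE on x86 */

    static void encode_jmp(unsigned char buf[JMP_INSN_SIZE],
                           unsigned long ip, unsigned long target)
    {
        int32_t rel = (int32_t)(target - (ip + JMP_INSN_SIZE));

        buf[0] = 0xe9;                          /* jmp rel32 opcode */
        memcpy(&buf[1], &rel, sizeof(rel));     /* signed displacement */
    }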
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 19e5adb49a27..acb3b606613e 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -209,7 +209,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) | |||
209 | * dance when it's actually needed. | 209 | * dance when it's actually needed. |
210 | */ | 210 | */ |
211 | 211 | ||
212 | preempt_disable(); | 212 | preempt_disable_notrace(); |
213 | data = this_cpu_read(cyc2ns.head); | 213 | data = this_cpu_read(cyc2ns.head); |
214 | tail = this_cpu_read(cyc2ns.tail); | 214 | tail = this_cpu_read(cyc2ns.tail); |
215 | 215 | ||
@@ -229,7 +229,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) | |||
229 | if (!--data->__count) | 229 | if (!--data->__count) |
230 | this_cpu_write(cyc2ns.tail, data); | 230 | this_cpu_write(cyc2ns.tail, data); |
231 | } | 231 | } |
232 | preempt_enable(); | 232 | preempt_enable_notrace(); |
233 | 233 | ||
234 | return ns; | 234 | return ns; |
235 | } | 235 | } |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9d591c895803..6dea040cc3a1 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -1001,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address) | |||
1001 | 1001 | ||
1002 | static inline bool smap_violation(int error_code, struct pt_regs *regs) | 1002 | static inline bool smap_violation(int error_code, struct pt_regs *regs) |
1003 | { | 1003 | { |
1004 | if (!IS_ENABLED(CONFIG_X86_SMAP)) | ||
1005 | return false; | ||
1006 | |||
1007 | if (!static_cpu_has(X86_FEATURE_SMAP)) | ||
1008 | return false; | ||
1009 | |||
1004 | if (error_code & PF_USER) | 1010 | if (error_code & PF_USER) |
1005 | return false; | 1011 | return false; |
1006 | 1012 | ||
@@ -1087,11 +1093,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
1087 | if (unlikely(error_code & PF_RSVD)) | 1093 | if (unlikely(error_code & PF_RSVD)) |
1088 | pgtable_bad(regs, error_code, address); | 1094 | pgtable_bad(regs, error_code, address); |
1089 | 1095 | ||
1090 | if (static_cpu_has(X86_FEATURE_SMAP)) { | 1096 | if (unlikely(smap_violation(error_code, regs))) { |
1091 | if (unlikely(smap_violation(error_code, regs))) { | 1097 | bad_area_nosemaphore(regs, error_code, address); |
1092 | bad_area_nosemaphore(regs, error_code, address); | 1098 | return; |
1093 | return; | ||
1094 | } | ||
1095 | } | 1099 | } |
1096 | 1100 | ||
1097 | /* | 1101 | /* |
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index 4df9591eadad..f15103dff4b4 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c | |||
@@ -42,7 +42,7 @@ void __init efi_bgrt_init(void) | |||
42 | 42 | ||
43 | if (bgrt_tab->header.length < sizeof(*bgrt_tab)) | 43 | if (bgrt_tab->header.length < sizeof(*bgrt_tab)) |
44 | return; | 44 | return; |
45 | if (bgrt_tab->version != 1) | 45 | if (bgrt_tab->version != 1 || bgrt_tab->status != 1) |
46 | return; | 46 | return; |
47 | if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) | 47 | if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) |
48 | return; | 48 | return; |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index d62ec87a2b26..1a201ac7cef8 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -792,7 +792,7 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable) | |||
792 | set_memory_nx(addr, npages); | 792 | set_memory_nx(addr, npages); |
793 | } | 793 | } |
794 | 794 | ||
795 | static void __init runtime_code_page_mkexec(void) | 795 | void __init runtime_code_page_mkexec(void) |
796 | { | 796 | { |
797 | efi_memory_desc_t *md; | 797 | efi_memory_desc_t *md; |
798 | void *p; | 798 | void *p; |
@@ -1069,8 +1069,7 @@ void __init efi_enter_virtual_mode(void) | |||
1069 | efi.update_capsule = virt_efi_update_capsule; | 1069 | efi.update_capsule = virt_efi_update_capsule; |
1070 | efi.query_capsule_caps = virt_efi_query_capsule_caps; | 1070 | efi.query_capsule_caps = virt_efi_query_capsule_caps; |
1071 | 1071 | ||
1072 | if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) | 1072 | efi_runtime_mkexec(); |
1073 | runtime_code_page_mkexec(); | ||
1074 | 1073 | ||
1075 | kfree(new_memmap); | 1074 | kfree(new_memmap); |
1076 | 1075 | ||
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 249b183cf417..0b74cdf7f816 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c | |||
@@ -77,3 +77,9 @@ void efi_call_phys_epilog(void) | |||
77 | 77 | ||
78 | local_irq_restore(efi_rt_eflags); | 78 | local_irq_restore(efi_rt_eflags); |
79 | } | 79 | } |
80 | |||
81 | void __init efi_runtime_mkexec(void) | ||
82 | { | ||
83 | if (__supported_pte_mask & _PAGE_NX) | ||
84 | runtime_code_page_mkexec(); | ||
85 | } | ||
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 6284f158a47d..0c2a234fef1e 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -233,3 +233,12 @@ void __init parse_efi_setup(u64 phys_addr, u32 data_len) | |||
233 | { | 233 | { |
234 | efi_setup = phys_addr + sizeof(struct setup_data); | 234 | efi_setup = phys_addr + sizeof(struct setup_data); |
235 | } | 235 | } |
236 | |||
237 | void __init efi_runtime_mkexec(void) | ||
238 | { | ||
239 | if (!efi_enabled(EFI_OLD_MEMMAP)) | ||
240 | return; | ||
241 | |||
242 | if (__supported_pte_mask & _PAGE_NX) | ||
243 | runtime_code_page_mkexec(); | ||
244 | } | ||
diff --git a/block/blk-core.c b/block/blk-core.c index c00e0bdeab4a..853f92749202 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -693,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
693 | if (!uninit_q) | 693 | if (!uninit_q) |
694 | return NULL; | 694 | return NULL; |
695 | 695 | ||
696 | uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL); | ||
697 | if (!uninit_q->flush_rq) | ||
698 | goto out_cleanup_queue; | ||
699 | |||
696 | q = blk_init_allocated_queue(uninit_q, rfn, lock); | 700 | q = blk_init_allocated_queue(uninit_q, rfn, lock); |
697 | if (!q) | 701 | if (!q) |
698 | blk_cleanup_queue(uninit_q); | 702 | goto out_free_flush_rq; |
699 | |||
700 | return q; | 703 | return q; |
704 | |||
705 | out_free_flush_rq: | ||
706 | kfree(uninit_q->flush_rq); | ||
707 | out_cleanup_queue: | ||
708 | blk_cleanup_queue(uninit_q); | ||
709 | return NULL; | ||
701 | } | 710 | } |
702 | EXPORT_SYMBOL(blk_init_queue_node); | 711 | EXPORT_SYMBOL(blk_init_queue_node); |
703 | 712 | ||
@@ -1127,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw, | |||
1127 | struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) | 1136 | struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) |
1128 | { | 1137 | { |
1129 | if (q->mq_ops) | 1138 | if (q->mq_ops) |
1130 | return blk_mq_alloc_request(q, rw, gfp_mask, false); | 1139 | return blk_mq_alloc_request(q, rw, gfp_mask); |
1131 | else | 1140 | else |
1132 | return blk_old_get_request(q, rw, gfp_mask); | 1141 | return blk_old_get_request(q, rw, gfp_mask); |
1133 | } | 1142 | } |
@@ -1278,6 +1287,11 @@ void __blk_put_request(struct request_queue *q, struct request *req) | |||
1278 | if (unlikely(!q)) | 1287 | if (unlikely(!q)) |
1279 | return; | 1288 | return; |
1280 | 1289 | ||
1290 | if (q->mq_ops) { | ||
1291 | blk_mq_free_request(req); | ||
1292 | return; | ||
1293 | } | ||
1294 | |||
1281 | blk_pm_put_request(req); | 1295 | blk_pm_put_request(req); |
1282 | 1296 | ||
1283 | elv_completed_request(q, req); | 1297 | elv_completed_request(q, req); |
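The blk_init_queue_node() change is a standard reverse-order goto unwind: allocate flush_rq up front and, if any later step fails, free it and tear the queue down in the opposite order of setup. A sketch of the idiom with hypothetical helpers in place of the block-layer calls:

    /*
     * Sketch: reverse-order goto unwind; the helpers are hypothetical.
     */
    #include <stdlib.h>

    struct queue { void *flush_rq; };

    static int further_init(struct queue *q)
    {
        (void)q;
        return 0;       /* pretend success; a real step could fail */
    }

    static struct queue *init_queue(void)
    {
        struct queue *q = calloc(1, sizeof(*q));

        if (!q)
            return NULL;

        q->flush_rq = calloc(1, 64);            /* hypothetical request */
        if (!q->flush_rq)
            goto out_free_queue;

        if (further_init(q) != 0)
            goto out_free_flush_rq;

        return q;

    out_free_flush_rq:
        free(q->flush_rq);
    out_free_queue:
        free(q);
        return NULL;
    }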
diff --git a/block/blk-exec.c b/block/blk-exec.c index bbfc072a79c2..c68613bb4c79 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c | |||
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
65 | * be reused after dying flag is set | 65 | * be reused after dying flag is set |
66 | */ | 66 | */ |
67 | if (q->mq_ops) { | 67 | if (q->mq_ops) { |
68 | blk_mq_insert_request(q, rq, true); | 68 | blk_mq_insert_request(q, rq, at_head, true); |
69 | return; | 69 | return; |
70 | } | 70 | } |
71 | 71 | ||
diff --git a/block/blk-flush.c b/block/blk-flush.c index 9288aaf35c21..66e2b697f5db 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq) | |||
130 | blk_clear_rq_complete(rq); | 130 | blk_clear_rq_complete(rq); |
131 | } | 131 | } |
132 | 132 | ||
133 | static void mq_flush_data_run(struct work_struct *work) | 133 | static void mq_flush_run(struct work_struct *work) |
134 | { | 134 | { |
135 | struct request *rq; | 135 | struct request *rq; |
136 | 136 | ||
137 | rq = container_of(work, struct request, mq_flush_data); | 137 | rq = container_of(work, struct request, mq_flush_work); |
138 | 138 | ||
139 | memset(&rq->csd, 0, sizeof(rq->csd)); | 139 | memset(&rq->csd, 0, sizeof(rq->csd)); |
140 | blk_mq_run_request(rq, true, false); | 140 | blk_mq_run_request(rq, true, false); |
141 | } | 141 | } |
142 | 142 | ||
143 | static void blk_mq_flush_data_insert(struct request *rq) | 143 | static bool blk_flush_queue_rq(struct request *rq) |
144 | { | 144 | { |
145 | INIT_WORK(&rq->mq_flush_data, mq_flush_data_run); | 145 | if (rq->q->mq_ops) { |
146 | kblockd_schedule_work(rq->q, &rq->mq_flush_data); | 146 | INIT_WORK(&rq->mq_flush_work, mq_flush_run); |
147 | kblockd_schedule_work(rq->q, &rq->mq_flush_work); | ||
148 | return false; | ||
149 | } else { | ||
150 | list_add_tail(&rq->queuelist, &rq->q->queue_head); | ||
151 | return true; | ||
152 | } | ||
147 | } | 153 | } |
148 | 154 | ||
149 | /** | 155 | /** |
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq, | |||
187 | 193 | ||
188 | case REQ_FSEQ_DATA: | 194 | case REQ_FSEQ_DATA: |
189 | list_move_tail(&rq->flush.list, &q->flush_data_in_flight); | 195 | list_move_tail(&rq->flush.list, &q->flush_data_in_flight); |
190 | if (q->mq_ops) | 196 | queued = blk_flush_queue_rq(rq); |
191 | blk_mq_flush_data_insert(rq); | ||
192 | else { | ||
193 | list_add(&rq->queuelist, &q->queue_head); | ||
194 | queued = true; | ||
195 | } | ||
196 | break; | 197 | break; |
197 | 198 | ||
198 | case REQ_FSEQ_DONE: | 199 | case REQ_FSEQ_DONE: |
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq, | |||
216 | } | 217 | } |
217 | 218 | ||
218 | kicked = blk_kick_flush(q); | 219 | kicked = blk_kick_flush(q); |
219 | /* blk_mq_run_flush will run queue */ | ||
220 | if (q->mq_ops) | ||
221 | return queued; | ||
222 | return kicked | queued; | 220 | return kicked | queued; |
223 | } | 221 | } |
224 | 222 | ||
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error) | |||
230 | struct request *rq, *n; | 228 | struct request *rq, *n; |
231 | unsigned long flags = 0; | 229 | unsigned long flags = 0; |
232 | 230 | ||
233 | if (q->mq_ops) { | 231 | if (q->mq_ops) |
234 | blk_mq_free_request(flush_rq); | ||
235 | spin_lock_irqsave(&q->mq_flush_lock, flags); | 232 | spin_lock_irqsave(&q->mq_flush_lock, flags); |
236 | } | 233 | |
237 | running = &q->flush_queue[q->flush_running_idx]; | 234 | running = &q->flush_queue[q->flush_running_idx]; |
238 | BUG_ON(q->flush_pending_idx == q->flush_running_idx); | 235 | BUG_ON(q->flush_pending_idx == q->flush_running_idx); |
239 | 236 | ||
@@ -263,49 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error) | |||
263 | * kblockd. | 260 | * kblockd. |
264 | */ | 261 | */ |
265 | if (queued || q->flush_queue_delayed) { | 262 | if (queued || q->flush_queue_delayed) { |
266 | if (!q->mq_ops) | 263 | WARN_ON(q->mq_ops); |
267 | blk_run_queue_async(q); | 264 | blk_run_queue_async(q); |
268 | else | ||
269 | /* | ||
270 | * This can be optimized to only run queues with requests | ||
271 | * queued if necessary. | ||
272 | */ | ||
273 | blk_mq_run_queues(q, true); | ||
274 | } | 265 | } |
275 | q->flush_queue_delayed = 0; | 266 | q->flush_queue_delayed = 0; |
276 | if (q->mq_ops) | 267 | if (q->mq_ops) |
277 | spin_unlock_irqrestore(&q->mq_flush_lock, flags); | 268 | spin_unlock_irqrestore(&q->mq_flush_lock, flags); |
278 | } | 269 | } |
279 | 270 | ||
280 | static void mq_flush_work(struct work_struct *work) | ||
281 | { | ||
282 | struct request_queue *q; | ||
283 | struct request *rq; | ||
284 | |||
285 | q = container_of(work, struct request_queue, mq_flush_work); | ||
286 | |||
287 | /* We don't need set REQ_FLUSH_SEQ, it's for consistency */ | ||
288 | rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ, | ||
289 | __GFP_WAIT|GFP_ATOMIC, true); | ||
290 | rq->cmd_type = REQ_TYPE_FS; | ||
291 | rq->end_io = flush_end_io; | ||
292 | |||
293 | blk_mq_run_request(rq, true, false); | ||
294 | } | ||
295 | |||
296 | /* | ||
297 | * We can't directly use q->flush_rq, because it doesn't have tag and is not in | ||
298 | * hctx->rqs[]. so we must allocate a new request, since we can't sleep here, | ||
299 | * so offload the work to workqueue. | ||
300 | * | ||
301 | * Note: we assume a flush request finished in any hardware queue will flush | ||
302 | * the whole disk cache. | ||
303 | */ | ||
304 | static void mq_run_flush(struct request_queue *q) | ||
305 | { | ||
306 | kblockd_schedule_work(q, &q->mq_flush_work); | ||
307 | } | ||
308 | |||
309 | /** | 271 | /** |
310 | * blk_kick_flush - consider issuing flush request | 272 | * blk_kick_flush - consider issuing flush request |
311 | * @q: request_queue being kicked | 273 | * @q: request_queue being kicked |
@@ -340,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q) | |||
340 | * different from running_idx, which means flush is in flight. | 302 | * different from running_idx, which means flush is in flight. |
341 | */ | 303 | */ |
342 | q->flush_pending_idx ^= 1; | 304 | q->flush_pending_idx ^= 1; |
305 | |||
343 | if (q->mq_ops) { | 306 | if (q->mq_ops) { |
344 | mq_run_flush(q); | 307 | struct blk_mq_ctx *ctx = first_rq->mq_ctx; |
345 | return true; | 308 | struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); |
309 | |||
310 | blk_mq_rq_init(hctx, q->flush_rq); | ||
311 | q->flush_rq->mq_ctx = ctx; | ||
312 | |||
313 | /* | ||
314 | * Reuse the tag value from the first waiting request; | ||
315 | * with blk-mq the tag is generated during request | ||
316 | * allocation and drivers can rely on it being inside | ||
317 | * the range they asked for. | ||
318 | */ | ||
319 | q->flush_rq->tag = first_rq->tag; | ||
320 | } else { | ||
321 | blk_rq_init(q, q->flush_rq); | ||
346 | } | 322 | } |
347 | 323 | ||
348 | blk_rq_init(q, &q->flush_rq); | 324 | q->flush_rq->cmd_type = REQ_TYPE_FS; |
349 | q->flush_rq.cmd_type = REQ_TYPE_FS; | 325 | q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; |
350 | q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; | 326 | q->flush_rq->rq_disk = first_rq->rq_disk; |
351 | q->flush_rq.rq_disk = first_rq->rq_disk; | 327 | q->flush_rq->end_io = flush_end_io; |
352 | q->flush_rq.end_io = flush_end_io; | ||
353 | 328 | ||
354 | list_add_tail(&q->flush_rq.queuelist, &q->queue_head); | 329 | return blk_flush_queue_rq(q->flush_rq); |
355 | return true; | ||
356 | } | 330 | } |
357 | 331 | ||
358 | static void flush_data_end_io(struct request *rq, int error) | 332 | static void flush_data_end_io(struct request *rq, int error) |
@@ -558,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush); | |||
558 | void blk_mq_init_flush(struct request_queue *q) | 532 | void blk_mq_init_flush(struct request_queue *q) |
559 | { | 533 | { |
560 | spin_lock_init(&q->mq_flush_lock); | 534 | spin_lock_init(&q->mq_flush_lock); |
561 | INIT_WORK(&q->mq_flush_work, mq_flush_work); | ||
562 | } | 535 | } |
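Taken together, the blk-flush.c changes drop the old workqueue-allocated multiqueue flush request in favour of the per-queue q->flush_rq set up in blk_kick_flush() (reusing the first waiting request's tag), and fold both insertion paths into blk_flush_queue_rq(), whose return value tells the caller whether it still has to run the queue itself. A toy user-space model of that contract follows, with illustrative callbacks standing in for the real block-layer calls.

#include <stdbool.h>
#include <stdio.h>

/* Toy contract of blk_flush_queue_rq(): queue a flush request and report
 * whether the caller still has to run the queue.  In the mq case the
 * scheduled work runs the request, so the answer is false; in the legacy
 * case the request only sits on queue_head, so the answer is true.
 */
static bool flush_queue_rq(bool mq_ops, void (*schedule_flush_work)(void),
			   void (*add_to_queue_head)(void))
{
	if (mq_ops) {
		schedule_flush_work();
		return false;
	}
	add_to_queue_head();
	return true;
}

static void fake_work(void)  { puts("mq: flush work scheduled"); }
static void fake_queue(void) { puts("legacy: flush request added to queue_head"); }

int main(void)
{
	printf("caller must run queue: %d\n",
	       flush_queue_rq(true, fake_work, fake_queue));
	printf("caller must run queue: %d\n",
	       flush_queue_rq(false, fake_work, fake_queue));
	return 0;
}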
diff --git a/block/blk-lib.c b/block/blk-lib.c index 2da76c999ef3..97a733cf3d5f 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c | |||
@@ -119,6 +119,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | |||
119 | 119 | ||
120 | atomic_inc(&bb.done); | 120 | atomic_inc(&bb.done); |
121 | submit_bio(type, bio); | 121 | submit_bio(type, bio); |
122 | |||
123 | /* | ||
124 | * We can loop for a long time in here, if someone does | ||
125 | * full device discards (like mkfs). Be nice and allow | ||
126 | * us to schedule out to avoid softlocking if preempt | ||
127 | * is disabled. | ||
128 | */ | ||
129 | cond_resched(); | ||
122 | } | 130 | } |
123 | blk_finish_plug(&plug); | 131 | blk_finish_plug(&plug); |
124 | 132 | ||
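The blk-lib.c hunk adds a cond_resched() inside the discard submission loop so that very large discards (a full-device mkfs, for example) cannot soft-lock a CPU when preemption is disabled. A rough user-space analogue of that courtesy yield; sched_yield() here merely stands in for the in-kernel cond_resched().

#include <sched.h>
#include <stdio.h>

int main(void)
{
	for (long i = 0; i < 1000000; i++) {
		/* submit_bio(type, bio) would go here in the kernel loop */
		if ((i & 0xffff) == 0)
			sched_yield();	/* stand-in for cond_resched() */
	}
	puts("all discard bios submitted without hogging the CPU");
	return 0;
}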
diff --git a/block/blk-merge.c b/block/blk-merge.c index 8f8adaa95466..6c583f9c5b65 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, | |||
21 | if (!bio) | 21 | if (!bio) |
22 | return 0; | 22 | return 0; |
23 | 23 | ||
24 | /* | ||
25 | * This should probably be returning 0, but blk_add_request_payload() | ||
26 | * may attach a payload that has to count as a segment (Christoph!!!!) | ||
27 | */ | ||
28 | if (bio->bi_rw & REQ_DISCARD) | ||
29 | return 1; | ||
30 | |||
31 | if (bio->bi_rw & REQ_WRITE_SAME) | ||
32 | return 1; | ||
33 | |||
24 | fbio = bio; | 34 | fbio = bio; |
25 | cluster = blk_queue_cluster(q); | 35 | cluster = blk_queue_cluster(q); |
26 | seg_size = 0; | 36 | seg_size = 0; |
@@ -161,30 +171,60 @@ new_segment: | |||
161 | *bvprv = *bvec; | 171 | *bvprv = *bvec; |
162 | } | 172 | } |
163 | 173 | ||
164 | /* | 174 | static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, |
165 | * map a request to scatterlist, return number of sg entries setup. Caller | 175 | struct scatterlist *sglist, |
166 | * must make sure sg can hold rq->nr_phys_segments entries | 176 | struct scatterlist **sg) |
167 | */ | ||
168 | int blk_rq_map_sg(struct request_queue *q, struct request *rq, | ||
169 | struct scatterlist *sglist) | ||
170 | { | 177 | { |
171 | struct bio_vec bvec, bvprv = { NULL }; | 178 | struct bio_vec bvec, bvprv = { NULL }; |
172 | struct req_iterator iter; | 179 | struct bvec_iter iter; |
173 | struct scatterlist *sg; | ||
174 | int nsegs, cluster; | 180 | int nsegs, cluster; |
175 | 181 | ||
176 | nsegs = 0; | 182 | nsegs = 0; |
177 | cluster = blk_queue_cluster(q); | 183 | cluster = blk_queue_cluster(q); |
178 | 184 | ||
179 | /* | 185 | if (bio->bi_rw & REQ_DISCARD) { |
180 | * for each bio in rq | 186 | /* |
181 | */ | 187 | * This is a hack - drivers should be neither modifying the |
182 | sg = NULL; | 188 | * biovec, nor relying on bi_vcnt - but because of |
183 | rq_for_each_segment(bvec, rq, iter) { | 189 | * blk_add_request_payload(), a discard bio may or may not have |
184 | __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, | 190 | * a payload we need to set up here (thank you Christoph) and |
185 | &nsegs, &cluster); | 191 | * bi_vcnt is really the only way of telling if we need to. |
186 | } /* segments in rq */ | 192 | */ |
193 | |||
194 | if (bio->bi_vcnt) | ||
195 | goto single_segment; | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | if (bio->bi_rw & REQ_WRITE_SAME) { | ||
201 | single_segment: | ||
202 | *sg = sglist; | ||
203 | bvec = bio_iovec(bio); | ||
204 | sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); | ||
205 | return 1; | ||
206 | } | ||
207 | |||
208 | for_each_bio(bio) | ||
209 | bio_for_each_segment(bvec, bio, iter) | ||
210 | __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg, | ||
211 | &nsegs, &cluster); | ||
187 | 212 | ||
213 | return nsegs; | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * map a request to scatterlist, return number of sg entries setup. Caller | ||
218 | * must make sure sg can hold rq->nr_phys_segments entries | ||
219 | */ | ||
220 | int blk_rq_map_sg(struct request_queue *q, struct request *rq, | ||
221 | struct scatterlist *sglist) | ||
222 | { | ||
223 | struct scatterlist *sg = NULL; | ||
224 | int nsegs = 0; | ||
225 | |||
226 | if (rq->bio) | ||
227 | nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); | ||
188 | 228 | ||
189 | if (unlikely(rq->cmd_flags & REQ_COPY_USER) && | 229 | if (unlikely(rq->cmd_flags & REQ_COPY_USER) && |
190 | (blk_rq_bytes(rq) & q->dma_pad_mask)) { | 230 | (blk_rq_bytes(rq) & q->dma_pad_mask)) { |
@@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg); | |||
230 | int blk_bio_map_sg(struct request_queue *q, struct bio *bio, | 270 | int blk_bio_map_sg(struct request_queue *q, struct bio *bio, |
231 | struct scatterlist *sglist) | 271 | struct scatterlist *sglist) |
232 | { | 272 | { |
233 | struct bio_vec bvec, bvprv = { NULL }; | 273 | struct scatterlist *sg = NULL; |
234 | struct scatterlist *sg; | 274 | int nsegs; |
235 | int nsegs, cluster; | 275 | struct bio *next = bio->bi_next; |
236 | struct bvec_iter iter; | 276 | bio->bi_next = NULL; |
237 | |||
238 | nsegs = 0; | ||
239 | cluster = blk_queue_cluster(q); | ||
240 | |||
241 | sg = NULL; | ||
242 | bio_for_each_segment(bvec, bio, iter) { | ||
243 | __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, | ||
244 | &nsegs, &cluster); | ||
245 | } /* segments in bio */ | ||
246 | 277 | ||
278 | nsegs = __blk_bios_map_sg(q, bio, sglist, &sg); | ||
279 | bio->bi_next = next; | ||
247 | if (sg) | 280 | if (sg) |
248 | sg_mark_end(sg); | 281 | sg_mark_end(sg); |
249 | 282 | ||
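In blk-merge.c the per-segment walk is factored into __blk_bios_map_sg(), which handles whole bio chains plus the discard and write-same special cases, and blk_bio_map_sg() now temporarily clears bi_next so the shared helper maps only the single bio it was handed. Below is a small stand-alone model of that detach-and-restore trick, using a deliberately simplified bio struct rather than the real one.

#include <stddef.h>
#include <stdio.h>

struct bio {
	int len;		/* stand-in for the bio's segment payload */
	struct bio *bi_next;
};

/* Walks a whole chain, like __blk_bios_map_sg() walking rq->bio onwards. */
static int map_chain(struct bio *bio)
{
	int nsegs = 0;

	for (; bio; bio = bio->bi_next)
		nsegs += bio->len;
	return nsegs;
}

/* Single-bio wrapper, mirroring blk_bio_map_sg(): detach the chain so the
 * shared helper sees exactly one bio, then restore bi_next afterwards.
 */
static int map_one(struct bio *bio)
{
	struct bio *next = bio->bi_next;
	int nsegs;

	bio->bi_next = NULL;
	nsegs = map_chain(bio);
	bio->bi_next = next;
	return nsegs;
}

int main(void)
{
	struct bio b2 = { 3, NULL };
	struct bio b1 = { 2, &b2 };

	printf("chain=%d single=%d\n", map_chain(&b1), map_one(&b1));
	return 0;
}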
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 5d70edc9855f..83ae96c51a27 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c | |||
@@ -184,7 +184,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags) | |||
184 | ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) | 184 | ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) |
185 | { | 185 | { |
186 | char *orig_page = page; | 186 | char *orig_page = page; |
187 | int cpu; | 187 | unsigned int cpu; |
188 | 188 | ||
189 | if (!tags) | 189 | if (!tags) |
190 | return 0; | 190 | return 0; |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 57039fcd9c93..1fa9dd153fde 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -226,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, | |||
226 | return rq; | 226 | return rq; |
227 | } | 227 | } |
228 | 228 | ||
229 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, | 229 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) |
230 | gfp_t gfp, bool reserved) | ||
231 | { | 230 | { |
232 | struct request *rq; | 231 | struct request *rq; |
233 | 232 | ||
234 | if (blk_mq_queue_enter(q)) | 233 | if (blk_mq_queue_enter(q)) |
235 | return NULL; | 234 | return NULL; |
236 | 235 | ||
237 | rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved); | 236 | rq = blk_mq_alloc_request_pinned(q, rw, gfp, false); |
238 | if (rq) | 237 | if (rq) |
239 | blk_mq_put_ctx(rq->mq_ctx); | 238 | blk_mq_put_ctx(rq->mq_ctx); |
240 | return rq; | 239 | return rq; |
@@ -258,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request); | |||
258 | /* | 257 | /* |
259 | * Re-init and set pdu, if we have it | 258 | * Re-init and set pdu, if we have it |
260 | */ | 259 | */ |
261 | static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq) | 260 | void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq) |
262 | { | 261 | { |
263 | blk_rq_init(hctx->queue, rq); | 262 | blk_rq_init(hctx->queue, rq); |
264 | 263 | ||
@@ -305,7 +304,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error) | |||
305 | bio_endio(bio, error); | 304 | bio_endio(bio, error); |
306 | } | 305 | } |
307 | 306 | ||
308 | void blk_mq_complete_request(struct request *rq, int error) | 307 | void blk_mq_end_io(struct request *rq, int error) |
309 | { | 308 | { |
310 | struct bio *bio = rq->bio; | 309 | struct bio *bio = rq->bio; |
311 | unsigned int bytes = 0; | 310 | unsigned int bytes = 0; |
@@ -330,48 +329,55 @@ void blk_mq_complete_request(struct request *rq, int error) | |||
330 | else | 329 | else |
331 | blk_mq_free_request(rq); | 330 | blk_mq_free_request(rq); |
332 | } | 331 | } |
332 | EXPORT_SYMBOL(blk_mq_end_io); | ||
333 | 333 | ||
334 | void __blk_mq_end_io(struct request *rq, int error) | 334 | static void __blk_mq_complete_request_remote(void *data) |
335 | { | ||
336 | if (!blk_mark_rq_complete(rq)) | ||
337 | blk_mq_complete_request(rq, error); | ||
338 | } | ||
339 | |||
340 | static void blk_mq_end_io_remote(void *data) | ||
341 | { | 335 | { |
342 | struct request *rq = data; | 336 | struct request *rq = data; |
343 | 337 | ||
344 | __blk_mq_end_io(rq, rq->errors); | 338 | rq->q->softirq_done_fn(rq); |
345 | } | 339 | } |
346 | 340 | ||
347 | /* | 341 | void __blk_mq_complete_request(struct request *rq) |
348 | * End IO on this request on a multiqueue enabled driver. We'll either do | ||
349 | * it directly inline, or punt to a local IPI handler on the matching | ||
350 | * remote CPU. | ||
351 | */ | ||
352 | void blk_mq_end_io(struct request *rq, int error) | ||
353 | { | 342 | { |
354 | struct blk_mq_ctx *ctx = rq->mq_ctx; | 343 | struct blk_mq_ctx *ctx = rq->mq_ctx; |
355 | int cpu; | 344 | int cpu; |
356 | 345 | ||
357 | if (!ctx->ipi_redirect) | 346 | if (!ctx->ipi_redirect) { |
358 | return __blk_mq_end_io(rq, error); | 347 | rq->q->softirq_done_fn(rq); |
348 | return; | ||
349 | } | ||
359 | 350 | ||
360 | cpu = get_cpu(); | 351 | cpu = get_cpu(); |
361 | if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { | 352 | if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { |
362 | rq->errors = error; | 353 | rq->csd.func = __blk_mq_complete_request_remote; |
363 | rq->csd.func = blk_mq_end_io_remote; | ||
364 | rq->csd.info = rq; | 354 | rq->csd.info = rq; |
365 | rq->csd.flags = 0; | 355 | rq->csd.flags = 0; |
366 | __smp_call_function_single(ctx->cpu, &rq->csd, 0); | 356 | __smp_call_function_single(ctx->cpu, &rq->csd, 0); |
367 | } else { | 357 | } else { |
368 | __blk_mq_end_io(rq, error); | 358 | rq->q->softirq_done_fn(rq); |
369 | } | 359 | } |
370 | put_cpu(); | 360 | put_cpu(); |
371 | } | 361 | } |
372 | EXPORT_SYMBOL(blk_mq_end_io); | ||
373 | 362 | ||
374 | static void blk_mq_start_request(struct request *rq) | 363 | /** |
364 | * blk_mq_complete_request - end I/O on a request | ||
365 | * @rq: the request being processed | ||
366 | * | ||
367 | * Description: | ||
368 | * Ends all I/O on a request. It does not handle partial completions. | ||
369 | * The actual completion happens out-of-order, through an IPI handler. | ||
370 | **/ | ||
371 | void blk_mq_complete_request(struct request *rq) | ||
372 | { | ||
373 | if (unlikely(blk_should_fake_timeout(rq->q))) | ||
374 | return; | ||
375 | if (!blk_mark_rq_complete(rq)) | ||
376 | __blk_mq_complete_request(rq); | ||
377 | } | ||
378 | EXPORT_SYMBOL(blk_mq_complete_request); | ||
379 | |||
380 | static void blk_mq_start_request(struct request *rq, bool last) | ||
375 | { | 381 | { |
376 | struct request_queue *q = rq->q; | 382 | struct request_queue *q = rq->q; |
377 | 383 | ||
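The hunk above splits completion handling: blk_mq_end_io() keeps the final accounting, __blk_mq_complete_request() invokes the driver's softirq_done_fn either directly or via an IPI back to the submitting CPU, and blk_mq_complete_request() becomes the driver-facing entry that marks the request complete exactly once. The decision logic, modelled in plain user-space C (the IPI is only simulated with a message, and the struct is a stripped-down stand-in):

#include <stdbool.h>
#include <stdio.h>

struct request {
	int submit_cpu;
	void (*softirq_done_fn)(struct request *rq);
};

/* Run the driver's completion handler on the current CPU unless the request
 * was submitted elsewhere and redirection is enabled, in which case the
 * kernel would send an IPI to the submitting CPU instead.
 */
static void complete_request(struct request *rq, int this_cpu, bool ipi_redirect)
{
	if (!ipi_redirect || rq->submit_cpu == this_cpu)
		rq->softirq_done_fn(rq);	/* complete right here */
	else
		printf("would IPI cpu %d and run softirq_done_fn there\n",
		       rq->submit_cpu);
}

static void done(struct request *rq)
{
	printf("softirq_done_fn: request submitted on cpu %d completed\n",
	       rq->submit_cpu);
}

int main(void)
{
	struct request rq = { .submit_cpu = 2, .softirq_done_fn = done };

	complete_request(&rq, 2, true);		/* same CPU: complete inline */
	complete_request(&rq, 0, true);		/* other CPU: redirect */
	return 0;
}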
@@ -384,6 +390,25 @@ static void blk_mq_start_request(struct request *rq) | |||
384 | */ | 390 | */ |
385 | rq->deadline = jiffies + q->rq_timeout; | 391 | rq->deadline = jiffies + q->rq_timeout; |
386 | set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); | 392 | set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); |
393 | |||
394 | if (q->dma_drain_size && blk_rq_bytes(rq)) { | ||
395 | /* | ||
396 | * Make sure space for the drain appears. We know we can do | ||
397 | * this because max_hw_segments has been adjusted to be one | ||
398 | * fewer than the device can handle. | ||
399 | */ | ||
400 | rq->nr_phys_segments++; | ||
401 | } | ||
402 | |||
403 | /* | ||
404 | * Flag the last request in the series so that drivers know when IO | ||
405 | * should be kicked off, if they don't do it on a per-request basis. | ||
406 | * | ||
407 | * Note: the flag isn't the only condition on which drivers should kick off IO. | ||
408 | * If the drive is busy, the last request might not have the bit set. | ||
409 | */ | ||
410 | if (last) | ||
411 | rq->cmd_flags |= REQ_END; | ||
387 | } | 412 | } |
388 | 413 | ||
389 | static void blk_mq_requeue_request(struct request *rq) | 414 | static void blk_mq_requeue_request(struct request *rq) |
@@ -392,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq) | |||
392 | 417 | ||
393 | trace_block_rq_requeue(q, rq); | 418 | trace_block_rq_requeue(q, rq); |
394 | clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); | 419 | clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); |
420 | |||
421 | rq->cmd_flags &= ~REQ_END; | ||
422 | |||
423 | if (q->dma_drain_size && blk_rq_bytes(rq)) | ||
424 | rq->nr_phys_segments--; | ||
395 | } | 425 | } |
396 | 426 | ||
397 | struct blk_mq_timeout_data { | 427 | struct blk_mq_timeout_data { |
@@ -559,19 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) | |||
559 | 589 | ||
560 | rq = list_first_entry(&rq_list, struct request, queuelist); | 590 | rq = list_first_entry(&rq_list, struct request, queuelist); |
561 | list_del_init(&rq->queuelist); | 591 | list_del_init(&rq->queuelist); |
562 | blk_mq_start_request(rq); | ||
563 | 592 | ||
564 | /* | 593 | blk_mq_start_request(rq, list_empty(&rq_list)); |
565 | * Last request in the series. Flag it as such, this | ||
566 | * enables drivers to know when IO should be kicked off, | ||
567 | * if they don't do it on a per-request basis. | ||
568 | * | ||
569 | * Note: the flag isn't the only condition drivers | ||
570 | * should do kick off. If drive is busy, the last | ||
571 | * request might not have the bit set. | ||
572 | */ | ||
573 | if (list_empty(&rq_list)) | ||
574 | rq->cmd_flags |= REQ_END; | ||
575 | 594 | ||
576 | ret = q->mq_ops->queue_rq(hctx, rq); | 595 | ret = q->mq_ops->queue_rq(hctx, rq); |
577 | switch (ret) { | 596 | switch (ret) { |
@@ -589,8 +608,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) | |||
589 | break; | 608 | break; |
590 | default: | 609 | default: |
591 | pr_err("blk-mq: bad return on queue: %d\n", ret); | 610 | pr_err("blk-mq: bad return on queue: %d\n", ret); |
592 | rq->errors = -EIO; | ||
593 | case BLK_MQ_RQ_QUEUE_ERROR: | 611 | case BLK_MQ_RQ_QUEUE_ERROR: |
612 | rq->errors = -EIO; | ||
594 | blk_mq_end_io(rq, rq->errors); | 613 | blk_mq_end_io(rq, rq->errors); |
595 | break; | 614 | break; |
596 | } | 615 | } |
@@ -693,13 +712,16 @@ static void blk_mq_work_fn(struct work_struct *work) | |||
693 | } | 712 | } |
694 | 713 | ||
695 | static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, | 714 | static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, |
696 | struct request *rq) | 715 | struct request *rq, bool at_head) |
697 | { | 716 | { |
698 | struct blk_mq_ctx *ctx = rq->mq_ctx; | 717 | struct blk_mq_ctx *ctx = rq->mq_ctx; |
699 | 718 | ||
700 | trace_block_rq_insert(hctx->queue, rq); | 719 | trace_block_rq_insert(hctx->queue, rq); |
701 | 720 | ||
702 | list_add_tail(&rq->queuelist, &ctx->rq_list); | 721 | if (at_head) |
722 | list_add(&rq->queuelist, &ctx->rq_list); | ||
723 | else | ||
724 | list_add_tail(&rq->queuelist, &ctx->rq_list); | ||
703 | blk_mq_hctx_mark_pending(hctx, ctx); | 725 | blk_mq_hctx_mark_pending(hctx, ctx); |
704 | 726 | ||
705 | /* | 727 | /* |
@@ -709,7 +731,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, | |||
709 | } | 731 | } |
710 | 732 | ||
711 | void blk_mq_insert_request(struct request_queue *q, struct request *rq, | 733 | void blk_mq_insert_request(struct request_queue *q, struct request *rq, |
712 | bool run_queue) | 734 | bool at_head, bool run_queue) |
713 | { | 735 | { |
714 | struct blk_mq_hw_ctx *hctx; | 736 | struct blk_mq_hw_ctx *hctx; |
715 | struct blk_mq_ctx *ctx, *current_ctx; | 737 | struct blk_mq_ctx *ctx, *current_ctx; |
@@ -728,7 +750,7 @@ void blk_mq_insert_request(struct request_queue *q, struct request *rq, | |||
728 | rq->mq_ctx = ctx; | 750 | rq->mq_ctx = ctx; |
729 | } | 751 | } |
730 | spin_lock(&ctx->lock); | 752 | spin_lock(&ctx->lock); |
731 | __blk_mq_insert_request(hctx, rq); | 753 | __blk_mq_insert_request(hctx, rq, at_head); |
732 | spin_unlock(&ctx->lock); | 754 | spin_unlock(&ctx->lock); |
733 | 755 | ||
734 | blk_mq_put_ctx(current_ctx); | 756 | blk_mq_put_ctx(current_ctx); |
@@ -760,7 +782,7 @@ void blk_mq_run_request(struct request *rq, bool run_queue, bool async) | |||
760 | 782 | ||
761 | /* ctx->cpu might be offline */ | 783 | /* ctx->cpu might be offline */ |
762 | spin_lock(&ctx->lock); | 784 | spin_lock(&ctx->lock); |
763 | __blk_mq_insert_request(hctx, rq); | 785 | __blk_mq_insert_request(hctx, rq, false); |
764 | spin_unlock(&ctx->lock); | 786 | spin_unlock(&ctx->lock); |
765 | 787 | ||
766 | blk_mq_put_ctx(current_ctx); | 788 | blk_mq_put_ctx(current_ctx); |
@@ -798,7 +820,7 @@ static void blk_mq_insert_requests(struct request_queue *q, | |||
798 | rq = list_first_entry(list, struct request, queuelist); | 820 | rq = list_first_entry(list, struct request, queuelist); |
799 | list_del_init(&rq->queuelist); | 821 | list_del_init(&rq->queuelist); |
800 | rq->mq_ctx = ctx; | 822 | rq->mq_ctx = ctx; |
801 | __blk_mq_insert_request(hctx, rq); | 823 | __blk_mq_insert_request(hctx, rq, false); |
802 | } | 824 | } |
803 | spin_unlock(&ctx->lock); | 825 | spin_unlock(&ctx->lock); |
804 | 826 | ||
@@ -888,6 +910,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
888 | 910 | ||
889 | blk_queue_bounce(q, &bio); | 911 | blk_queue_bounce(q, &bio); |
890 | 912 | ||
913 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { | ||
914 | bio_endio(bio, -EIO); | ||
915 | return; | ||
916 | } | ||
917 | |||
891 | if (use_plug && blk_attempt_plug_merge(q, bio, &request_count)) | 918 | if (use_plug && blk_attempt_plug_merge(q, bio, &request_count)) |
892 | return; | 919 | return; |
893 | 920 | ||
@@ -950,7 +977,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
950 | __blk_mq_free_request(hctx, ctx, rq); | 977 | __blk_mq_free_request(hctx, ctx, rq); |
951 | else { | 978 | else { |
952 | blk_mq_bio_to_request(rq, bio); | 979 | blk_mq_bio_to_request(rq, bio); |
953 | __blk_mq_insert_request(hctx, rq); | 980 | __blk_mq_insert_request(hctx, rq, false); |
954 | } | 981 | } |
955 | 982 | ||
956 | spin_unlock(&ctx->lock); | 983 | spin_unlock(&ctx->lock); |
@@ -1309,15 +1336,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, | |||
1309 | reg->queue_depth = BLK_MQ_MAX_DEPTH; | 1336 | reg->queue_depth = BLK_MQ_MAX_DEPTH; |
1310 | } | 1337 | } |
1311 | 1338 | ||
1312 | /* | ||
1313 | * Set aside a tag for flush requests. It will only be used while | ||
1314 | * another flush request is in progress but outside the driver. | ||
1315 | * | ||
1316 | * TODO: only allocate if flushes are supported | ||
1317 | */ | ||
1318 | reg->queue_depth++; | ||
1319 | reg->reserved_tags++; | ||
1320 | |||
1321 | if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN)) | 1339 | if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN)) |
1322 | return ERR_PTR(-EINVAL); | 1340 | return ERR_PTR(-EINVAL); |
1323 | 1341 | ||
@@ -1360,17 +1378,27 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, | |||
1360 | q->mq_ops = reg->ops; | 1378 | q->mq_ops = reg->ops; |
1361 | q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; | 1379 | q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; |
1362 | 1380 | ||
1381 | q->sg_reserved_size = INT_MAX; | ||
1382 | |||
1363 | blk_queue_make_request(q, blk_mq_make_request); | 1383 | blk_queue_make_request(q, blk_mq_make_request); |
1364 | blk_queue_rq_timed_out(q, reg->ops->timeout); | 1384 | blk_queue_rq_timed_out(q, reg->ops->timeout); |
1365 | if (reg->timeout) | 1385 | if (reg->timeout) |
1366 | blk_queue_rq_timeout(q, reg->timeout); | 1386 | blk_queue_rq_timeout(q, reg->timeout); |
1367 | 1387 | ||
1388 | if (reg->ops->complete) | ||
1389 | blk_queue_softirq_done(q, reg->ops->complete); | ||
1390 | |||
1368 | blk_mq_init_flush(q); | 1391 | blk_mq_init_flush(q); |
1369 | blk_mq_init_cpu_queues(q, reg->nr_hw_queues); | 1392 | blk_mq_init_cpu_queues(q, reg->nr_hw_queues); |
1370 | 1393 | ||
1371 | if (blk_mq_init_hw_queues(q, reg, driver_data)) | 1394 | q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size, |
1395 | cache_line_size()), GFP_KERNEL); | ||
1396 | if (!q->flush_rq) | ||
1372 | goto err_hw; | 1397 | goto err_hw; |
1373 | 1398 | ||
1399 | if (blk_mq_init_hw_queues(q, reg, driver_data)) | ||
1400 | goto err_flush_rq; | ||
1401 | |||
1374 | blk_mq_map_swqueue(q); | 1402 | blk_mq_map_swqueue(q); |
1375 | 1403 | ||
1376 | mutex_lock(&all_q_mutex); | 1404 | mutex_lock(&all_q_mutex); |
@@ -1378,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, | |||
1378 | mutex_unlock(&all_q_mutex); | 1406 | mutex_unlock(&all_q_mutex); |
1379 | 1407 | ||
1380 | return q; | 1408 | return q; |
1409 | |||
1410 | err_flush_rq: | ||
1411 | kfree(q->flush_rq); | ||
1381 | err_hw: | 1412 | err_hw: |
1382 | kfree(q->mq_map); | 1413 | kfree(q->mq_map); |
1383 | err_map: | 1414 | err_map: |
diff --git a/block/blk-mq.h b/block/blk-mq.h index 5c3917984b00..ed0035cd458e 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
@@ -22,13 +22,13 @@ struct blk_mq_ctx { | |||
22 | struct kobject kobj; | 22 | struct kobject kobj; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | void __blk_mq_end_io(struct request *rq, int error); | 25 | void __blk_mq_complete_request(struct request *rq); |
26 | void blk_mq_complete_request(struct request *rq, int error); | ||
27 | void blk_mq_run_request(struct request *rq, bool run_queue, bool async); | 26 | void blk_mq_run_request(struct request *rq, bool run_queue, bool async); |
28 | void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); | 27 | void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); |
29 | void blk_mq_init_flush(struct request_queue *q); | 28 | void blk_mq_init_flush(struct request_queue *q); |
30 | void blk_mq_drain_queue(struct request_queue *q); | 29 | void blk_mq_drain_queue(struct request_queue *q); |
31 | void blk_mq_free_queue(struct request_queue *q); | 30 | void blk_mq_free_queue(struct request_queue *q); |
31 | void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq); | ||
32 | 32 | ||
33 | /* | 33 | /* |
34 | * CPU hotplug helpers | 34 | * CPU hotplug helpers |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 8095c4a21fc0..7500f876dae4 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj) | |||
549 | if (q->mq_ops) | 549 | if (q->mq_ops) |
550 | blk_mq_free_queue(q); | 550 | blk_mq_free_queue(q); |
551 | 551 | ||
552 | kfree(q->flush_rq); | ||
553 | |||
552 | blk_trace_shutdown(q); | 554 | blk_trace_shutdown(q); |
553 | 555 | ||
554 | bdi_destroy(&q->backing_dev_info); | 556 | bdi_destroy(&q->backing_dev_info); |
diff --git a/block/blk-timeout.c b/block/blk-timeout.c index bba81c9348e1..d96f7061c6fd 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c | |||
@@ -91,7 +91,7 @@ static void blk_rq_timed_out(struct request *req) | |||
91 | case BLK_EH_HANDLED: | 91 | case BLK_EH_HANDLED: |
92 | /* Can we use req->errors here? */ | 92 | /* Can we use req->errors here? */ |
93 | if (q->mq_ops) | 93 | if (q->mq_ops) |
94 | blk_mq_complete_request(req, req->errors); | 94 | __blk_mq_complete_request(req); |
95 | else | 95 | else |
96 | __blk_complete_request(req); | 96 | __blk_complete_request(req); |
97 | break; | 97 | break; |
diff --git a/block/blk.h b/block/blk.h index c90e1d8f7a2b..d23b415b8a28 100644 --- a/block/blk.h +++ b/block/blk.h | |||
@@ -113,7 +113,7 @@ static inline struct request *__elv_next_request(struct request_queue *q) | |||
113 | q->flush_queue_delayed = 1; | 113 | q->flush_queue_delayed = 1; |
114 | return NULL; | 114 | return NULL; |
115 | } | 115 | } |
116 | if (unlikely(blk_queue_dying(q)) || | 116 | if (unlikely(blk_queue_bypass(q)) || |
117 | !q->elevator->type->ops.elevator_dispatch_fn(q, 0)) | 117 | !q->elevator->type->ops.elevator_dispatch_fn(q, 0)) |
118 | return NULL; | 118 | return NULL; |
119 | } | 119 | } |
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index 0b6ae6eb5c4a..368f9ddb8480 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c | |||
@@ -79,9 +79,10 @@ static int container_device_attach(struct acpi_device *adev, | |||
79 | ACPI_COMPANION_SET(dev, adev); | 79 | ACPI_COMPANION_SET(dev, adev); |
80 | dev->release = acpi_container_release; | 80 | dev->release = acpi_container_release; |
81 | ret = device_register(dev); | 81 | ret = device_register(dev); |
82 | if (ret) | 82 | if (ret) { |
83 | put_device(dev); | ||
83 | return ret; | 84 | return ret; |
84 | 85 | } | |
85 | adev->driver_data = dev; | 86 | adev->driver_data = dev; |
86 | return 1; | 87 | return 1; |
87 | } | 88 | } |
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index c431c88faaff..e9b3081c4fe9 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c | |||
@@ -609,7 +609,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event) | |||
609 | static void dock_notify(struct dock_station *ds, u32 event) | 609 | static void dock_notify(struct dock_station *ds, u32 event) |
610 | { | 610 | { |
611 | acpi_handle handle = ds->handle; | 611 | acpi_handle handle = ds->handle; |
612 | struct acpi_device *ad; | 612 | struct acpi_device *adev = NULL; |
613 | int surprise_removal = 0; | 613 | int surprise_removal = 0; |
614 | 614 | ||
615 | /* | 615 | /* |
@@ -632,7 +632,8 @@ static void dock_notify(struct dock_station *ds, u32 event) | |||
632 | switch (event) { | 632 | switch (event) { |
633 | case ACPI_NOTIFY_BUS_CHECK: | 633 | case ACPI_NOTIFY_BUS_CHECK: |
634 | case ACPI_NOTIFY_DEVICE_CHECK: | 634 | case ACPI_NOTIFY_DEVICE_CHECK: |
635 | if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) { | 635 | acpi_bus_get_device(handle, &adev); |
636 | if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) { | ||
636 | begin_dock(ds); | 637 | begin_dock(ds); |
637 | dock(ds); | 638 | dock(ds); |
638 | if (!dock_present(ds)) { | 639 | if (!dock_present(ds)) { |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 20a7517bd339..52b8181ddafd 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -4126,12 +4126,14 @@ static int mv_platform_probe(struct platform_device *pdev) | |||
4126 | clk_prepare_enable(hpriv->port_clks[port]); | 4126 | clk_prepare_enable(hpriv->port_clks[port]); |
4127 | 4127 | ||
4128 | sprintf(port_number, "port%d", port); | 4128 | sprintf(port_number, "port%d", port); |
4129 | hpriv->port_phys[port] = devm_phy_get(&pdev->dev, port_number); | 4129 | hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev, |
4130 | port_number); | ||
4130 | if (IS_ERR(hpriv->port_phys[port])) { | 4131 | if (IS_ERR(hpriv->port_phys[port])) { |
4131 | rc = PTR_ERR(hpriv->port_phys[port]); | 4132 | rc = PTR_ERR(hpriv->port_phys[port]); |
4132 | hpriv->port_phys[port] = NULL; | 4133 | hpriv->port_phys[port] = NULL; |
4133 | if ((rc != -EPROBE_DEFER) && (rc != -ENODEV)) | 4134 | if (rc != -EPROBE_DEFER) |
4134 | dev_warn(&pdev->dev, "error getting phy"); | 4135 | dev_warn(&pdev->dev, "error getting phy %d", |
4136 | rc); | ||
4135 | goto err; | 4137 | goto err; |
4136 | } else | 4138 | } else |
4137 | phy_power_on(hpriv->port_phys[port]); | 4139 | phy_power_on(hpriv->port_phys[port]); |
diff --git a/drivers/base/component.c b/drivers/base/component.c index c53efe6c6d8e..c4778995cd72 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c | |||
@@ -133,9 +133,16 @@ static int try_to_bring_up_master(struct master *master, | |||
133 | goto out; | 133 | goto out; |
134 | } | 134 | } |
135 | 135 | ||
136 | if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { | ||
137 | ret = -ENOMEM; | ||
138 | goto out; | ||
139 | } | ||
140 | |||
136 | /* Found all components */ | 141 | /* Found all components */ |
137 | ret = master->ops->bind(master->dev); | 142 | ret = master->ops->bind(master->dev); |
138 | if (ret < 0) { | 143 | if (ret < 0) { |
144 | devres_release_group(master->dev, NULL); | ||
145 | dev_info(master->dev, "master bind failed: %d\n", ret); | ||
139 | master_remove_components(master); | 146 | master_remove_components(master); |
140 | goto out; | 147 | goto out; |
141 | } | 148 | } |
@@ -166,6 +173,7 @@ static void take_down_master(struct master *master) | |||
166 | { | 173 | { |
167 | if (master->bound) { | 174 | if (master->bound) { |
168 | master->ops->unbind(master->dev); | 175 | master->ops->unbind(master->dev); |
176 | devres_release_group(master->dev, NULL); | ||
169 | master->bound = false; | 177 | master->bound = false; |
170 | } | 178 | } |
171 | 179 | ||
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c index 1e16cbd61da2..61d6d62cc0d3 100644 --- a/drivers/base/dma-buf.c +++ b/drivers/base/dma-buf.c | |||
@@ -616,36 +616,35 @@ static int dma_buf_describe(struct seq_file *s) | |||
616 | if (ret) | 616 | if (ret) |
617 | return ret; | 617 | return ret; |
618 | 618 | ||
619 | seq_printf(s, "\nDma-buf Objects:\n"); | 619 | seq_puts(s, "\nDma-buf Objects:\n"); |
620 | seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n"); | 620 | seq_puts(s, "size\tflags\tmode\tcount\texp_name\n"); |
621 | 621 | ||
622 | list_for_each_entry(buf_obj, &db_list.head, list_node) { | 622 | list_for_each_entry(buf_obj, &db_list.head, list_node) { |
623 | ret = mutex_lock_interruptible(&buf_obj->lock); | 623 | ret = mutex_lock_interruptible(&buf_obj->lock); |
624 | 624 | ||
625 | if (ret) { | 625 | if (ret) { |
626 | seq_printf(s, | 626 | seq_puts(s, |
627 | "\tERROR locking buffer object: skipping\n"); | 627 | "\tERROR locking buffer object: skipping\n"); |
628 | continue; | 628 | continue; |
629 | } | 629 | } |
630 | 630 | ||
631 | seq_printf(s, "\t"); | 631 | seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n", |
632 | 632 | buf_obj->size, | |
633 | seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n", | ||
634 | buf_obj->exp_name, buf_obj->size, | ||
635 | buf_obj->file->f_flags, buf_obj->file->f_mode, | 633 | buf_obj->file->f_flags, buf_obj->file->f_mode, |
636 | (long)(buf_obj->file->f_count.counter)); | 634 | (long)(buf_obj->file->f_count.counter), |
635 | buf_obj->exp_name); | ||
637 | 636 | ||
638 | seq_printf(s, "\t\tAttached Devices:\n"); | 637 | seq_puts(s, "\tAttached Devices:\n"); |
639 | attach_count = 0; | 638 | attach_count = 0; |
640 | 639 | ||
641 | list_for_each_entry(attach_obj, &buf_obj->attachments, node) { | 640 | list_for_each_entry(attach_obj, &buf_obj->attachments, node) { |
642 | seq_printf(s, "\t\t"); | 641 | seq_puts(s, "\t"); |
643 | 642 | ||
644 | seq_printf(s, "%s\n", attach_obj->dev->init_name); | 643 | seq_printf(s, "%s\n", dev_name(attach_obj->dev)); |
645 | attach_count++; | 644 | attach_count++; |
646 | } | 645 | } |
647 | 646 | ||
648 | seq_printf(s, "\n\t\tTotal %d devices attached\n", | 647 | seq_printf(s, "Total %d devices attached\n\n", |
649 | attach_count); | 648 | attach_count); |
650 | 649 | ||
651 | count++; | 650 | count++; |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 3107282a9741..091b9ea14feb 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -60,7 +60,9 @@ enum { | |||
60 | NULL_IRQ_NONE = 0, | 60 | NULL_IRQ_NONE = 0, |
61 | NULL_IRQ_SOFTIRQ = 1, | 61 | NULL_IRQ_SOFTIRQ = 1, |
62 | NULL_IRQ_TIMER = 2, | 62 | NULL_IRQ_TIMER = 2, |
63 | }; | ||
63 | 64 | ||
65 | enum { | ||
64 | NULL_Q_BIO = 0, | 66 | NULL_Q_BIO = 0, |
65 | NULL_Q_RQ = 1, | 67 | NULL_Q_RQ = 1, |
66 | NULL_Q_MQ = 2, | 68 | NULL_Q_MQ = 2, |
@@ -172,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait) | |||
172 | 174 | ||
173 | static void end_cmd(struct nullb_cmd *cmd) | 175 | static void end_cmd(struct nullb_cmd *cmd) |
174 | { | 176 | { |
175 | if (cmd->rq) { | 177 | switch (queue_mode) { |
176 | if (queue_mode == NULL_Q_MQ) | 178 | case NULL_Q_MQ: |
177 | blk_mq_end_io(cmd->rq, 0); | 179 | blk_mq_end_io(cmd->rq, 0); |
178 | else { | 180 | return; |
179 | INIT_LIST_HEAD(&cmd->rq->queuelist); | 181 | case NULL_Q_RQ: |
180 | blk_end_request_all(cmd->rq, 0); | 182 | INIT_LIST_HEAD(&cmd->rq->queuelist); |
181 | } | 183 | blk_end_request_all(cmd->rq, 0); |
182 | } else if (cmd->bio) | 184 | break; |
185 | case NULL_Q_BIO: | ||
183 | bio_endio(cmd->bio, 0); | 186 | bio_endio(cmd->bio, 0); |
187 | break; | ||
188 | } | ||
184 | 189 | ||
185 | if (queue_mode != NULL_Q_MQ) | 190 | free_cmd(cmd); |
186 | free_cmd(cmd); | ||
187 | } | 191 | } |
188 | 192 | ||
189 | static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) | 193 | static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) |
@@ -195,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) | |||
195 | cq = &per_cpu(completion_queues, smp_processor_id()); | 199 | cq = &per_cpu(completion_queues, smp_processor_id()); |
196 | 200 | ||
197 | while ((entry = llist_del_all(&cq->list)) != NULL) { | 201 | while ((entry = llist_del_all(&cq->list)) != NULL) { |
202 | entry = llist_reverse_order(entry); | ||
198 | do { | 203 | do { |
199 | cmd = container_of(entry, struct nullb_cmd, ll_list); | 204 | cmd = container_of(entry, struct nullb_cmd, ll_list); |
200 | end_cmd(cmd); | 205 | end_cmd(cmd); |
@@ -221,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd) | |||
221 | 226 | ||
222 | static void null_softirq_done_fn(struct request *rq) | 227 | static void null_softirq_done_fn(struct request *rq) |
223 | { | 228 | { |
224 | blk_end_request_all(rq, 0); | 229 | end_cmd(rq->special); |
225 | } | ||
226 | |||
227 | #ifdef CONFIG_SMP | ||
228 | |||
229 | static void null_ipi_cmd_end_io(void *data) | ||
230 | { | ||
231 | struct completion_queue *cq; | ||
232 | struct llist_node *entry, *next; | ||
233 | struct nullb_cmd *cmd; | ||
234 | |||
235 | cq = &per_cpu(completion_queues, smp_processor_id()); | ||
236 | |||
237 | entry = llist_del_all(&cq->list); | ||
238 | |||
239 | while (entry) { | ||
240 | next = entry->next; | ||
241 | cmd = llist_entry(entry, struct nullb_cmd, ll_list); | ||
242 | end_cmd(cmd); | ||
243 | entry = next; | ||
244 | } | ||
245 | } | ||
246 | |||
247 | static void null_cmd_end_ipi(struct nullb_cmd *cmd) | ||
248 | { | ||
249 | struct call_single_data *data = &cmd->csd; | ||
250 | int cpu = get_cpu(); | ||
251 | struct completion_queue *cq = &per_cpu(completion_queues, cpu); | ||
252 | |||
253 | cmd->ll_list.next = NULL; | ||
254 | |||
255 | if (llist_add(&cmd->ll_list, &cq->list)) { | ||
256 | data->func = null_ipi_cmd_end_io; | ||
257 | data->flags = 0; | ||
258 | __smp_call_function_single(cpu, data, 0); | ||
259 | } | ||
260 | |||
261 | put_cpu(); | ||
262 | } | 230 | } |
263 | 231 | ||
264 | #endif /* CONFIG_SMP */ | ||
265 | |||
266 | static inline void null_handle_cmd(struct nullb_cmd *cmd) | 232 | static inline void null_handle_cmd(struct nullb_cmd *cmd) |
267 | { | 233 | { |
268 | /* Complete IO by inline, softirq or timer */ | 234 | /* Complete IO by inline, softirq or timer */ |
269 | switch (irqmode) { | 235 | switch (irqmode) { |
270 | case NULL_IRQ_NONE: | ||
271 | end_cmd(cmd); | ||
272 | break; | ||
273 | case NULL_IRQ_SOFTIRQ: | 236 | case NULL_IRQ_SOFTIRQ: |
274 | #ifdef CONFIG_SMP | 237 | switch (queue_mode) { |
275 | null_cmd_end_ipi(cmd); | 238 | case NULL_Q_MQ: |
276 | #else | 239 | blk_mq_complete_request(cmd->rq); |
240 | break; | ||
241 | case NULL_Q_RQ: | ||
242 | blk_complete_request(cmd->rq); | ||
243 | break; | ||
244 | case NULL_Q_BIO: | ||
245 | /* | ||
246 | * XXX: no proper submitting cpu information available. | ||
247 | */ | ||
248 | end_cmd(cmd); | ||
249 | break; | ||
250 | } | ||
251 | break; | ||
252 | case NULL_IRQ_NONE: | ||
277 | end_cmd(cmd); | 253 | end_cmd(cmd); |
278 | #endif | ||
279 | break; | 254 | break; |
280 | case NULL_IRQ_TIMER: | 255 | case NULL_IRQ_TIMER: |
281 | null_cmd_end_timer(cmd); | 256 | null_cmd_end_timer(cmd); |
@@ -411,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = { | |||
411 | .queue_rq = null_queue_rq, | 386 | .queue_rq = null_queue_rq, |
412 | .map_queue = blk_mq_map_queue, | 387 | .map_queue = blk_mq_map_queue, |
413 | .init_hctx = null_init_hctx, | 388 | .init_hctx = null_init_hctx, |
389 | .complete = null_softirq_done_fn, | ||
414 | }; | 390 | }; |
415 | 391 | ||
416 | static struct blk_mq_reg null_mq_reg = { | 392 | static struct blk_mq_reg null_mq_reg = { |
@@ -609,13 +585,6 @@ static int __init null_init(void) | |||
609 | { | 585 | { |
610 | unsigned int i; | 586 | unsigned int i; |
611 | 587 | ||
612 | #if !defined(CONFIG_SMP) | ||
613 | if (irqmode == NULL_IRQ_SOFTIRQ) { | ||
614 | pr_warn("null_blk: softirq completions not available.\n"); | ||
615 | pr_warn("null_blk: using direct completions.\n"); | ||
616 | irqmode = NULL_IRQ_NONE; | ||
617 | } | ||
618 | #endif | ||
619 | if (bs > PAGE_SIZE) { | 588 | if (bs > PAGE_SIZE) { |
620 | pr_warn("null_blk: invalid block size\n"); | 589 | pr_warn("null_blk: invalid block size\n"); |
621 | pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); | 590 | pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); |
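With the new ->complete hook in struct blk_mq_ops, null_blk (and virtio-blk below) hand their completion work to the core via blk_mq_complete_request() instead of open-coded IPI plumbing. A compact sketch of how a driver wires such an ops table; the mq_ops struct here is a made-up illustration, not the kernel definition.

#include <stdio.h>

struct request { int error; };

/* Loose mirror of the idea behind struct blk_mq_ops after this series:
 * drivers fill in a ->complete callback that the core calls once
 * blk_mq_complete_request() decides the request is done.
 */
struct mq_ops {
	int  (*queue_rq)(struct request *rq);
	void (*complete)(struct request *rq);
};

static int my_queue_rq(struct request *rq)
{
	rq->error = 0;		/* pretend the hardware finished successfully */
	return 0;
}

static void my_complete(struct request *rq)
{
	printf("completion callback ran, error=%d\n", rq->error);
}

static const struct mq_ops my_ops = {
	.queue_rq = my_queue_rq,
	.complete = my_complete,
};

int main(void)
{
	struct request rq;

	my_ops.queue_rq(&rq);
	my_ops.complete(&rq);	/* the block core would invoke this at completion */
	return 0;
}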
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 6a680d4de7f1..b1cb3f4c4db4 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq, | |||
110 | return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); | 110 | return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); |
111 | } | 111 | } |
112 | 112 | ||
113 | static inline void virtblk_request_done(struct virtblk_req *vbr) | 113 | static inline void virtblk_request_done(struct request *req) |
114 | { | 114 | { |
115 | struct request *req = vbr->req; | 115 | struct virtblk_req *vbr = req->special; |
116 | int error = virtblk_result(vbr); | 116 | int error = virtblk_result(vbr); |
117 | 117 | ||
118 | if (req->cmd_type == REQ_TYPE_BLOCK_PC) { | 118 | if (req->cmd_type == REQ_TYPE_BLOCK_PC) { |
@@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq) | |||
138 | do { | 138 | do { |
139 | virtqueue_disable_cb(vq); | 139 | virtqueue_disable_cb(vq); |
140 | while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { | 140 | while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { |
141 | virtblk_request_done(vbr); | 141 | blk_mq_complete_request(vbr->req); |
142 | req_done = true; | 142 | req_done = true; |
143 | } | 143 | } |
144 | if (unlikely(virtqueue_is_broken(vq))) | 144 | if (unlikely(virtqueue_is_broken(vq))) |
@@ -479,6 +479,7 @@ static struct blk_mq_ops virtio_mq_ops = { | |||
479 | .map_queue = blk_mq_map_queue, | 479 | .map_queue = blk_mq_map_queue, |
480 | .alloc_hctx = blk_mq_alloc_single_hw_queue, | 480 | .alloc_hctx = blk_mq_alloc_single_hw_queue, |
481 | .free_hctx = blk_mq_free_single_hw_queue, | 481 | .free_hctx = blk_mq_free_single_hw_queue, |
482 | .complete = virtblk_request_done, | ||
482 | }; | 483 | }; |
483 | 484 | ||
484 | static struct blk_mq_reg virtio_mq_reg = { | 485 | static struct blk_mq_reg virtio_mq_reg = { |
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 4b97b86da926..64c60edcdfbc 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -299,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | |||
299 | BUG_ON(num != 0); | 299 | BUG_ON(num != 0); |
300 | } | 300 | } |
301 | 301 | ||
302 | static void unmap_purged_grants(struct work_struct *work) | 302 | void xen_blkbk_unmap_purged_grants(struct work_struct *work) |
303 | { | 303 | { |
304 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 304 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
305 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 305 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
@@ -375,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) | |||
375 | 375 | ||
376 | pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean); | 376 | pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean); |
377 | 377 | ||
378 | INIT_LIST_HEAD(&blkif->persistent_purge_list); | 378 | BUG_ON(!list_empty(&blkif->persistent_purge_list)); |
379 | root = &blkif->persistent_gnts; | 379 | root = &blkif->persistent_gnts; |
380 | purge_list: | 380 | purge_list: |
381 | foreach_grant_safe(persistent_gnt, n, root, node) { | 381 | foreach_grant_safe(persistent_gnt, n, root, node) { |
@@ -420,7 +420,6 @@ finished: | |||
420 | blkif->vbd.overflow_max_grants = 0; | 420 | blkif->vbd.overflow_max_grants = 0; |
421 | 421 | ||
422 | /* We can defer this work */ | 422 | /* We can defer this work */ |
423 | INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants); | ||
424 | schedule_work(&blkif->persistent_purge_work); | 423 | schedule_work(&blkif->persistent_purge_work); |
425 | pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total); | 424 | pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total); |
426 | return; | 425 | return; |
@@ -625,9 +624,23 @@ purge_gnt_list: | |||
625 | print_stats(blkif); | 624 | print_stats(blkif); |
626 | } | 625 | } |
627 | 626 | ||
628 | /* Since we are shutting down remove all pages from the buffer */ | 627 | /* Drain pending purge work */ |
629 | shrink_free_pagepool(blkif, 0 /* All */); | 628 | flush_work(&blkif->persistent_purge_work); |
630 | 629 | ||
630 | if (log_stats) | ||
631 | print_stats(blkif); | ||
632 | |||
633 | blkif->xenblkd = NULL; | ||
634 | xen_blkif_put(blkif); | ||
635 | |||
636 | return 0; | ||
637 | } | ||
638 | |||
639 | /* | ||
640 | * Remove persistent grants and empty the pool of free pages | ||
641 | */ | ||
642 | void xen_blkbk_free_caches(struct xen_blkif *blkif) | ||
643 | { | ||
631 | /* Free all persistent grant pages */ | 644 | /* Free all persistent grant pages */ |
632 | if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) | 645 | if (!RB_EMPTY_ROOT(&blkif->persistent_gnts)) |
633 | free_persistent_gnts(blkif, &blkif->persistent_gnts, | 646 | free_persistent_gnts(blkif, &blkif->persistent_gnts, |
@@ -636,13 +649,8 @@ purge_gnt_list: | |||
636 | BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); | 649 | BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); |
637 | blkif->persistent_gnt_c = 0; | 650 | blkif->persistent_gnt_c = 0; |
638 | 651 | ||
639 | if (log_stats) | 652 | /* Since we are shutting down remove all pages from the buffer */ |
640 | print_stats(blkif); | 653 | shrink_free_pagepool(blkif, 0 /* All */); |
641 | |||
642 | blkif->xenblkd = NULL; | ||
643 | xen_blkif_put(blkif); | ||
644 | |||
645 | return 0; | ||
646 | } | 654 | } |
647 | 655 | ||
648 | /* | 656 | /* |
@@ -838,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req, | |||
838 | struct grant_page **pages = pending_req->indirect_pages; | 846 | struct grant_page **pages = pending_req->indirect_pages; |
839 | struct xen_blkif *blkif = pending_req->blkif; | 847 | struct xen_blkif *blkif = pending_req->blkif; |
840 | int indirect_grefs, rc, n, nseg, i; | 848 | int indirect_grefs, rc, n, nseg, i; |
841 | struct blkif_request_segment_aligned *segments = NULL; | 849 | struct blkif_request_segment *segments = NULL; |
842 | 850 | ||
843 | nseg = pending_req->nr_pages; | 851 | nseg = pending_req->nr_pages; |
844 | indirect_grefs = INDIRECT_PAGES(nseg); | 852 | indirect_grefs = INDIRECT_PAGES(nseg); |
@@ -934,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif) | |||
934 | { | 942 | { |
935 | atomic_set(&blkif->drain, 1); | 943 | atomic_set(&blkif->drain, 1); |
936 | do { | 944 | do { |
937 | /* The initial value is one, and one refcnt taken at the | 945 | if (atomic_read(&blkif->inflight) == 0) |
938 | * start of the xen_blkif_schedule thread. */ | ||
939 | if (atomic_read(&blkif->refcnt) <= 2) | ||
940 | break; | 946 | break; |
941 | wait_for_completion_interruptible_timeout( | 947 | wait_for_completion_interruptible_timeout( |
942 | &blkif->drain_complete, HZ); | 948 | &blkif->drain_complete, HZ); |
@@ -976,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) | |||
976 | * the proper response on the ring. | 982 | * the proper response on the ring. |
977 | */ | 983 | */ |
978 | if (atomic_dec_and_test(&pending_req->pendcnt)) { | 984 | if (atomic_dec_and_test(&pending_req->pendcnt)) { |
979 | xen_blkbk_unmap(pending_req->blkif, | 985 | struct xen_blkif *blkif = pending_req->blkif; |
986 | |||
987 | xen_blkbk_unmap(blkif, | ||
980 | pending_req->segments, | 988 | pending_req->segments, |
981 | pending_req->nr_pages); | 989 | pending_req->nr_pages); |
982 | make_response(pending_req->blkif, pending_req->id, | 990 | make_response(blkif, pending_req->id, |
983 | pending_req->operation, pending_req->status); | 991 | pending_req->operation, pending_req->status); |
984 | xen_blkif_put(pending_req->blkif); | 992 | free_req(blkif, pending_req); |
985 | if (atomic_read(&pending_req->blkif->refcnt) <= 2) { | 993 | /* |
986 | if (atomic_read(&pending_req->blkif->drain)) | 994 | * Make sure the request is freed before releasing blkif, |
987 | complete(&pending_req->blkif->drain_complete); | 995 | * or there could be a race between free_req and the |
996 | * cleanup done in xen_blkif_free during shutdown. | ||
997 | * | ||
998 | * NB: The fact that we might try to wake up pending_free_wq | ||
999 | * before drain_complete (in case there's a drain going on) | ||
1000 | * is not a problem with our current implementation | ||
1001 | * because we can ensure there's no thread waiting on | ||
1002 | * pending_free_wq if there's a drain going on, but it has | ||
1003 | * to be taken into account if the current model is changed. | ||
1004 | */ | ||
1005 | if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) { | ||
1006 | complete(&blkif->drain_complete); | ||
988 | } | 1007 | } |
989 | free_req(pending_req->blkif, pending_req); | 1008 | xen_blkif_put(blkif); |
990 | } | 1009 | } |
991 | } | 1010 | } |
992 | 1011 | ||
@@ -1240,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
1240 | * below (in "!bio") if we are handling a BLKIF_OP_DISCARD. | 1259 | * below (in "!bio") if we are handling a BLKIF_OP_DISCARD. |
1241 | */ | 1260 | */ |
1242 | xen_blkif_get(blkif); | 1261 | xen_blkif_get(blkif); |
1262 | atomic_inc(&blkif->inflight); | ||
1243 | 1263 | ||
1244 | for (i = 0; i < nseg; i++) { | 1264 | for (i = 0; i < nseg; i++) { |
1245 | while ((bio == NULL) || | 1265 | while ((bio == NULL) || |
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 8d8807563d99..be052773ad03 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
@@ -57,7 +57,7 @@ | |||
57 | #define MAX_INDIRECT_SEGMENTS 256 | 57 | #define MAX_INDIRECT_SEGMENTS 256 |
58 | 58 | ||
59 | #define SEGS_PER_INDIRECT_FRAME \ | 59 | #define SEGS_PER_INDIRECT_FRAME \ |
60 | (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) | 60 | (PAGE_SIZE/sizeof(struct blkif_request_segment)) |
61 | #define MAX_INDIRECT_PAGES \ | 61 | #define MAX_INDIRECT_PAGES \ |
62 | ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) | 62 | ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) |
63 | #define INDIRECT_PAGES(_segs) \ | 63 | #define INDIRECT_PAGES(_segs) \ |
@@ -278,6 +278,7 @@ struct xen_blkif { | |||
278 | /* for barrier (drain) requests */ | 278 | /* for barrier (drain) requests */ |
279 | struct completion drain_complete; | 279 | struct completion drain_complete; |
280 | atomic_t drain; | 280 | atomic_t drain; |
281 | atomic_t inflight; | ||
281 | /* One thread per one blkif. */ | 282 | /* One thread per one blkif. */ |
282 | struct task_struct *xenblkd; | 283 | struct task_struct *xenblkd; |
283 | unsigned int waiting_reqs; | 284 | unsigned int waiting_reqs; |
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void); | |||
376 | irqreturn_t xen_blkif_be_int(int irq, void *dev_id); | 377 | irqreturn_t xen_blkif_be_int(int irq, void *dev_id); |
377 | int xen_blkif_schedule(void *arg); | 378 | int xen_blkif_schedule(void *arg); |
378 | int xen_blkif_purge_persistent(void *arg); | 379 | int xen_blkif_purge_persistent(void *arg); |
380 | void xen_blkbk_free_caches(struct xen_blkif *blkif); | ||
379 | 381 | ||
380 | int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, | 382 | int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, |
381 | struct backend_info *be, int state); | 383 | struct backend_info *be, int state); |
@@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, | |||
383 | int xen_blkbk_barrier(struct xenbus_transaction xbt, | 385 | int xen_blkbk_barrier(struct xenbus_transaction xbt, |
384 | struct backend_info *be, int state); | 386 | struct backend_info *be, int state); |
385 | struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be); | 387 | struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be); |
388 | void xen_blkbk_unmap_purged_grants(struct work_struct *work); | ||
386 | 389 | ||
387 | static inline void blkif_get_x86_32_req(struct blkif_request *dst, | 390 | static inline void blkif_get_x86_32_req(struct blkif_request *dst, |
388 | struct blkif_x86_32_request *src) | 391 | struct blkif_x86_32_request *src) |
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index c2014a0aa206..9a547e6b6ebf 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
@@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) | |||
125 | blkif->persistent_gnts.rb_node = NULL; | 125 | blkif->persistent_gnts.rb_node = NULL; |
126 | spin_lock_init(&blkif->free_pages_lock); | 126 | spin_lock_init(&blkif->free_pages_lock); |
127 | INIT_LIST_HEAD(&blkif->free_pages); | 127 | INIT_LIST_HEAD(&blkif->free_pages); |
128 | INIT_LIST_HEAD(&blkif->persistent_purge_list); | ||
128 | blkif->free_pages_num = 0; | 129 | blkif->free_pages_num = 0; |
129 | atomic_set(&blkif->persistent_gnt_in_use, 0); | 130 | atomic_set(&blkif->persistent_gnt_in_use, 0); |
131 | atomic_set(&blkif->inflight, 0); | ||
132 | INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants); | ||
130 | 133 | ||
131 | INIT_LIST_HEAD(&blkif->pending_free); | 134 | INIT_LIST_HEAD(&blkif->pending_free); |
132 | 135 | ||
@@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif) | |||
259 | if (!atomic_dec_and_test(&blkif->refcnt)) | 262 | if (!atomic_dec_and_test(&blkif->refcnt)) |
260 | BUG(); | 263 | BUG(); |
261 | 264 | ||
265 | /* Remove all persistent grants and the cache of ballooned pages. */ | ||
266 | xen_blkbk_free_caches(blkif); | ||
267 | |||
268 | /* Make sure everything is drained before shutting down */ | ||
269 | BUG_ON(blkif->persistent_gnt_c != 0); | ||
270 | BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0); | ||
271 | BUG_ON(blkif->free_pages_num != 0); | ||
272 | BUG_ON(!list_empty(&blkif->persistent_purge_list)); | ||
273 | BUG_ON(!list_empty(&blkif->free_pages)); | ||
274 | BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); | ||
275 | |||
262 | /* Check that there is no request in use */ | 276 | /* Check that there is no request in use */ |
263 | list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { | 277 | list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { |
264 | list_del(&req->free_list); | 278 | list_del(&req->free_list); |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8dcfb54f1603..efe1b4761735 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock); | |||
162 | #define DEV_NAME "xvd" /* name in /dev */ | 162 | #define DEV_NAME "xvd" /* name in /dev */ |
163 | 163 | ||
164 | #define SEGS_PER_INDIRECT_FRAME \ | 164 | #define SEGS_PER_INDIRECT_FRAME \ |
165 | (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned)) | 165 | (PAGE_SIZE/sizeof(struct blkif_request_segment)) |
166 | #define INDIRECT_GREFS(_segs) \ | 166 | #define INDIRECT_GREFS(_segs) \ |
167 | ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) | 167 | ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) |
168 | 168 | ||
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req) | |||
393 | unsigned long id; | 393 | unsigned long id; |
394 | unsigned int fsect, lsect; | 394 | unsigned int fsect, lsect; |
395 | int i, ref, n; | 395 | int i, ref, n; |
396 | struct blkif_request_segment_aligned *segments = NULL; | 396 | struct blkif_request_segment *segments = NULL; |
397 | 397 | ||
398 | /* | 398 | /* |
399 | * Used to store if we are able to queue the request by just using | 399 | * Used to store if we are able to queue the request by just using |
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req) | |||
550 | } else { | 550 | } else { |
551 | n = i % SEGS_PER_INDIRECT_FRAME; | 551 | n = i % SEGS_PER_INDIRECT_FRAME; |
552 | segments[n] = | 552 | segments[n] = |
553 | (struct blkif_request_segment_aligned) { | 553 | (struct blkif_request_segment) { |
554 | .gref = ref, | 554 | .gref = ref, |
555 | .first_sect = fsect, | 555 | .first_sect = fsect, |
556 | .last_sect = lsect }; | 556 | .last_sect = lsect }; |
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev, | |||
1904 | case XenbusStateReconfiguring: | 1904 | case XenbusStateReconfiguring: |
1905 | case XenbusStateReconfigured: | 1905 | case XenbusStateReconfigured: |
1906 | case XenbusStateUnknown: | 1906 | case XenbusStateUnknown: |
1907 | case XenbusStateClosed: | ||
1908 | break; | 1907 | break; |
1909 | 1908 | ||
1910 | case XenbusStateConnected: | 1909 | case XenbusStateConnected: |
1911 | blkfront_connect(info); | 1910 | blkfront_connect(info); |
1912 | break; | 1911 | break; |
1913 | 1912 | ||
1913 | case XenbusStateClosed: | ||
1914 | if (dev->state == XenbusStateClosed) | ||
1915 | break; | ||
1916 | /* Missed the backend's Closing state -- fallthrough */ | ||
1914 | case XenbusStateClosing: | 1917 | case XenbusStateClosing: |
1915 | blkfront_closing(info); | 1918 | blkfront_closing(info); |
1916 | break; | 1919 | break; |
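[Note on the blkfront hunk above] XenbusStateClosed is no longer silently ignored: if the backend jumps straight to Closed and the frontend missed the Closing notification, the event now falls through to the closing handler, unless the frontend is already Closed itself. A condensed sketch of that switch, with hypothetical stand-in handlers:

#include <xen/xenbus.h>

static void frontend_connect(struct xenbus_device *dev) { }	/* stand-in */
static void frontend_close(struct xenbus_device *dev) { }	/* stand-in */

static void handle_backend_state(struct xenbus_device *dev,
				 enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateConnected:
		frontend_connect(dev);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;			/* both ends already closed */
		/* Missed the backend's Closing state -- fall through */
	case XenbusStateClosing:
		frontend_close(dev);
		break;

	default:
		break;
	}
}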
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index fa3243d71c76..1386749b48ff 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -499,6 +499,7 @@ config RAW_DRIVER | |||
499 | config MAX_RAW_DEVS | 499 | config MAX_RAW_DEVS |
500 | int "Maximum number of RAW devices to support (1-65536)" | 500 | int "Maximum number of RAW devices to support (1-65536)" |
501 | depends on RAW_DRIVER | 501 | depends on RAW_DRIVER |
502 | range 1 65536 | ||
502 | default "256" | 503 | default "256" |
503 | help | 504 | help |
504 | The maximum number of RAW devices that are supported. | 505 | The maximum number of RAW devices that are supported. |
diff --git a/drivers/char/raw.c b/drivers/char/raw.c index f3223aac4df1..6e8d65e9b1d3 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c | |||
@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev) | |||
190 | struct raw_device_data *rawdev; | 190 | struct raw_device_data *rawdev; |
191 | struct block_device *bdev; | 191 | struct block_device *bdev; |
192 | 192 | ||
193 | if (number <= 0 || number >= MAX_RAW_MINORS) | 193 | if (number <= 0 || number >= max_raw_minors) |
194 | return -EINVAL; | 194 | return -EINVAL; |
195 | 195 | ||
196 | rawdev = &raw_devices[number]; | 196 | rawdev = &raw_devices[number]; |
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index 974b2db2fe10..0595dc6c453e 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c | |||
@@ -99,31 +99,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) | |||
99 | return; | 99 | return; |
100 | } | 100 | } |
101 | 101 | ||
102 | static void __init kona_timers_init(struct device_node *node) | ||
103 | { | ||
104 | u32 freq; | ||
105 | struct clk *external_clk; | ||
106 | |||
107 | external_clk = of_clk_get_by_name(node, NULL); | ||
108 | |||
109 | if (!IS_ERR(external_clk)) { | ||
110 | arch_timer_rate = clk_get_rate(external_clk); | ||
111 | clk_prepare_enable(external_clk); | ||
112 | } else if (!of_property_read_u32(node, "clock-frequency", &freq)) { | ||
113 | arch_timer_rate = freq; | ||
114 | } else { | ||
115 | panic("unable to determine clock-frequency"); | ||
116 | } | ||
117 | |||
118 | /* Setup IRQ numbers */ | ||
119 | timers.tmr_irq = irq_of_parse_and_map(node, 0); | ||
120 | |||
121 | /* Setup IO addresses */ | ||
122 | timers.tmr_regs = of_iomap(node, 0); | ||
123 | |||
124 | kona_timer_disable_and_clear(timers.tmr_regs); | ||
125 | } | ||
126 | |||
127 | static int kona_timer_set_next_event(unsigned long clc, | 102 | static int kona_timer_set_next_event(unsigned long clc, |
128 | struct clock_event_device *unused) | 103 | struct clock_event_device *unused) |
129 | { | 104 | { |
@@ -198,7 +173,34 @@ static struct irqaction kona_timer_irq = { | |||
198 | 173 | ||
199 | static void __init kona_timer_init(struct device_node *node) | 174 | static void __init kona_timer_init(struct device_node *node) |
200 | { | 175 | { |
201 | kona_timers_init(node); | 176 | u32 freq; |
177 | struct clk *external_clk; | ||
178 | |||
179 | if (!of_device_is_available(node)) { | ||
180 | pr_info("Kona Timer v1 marked as disabled in device tree\n"); | ||
181 | return; | ||
182 | } | ||
183 | |||
184 | external_clk = of_clk_get_by_name(node, NULL); | ||
185 | |||
186 | if (!IS_ERR(external_clk)) { | ||
187 | arch_timer_rate = clk_get_rate(external_clk); | ||
188 | clk_prepare_enable(external_clk); | ||
189 | } else if (!of_property_read_u32(node, "clock-frequency", &freq)) { | ||
190 | arch_timer_rate = freq; | ||
191 | } else { | ||
192 | pr_err("Kona Timer v1 unable to determine clock-frequency"); | ||
193 | return; | ||
194 | } | ||
195 | |||
196 | /* Setup IRQ numbers */ | ||
197 | timers.tmr_irq = irq_of_parse_and_map(node, 0); | ||
198 | |||
199 | /* Setup IO addresses */ | ||
200 | timers.tmr_regs = of_iomap(node, 0); | ||
201 | |||
202 | kona_timer_disable_and_clear(timers.tmr_regs); | ||
203 | |||
202 | kona_timer_clockevents_init(); | 204 | kona_timer_clockevents_init(); |
203 | setup_irq(timers.tmr_irq, &kona_timer_irq); | 205 | setup_irq(timers.tmr_irq, &kona_timer_irq); |
204 | kona_timer_set_next_event((arch_timer_rate / HZ), NULL); | 206 | kona_timer_set_next_event((arch_timer_rate / HZ), NULL); |
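[Note on the bcm_kona_timer hunks above] The old kona_timers_init() helper is folded into kona_timer_init(), nodes marked disabled in the device tree are skipped, and the panic() on an unknown rate becomes an error message plus an early return. A sketch of that rate-resolution pattern, using a hypothetical driver name:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/printk.h>

static u32 example_timer_rate;			/* hypothetical rate variable */

static int example_timer_get_rate(struct device_node *node)
{
	struct clk *clk = of_clk_get_by_name(node, NULL);
	u32 freq;

	if (!IS_ERR(clk)) {
		clk_prepare_enable(clk);
		example_timer_rate = clk_get_rate(clk);
	} else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
		example_timer_rate = freq;
	} else {
		pr_err("example timer: unable to determine clock-frequency\n");
		return -EINVAL;		/* fail gracefully, no panic() */
	}

	return 0;
}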
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 79606f473f48..c788abf1c457 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -51,8 +51,6 @@ static inline int32_t div_fp(int32_t x, int32_t y) | |||
51 | return div_s64((int64_t)x << FRAC_BITS, (int64_t)y); | 51 | return div_s64((int64_t)x << FRAC_BITS, (int64_t)y); |
52 | } | 52 | } |
53 | 53 | ||
54 | static u64 energy_divisor; | ||
55 | |||
56 | struct sample { | 54 | struct sample { |
57 | int32_t core_pct_busy; | 55 | int32_t core_pct_busy; |
58 | u64 aperf; | 56 | u64 aperf; |
@@ -630,12 +628,10 @@ static void intel_pstate_timer_func(unsigned long __data) | |||
630 | { | 628 | { |
631 | struct cpudata *cpu = (struct cpudata *) __data; | 629 | struct cpudata *cpu = (struct cpudata *) __data; |
632 | struct sample *sample; | 630 | struct sample *sample; |
633 | u64 energy; | ||
634 | 631 | ||
635 | intel_pstate_sample(cpu); | 632 | intel_pstate_sample(cpu); |
636 | 633 | ||
637 | sample = &cpu->samples[cpu->sample_ptr]; | 634 | sample = &cpu->samples[cpu->sample_ptr]; |
638 | rdmsrl(MSR_PKG_ENERGY_STATUS, energy); | ||
639 | 635 | ||
640 | intel_pstate_adjust_busy_pstate(cpu); | 636 | intel_pstate_adjust_busy_pstate(cpu); |
641 | 637 | ||
@@ -644,7 +640,6 @@ static void intel_pstate_timer_func(unsigned long __data) | |||
644 | cpu->pstate.current_pstate, | 640 | cpu->pstate.current_pstate, |
645 | sample->mperf, | 641 | sample->mperf, |
646 | sample->aperf, | 642 | sample->aperf, |
647 | div64_u64(energy, energy_divisor), | ||
648 | sample->freq); | 643 | sample->freq); |
649 | 644 | ||
650 | intel_pstate_set_sample_time(cpu); | 645 | intel_pstate_set_sample_time(cpu); |
@@ -926,7 +921,6 @@ static int __init intel_pstate_init(void) | |||
926 | int cpu, rc = 0; | 921 | int cpu, rc = 0; |
927 | const struct x86_cpu_id *id; | 922 | const struct x86_cpu_id *id; |
928 | struct cpu_defaults *cpu_info; | 923 | struct cpu_defaults *cpu_info; |
929 | u64 units; | ||
930 | 924 | ||
931 | if (no_load) | 925 | if (no_load) |
932 | return -ENODEV; | 926 | return -ENODEV; |
@@ -960,9 +954,6 @@ static int __init intel_pstate_init(void) | |||
960 | if (rc) | 954 | if (rc) |
961 | goto out; | 955 | goto out; |
962 | 956 | ||
963 | rdmsrl(MSR_RAPL_POWER_UNIT, units); | ||
964 | energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */ | ||
965 | |||
966 | intel_pstate_debug_expose_params(); | 957 | intel_pstate_debug_expose_params(); |
967 | intel_pstate_sysfs_expose_params(); | 958 | intel_pstate_sysfs_expose_params(); |
968 | 959 | ||
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 6c4c000671c5..1e5481d88a26 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c | |||
@@ -158,6 +158,15 @@ static inline unsigned long nx842_get_scatterlist_size( | |||
158 | return sl->entry_nr * sizeof(struct nx842_slentry); | 158 | return sl->entry_nr * sizeof(struct nx842_slentry); |
159 | } | 159 | } |
160 | 160 | ||
161 | static inline unsigned long nx842_get_pa(void *addr) | ||
162 | { | ||
163 | if (is_vmalloc_addr(addr)) | ||
164 | return page_to_phys(vmalloc_to_page(addr)) | ||
165 | + offset_in_page(addr); | ||
166 | else | ||
167 | return __pa(addr); | ||
168 | } | ||
169 | |||
161 | static int nx842_build_scatterlist(unsigned long buf, int len, | 170 | static int nx842_build_scatterlist(unsigned long buf, int len, |
162 | struct nx842_scatterlist *sl) | 171 | struct nx842_scatterlist *sl) |
163 | { | 172 | { |
@@ -168,7 +177,7 @@ static int nx842_build_scatterlist(unsigned long buf, int len, | |||
168 | 177 | ||
169 | entry = sl->entries; | 178 | entry = sl->entries; |
170 | while (len) { | 179 | while (len) { |
171 | entry->ptr = __pa(buf); | 180 | entry->ptr = nx842_get_pa((void *)buf); |
172 | nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); | 181 | nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); |
173 | if (nextpage < buf + len) { | 182 | if (nextpage < buf + len) { |
174 | /* we aren't at the end yet */ | 183 | /* we aren't at the end yet */ |
@@ -370,8 +379,8 @@ int nx842_compress(const unsigned char *in, unsigned int inlen, | |||
370 | op.flags = NX842_OP_COMPRESS; | 379 | op.flags = NX842_OP_COMPRESS; |
371 | csbcpb = &workmem->csbcpb; | 380 | csbcpb = &workmem->csbcpb; |
372 | memset(csbcpb, 0, sizeof(*csbcpb)); | 381 | memset(csbcpb, 0, sizeof(*csbcpb)); |
373 | op.csbcpb = __pa(csbcpb); | 382 | op.csbcpb = nx842_get_pa(csbcpb); |
374 | op.out = __pa(slout.entries); | 383 | op.out = nx842_get_pa(slout.entries); |
375 | 384 | ||
376 | for (i = 0; i < hdr->blocks_nr; i++) { | 385 | for (i = 0; i < hdr->blocks_nr; i++) { |
377 | /* | 386 | /* |
@@ -401,13 +410,13 @@ int nx842_compress(const unsigned char *in, unsigned int inlen, | |||
401 | */ | 410 | */ |
402 | if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { | 411 | if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { |
403 | /* Create direct DDE */ | 412 | /* Create direct DDE */ |
404 | op.in = __pa(inbuf); | 413 | op.in = nx842_get_pa((void *)inbuf); |
405 | op.inlen = max_sync_size; | 414 | op.inlen = max_sync_size; |
406 | 415 | ||
407 | } else { | 416 | } else { |
408 | /* Create indirect DDE (scatterlist) */ | 417 | /* Create indirect DDE (scatterlist) */ |
409 | nx842_build_scatterlist(inbuf, max_sync_size, &slin); | 418 | nx842_build_scatterlist(inbuf, max_sync_size, &slin); |
410 | op.in = __pa(slin.entries); | 419 | op.in = nx842_get_pa(slin.entries); |
411 | op.inlen = -nx842_get_scatterlist_size(&slin); | 420 | op.inlen = -nx842_get_scatterlist_size(&slin); |
412 | } | 421 | } |
413 | 422 | ||
@@ -565,7 +574,7 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen, | |||
565 | op.flags = NX842_OP_DECOMPRESS; | 574 | op.flags = NX842_OP_DECOMPRESS; |
566 | csbcpb = &workmem->csbcpb; | 575 | csbcpb = &workmem->csbcpb; |
567 | memset(csbcpb, 0, sizeof(*csbcpb)); | 576 | memset(csbcpb, 0, sizeof(*csbcpb)); |
568 | op.csbcpb = __pa(csbcpb); | 577 | op.csbcpb = nx842_get_pa(csbcpb); |
569 | 578 | ||
570 | /* | 579 | /* |
571 | * max_sync_size may have changed since compression, | 580 | * max_sync_size may have changed since compression, |
@@ -597,12 +606,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen, | |||
597 | if (likely((inbuf & NX842_HW_PAGE_MASK) == | 606 | if (likely((inbuf & NX842_HW_PAGE_MASK) == |
598 | ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { | 607 | ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { |
599 | /* Create direct DDE */ | 608 | /* Create direct DDE */ |
600 | op.in = __pa(inbuf); | 609 | op.in = nx842_get_pa((void *)inbuf); |
601 | op.inlen = hdr->sizes[i]; | 610 | op.inlen = hdr->sizes[i]; |
602 | } else { | 611 | } else { |
603 | /* Create indirect DDE (scatterlist) */ | 612 | /* Create indirect DDE (scatterlist) */ |
604 | nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin); | 613 | nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin); |
605 | op.in = __pa(slin.entries); | 614 | op.in = nx842_get_pa(slin.entries); |
606 | op.inlen = -nx842_get_scatterlist_size(&slin); | 615 | op.inlen = -nx842_get_scatterlist_size(&slin); |
607 | } | 616 | } |
608 | 617 | ||
@@ -613,12 +622,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen, | |||
613 | */ | 622 | */ |
614 | if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { | 623 | if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { |
615 | /* Create direct DDE */ | 624 | /* Create direct DDE */ |
616 | op.out = __pa(outbuf); | 625 | op.out = nx842_get_pa((void *)outbuf); |
617 | op.outlen = max_sync_size; | 626 | op.outlen = max_sync_size; |
618 | } else { | 627 | } else { |
619 | /* Create indirect DDE (scatterlist) */ | 628 | /* Create indirect DDE (scatterlist) */ |
620 | nx842_build_scatterlist(outbuf, max_sync_size, &slout); | 629 | nx842_build_scatterlist(outbuf, max_sync_size, &slout); |
621 | op.out = __pa(slout.entries); | 630 | op.out = nx842_get_pa(slout.entries); |
622 | op.outlen = -nx842_get_scatterlist_size(&slout); | 631 | op.outlen = -nx842_get_scatterlist_size(&slout); |
623 | } | 632 | } |
624 | 633 | ||
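[Note on the nx-842 hunks above] Every __pa() on a request buffer is replaced by a helper that also copes with vmalloc addresses, since __pa() is only valid for the linear mapping. A stand-alone sketch of that translation; the per-page caveat in the comment is why the driver builds a scatterlist whenever a buffer spans more than one hardware page:

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static unsigned long phys_addr_of(void *addr)
{
	/*
	 * vmalloc memory is only virtually contiguous, so resolve the page
	 * explicitly; the result is valid within that page only.
	 */
	if (is_vmalloc_addr(addr))
		return page_to_phys(vmalloc_to_page(addr)) +
		       offset_in_page(addr);

	return __pa(addr);	/* linear-mapped (lowmem/kmalloc) address */
}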
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 9bed1a2a67a1..605b016bcea4 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -346,6 +346,7 @@ config MOXART_DMA | |||
346 | tristate "MOXART DMA support" | 346 | tristate "MOXART DMA support" |
347 | depends on ARCH_MOXART | 347 | depends on ARCH_MOXART |
348 | select DMA_ENGINE | 348 | select DMA_ENGINE |
349 | select DMA_OF | ||
349 | select DMA_VIRTUAL_CHANNELS | 350 | select DMA_VIRTUAL_CHANNELS |
350 | help | 351 | help |
351 | Enable support for the MOXA ART SoC DMA controller. | 352 | Enable support for the MOXA ART SoC DMA controller. |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 53fb0c8365b0..766b68ed505c 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -497,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |||
497 | if (!mv_can_chain(grp_start)) | 497 | if (!mv_can_chain(grp_start)) |
498 | goto submit_done; | 498 | goto submit_done; |
499 | 499 | ||
500 | dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n", | 500 | dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", |
501 | old_chain_tail->async_tx.phys); | 501 | &old_chain_tail->async_tx.phys); |
502 | 502 | ||
503 | /* fix up the hardware chain */ | 503 | /* fix up the hardware chain */ |
504 | mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); | 504 | mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); |
@@ -527,7 +527,8 @@ submit_done: | |||
527 | /* returns the number of allocated descriptors */ | 527 | /* returns the number of allocated descriptors */ |
528 | static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | 528 | static int mv_xor_alloc_chan_resources(struct dma_chan *chan) |
529 | { | 529 | { |
530 | char *hw_desc; | 530 | void *virt_desc; |
531 | dma_addr_t dma_desc; | ||
531 | int idx; | 532 | int idx; |
532 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | 533 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); |
533 | struct mv_xor_desc_slot *slot = NULL; | 534 | struct mv_xor_desc_slot *slot = NULL; |
@@ -542,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) | |||
542 | " %d descriptor slots", idx); | 543 | " %d descriptor slots", idx); |
543 | break; | 544 | break; |
544 | } | 545 | } |
545 | hw_desc = (char *) mv_chan->dma_desc_pool_virt; | 546 | virt_desc = mv_chan->dma_desc_pool_virt; |
546 | slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; | 547 | slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE; |
547 | 548 | ||
548 | dma_async_tx_descriptor_init(&slot->async_tx, chan); | 549 | dma_async_tx_descriptor_init(&slot->async_tx, chan); |
549 | slot->async_tx.tx_submit = mv_xor_tx_submit; | 550 | slot->async_tx.tx_submit = mv_xor_tx_submit; |
550 | INIT_LIST_HEAD(&slot->chain_node); | 551 | INIT_LIST_HEAD(&slot->chain_node); |
551 | INIT_LIST_HEAD(&slot->slot_node); | 552 | INIT_LIST_HEAD(&slot->slot_node); |
552 | INIT_LIST_HEAD(&slot->tx_list); | 553 | INIT_LIST_HEAD(&slot->tx_list); |
553 | hw_desc = (char *) mv_chan->dma_desc_pool; | 554 | dma_desc = mv_chan->dma_desc_pool; |
554 | slot->async_tx.phys = | 555 | slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; |
555 | (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; | ||
556 | slot->idx = idx++; | 556 | slot->idx = idx++; |
557 | 557 | ||
558 | spin_lock_bh(&mv_chan->lock); | 558 | spin_lock_bh(&mv_chan->lock); |
@@ -582,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
582 | int slot_cnt; | 582 | int slot_cnt; |
583 | 583 | ||
584 | dev_dbg(mv_chan_to_devp(mv_chan), | 584 | dev_dbg(mv_chan_to_devp(mv_chan), |
585 | "%s dest: %x src %x len: %u flags: %ld\n", | 585 | "%s dest: %pad src %pad len: %u flags: %ld\n", |
586 | __func__, dest, src, len, flags); | 586 | __func__, &dest, &src, len, flags); |
587 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 587 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
588 | return NULL; | 588 | return NULL; |
589 | 589 | ||
@@ -626,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
626 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); | 626 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); |
627 | 627 | ||
628 | dev_dbg(mv_chan_to_devp(mv_chan), | 628 | dev_dbg(mv_chan_to_devp(mv_chan), |
629 | "%s src_cnt: %d len: dest %x %u flags: %ld\n", | 629 | "%s src_cnt: %d len: %u dest %pad flags: %ld\n", |
630 | __func__, src_cnt, len, dest, flags); | 630 | __func__, src_cnt, len, &dest, flags); |
631 | 631 | ||
632 | spin_lock_bh(&mv_chan->lock); | 632 | spin_lock_bh(&mv_chan->lock); |
633 | slot_cnt = mv_chan_xor_slot_count(len, src_cnt); | 633 | slot_cnt = mv_chan_xor_slot_count(len, src_cnt); |
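[Note on the mv_xor hunks above] Two cleanups: dma_addr_t values are printed with the %pad extension, which takes the address of the variable and is therefore correct for both 32-bit and 64-bit DMA addresses, and descriptor addresses are derived with plain pointer/offset arithmetic instead of casts through char *. A small sketch of both, with hypothetical names:

#include <linux/device.h>
#include <linux/types.h>

static void show_slot(struct device *dev, void *pool_virt,
		      dma_addr_t pool_dma, size_t slot_size, int idx)
{
	void *virt = pool_virt + idx * slot_size;	/* CPU view */
	dma_addr_t dma = pool_dma + idx * slot_size;	/* device view */

	dev_dbg(dev, "slot %d: virt %p dma %pad\n", idx, virt, &dma);
}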
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index e8c9ef03495b..33edd6766344 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req) | |||
559 | * | 559 | * |
560 | * called with the mem_ctls_mutex held | 560 | * called with the mem_ctls_mutex held |
561 | */ | 561 | */ |
562 | static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) | 562 | static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec, |
563 | bool init) | ||
563 | { | 564 | { |
564 | edac_dbg(0, "\n"); | 565 | edac_dbg(0, "\n"); |
565 | 566 | ||
@@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) | |||
567 | if (mci->op_state != OP_RUNNING_POLL) | 568 | if (mci->op_state != OP_RUNNING_POLL) |
568 | return; | 569 | return; |
569 | 570 | ||
570 | INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); | 571 | if (init) |
572 | INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); | ||
573 | |||
571 | mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); | 574 | mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); |
572 | } | 575 | } |
573 | 576 | ||
@@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci) | |||
601 | * user space has updated our poll period value, need to | 604 | * user space has updated our poll period value, need to |
602 | * reset our workq delays | 605 | * reset our workq delays |
603 | */ | 606 | */ |
604 | void edac_mc_reset_delay_period(int value) | 607 | void edac_mc_reset_delay_period(unsigned long value) |
605 | { | 608 | { |
606 | struct mem_ctl_info *mci; | 609 | struct mem_ctl_info *mci; |
607 | struct list_head *item; | 610 | struct list_head *item; |
@@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value) | |||
611 | list_for_each(item, &mc_devices) { | 614 | list_for_each(item, &mc_devices) { |
612 | mci = list_entry(item, struct mem_ctl_info, link); | 615 | mci = list_entry(item, struct mem_ctl_info, link); |
613 | 616 | ||
614 | edac_mc_workq_setup(mci, (unsigned long) value); | 617 | edac_mc_workq_setup(mci, value, false); |
615 | } | 618 | } |
616 | 619 | ||
617 | mutex_unlock(&mem_ctls_mutex); | 620 | mutex_unlock(&mem_ctls_mutex); |
@@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci) | |||
782 | /* This instance is NOW RUNNING */ | 785 | /* This instance is NOW RUNNING */ |
783 | mci->op_state = OP_RUNNING_POLL; | 786 | mci->op_state = OP_RUNNING_POLL; |
784 | 787 | ||
785 | edac_mc_workq_setup(mci, edac_mc_get_poll_msec()); | 788 | edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true); |
786 | } else { | 789 | } else { |
787 | mci->op_state = OP_RUNNING_INTERRUPT; | 790 | mci->op_state = OP_RUNNING_INTERRUPT; |
788 | } | 791 | } |
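[Note on the edac_mc hunks above] The new 'init' flag makes INIT_DELAYED_WORK() run only when the controller is first registered; a later poll-period update only calls mod_delayed_work(), which re-queues the already initialized item with the new delay instead of re-initializing a live work struct. Sketch with hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct poller {
	struct delayed_work work;
	struct workqueue_struct *wq;
};

static void poller_start(struct poller *p, work_func_t fn, unsigned long msec)
{
	INIT_DELAYED_WORK(&p->work, fn);	/* once, at registration time */
	mod_delayed_work(p->wq, &p->work, msecs_to_jiffies(msec));
}

static void poller_set_period(struct poller *p, unsigned long msec)
{
	/* Never re-INIT a live work item; just move its expiry. */
	mod_delayed_work(p->wq, &p->work, msecs_to_jiffies(msec));
}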
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 8ec1747b1c39..b335c6ab5efe 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c | |||
@@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void) | |||
52 | 52 | ||
53 | static int edac_set_poll_msec(const char *val, struct kernel_param *kp) | 53 | static int edac_set_poll_msec(const char *val, struct kernel_param *kp) |
54 | { | 54 | { |
55 | long l; | 55 | unsigned long l; |
56 | int ret; | 56 | int ret; |
57 | 57 | ||
58 | if (!val) | 58 | if (!val) |
59 | return -EINVAL; | 59 | return -EINVAL; |
60 | 60 | ||
61 | ret = kstrtol(val, 0, &l); | 61 | ret = kstrtoul(val, 0, &l); |
62 | if (ret) | 62 | if (ret) |
63 | return ret; | 63 | return ret; |
64 | if (!l || ((int)l != l)) | 64 | |
65 | if (l < 1000) | ||
65 | return -EINVAL; | 66 | return -EINVAL; |
66 | *((int *)kp->arg) = l; | 67 | |
68 | *((unsigned long *)kp->arg) = l; | ||
67 | 69 | ||
68 | /* notify edac_mc engine to reset the poll period */ | 70 | /* notify edac_mc engine to reset the poll period */ |
69 | edac_mc_reset_delay_period(l); | 71 | edac_mc_reset_delay_period(l); |
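[Note on the edac_mc_sysfs hunk above] The module parameter moves to unsigned long, is parsed with kstrtoul(), and anything below 1000 ms is rejected, replacing the old signed truncation check. Sketch of just the parsing step (the real setter then hands the value to the workqueue code above):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int set_poll_msec(const char *val, struct kernel_param *kp)
{
	unsigned long l;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtoul(val, 0, &l);
	if (ret)
		return ret;

	if (l < 1000)				/* refuse sub-second polling */
		return -EINVAL;

	*(unsigned long *)kp->arg = l;
	return 0;
}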
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index 3d139c6e7fe3..f2118bfcf8df 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h | |||
@@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, | |||
52 | extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev); | 52 | extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev); |
53 | extern void edac_device_reset_delay_period(struct edac_device_ctl_info | 53 | extern void edac_device_reset_delay_period(struct edac_device_ctl_info |
54 | *edac_dev, unsigned long value); | 54 | *edac_dev, unsigned long value); |
55 | extern void edac_mc_reset_delay_period(int value); | 55 | extern void edac_mc_reset_delay_period(unsigned long value); |
56 | 56 | ||
57 | extern void *edac_align_ptr(void **p, unsigned size, int n_elems); | 57 | extern void *edac_align_ptr(void **p, unsigned size, int n_elems); |
58 | 58 | ||
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 697338772b64..903f24d28ba0 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -403,6 +403,7 @@ config GPIO_GRGPIO | |||
403 | 403 | ||
404 | config GPIO_TB10X | 404 | config GPIO_TB10X |
405 | bool | 405 | bool |
406 | select GENERIC_IRQ_CHIP | ||
406 | select OF_GPIO | 407 | select OF_GPIO |
407 | 408 | ||
408 | comment "I2C GPIO expanders:" | 409 | comment "I2C GPIO expanders:" |
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 233d088ac59f..f32357e2d78d 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2012-2013 Broadcom Corporation | 2 | * Copyright (C) 2012-2014 Broadcom Corporation |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public License as | 5 | * modify it under the terms of the GNU General Public License as |
@@ -657,6 +657,6 @@ static struct platform_driver bcm_kona_gpio_driver = { | |||
657 | 657 | ||
658 | module_platform_driver(bcm_kona_gpio_driver); | 658 | module_platform_driver(bcm_kona_gpio_driver); |
659 | 659 | ||
660 | MODULE_AUTHOR("Broadcom"); | 660 | MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>"); |
661 | MODULE_DESCRIPTION("Broadcom Kona GPIO Driver"); | 661 | MODULE_DESCRIPTION("Broadcom Kona GPIO Driver"); |
662 | MODULE_LICENSE("GPL v2"); | 662 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c index d3550274b8f7..3c2ba2ad0ada 100644 --- a/drivers/gpio/gpio-clps711x.c +++ b/drivers/gpio/gpio-clps711x.c | |||
@@ -97,3 +97,4 @@ module_platform_driver(clps711x_gpio_driver); | |||
97 | MODULE_LICENSE("GPL"); | 97 | MODULE_LICENSE("GPL"); |
98 | MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); | 98 | MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); |
99 | MODULE_DESCRIPTION("CLPS711X GPIO driver"); | 99 | MODULE_DESCRIPTION("CLPS711X GPIO driver"); |
100 | MODULE_ALIAS("platform:clps711x-gpio"); | ||
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c index d1b50ef5fab8..e585163f1ad5 100644 --- a/drivers/gpio/gpio-intel-mid.c +++ b/drivers/gpio/gpio-intel-mid.c | |||
@@ -394,8 +394,8 @@ static const struct irq_domain_ops intel_gpio_irq_ops = { | |||
394 | 394 | ||
395 | static int intel_gpio_runtime_idle(struct device *dev) | 395 | static int intel_gpio_runtime_idle(struct device *dev) |
396 | { | 396 | { |
397 | pm_schedule_suspend(dev, 500); | 397 | int err = pm_schedule_suspend(dev, 500); |
398 | return -EBUSY; | 398 | return err ?: -EBUSY; |
399 | } | 399 | } |
400 | 400 | ||
401 | static const struct dev_pm_ops intel_gpio_pm_ops = { | 401 | static const struct dev_pm_ops intel_gpio_pm_ops = { |
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c index 1d136eceda62..7081304d6797 100644 --- a/drivers/gpio/gpio-xtensa.c +++ b/drivers/gpio/gpio-xtensa.c | |||
@@ -40,6 +40,8 @@ | |||
40 | #error GPIO32 option is not enabled for your xtensa core variant | 40 | #error GPIO32 option is not enabled for your xtensa core variant |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #if XCHAL_HAVE_CP | ||
44 | |||
43 | static inline unsigned long enable_cp(unsigned long *cpenable) | 45 | static inline unsigned long enable_cp(unsigned long *cpenable) |
44 | { | 46 | { |
45 | unsigned long flags; | 47 | unsigned long flags; |
@@ -57,6 +59,20 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable) | |||
57 | local_irq_restore(flags); | 59 | local_irq_restore(flags); |
58 | } | 60 | } |
59 | 61 | ||
62 | #else | ||
63 | |||
64 | static inline unsigned long enable_cp(unsigned long *cpenable) | ||
65 | { | ||
66 | *cpenable = 0; /* avoid uninitialized value warning */ | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static inline void disable_cp(unsigned long flags, unsigned long cpenable) | ||
71 | { | ||
72 | } | ||
73 | |||
74 | #endif /* XCHAL_HAVE_CP */ | ||
75 | |||
60 | static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset) | 76 | static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset) |
61 | { | 77 | { |
62 | return 1; /* input only */ | 78 | return 1; /* input only */ |
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index dffc836144cc..f4dc9b7a3831 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c | |||
@@ -296,6 +296,18 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) | |||
296 | case DRM_CAP_ASYNC_PAGE_FLIP: | 296 | case DRM_CAP_ASYNC_PAGE_FLIP: |
297 | req->value = dev->mode_config.async_page_flip; | 297 | req->value = dev->mode_config.async_page_flip; |
298 | break; | 298 | break; |
299 | case DRM_CAP_CURSOR_WIDTH: | ||
300 | if (dev->mode_config.cursor_width) | ||
301 | req->value = dev->mode_config.cursor_width; | ||
302 | else | ||
303 | req->value = 64; | ||
304 | break; | ||
305 | case DRM_CAP_CURSOR_HEIGHT: | ||
306 | if (dev->mode_config.cursor_height) | ||
307 | req->value = dev->mode_config.cursor_height; | ||
308 | else | ||
309 | req->value = 64; | ||
310 | break; | ||
299 | default: | 311 | default: |
300 | return -EINVAL; | 312 | return -EINVAL; |
301 | } | 313 | } |
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index f227f544aa36..6e1a1a20cf6b 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig | |||
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D | |||
51 | 51 | ||
52 | config DRM_EXYNOS_IPP | 52 | config DRM_EXYNOS_IPP |
53 | bool "Exynos DRM IPP" | 53 | bool "Exynos DRM IPP" |
54 | depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM | 54 | depends on DRM_EXYNOS |
55 | help | 55 | help |
56 | Choose this option if you want to use IPP feature for DRM. | 56 | Choose this option if you want to use IPP feature for DRM. |
57 | 57 | ||
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR | |||
69 | 69 | ||
70 | config DRM_EXYNOS_GSC | 70 | config DRM_EXYNOS_GSC |
71 | bool "Exynos DRM GSC" | 71 | bool "Exynos DRM GSC" |
72 | depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 | 72 | depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM |
73 | help | 73 | help |
74 | Choose this option if you want to use Exynos GSC for DRM. | 74 | Choose this option if you want to use Exynos GSC for DRM. |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 9d096a0c5f8d..215131ab1dd2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -171,22 +171,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | |||
171 | file->driver_priv = file_priv; | 171 | file->driver_priv = file_priv; |
172 | 172 | ||
173 | ret = exynos_drm_subdrv_open(dev, file); | 173 | ret = exynos_drm_subdrv_open(dev, file); |
174 | if (ret) { | 174 | if (ret) |
175 | kfree(file_priv); | 175 | goto out; |
176 | file->driver_priv = NULL; | ||
177 | } | ||
178 | 176 | ||
179 | anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, | 177 | anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, |
180 | NULL, 0); | 178 | NULL, 0); |
181 | if (IS_ERR(anon_filp)) { | 179 | if (IS_ERR(anon_filp)) { |
182 | kfree(file_priv); | 180 | ret = PTR_ERR(anon_filp); |
183 | return PTR_ERR(anon_filp); | 181 | goto out; |
184 | } | 182 | } |
185 | 183 | ||
186 | anon_filp->f_mode = FMODE_READ | FMODE_WRITE; | 184 | anon_filp->f_mode = FMODE_READ | FMODE_WRITE; |
187 | file_priv->anon_filp = anon_filp; | 185 | file_priv->anon_filp = anon_filp; |
188 | 186 | ||
189 | return ret; | 187 | return ret; |
188 | out: | ||
189 | kfree(file_priv); | ||
190 | file->driver_priv = NULL; | ||
191 | return ret; | ||
190 | } | 192 | } |
191 | 193 | ||
192 | static void exynos_drm_preclose(struct drm_device *dev, | 194 | static void exynos_drm_preclose(struct drm_device *dev, |
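[Note on the exynos_drm_open() rework above] Both failure points, subdrv open and anon inode creation, now jump to a single 'out' label that frees the per-file data, instead of duplicating the cleanup at each site. A generic sketch of that pattern with hypothetical stand-ins:

#include <linux/errno.h>
#include <linux/slab.h>

struct file_ctx { int unused; };				/* hypothetical */

static int setup_subdev(struct file_ctx *ctx)    { return 0; }	/* stand-in */
static int setup_anon_file(struct file_ctx *ctx) { return 0; }	/* stand-in */

static int open_ctx(void **driver_priv)
{
	struct file_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	*driver_priv = ctx;

	ret = setup_subdev(ctx);
	if (ret)
		goto out;

	ret = setup_anon_file(ctx);
	if (ret)
		goto out;

	return 0;

out:
	kfree(ctx);			/* one cleanup path for every failure */
	*driver_priv = NULL;
	return ret;
}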
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 380aec28840b..6c1885eedfdf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) | |||
607 | reg_type = REG_TYPE_NONE; | 607 | reg_type = REG_TYPE_NONE; |
608 | DRM_ERROR("Unknown register offset![%d]\n", reg_offset); | 608 | DRM_ERROR("Unknown register offset![%d]\n", reg_offset); |
609 | break; | 609 | break; |
610 | }; | 610 | } |
611 | 611 | ||
612 | return reg_type; | 612 | return reg_type; |
613 | } | 613 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index d519a4e5fe40..09312b877470 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/pm_runtime.h> | 18 | #include <linux/pm_runtime.h> |
19 | #include <plat/map-base.h> | ||
20 | 19 | ||
21 | #include <drm/drmP.h> | 20 | #include <drm/drmP.h> |
22 | #include <drm/exynos_drm.h> | 21 | #include <drm/exynos_drm.h> |
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, | |||
826 | DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); | 825 | DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); |
827 | 826 | ||
828 | /* | 827 | /* |
829 | * quf == NULL condition means all event deletion. | 828 | * qbuf == NULL condition means all event deletion. |
830 | * stop operations want to delete all event list. | 829 | * stop operations want to delete all event list. |
831 | * another case delete only same buf id. | 830 | * another case delete only same buf id. |
832 | */ | 831 | */ |
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index a0e10aeb0e67..c021ddc1ffb4 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/io.h> | 34 | #include <linux/io.h> |
35 | #include <linux/of.h> | 35 | #include <linux/of.h> |
36 | #include <linux/of_gpio.h> | 36 | #include <linux/of_gpio.h> |
37 | #include <linux/hdmi.h> | ||
37 | 38 | ||
38 | #include <drm/exynos_drm.h> | 39 | #include <drm/exynos_drm.h> |
39 | 40 | ||
@@ -59,19 +60,6 @@ | |||
59 | #define HDMI_AUI_VERSION 0x01 | 60 | #define HDMI_AUI_VERSION 0x01 |
60 | #define HDMI_AUI_LENGTH 0x0A | 61 | #define HDMI_AUI_LENGTH 0x0A |
61 | 62 | ||
62 | /* HDMI infoframe to configure HDMI out packet header, AUI and AVI */ | ||
63 | enum HDMI_PACKET_TYPE { | ||
64 | /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */ | ||
65 | /* InfoFrame packet type */ | ||
66 | HDMI_PACKET_TYPE_INFOFRAME = 0x80, | ||
67 | /* Vendor-Specific InfoFrame */ | ||
68 | HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1, | ||
69 | /* Auxiliary Video information InfoFrame */ | ||
70 | HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2, | ||
71 | /* Audio information InfoFrame */ | ||
72 | HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4 | ||
73 | }; | ||
74 | |||
75 | enum hdmi_type { | 63 | enum hdmi_type { |
76 | HDMI_TYPE13, | 64 | HDMI_TYPE13, |
77 | HDMI_TYPE14, | 65 | HDMI_TYPE14, |
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = { | |||
379 | }, | 367 | }, |
380 | }; | 368 | }; |
381 | 369 | ||
382 | struct hdmi_infoframe { | ||
383 | enum HDMI_PACKET_TYPE type; | ||
384 | u8 ver; | ||
385 | u8 len; | ||
386 | }; | ||
387 | |||
388 | static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) | 370 | static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) |
389 | { | 371 | { |
390 | return readl(hdata->regs + reg_id); | 372 | return readl(hdata->regs + reg_id); |
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata, | |||
682 | } | 664 | } |
683 | 665 | ||
684 | static void hdmi_reg_infoframe(struct hdmi_context *hdata, | 666 | static void hdmi_reg_infoframe(struct hdmi_context *hdata, |
685 | struct hdmi_infoframe *infoframe) | 667 | union hdmi_infoframe *infoframe) |
686 | { | 668 | { |
687 | u32 hdr_sum; | 669 | u32 hdr_sum; |
688 | u8 chksum; | 670 | u8 chksum; |
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata, | |||
700 | return; | 682 | return; |
701 | } | 683 | } |
702 | 684 | ||
703 | switch (infoframe->type) { | 685 | switch (infoframe->any.type) { |
704 | case HDMI_PACKET_TYPE_AVI: | 686 | case HDMI_INFOFRAME_TYPE_AVI: |
705 | hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC); | 687 | hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC); |
706 | hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type); | 688 | hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type); |
707 | hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver); | 689 | hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, |
708 | hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len); | 690 | infoframe->any.version); |
709 | hdr_sum = infoframe->type + infoframe->ver + infoframe->len; | 691 | hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length); |
692 | hdr_sum = infoframe->any.type + infoframe->any.version + | ||
693 | infoframe->any.length; | ||
710 | 694 | ||
711 | /* Output format zero hardcoded ,RGB YBCR selection */ | 695 | /* Output format zero hardcoded ,RGB YBCR selection */ |
712 | hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 | | 696 | hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 | |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata, | |||
722 | hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); | 706 | hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); |
723 | 707 | ||
724 | chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), | 708 | chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), |
725 | infoframe->len, hdr_sum); | 709 | infoframe->any.length, hdr_sum); |
726 | DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum); | 710 | DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum); |
727 | hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum); | 711 | hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum); |
728 | break; | 712 | break; |
729 | case HDMI_PACKET_TYPE_AUI: | 713 | case HDMI_INFOFRAME_TYPE_AUDIO: |
730 | hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); | 714 | hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); |
731 | hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type); | 715 | hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type); |
732 | hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver); | 716 | hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, |
733 | hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len); | 717 | infoframe->any.version); |
734 | hdr_sum = infoframe->type + infoframe->ver + infoframe->len; | 718 | hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length); |
719 | hdr_sum = infoframe->any.type + infoframe->any.version + | ||
720 | infoframe->any.length; | ||
735 | chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1), | 721 | chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1), |
736 | infoframe->len, hdr_sum); | 722 | infoframe->any.length, hdr_sum); |
737 | DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum); | 723 | DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum); |
738 | hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum); | 724 | hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum); |
739 | break; | 725 | break; |
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata) | |||
985 | 971 | ||
986 | static void hdmi_conf_init(struct hdmi_context *hdata) | 972 | static void hdmi_conf_init(struct hdmi_context *hdata) |
987 | { | 973 | { |
988 | struct hdmi_infoframe infoframe; | 974 | union hdmi_infoframe infoframe; |
989 | 975 | ||
990 | /* disable HPD interrupts from HDMI IP block, use GPIO instead */ | 976 | /* disable HPD interrupts from HDMI IP block, use GPIO instead */ |
991 | hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | | 977 | hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata) | |||
1021 | hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); | 1007 | hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); |
1022 | hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); | 1008 | hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); |
1023 | } else { | 1009 | } else { |
1024 | infoframe.type = HDMI_PACKET_TYPE_AVI; | 1010 | infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI; |
1025 | infoframe.ver = HDMI_AVI_VERSION; | 1011 | infoframe.any.version = HDMI_AVI_VERSION; |
1026 | infoframe.len = HDMI_AVI_LENGTH; | 1012 | infoframe.any.length = HDMI_AVI_LENGTH; |
1027 | hdmi_reg_infoframe(hdata, &infoframe); | 1013 | hdmi_reg_infoframe(hdata, &infoframe); |
1028 | 1014 | ||
1029 | infoframe.type = HDMI_PACKET_TYPE_AUI; | 1015 | infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO; |
1030 | infoframe.ver = HDMI_AUI_VERSION; | 1016 | infoframe.any.version = HDMI_AUI_VERSION; |
1031 | infoframe.len = HDMI_AUI_LENGTH; | 1017 | infoframe.any.length = HDMI_AUI_LENGTH; |
1032 | hdmi_reg_infoframe(hdata, &infoframe); | 1018 | hdmi_reg_infoframe(hdata, &infoframe); |
1033 | 1019 | ||
1034 | /* enable AVI packet every vsync, fixes purple line problem */ | 1020 | /* enable AVI packet every vsync, fixes purple line problem */ |
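[Note on the exynos_hdmi hunks above] The driver-local HDMI_PACKET_TYPE enum and struct hdmi_infoframe are dropped in favour of union hdmi_infoframe from <linux/hdmi.h>, and the register writes read the common header through the union's 'any' member. A minimal sketch of filling that header, using the audio-infoframe version and length values visible in the driver's own defines above:

#include <linux/hdmi.h>

static void fill_audio_infoframe_header(union hdmi_infoframe *frame)
{
	frame->any.type = HDMI_INFOFRAME_TYPE_AUDIO;	/* 0x84 per CEA-861 */
	frame->any.version = 0x01;			/* HDMI_AUI_VERSION above */
	frame->any.length = 0x0A;			/* HDMI_AUI_LENGTH above */
}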
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 400b0c4a10fb..faa77f543a07 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c | |||
@@ -208,7 +208,7 @@ struct tda998x_priv { | |||
208 | # define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1) | 208 | # define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1) |
209 | # define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6) | 209 | # define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6) |
210 | #define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */ | 210 | #define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */ |
211 | # define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0) | 211 | # define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0) |
212 | # define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4) | 212 | # define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4) |
213 | #define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */ | 213 | #define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */ |
214 | # define PLL_SERIAL_3_SRL_CCIR (1 << 0) | 214 | # define PLL_SERIAL_3_SRL_CCIR (1 << 0) |
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p) | |||
528 | { | 528 | { |
529 | uint8_t buf[PB(5) + 1]; | 529 | uint8_t buf[PB(5) + 1]; |
530 | 530 | ||
531 | memset(buf, 0, sizeof(buf)); | ||
531 | buf[HB(0)] = 0x84; | 532 | buf[HB(0)] = 0x84; |
532 | buf[HB(1)] = 0x01; | 533 | buf[HB(1)] = 0x01; |
533 | buf[HB(2)] = 10; | 534 | buf[HB(2)] = 10; |
534 | buf[PB(0)] = 0; | ||
535 | buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */ | 535 | buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */ |
536 | buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */ | 536 | buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */ |
537 | buf[PB(4)] = p->audio_frame[4]; | 537 | buf[PB(4)] = p->audio_frame[4]; |
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, | |||
824 | } | 824 | } |
825 | 825 | ||
826 | div = 148500 / mode->clock; | 826 | div = 148500 / mode->clock; |
827 | if (div != 0) { | ||
828 | div--; | ||
829 | if (div > 3) | ||
830 | div = 3; | ||
831 | } | ||
827 | 832 | ||
828 | /* mute the audio FIFO: */ | 833 | /* mute the audio FIFO: */ |
829 | reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); | 834 | reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); |
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, | |||
913 | 918 | ||
914 | if (priv->rev == TDA19988) { | 919 | if (priv->rev == TDA19988) { |
915 | /* let incoming pixels fill the active space (if any) */ | 920 | /* let incoming pixels fill the active space (if any) */ |
916 | reg_write(encoder, REG_ENABLE_SPACE, 0x01); | 921 | reg_write(encoder, REG_ENABLE_SPACE, 0x00); |
917 | } | 922 | } |
918 | 923 | ||
919 | /* must be last register set: */ | 924 | /* must be last register set: */ |
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder) | |||
1094 | { | 1099 | { |
1095 | struct tda998x_priv *priv = to_tda998x_priv(encoder); | 1100 | struct tda998x_priv *priv = to_tda998x_priv(encoder); |
1096 | drm_i2c_encoder_destroy(encoder); | 1101 | drm_i2c_encoder_destroy(encoder); |
1102 | if (priv->cec) | ||
1103 | i2c_unregister_device(priv->cec); | ||
1097 | kfree(priv); | 1104 | kfree(priv); |
1098 | } | 1105 | } |
1099 | 1106 | ||
@@ -1142,8 +1149,12 @@ tda998x_encoder_init(struct i2c_client *client, | |||
1142 | priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); | 1149 | priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); |
1143 | priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); | 1150 | priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); |
1144 | 1151 | ||
1145 | priv->current_page = 0; | 1152 | priv->current_page = 0xff; |
1146 | priv->cec = i2c_new_dummy(client->adapter, 0x34); | 1153 | priv->cec = i2c_new_dummy(client->adapter, 0x34); |
1154 | if (!priv->cec) { | ||
1155 | kfree(priv); | ||
1156 | return -ENODEV; | ||
1157 | } | ||
1147 | priv->dpms = DRM_MODE_DPMS_OFF; | 1158 | priv->dpms = DRM_MODE_DPMS_OFF; |
1148 | 1159 | ||
1149 | encoder_slave->slave_priv = priv; | 1160 | encoder_slave->slave_priv = priv; |
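[Note on the tda998x hunks above] Unlike many kernel allocators, i2c_new_dummy() returns NULL rather than an ERR_PTR on failure, so the init path now checks for NULL and frees the private data, and the destroy path unregisters the dummy CEC client. Sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/i2c.h>

struct enc_priv { struct i2c_client *cec; };	/* hypothetical private data */

static int enc_attach_cec(struct i2c_client *client, struct enc_priv *priv)
{
	priv->cec = i2c_new_dummy(client->adapter, 0x34);
	if (!priv->cec)		/* returns NULL, not ERR_PTR, on failure */
		return -ENODEV;

	return 0;
}

static void enc_detach_cec(struct enc_priv *priv)
{
	if (priv->cec)
		i2c_unregister_device(priv->cec);
}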
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4a2bf8e3f739..df77e20e3c3d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1831,6 +1831,14 @@ struct drm_i915_file_private { | |||
1831 | 1831 | ||
1832 | /* Early gen2 have a totally busted CS tlb and require pinned batches. */ | 1832 | /* Early gen2 have a totally busted CS tlb and require pinned batches. */ |
1833 | #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) | 1833 | #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) |
1834 | /* | ||
1835 | * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts | ||
1836 | * even when in MSI mode. This results in spurious interrupt warnings if the | ||
1837 | * legacy irq no. is shared with another device. The kernel then disables that | ||
1838 | * interrupt source and so prevents the other device from working properly. | ||
1839 | */ | ||
1840 | #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) | ||
1841 | #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) | ||
1834 | 1842 | ||
1835 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 1843 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
1836 | * rows, which changed the alignment requirements and fence programming. | 1844 | * rows, which changed the alignment requirements and fence programming. |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index d7fd2fd2f0a5..990cf8f43efd 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e, | |||
146 | va_list tmp; | 146 | va_list tmp; |
147 | 147 | ||
148 | va_copy(tmp, args); | 148 | va_copy(tmp, args); |
149 | if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp))) | 149 | len = vsnprintf(NULL, 0, f, tmp); |
150 | va_end(tmp); | ||
151 | |||
152 | if (!__i915_error_seek(e, len)) | ||
150 | return; | 153 | return; |
151 | } | 154 | } |
152 | 155 | ||
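[Note on the i915_gpu_error fix above] The vsnprintf(NULL, 0, ...) length probe is pulled out of the __i915_error_seek() call so that the duplicated argument list can be released: every va_copy() must be paired with a va_end() before the copy goes out of scope. A minimal sketch of the corrected shape:

#include <stdarg.h>
#include <linux/kernel.h>

static int formatted_length(const char *fmt, va_list args)
{
	va_list tmp;
	int len;

	va_copy(tmp, args);
	len = vsnprintf(NULL, 0, fmt, tmp);	/* probe the length only */
	va_end(tmp);				/* mandatory pair for va_copy */

	return len;
}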
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 17d8fcb1b6f7..9fec71175571 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
567 | 567 | ||
568 | vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; | 568 | vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; |
569 | } else { | 569 | } else { |
570 | enum transcoder cpu_transcoder = | 570 | enum transcoder cpu_transcoder = (enum transcoder) pipe; |
571 | intel_pipe_to_cpu_transcoder(dev_priv, pipe); | ||
572 | u32 htotal; | 571 | u32 htotal; |
573 | 572 | ||
574 | htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; | 573 | htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9fa24347963a..4c1672809493 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
8586 | if (ring->id == RCS) | 8586 | if (ring->id == RCS) |
8587 | len += 6; | 8587 | len += 6; |
8588 | 8588 | ||
8589 | /* | ||
8590 | * BSpec MI_DISPLAY_FLIP for IVB: | ||
8591 | * "The full packet must be contained within the same cache line." | ||
8592 | * | ||
8593 | * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same | ||
8594 | * cacheline, if we ever start emitting more commands before | ||
8595 | * the MI_DISPLAY_FLIP we may need to first emit everything else, | ||
8596 | * then do the cacheline alignment, and finally emit the | ||
8597 | * MI_DISPLAY_FLIP. | ||
8598 | */ | ||
8599 | ret = intel_ring_cacheline_align(ring); | ||
8600 | if (ret) | ||
8601 | goto err_unpin; | ||
8602 | |||
8589 | ret = intel_ring_begin(ring, len); | 8603 | ret = intel_ring_begin(ring, len); |
8590 | if (ret) | 8604 | if (ret) |
8591 | goto err_unpin; | 8605 | goto err_unpin; |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5ede4e8e290d..57552eb386b0 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
404 | int i, ret, recv_bytes; | 404 | int i, ret, recv_bytes; |
405 | uint32_t status; | 405 | uint32_t status; |
406 | int try, precharge, clock = 0; | 406 | int try, precharge, clock = 0; |
407 | bool has_aux_irq = true; | 407 | bool has_aux_irq = HAS_AUX_IRQ(dev); |
408 | uint32_t timeout; | 408 | uint32_t timeout; |
409 | 409 | ||
410 | /* dp aux is extremely sensitive to irq latency, hence request the | 410 | /* dp aux is extremely sensitive to irq latency, hence request the |
@@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, | |||
537 | uint8_t msg[20]; | 537 | uint8_t msg[20]; |
538 | int msg_bytes; | 538 | int msg_bytes; |
539 | uint8_t ack; | 539 | uint8_t ack; |
540 | int retry; | ||
540 | 541 | ||
541 | if (WARN_ON(send_bytes > 16)) | 542 | if (WARN_ON(send_bytes > 16)) |
542 | return -E2BIG; | 543 | return -E2BIG; |
@@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, | |||
548 | msg[3] = send_bytes - 1; | 549 | msg[3] = send_bytes - 1; |
549 | memcpy(&msg[4], send, send_bytes); | 550 | memcpy(&msg[4], send, send_bytes); |
550 | msg_bytes = send_bytes + 4; | 551 | msg_bytes = send_bytes + 4; |
551 | for (;;) { | 552 | for (retry = 0; retry < 7; retry++) { |
552 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); | 553 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); |
553 | if (ret < 0) | 554 | if (ret < 0) |
554 | return ret; | 555 | return ret; |
555 | ack >>= 4; | 556 | ack >>= 4; |
556 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) | 557 | if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) |
557 | break; | 558 | return send_bytes; |
558 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) | 559 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) |
559 | udelay(100); | 560 | usleep_range(400, 500); |
560 | else | 561 | else |
561 | return -EIO; | 562 | return -EIO; |
562 | } | 563 | } |
563 | return send_bytes; | 564 | |
565 | DRM_ERROR("too many retries, giving up\n"); | ||
566 | return -EIO; | ||
564 | } | 567 | } |
565 | 568 | ||
566 | /* Write a single byte to the aux channel in native mode */ | 569 | /* Write a single byte to the aux channel in native mode */ |
@@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, | |||
582 | int reply_bytes; | 585 | int reply_bytes; |
583 | uint8_t ack; | 586 | uint8_t ack; |
584 | int ret; | 587 | int ret; |
588 | int retry; | ||
585 | 589 | ||
586 | if (WARN_ON(recv_bytes > 19)) | 590 | if (WARN_ON(recv_bytes > 19)) |
587 | return -E2BIG; | 591 | return -E2BIG; |
@@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, | |||
595 | msg_bytes = 4; | 599 | msg_bytes = 4; |
596 | reply_bytes = recv_bytes + 1; | 600 | reply_bytes = recv_bytes + 1; |
597 | 601 | ||
598 | for (;;) { | 602 | for (retry = 0; retry < 7; retry++) { |
599 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, | 603 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, |
600 | reply, reply_bytes); | 604 | reply, reply_bytes); |
601 | if (ret == 0) | 605 | if (ret == 0) |
@@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, | |||
608 | return ret - 1; | 612 | return ret - 1; |
609 | } | 613 | } |
610 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) | 614 | else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) |
611 | udelay(100); | 615 | usleep_range(400, 500); |
612 | else | 616 | else |
613 | return -EIO; | 617 | return -EIO; |
614 | } | 618 | } |
619 | |||
620 | DRM_ERROR("too many retries, giving up\n"); | ||
621 | return -EIO; | ||
615 | } | 622 | } |
616 | 623 | ||
617 | static int | 624 | static int |
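The two intel_dp.c hunks above replace open-ended for (;;) loops in the native AUX write and read paths with a bounded retry count (7 attempts) and a longer usleep_range() back-off on DEFER replies. Below is a minimal, self-contained C sketch of that bounded-retry shape; the transfer function and reply codes are hypothetical stand-ins for illustration, not the i915 API.

    #include <stdio.h>

    #define REPLY_ACK   0
    #define REPLY_DEFER 1
    #define MAX_RETRIES 7

    /* Hypothetical transfer: defers a few times, then acks. */
    static int do_transfer(int attempt)
    {
        return (attempt < 3) ? REPLY_DEFER : REPLY_ACK;
    }

    /* Bounded retry: give the sink a few chances to DEFER, then give up. */
    static int transfer_with_retries(void)
    {
        int retry;

        for (retry = 0; retry < MAX_RETRIES; retry++) {
            int reply = do_transfer(retry);

            if (reply == REPLY_ACK)
                return 0;          /* success */
            if (reply != REPLY_DEFER)
                return -1;         /* hard error, no point retrying */
            /* DEFER: back off briefly before the next attempt
             * (the driver sleeps ~400-500 us here). */
        }
        return -1;                 /* too many retries, give up */
    }

    int main(void)
    {
        printf("result: %d\n", transfer_with_retries());
        return 0;
    }

The key difference from the old code is that a sink which defers forever now produces a logged -EIO instead of an unbounded loop.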
@@ -1869,10 +1876,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder) | |||
1869 | 1876 | ||
1870 | mutex_unlock(&dev_priv->dpio_lock); | 1877 | mutex_unlock(&dev_priv->dpio_lock); |
1871 | 1878 | ||
1872 | /* init power sequencer on this pipe and port */ | 1879 | if (is_edp(intel_dp)) { |
1873 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); | 1880 | /* init power sequencer on this pipe and port */ |
1874 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, | 1881 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); |
1875 | &power_seq); | 1882 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, |
1883 | &power_seq); | ||
1884 | } | ||
1876 | 1885 | ||
1877 | intel_enable_dp(encoder); | 1886 | intel_enable_dp(encoder); |
1878 | 1887 | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index b1dc33f47899..d33b61d0dd33 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) | |||
258 | algo->data = bus; | 258 | algo->data = bus; |
259 | } | 259 | } |
260 | 260 | ||
261 | /* | ||
262 | * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI | ||
263 | * mode. This results in spurious interrupt warnings if the legacy irq no. is | ||
264 | * shared with another device. The kernel then disables that interrupt source | ||
265 | * and so prevents the other device from working properly. | ||
266 | */ | ||
267 | #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) | ||
268 | static int | 261 | static int |
269 | gmbus_wait_hw_status(struct drm_i915_private *dev_priv, | 262 | gmbus_wait_hw_status(struct drm_i915_private *dev_priv, |
270 | u32 gmbus2_status, | 263 | u32 gmbus2_status, |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 4e960ec7419f..acde2945eb8a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -226,6 +226,8 @@ struct opregion_asle { | |||
226 | #define ACPI_DIGITAL_OUTPUT (3<<8) | 226 | #define ACPI_DIGITAL_OUTPUT (3<<8) |
227 | #define ACPI_LVDS_OUTPUT (4<<8) | 227 | #define ACPI_LVDS_OUTPUT (4<<8) |
228 | 228 | ||
229 | #define MAX_DSLP 1500 | ||
230 | |||
229 | #ifdef CONFIG_ACPI | 231 | #ifdef CONFIG_ACPI |
230 | static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | 232 | static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) |
231 | { | 233 | { |
@@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | |||
260 | /* The spec says 2ms should be the default, but it's too small | 262 | /* The spec says 2ms should be the default, but it's too small |
261 | * for some machines. */ | 263 | * for some machines. */ |
262 | dslp = 50; | 264 | dslp = 50; |
263 | } else if (dslp > 500) { | 265 | } else if (dslp > MAX_DSLP) { |
264 | /* Hey bios, trust must be earned. */ | 266 | /* Hey bios, trust must be earned. */ |
265 | WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp); | 267 | DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, " |
266 | dslp = 500; | 268 | "using %u ms instead\n", dslp, MAX_DSLP); |
269 | dslp = MAX_DSLP; | ||
267 | } | 270 | } |
268 | 271 | ||
269 | /* The spec tells us to do this, but we are the only user... */ | 272 | /* The spec tells us to do this, but we are the only user... */ |
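The intel_opregion.c hunk raises the clamp on the BIOS-supplied sleep timeout to a named MAX_DSLP and downgrades the warning to a one-time informational message. A standalone C sketch of clamping an untrusted firmware value with a once-only notice follows; the names and the user-space fprintf are illustrative stand-ins, not the i915 code.

    #include <stdio.h>

    #define MAX_SLEEP_MS     1500
    #define DEFAULT_SLEEP_MS 50

    /* Clamp a firmware-supplied timeout, complaining only the first time. */
    static unsigned int clamp_sleep(unsigned int requested_ms)
    {
        static int warned;

        if (requested_ms == 0)
            return DEFAULT_SLEEP_MS;       /* the spec default is too small */
        if (requested_ms > MAX_SLEEP_MS) {
            if (!warned) {
                fprintf(stderr,
                        "firmware requested %u ms, using %u ms instead\n",
                        requested_ms, MAX_SLEEP_MS);
                warned = 1;
            }
            return MAX_SLEEP_MS;
        }
        return requested_ms;
    }

    int main(void)
    {
        printf("%u %u %u\n", clamp_sleep(0), clamp_sleep(300), clamp_sleep(9000));
        return 0;
    }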
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b7f1742caf87..31b36c5ac894 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring, | |||
1653 | return 0; | 1653 | return 0; |
1654 | } | 1654 | } |
1655 | 1655 | ||
1656 | /* Align the ring tail to a cacheline boundary */ | ||
1657 | int intel_ring_cacheline_align(struct intel_ring_buffer *ring) | ||
1658 | { | ||
1659 | int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t); | ||
1660 | int ret; | ||
1661 | |||
1662 | if (num_dwords == 0) | ||
1663 | return 0; | ||
1664 | |||
1665 | ret = intel_ring_begin(ring, num_dwords); | ||
1666 | if (ret) | ||
1667 | return ret; | ||
1668 | |||
1669 | while (num_dwords--) | ||
1670 | intel_ring_emit(ring, MI_NOOP); | ||
1671 | |||
1672 | intel_ring_advance(ring); | ||
1673 | |||
1674 | return 0; | ||
1675 | } | ||
1676 | |||
1656 | void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) | 1677 | void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) |
1657 | { | 1678 | { |
1658 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 1679 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
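intel_ring_cacheline_align() pads the ring tail out to a 64-byte boundary by emitting MI_NOOP dwords. The arithmetic is the interesting part; here is a small standalone C sketch of the same computation (names hypothetical, and it prints the pad count rather than writing to a ring).

    #include <stdio.h>
    #include <stdint.h>

    #define CACHELINE_BYTES 64

    /* Number of 4-byte NOPs needed to move 'tail' to the next 64-byte boundary. */
    static int pad_dwords(uint32_t tail)
    {
        return (CACHELINE_BYTES - (tail & (CACHELINE_BYTES - 1))) /
               (int)sizeof(uint32_t);
    }

    int main(void)
    {
        uint32_t tails[] = { 0x100, 0x104, 0x13c, 0x140 };
        for (unsigned i = 0; i < sizeof(tails) / sizeof(tails[0]); i++)
            printf("tail=0x%03x -> pad %d dwords\n", tails[i],
                   pad_dwords(tails[i]));
        return 0;
    }

Note that with this formula an already-aligned tail yields a full cache line of 16 NOPs rather than zero; the sketch reproduces the committed arithmetic as-is.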
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 71a73f4fe252..0b243ce33714 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring, | |||
233 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); | 233 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); |
234 | 234 | ||
235 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); | 235 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); |
236 | int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring); | ||
236 | static inline void intel_ring_emit(struct intel_ring_buffer *ring, | 237 | static inline void intel_ring_emit(struct intel_ring_buffer *ring, |
237 | u32 data) | 238 | u32 data) |
238 | { | 239 | { |
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 1964f4f0d452..84c5b13b33c9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | |||
@@ -39,6 +39,7 @@ struct mdp4_crtc { | |||
39 | spinlock_t lock; | 39 | spinlock_t lock; |
40 | bool stale; | 40 | bool stale; |
41 | uint32_t width, height; | 41 | uint32_t width, height; |
42 | uint32_t x, y; | ||
42 | 43 | ||
43 | /* next cursor to scan-out: */ | 44 | /* next cursor to scan-out: */ |
44 | uint32_t next_iova; | 45 | uint32_t next_iova; |
@@ -57,9 +58,16 @@ struct mdp4_crtc { | |||
57 | #define PENDING_FLIP 0x2 | 58 | #define PENDING_FLIP 0x2 |
58 | atomic_t pending; | 59 | atomic_t pending; |
59 | 60 | ||
60 | /* the fb that we currently hold a scanout ref to: */ | 61 | /* the fb that we logically (from PoV of KMS API) hold a ref |
62 | * to. Which we may not yet be scanning out (we may still | ||
63 | * be scanning out previous in case of page_flip while waiting | ||
64 | * for gpu rendering to complete: | ||
65 | */ | ||
61 | struct drm_framebuffer *fb; | 66 | struct drm_framebuffer *fb; |
62 | 67 | ||
68 | /* the fb that we currently hold a scanout ref to: */ | ||
69 | struct drm_framebuffer *scanout_fb; | ||
70 | |||
63 | /* for unref'ing framebuffers after scanout completes: */ | 71 | /* for unref'ing framebuffers after scanout completes: */ |
64 | struct drm_flip_work unref_fb_work; | 72 | struct drm_flip_work unref_fb_work; |
65 | 73 | ||
@@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc) | |||
77 | return to_mdp4_kms(to_mdp_kms(priv->kms)); | 85 | return to_mdp4_kms(to_mdp_kms(priv->kms)); |
78 | } | 86 | } |
79 | 87 | ||
80 | static void update_fb(struct drm_crtc *crtc, bool async, | 88 | static void request_pending(struct drm_crtc *crtc, uint32_t pending) |
81 | struct drm_framebuffer *new_fb) | ||
82 | { | 89 | { |
83 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 90 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
84 | struct drm_framebuffer *old_fb = mdp4_crtc->fb; | ||
85 | 91 | ||
86 | if (old_fb) | 92 | atomic_or(pending, &mdp4_crtc->pending); |
87 | drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); | 93 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); |
94 | } | ||
95 | |||
96 | static void crtc_flush(struct drm_crtc *crtc) | ||
97 | { | ||
98 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
99 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
100 | uint32_t i, flush = 0; | ||
101 | |||
102 | for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { | ||
103 | struct drm_plane *plane = mdp4_crtc->planes[i]; | ||
104 | if (plane) { | ||
105 | enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); | ||
106 | flush |= pipe2flush(pipe_id); | ||
107 | } | ||
108 | } | ||
109 | flush |= ovlp2flush(mdp4_crtc->ovlp); | ||
110 | |||
111 | DBG("%s: flush=%08x", mdp4_crtc->name, flush); | ||
112 | |||
113 | mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); | ||
114 | } | ||
115 | |||
116 | static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) | ||
117 | { | ||
118 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
119 | struct drm_framebuffer *old_fb = mdp4_crtc->fb; | ||
88 | 120 | ||
89 | /* grab reference to incoming scanout fb: */ | 121 | /* grab reference to incoming scanout fb: */ |
90 | drm_framebuffer_reference(new_fb); | 122 | drm_framebuffer_reference(new_fb); |
91 | mdp4_crtc->base.fb = new_fb; | 123 | mdp4_crtc->base.fb = new_fb; |
92 | mdp4_crtc->fb = new_fb; | 124 | mdp4_crtc->fb = new_fb; |
93 | 125 | ||
94 | if (!async) { | 126 | if (old_fb) |
95 | /* enable vblank to pick up the old_fb */ | 127 | drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); |
96 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); | 128 | } |
97 | } | 129 | |
130 | /* unlike update_fb(), take a ref to the new scanout fb *before* updating | ||
131 | * plane, then call this. Needed to ensure we don't unref the buffer that | ||
132 | * is actually still being scanned out. | ||
133 | * | ||
134 | * Note that this whole thing goes away with atomic.. since we can defer | ||
135 | * calling into driver until rendering is done. | ||
136 | */ | ||
137 | static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) | ||
138 | { | ||
139 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
140 | |||
141 | /* flush updates, to make sure hw is updated to new scanout fb, | ||
142 | * so that we can safely queue unref to current fb (ie. next | ||
143 | * vblank we know hw is done w/ previous scanout_fb). | ||
144 | */ | ||
145 | crtc_flush(crtc); | ||
146 | |||
147 | if (mdp4_crtc->scanout_fb) | ||
148 | drm_flip_work_queue(&mdp4_crtc->unref_fb_work, | ||
149 | mdp4_crtc->scanout_fb); | ||
150 | |||
151 | mdp4_crtc->scanout_fb = fb; | ||
152 | |||
153 | /* enable vblank to complete flip: */ | ||
154 | request_pending(crtc, PENDING_FLIP); | ||
98 | } | 155 | } |
99 | 156 | ||
100 | /* if file!=NULL, this is preclose potential cancel-flip path */ | 157 | /* if file!=NULL, this is preclose potential cancel-flip path */ |
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) | |||
120 | spin_unlock_irqrestore(&dev->event_lock, flags); | 177 | spin_unlock_irqrestore(&dev->event_lock, flags); |
121 | } | 178 | } |
122 | 179 | ||
123 | static void crtc_flush(struct drm_crtc *crtc) | ||
124 | { | ||
125 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
126 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
127 | uint32_t i, flush = 0; | ||
128 | |||
129 | for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { | ||
130 | struct drm_plane *plane = mdp4_crtc->planes[i]; | ||
131 | if (plane) { | ||
132 | enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); | ||
133 | flush |= pipe2flush(pipe_id); | ||
134 | } | ||
135 | } | ||
136 | flush |= ovlp2flush(mdp4_crtc->ovlp); | ||
137 | |||
138 | DBG("%s: flush=%08x", mdp4_crtc->name, flush); | ||
139 | |||
140 | mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); | ||
141 | } | ||
142 | |||
143 | static void request_pending(struct drm_crtc *crtc, uint32_t pending) | ||
144 | { | ||
145 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
146 | |||
147 | atomic_or(pending, &mdp4_crtc->pending); | ||
148 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); | ||
149 | } | ||
150 | |||
151 | static void pageflip_cb(struct msm_fence_cb *cb) | 180 | static void pageflip_cb(struct msm_fence_cb *cb) |
152 | { | 181 | { |
153 | struct mdp4_crtc *mdp4_crtc = | 182 | struct mdp4_crtc *mdp4_crtc = |
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb) | |||
158 | if (!fb) | 187 | if (!fb) |
159 | return; | 188 | return; |
160 | 189 | ||
190 | drm_framebuffer_reference(fb); | ||
161 | mdp4_plane_set_scanout(mdp4_crtc->plane, fb); | 191 | mdp4_plane_set_scanout(mdp4_crtc->plane, fb); |
162 | crtc_flush(crtc); | 192 | update_scanout(crtc, fb); |
163 | |||
164 | /* enable vblank to complete flip: */ | ||
165 | request_pending(crtc, PENDING_FLIP); | ||
166 | } | 193 | } |
167 | 194 | ||
168 | static void unref_fb_worker(struct drm_flip_work *work, void *val) | 195 | static void unref_fb_worker(struct drm_flip_work *work, void *val) |
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc, | |||
320 | mode->vsync_end, mode->vtotal, | 347 | mode->vsync_end, mode->vtotal, |
321 | mode->type, mode->flags); | 348 | mode->type, mode->flags); |
322 | 349 | ||
350 | /* grab extra ref for update_scanout() */ | ||
351 | drm_framebuffer_reference(crtc->fb); | ||
352 | |||
353 | ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, | ||
354 | 0, 0, mode->hdisplay, mode->vdisplay, | ||
355 | x << 16, y << 16, | ||
356 | mode->hdisplay << 16, mode->vdisplay << 16); | ||
357 | if (ret) { | ||
358 | drm_framebuffer_unreference(crtc->fb); | ||
359 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | ||
360 | mdp4_crtc->name, ret); | ||
361 | return ret; | ||
362 | } | ||
363 | |||
323 | mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), | 364 | mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), |
324 | MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | | 365 | MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | |
325 | MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); | 366 | MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); |
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc, | |||
341 | 382 | ||
342 | mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); | 383 | mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); |
343 | 384 | ||
344 | update_fb(crtc, false, crtc->fb); | ||
345 | |||
346 | ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, | ||
347 | 0, 0, mode->hdisplay, mode->vdisplay, | ||
348 | x << 16, y << 16, | ||
349 | mode->hdisplay << 16, mode->vdisplay << 16); | ||
350 | if (ret) { | ||
351 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | ||
352 | mdp4_crtc->name, ret); | ||
353 | return ret; | ||
354 | } | ||
355 | |||
356 | if (dma == DMA_E) { | 385 | if (dma == DMA_E) { |
357 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); | 386 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); |
358 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); | 387 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); |
359 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); | 388 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); |
360 | } | 389 | } |
361 | 390 | ||
391 | update_fb(crtc, crtc->fb); | ||
392 | update_scanout(crtc, crtc->fb); | ||
393 | |||
362 | return 0; | 394 | return 0; |
363 | } | 395 | } |
364 | 396 | ||
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | |||
385 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 417 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
386 | struct drm_plane *plane = mdp4_crtc->plane; | 418 | struct drm_plane *plane = mdp4_crtc->plane; |
387 | struct drm_display_mode *mode = &crtc->mode; | 419 | struct drm_display_mode *mode = &crtc->mode; |
420 | int ret; | ||
388 | 421 | ||
389 | update_fb(crtc, false, crtc->fb); | 422 | /* grab extra ref for update_scanout() */ |
423 | drm_framebuffer_reference(crtc->fb); | ||
390 | 424 | ||
391 | return mdp4_plane_mode_set(plane, crtc, crtc->fb, | 425 | ret = mdp4_plane_mode_set(plane, crtc, crtc->fb, |
392 | 0, 0, mode->hdisplay, mode->vdisplay, | 426 | 0, 0, mode->hdisplay, mode->vdisplay, |
393 | x << 16, y << 16, | 427 | x << 16, y << 16, |
394 | mode->hdisplay << 16, mode->vdisplay << 16); | 428 | mode->hdisplay << 16, mode->vdisplay << 16); |
429 | if (ret) { | ||
430 | drm_framebuffer_unreference(crtc->fb); | ||
431 | return ret; | ||
432 | } | ||
433 | |||
434 | update_fb(crtc, crtc->fb); | ||
435 | update_scanout(crtc, crtc->fb); | ||
436 | |||
437 | return 0; | ||
395 | } | 438 | } |
396 | 439 | ||
397 | static void mdp4_crtc_load_lut(struct drm_crtc *crtc) | 440 | static void mdp4_crtc_load_lut(struct drm_crtc *crtc) |
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc, | |||
419 | mdp4_crtc->event = event; | 462 | mdp4_crtc->event = event; |
420 | spin_unlock_irqrestore(&dev->event_lock, flags); | 463 | spin_unlock_irqrestore(&dev->event_lock, flags); |
421 | 464 | ||
422 | update_fb(crtc, true, new_fb); | 465 | update_fb(crtc, new_fb); |
423 | 466 | ||
424 | return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); | 467 | return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); |
425 | } | 468 | } |
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc, | |||
442 | static void update_cursor(struct drm_crtc *crtc) | 485 | static void update_cursor(struct drm_crtc *crtc) |
443 | { | 486 | { |
444 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 487 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
488 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
445 | enum mdp4_dma dma = mdp4_crtc->dma; | 489 | enum mdp4_dma dma = mdp4_crtc->dma; |
446 | unsigned long flags; | 490 | unsigned long flags; |
447 | 491 | ||
448 | spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); | 492 | spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); |
449 | if (mdp4_crtc->cursor.stale) { | 493 | if (mdp4_crtc->cursor.stale) { |
450 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
451 | struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; | 494 | struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; |
452 | struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; | 495 | struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; |
453 | uint32_t iova = mdp4_crtc->cursor.next_iova; | 496 | uint32_t iova = mdp4_crtc->cursor.next_iova; |
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc) | |||
479 | mdp4_crtc->cursor.scanout_bo = next_bo; | 522 | mdp4_crtc->cursor.scanout_bo = next_bo; |
480 | mdp4_crtc->cursor.stale = false; | 523 | mdp4_crtc->cursor.stale = false; |
481 | } | 524 | } |
525 | |||
526 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), | ||
527 | MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) | | ||
528 | MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y)); | ||
529 | |||
482 | spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); | 530 | spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); |
483 | } | 531 | } |
484 | 532 | ||
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, | |||
530 | drm_gem_object_unreference_unlocked(old_bo); | 578 | drm_gem_object_unreference_unlocked(old_bo); |
531 | } | 579 | } |
532 | 580 | ||
581 | crtc_flush(crtc); | ||
533 | request_pending(crtc, PENDING_CURSOR); | 582 | request_pending(crtc, PENDING_CURSOR); |
534 | 583 | ||
535 | return 0; | 584 | return 0; |
@@ -542,12 +591,15 @@ fail: | |||
542 | static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | 591 | static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) |
543 | { | 592 | { |
544 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 593 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
545 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | 594 | unsigned long flags; |
546 | enum mdp4_dma dma = mdp4_crtc->dma; | ||
547 | 595 | ||
548 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), | 596 | spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); |
549 | MDP4_DMA_CURSOR_POS_X(x) | | 597 | mdp4_crtc->cursor.x = x; |
550 | MDP4_DMA_CURSOR_POS_Y(y)); | 598 | mdp4_crtc->cursor.y = y; |
599 | spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); | ||
600 | |||
601 | crtc_flush(crtc); | ||
602 | request_pending(crtc, PENDING_CURSOR); | ||
551 | 603 | ||
552 | return 0; | 604 | return 0; |
553 | } | 605 | } |
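mdp4_crtc_cursor_move() no longer pokes the position register directly; it records x/y under the cursor spinlock and lets update_cursor() apply them together with any pending image swap. A user-space C sketch of that "record under lock, apply later" pattern, using a pthread mutex as a stand-in for the spinlock (all names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    struct cursor_state {
        pthread_mutex_t lock;
        int x, y;
        int stale;          /* a new image is waiting to be programmed */
    };

    /* Fast path: just record the new position and request a flush. */
    static void cursor_move(struct cursor_state *c, int x, int y)
    {
        pthread_mutex_lock(&c->lock);
        c->x = x;
        c->y = y;
        pthread_mutex_unlock(&c->lock);
        /* a flush/pending-cursor request would be queued here */
    }

    /* Deferred path: program image (if stale) and position together. */
    static void cursor_update(struct cursor_state *c)
    {
        pthread_mutex_lock(&c->lock);
        if (c->stale) {
            /* swap in the new cursor image here */
            c->stale = 0;
        }
        printf("program cursor at %d,%d\n", c->x, c->y);
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct cursor_state c = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 1 };
        cursor_move(&c, 10, 20);
        cursor_update(&c);
        return 0;
    }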
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, | |||
713 | crtc = &mdp4_crtc->base; | 765 | crtc = &mdp4_crtc->base; |
714 | 766 | ||
715 | mdp4_crtc->plane = plane; | 767 | mdp4_crtc->plane = plane; |
768 | mdp4_crtc->id = id; | ||
716 | 769 | ||
717 | mdp4_crtc->ovlp = ovlp_id; | 770 | mdp4_crtc->ovlp = ovlp_id; |
718 | mdp4_crtc->dma = dma_id; | 771 | mdp4_crtc->dma = dma_id; |
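Overall, the mdp4_crtc.c rework separates "the fb KMS logically holds" (update_fb) from "the fb the hardware is scanning out" (update_scanout), and takes an extra reference before touching the plane so a failed mode set can drop it cleanly. A compact standalone sketch of that reference discipline, with a toy refcount standing in for drm_framebuffer (names hypothetical, and the real driver defers the unref to vblank via the flip work):

    #include <stdio.h>

    struct fb { int refs; const char *name; };

    static void fb_ref(struct fb *f)   { f->refs++; }
    static void fb_unref(struct fb *f) { f->refs--; }

    static struct fb *scanout_fb;      /* what the hardware is showing */

    /* Returns 0 on success; nonzero means the plane rejected the config. */
    static int plane_mode_set(struct fb *f) { (void)f; return 0; }

    static int set_mode(struct fb *new_fb)
    {
        /* grab a ref for the scanout hand-over *before* touching the plane */
        fb_ref(new_fb);

        if (plane_mode_set(new_fb)) {
            fb_unref(new_fb);          /* failed: give the extra ref back */
            return -1;
        }

        /* flush hardware, then retire the previous scanout buffer */
        if (scanout_fb)
            fb_unref(scanout_fb);
        scanout_fb = new_fb;
        return 0;
    }

    int main(void)
    {
        struct fb a = { 1, "a" }, b = { 1, "b" };
        set_mode(&a);
        set_mode(&b);
        printf("a.refs=%d b.refs=%d scanout=%s\n",
               a.refs, b.refs, scanout_fb->name);
        return 0;
    }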
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 2406027200ec..1e893dd13859 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | |||
@@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane, | |||
170 | MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); | 170 | MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); |
171 | 171 | ||
172 | mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), | 172 | mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), |
173 | MDP4_PIPE_SRC_XY_X(crtc_x) | | 173 | MDP4_PIPE_DST_XY_X(crtc_x) | |
174 | MDP4_PIPE_SRC_XY_Y(crtc_y)); | 174 | MDP4_PIPE_DST_XY_Y(crtc_y)); |
175 | 175 | ||
176 | mdp4_plane_set_scanout(plane, fb); | 176 | mdp4_plane_set_scanout(plane, fb); |
177 | 177 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 71a3b2345eb3..f2794021f086 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
@@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc, | |||
296 | x << 16, y << 16, | 296 | x << 16, y << 16, |
297 | mode->hdisplay << 16, mode->vdisplay << 16); | 297 | mode->hdisplay << 16, mode->vdisplay << 16); |
298 | if (ret) { | 298 | if (ret) { |
299 | drm_framebuffer_unreference(crtc->fb); | ||
299 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | 300 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", |
300 | mdp5_crtc->name, ret); | 301 | mdp5_crtc->name, ret); |
301 | return ret; | 302 | return ret; |
@@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | |||
343 | 0, 0, mode->hdisplay, mode->vdisplay, | 344 | 0, 0, mode->hdisplay, mode->vdisplay, |
344 | x << 16, y << 16, | 345 | x << 16, y << 16, |
345 | mode->hdisplay << 16, mode->vdisplay << 16); | 346 | mode->hdisplay << 16, mode->vdisplay << 16); |
347 | if (ret) { | ||
348 | drm_framebuffer_unreference(crtc->fb); | ||
349 | return ret; | ||
350 | } | ||
346 | 351 | ||
347 | update_fb(crtc, crtc->fb); | 352 | update_fb(crtc, crtc->fb); |
348 | update_scanout(crtc, crtc->fb); | 353 | update_scanout(crtc, crtc->fb); |
349 | 354 | ||
350 | return ret; | 355 | return 0; |
351 | } | 356 | } |
352 | 357 | ||
353 | static void mdp5_crtc_load_lut(struct drm_crtc *crtc) | 358 | static void mdp5_crtc_load_lut(struct drm_crtc *crtc) |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index d8d60c969ac7..3da8264d3039 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, | |||
644 | 644 | ||
645 | fail: | 645 | fail: |
646 | if (obj) | 646 | if (obj) |
647 | drm_gem_object_unreference_unlocked(obj); | 647 | drm_gem_object_unreference(obj); |
648 | 648 | ||
649 | return ERR_PTR(ret); | 649 | return ERR_PTR(ret); |
650 | } | 650 | } |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5281d4bc37f7..5423e914e491 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
@@ -163,7 +163,7 @@ retry: | |||
163 | 163 | ||
164 | 164 | ||
165 | /* if locking succeeded, pin bo: */ | 165 | /* if locking succeeded, pin bo: */ |
166 | ret = msm_gem_get_iova(&msm_obj->base, | 166 | ret = msm_gem_get_iova_locked(&msm_obj->base, |
167 | submit->gpu->id, &iova); | 167 | submit->gpu->id, &iova); |
168 | 168 | ||
169 | /* this would break the logic in the fail path.. there is no | 169 | /* this would break the logic in the fail path.. there is no |
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob | |||
247 | /* For now, just map the entire thing. Eventually we probably | 247 | /* For now, just map the entire thing. Eventually we probably |
248 | * to do it page-by-page, w/ kmap() if not vmap()d.. | 248 | * to do it page-by-page, w/ kmap() if not vmap()d.. |
249 | */ | 249 | */ |
250 | ptr = msm_gem_vaddr(&obj->base); | 250 | ptr = msm_gem_vaddr_locked(&obj->base); |
251 | 251 | ||
252 | if (IS_ERR(ptr)) { | 252 | if (IS_ERR(ptr)) { |
253 | ret = PTR_ERR(ptr); | 253 | ret = PTR_ERR(ptr); |
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail) | |||
307 | { | 307 | { |
308 | unsigned i; | 308 | unsigned i; |
309 | 309 | ||
310 | mutex_lock(&submit->dev->struct_mutex); | ||
311 | for (i = 0; i < submit->nr_bos; i++) { | 310 | for (i = 0; i < submit->nr_bos; i++) { |
312 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | 311 | struct msm_gem_object *msm_obj = submit->bos[i].obj; |
313 | submit_unlock_unpin_bo(submit, i); | 312 | submit_unlock_unpin_bo(submit, i); |
314 | list_del_init(&msm_obj->submit_entry); | 313 | list_del_init(&msm_obj->submit_entry); |
315 | drm_gem_object_unreference(&msm_obj->base); | 314 | drm_gem_object_unreference(&msm_obj->base); |
316 | } | 315 | } |
317 | mutex_unlock(&submit->dev->struct_mutex); | ||
318 | 316 | ||
319 | ww_acquire_fini(&submit->ticket); | 317 | ww_acquire_fini(&submit->ticket); |
320 | kfree(submit); | 318 | kfree(submit); |
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
342 | if (args->nr_cmds > MAX_CMDS) | 340 | if (args->nr_cmds > MAX_CMDS) |
343 | return -EINVAL; | 341 | return -EINVAL; |
344 | 342 | ||
343 | mutex_lock(&dev->struct_mutex); | ||
344 | |||
345 | submit = submit_create(dev, gpu, args->nr_bos); | 345 | submit = submit_create(dev, gpu, args->nr_bos); |
346 | if (!submit) { | 346 | if (!submit) { |
347 | ret = -ENOMEM; | 347 | ret = -ENOMEM; |
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
410 | out: | 410 | out: |
411 | if (submit) | 411 | if (submit) |
412 | submit_cleanup(submit, !!ret); | 412 | submit_cleanup(submit, !!ret); |
413 | mutex_unlock(&dev->struct_mutex); | ||
413 | return ret; | 414 | return ret; |
414 | } | 415 | } |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 4ebce8be489d..0cfe3f426ee4 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
@@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
298 | struct msm_drm_private *priv = dev->dev_private; | 298 | struct msm_drm_private *priv = dev->dev_private; |
299 | int i, ret; | 299 | int i, ret; |
300 | 300 | ||
301 | mutex_lock(&dev->struct_mutex); | ||
302 | |||
303 | submit->fence = ++priv->next_fence; | 301 | submit->fence = ++priv->next_fence; |
304 | 302 | ||
305 | gpu->submitted_fence = submit->fence; | 303 | gpu->submitted_fence = submit->fence; |
@@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
331 | msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); | 329 | msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); |
332 | } | 330 | } |
333 | hangcheck_timer_reset(gpu); | 331 | hangcheck_timer_reset(gpu); |
334 | mutex_unlock(&dev->struct_mutex); | ||
335 | 332 | ||
336 | return ret; | 333 | return ret; |
337 | } | 334 | } |
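Taken together, the msm_gem_submit.c and msm_gpu.c hunks widen the scope of dev->struct_mutex: the submit ioctl now takes it once around create/pin/submit/cleanup, and the inner helpers switch to the _locked variants instead of locking piecemeal. A standalone sketch of that "one lock around the whole transaction" shape, with a pthread mutex standing in for struct_mutex (names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /* _locked helpers: callers must already hold big_lock. */
    static int pin_buffers_locked(void)    { return 0; }
    static int queue_commands_locked(void) { return 0; }
    static void cleanup_locked(void)       { }

    /* Before: each step locked and unlocked on its own.  After: one
     * critical section covers pinning, queueing and cleanup, so the
     * state they share cannot change between steps. */
    static int submit_ioctl(void)
    {
        int ret;

        pthread_mutex_lock(&big_lock);
        ret = pin_buffers_locked();
        if (!ret)
            ret = queue_commands_locked();
        cleanup_locked();
        pthread_mutex_unlock(&big_lock);
        return ret;
    }

    int main(void)
    {
        printf("submit: %d\n", submit_ioctl());
        return 0;
    }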
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index e88145ba1bf5..d310c195bdfe 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile | |||
@@ -141,6 +141,7 @@ nouveau-y += core/subdev/mc/base.o | |||
141 | nouveau-y += core/subdev/mc/nv04.o | 141 | nouveau-y += core/subdev/mc/nv04.o |
142 | nouveau-y += core/subdev/mc/nv40.o | 142 | nouveau-y += core/subdev/mc/nv40.o |
143 | nouveau-y += core/subdev/mc/nv44.o | 143 | nouveau-y += core/subdev/mc/nv44.o |
144 | nouveau-y += core/subdev/mc/nv4c.o | ||
144 | nouveau-y += core/subdev/mc/nv50.o | 145 | nouveau-y += core/subdev/mc/nv50.o |
145 | nouveau-y += core/subdev/mc/nv94.o | 146 | nouveau-y += core/subdev/mc/nv94.o |
146 | nouveau-y += core/subdev/mc/nv98.o | 147 | nouveau-y += core/subdev/mc/nv98.o |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c index 1b653dd74a70..08b88591ed60 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c | |||
@@ -311,7 +311,7 @@ nv40_identify(struct nouveau_device *device) | |||
311 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 311 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
312 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 312 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
313 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; | 313 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
314 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 314 | device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; |
315 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 315 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
316 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 316 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
317 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 317 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
@@ -334,7 +334,7 @@ nv40_identify(struct nouveau_device *device) | |||
334 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 334 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
335 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 335 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
336 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; | 336 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
337 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 337 | device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; |
338 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 338 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
339 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 339 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
340 | device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; | 340 | device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; |
@@ -357,7 +357,7 @@ nv40_identify(struct nouveau_device *device) | |||
357 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 357 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
358 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 358 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
359 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; | 359 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
360 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 360 | device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; |
361 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 361 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
362 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 362 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
363 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 363 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
@@ -380,7 +380,7 @@ nv40_identify(struct nouveau_device *device) | |||
380 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 380 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
381 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 381 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
382 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; | 382 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
383 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 383 | device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; |
384 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 384 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
385 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 385 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
386 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 386 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
@@ -403,7 +403,7 @@ nv40_identify(struct nouveau_device *device) | |||
403 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; | 403 | device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; |
404 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; | 404 | device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; |
405 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; | 405 | device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; |
406 | device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; | 406 | device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; |
407 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; | 407 | device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; |
408 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; | 408 | device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; |
409 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; | 409 | device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 940eaa5d8b9a..9ad722e4e087 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | |||
@@ -1142,7 +1142,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head) | |||
1142 | if (conf != ~0) { | 1142 | if (conf != ~0) { |
1143 | if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { | 1143 | if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { |
1144 | u32 soff = (ffs(outp.or) - 1) * 0x08; | 1144 | u32 soff = (ffs(outp.or) - 1) * 0x08; |
1145 | u32 ctrl = nv_rd32(priv, 0x610798 + soff); | 1145 | u32 ctrl = nv_rd32(priv, 0x610794 + soff); |
1146 | u32 datarate; | 1146 | u32 datarate; |
1147 | 1147 | ||
1148 | switch ((ctrl & 0x000f0000) >> 16) { | 1148 | switch ((ctrl & 0x000f0000) >> 16) { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 9a850fe19515..54c1b5b471cd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | |||
@@ -112,7 +112,7 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine) | |||
112 | 112 | ||
113 | nv_wr32(priv, 0x002270, cur->addr >> 12); | 113 | nv_wr32(priv, 0x002270, cur->addr >> 12); |
114 | nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); | 114 | nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); |
115 | if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) | 115 | if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000)) |
116 | nv_error(priv, "runlist %d update timeout\n", engine); | 116 | nv_error(priv, "runlist %d update timeout\n", engine); |
117 | mutex_unlock(&nv_subdev(priv)->mutex); | 117 | mutex_unlock(&nv_subdev(priv)->mutex); |
118 | } | 118 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c index 30ed19c52e05..7a367c402978 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | |||
@@ -539,7 +539,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old, | |||
539 | ustatus &= ~0x04030000; | 539 | ustatus &= ~0x04030000; |
540 | } | 540 | } |
541 | if (ustatus && display) { | 541 | if (ustatus && display) { |
542 | nv_error("%s - TP%d:", name, i); | 542 | nv_error(priv, "%s - TP%d:", name, i); |
543 | nouveau_bitfield_print(nv50_mpc_traps, ustatus); | 543 | nouveau_bitfield_print(nv50_mpc_traps, ustatus); |
544 | pr_cont("\n"); | 544 | pr_cont("\n"); |
545 | ustatus = 0; | 545 | ustatus = 0; |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index adc88b73d911..3c6738edd127 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h | |||
@@ -47,6 +47,7 @@ struct nouveau_mc_oclass { | |||
47 | extern struct nouveau_oclass *nv04_mc_oclass; | 47 | extern struct nouveau_oclass *nv04_mc_oclass; |
48 | extern struct nouveau_oclass *nv40_mc_oclass; | 48 | extern struct nouveau_oclass *nv40_mc_oclass; |
49 | extern struct nouveau_oclass *nv44_mc_oclass; | 49 | extern struct nouveau_oclass *nv44_mc_oclass; |
50 | extern struct nouveau_oclass *nv4c_mc_oclass; | ||
50 | extern struct nouveau_oclass *nv50_mc_oclass; | 51 | extern struct nouveau_oclass *nv50_mc_oclass; |
51 | extern struct nouveau_oclass *nv94_mc_oclass; | 52 | extern struct nouveau_oclass *nv94_mc_oclass; |
52 | extern struct nouveau_oclass *nv98_mc_oclass; | 53 | extern struct nouveau_oclass *nv98_mc_oclass; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index aa0fbbec7f08..ef0c9c4a8cc3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c | |||
@@ -130,6 +130,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios) | |||
130 | u16 pcir; | 130 | u16 pcir; |
131 | int i; | 131 | int i; |
132 | 132 | ||
133 | /* there is no prom on nv4x IGP's */ | ||
134 | if (device->card_type == NV_40 && device->chipset >= 0x4c) | ||
135 | return; | ||
136 | |||
133 | /* enable access to rom */ | 137 | /* enable access to rom */ |
134 | if (device->card_type >= NV_50) | 138 | if (device->card_type >= NV_50) |
135 | pcireg = 0x088050; | 139 | pcireg = 0x088050; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c index 9159a5ccee93..265d1253624a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c | |||
@@ -36,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) { | |||
36 | .fini = _nouveau_fb_fini, | 36 | .fini = _nouveau_fb_fini, |
37 | }, | 37 | }, |
38 | .base.memtype = nv04_fb_memtype_valid, | 38 | .base.memtype = nv04_fb_memtype_valid, |
39 | .base.ram = &nv10_ram_oclass, | 39 | .base.ram = &nv1a_ram_oclass, |
40 | .tile.regions = 8, | 40 | .tile.regions = 8, |
41 | .tile.init = nv10_fb_tile_init, | 41 | .tile.init = nv10_fb_tile_init, |
42 | .tile.fini = nv10_fb_tile_fini, | 42 | .tile.fini = nv10_fb_tile_fini, |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h index b0d5c31606c1..81a408e7d034 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h | |||
@@ -14,6 +14,7 @@ int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *, | |||
14 | extern const struct nouveau_mc_intr nv04_mc_intr[]; | 14 | extern const struct nouveau_mc_intr nv04_mc_intr[]; |
15 | int nv04_mc_init(struct nouveau_object *); | 15 | int nv04_mc_init(struct nouveau_object *); |
16 | void nv40_mc_msi_rearm(struct nouveau_mc *); | 16 | void nv40_mc_msi_rearm(struct nouveau_mc *); |
17 | int nv44_mc_init(struct nouveau_object *object); | ||
17 | int nv50_mc_init(struct nouveau_object *); | 18 | int nv50_mc_init(struct nouveau_object *); |
18 | extern const struct nouveau_mc_intr nv50_mc_intr[]; | 19 | extern const struct nouveau_mc_intr nv50_mc_intr[]; |
19 | extern const struct nouveau_mc_intr nvc0_mc_intr[]; | 20 | extern const struct nouveau_mc_intr nvc0_mc_intr[]; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c index 3bfee5c6c4f2..cc4d0d2d886e 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | #include "nv04.h" | 25 | #include "nv04.h" |
26 | 26 | ||
27 | static int | 27 | int |
28 | nv44_mc_init(struct nouveau_object *object) | 28 | nv44_mc_init(struct nouveau_object *object) |
29 | { | 29 | { |
30 | struct nv04_mc_priv *priv = (void *)object; | 30 | struct nv04_mc_priv *priv = (void *)object; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c new file mode 100644 index 000000000000..a75c35ccf25c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c | |||
@@ -0,0 +1,45 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Ilia Mirkin | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Ilia Mirkin | ||
23 | */ | ||
24 | |||
25 | #include "nv04.h" | ||
26 | |||
27 | static void | ||
28 | nv4c_mc_msi_rearm(struct nouveau_mc *pmc) | ||
29 | { | ||
30 | struct nv04_mc_priv *priv = (void *)pmc; | ||
31 | nv_wr08(priv, 0x088050, 0xff); | ||
32 | } | ||
33 | |||
34 | struct nouveau_oclass * | ||
35 | nv4c_mc_oclass = &(struct nouveau_mc_oclass) { | ||
36 | .base.handle = NV_SUBDEV(MC, 0x4c), | ||
37 | .base.ofuncs = &(struct nouveau_ofuncs) { | ||
38 | .ctor = nv04_mc_ctor, | ||
39 | .dtor = _nouveau_mc_dtor, | ||
40 | .init = nv44_mc_init, | ||
41 | .fini = _nouveau_mc_fini, | ||
42 | }, | ||
43 | .intr = nv04_mc_intr, | ||
44 | .msi_rearm = nv4c_mc_msi_rearm, | ||
45 | }.base; | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 488686d490c0..4aed1714b9ab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -1249,7 +1249,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
1249 | mem->bus.is_iomem = !dev->agp->cant_use_aperture; | 1249 | mem->bus.is_iomem = !dev->agp->cant_use_aperture; |
1250 | } | 1250 | } |
1251 | #endif | 1251 | #endif |
1252 | if (!node->memtype) | 1252 | if (nv_device(drm->device)->card_type < NV_50 || !node->memtype) |
1253 | /* untiled */ | 1253 | /* untiled */ |
1254 | break; | 1254 | break; |
1255 | /* fallthrough, tiled memory */ | 1255 | /* fallthrough, tiled memory */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 78c8e7146d56..89c484d8ac26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -376,6 +376,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
376 | if (ret) | 376 | if (ret) |
377 | goto fail_device; | 377 | goto fail_device; |
378 | 378 | ||
379 | dev->irq_enabled = true; | ||
380 | |||
379 | /* workaround an odd issue on nvc1 by disabling the device's | 381 | /* workaround an odd issue on nvc1 by disabling the device's |
380 | * nosnoop capability. hopefully won't cause issues until a | 382 | * nosnoop capability. hopefully won't cause issues until a |
381 | * better fix is found - assuming there is one... | 383 | * better fix is found - assuming there is one... |
@@ -475,6 +477,7 @@ nouveau_drm_remove(struct pci_dev *pdev) | |||
475 | struct nouveau_drm *drm = nouveau_drm(dev); | 477 | struct nouveau_drm *drm = nouveau_drm(dev); |
476 | struct nouveau_object *device; | 478 | struct nouveau_object *device; |
477 | 479 | ||
480 | dev->irq_enabled = false; | ||
478 | device = drm->client.base.device; | 481 | device = drm->client.base.device; |
479 | drm_put_dev(dev); | 482 | drm_put_dev(dev); |
480 | 483 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 81638d7f2eff..471347edc27e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c | |||
@@ -14,7 +14,9 @@ nouveau_vga_set_decode(void *priv, bool state) | |||
14 | { | 14 | { |
15 | struct nouveau_device *device = nouveau_dev(priv); | 15 | struct nouveau_device *device = nouveau_dev(priv); |
16 | 16 | ||
17 | if (device->chipset >= 0x40) | 17 | if (device->card_type == NV_40 && device->chipset >= 0x4c) |
18 | nv_wr32(device, 0x088060, state); | ||
19 | else if (device->chipset >= 0x40) | ||
18 | nv_wr32(device, 0x088054, state); | 20 | nv_wr32(device, 0x088054, state); |
19 | else | 21 | else |
20 | nv_wr32(device, 0x001854, state); | 22 | nv_wr32(device, 0x001854, state); |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a9338c85630f..0d19f4f94d5a 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -559,7 +559,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
559 | u32 adjusted_clock = mode->clock; | 559 | u32 adjusted_clock = mode->clock; |
560 | int encoder_mode = atombios_get_encoder_mode(encoder); | 560 | int encoder_mode = atombios_get_encoder_mode(encoder); |
561 | u32 dp_clock = mode->clock; | 561 | u32 dp_clock = mode->clock; |
562 | int bpc = radeon_get_monitor_bpc(connector); | 562 | int bpc = radeon_crtc->bpc; |
563 | bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); | 563 | bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); |
564 | 564 | ||
565 | /* reset the pll flags */ | 565 | /* reset the pll flags */ |
@@ -1176,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1176 | evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); | 1176 | evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); |
1177 | 1177 | ||
1178 | /* Set NUM_BANKS. */ | 1178 | /* Set NUM_BANKS. */ |
1179 | if (rdev->family >= CHIP_BONAIRE) { | 1179 | if (rdev->family >= CHIP_TAHITI) { |
1180 | unsigned tileb, index, num_banks, tile_split_bytes; | 1180 | unsigned tileb, index, num_banks, tile_split_bytes; |
1181 | 1181 | ||
1182 | /* Calculate the macrotile mode index. */ | 1182 | /* Calculate the macrotile mode index. */ |
@@ -1194,13 +1194,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1194 | return -EINVAL; | 1194 | return -EINVAL; |
1195 | } | 1195 | } |
1196 | 1196 | ||
1197 | num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; | 1197 | if (rdev->family >= CHIP_BONAIRE) |
1198 | num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; | ||
1199 | else | ||
1200 | num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; | ||
1198 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); | 1201 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); |
1199 | } else { | 1202 | } else { |
1200 | /* SI and older. */ | 1203 | /* NI and older. */ |
1201 | if (rdev->family >= CHIP_TAHITI) | 1204 | if (rdev->family >= CHIP_CAYMAN) |
1202 | tmp = rdev->config.si.tile_config; | ||
1203 | else if (rdev->family >= CHIP_CAYMAN) | ||
1204 | tmp = rdev->config.cayman.tile_config; | 1205 | tmp = rdev->config.cayman.tile_config; |
1205 | else | 1206 | else |
1206 | tmp = rdev->config.evergreen.tile_config; | 1207 | tmp = rdev->config.evergreen.tile_config; |
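The dce4_crtc_do_set_base() hunk reworks how NUM_BANKS is derived: CIK parts read the macrotile mode array, SI parts read the tile mode array, and NI and older keep using the per-ASIC tile_config. A small sketch of that family-based selection with made-up table values and a placeholder fallback extraction (purely illustrative, not the real register layouts):

    #include <stdio.h>

    enum family { EVERGREEN, CAYMAN, TAHITI, BONAIRE };

    /* Hypothetical per-family tables, just to show which source is consulted. */
    static const unsigned macrotile_mode_array[32] = { [5] = 2 << 6 };
    static const unsigned tile_mode_array[32]      = { [5] = 1 << 20 };

    static unsigned pick_num_banks(enum family fam, unsigned index,
                                   unsigned tile_config)
    {
        if (fam >= BONAIRE)
            return (macrotile_mode_array[index] >> 6) & 0x3;
        if (fam >= TAHITI)
            return (tile_mode_array[index] >> 20) & 0x3;
        /* NI and older fall back to the global tile_config field;
         * the bit layout here is a placeholder, not the real one. */
        return (tile_config >> 4) & 0xf;
    }

    int main(void)
    {
        printf("cik=%u si=%u evergreen=%u\n",
               pick_num_banks(BONAIRE, 5, 0x22),
               pick_num_banks(TAHITI, 5, 0x22),
               pick_num_banks(EVERGREEN, 5, 0x22));
        return 0;
    }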
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index a42d61571f49..2cec2ab02f80 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -464,11 +464,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) | |||
464 | 464 | ||
465 | static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) | 465 | static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) |
466 | { | 466 | { |
467 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
468 | int bpc = 8; | 467 | int bpc = 8; |
469 | 468 | ||
470 | if (connector) | 469 | if (encoder->crtc) { |
471 | bpc = radeon_get_monitor_bpc(connector); | 470 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
471 | bpc = radeon_crtc->bpc; | ||
472 | } | ||
472 | 473 | ||
473 | switch (bpc) { | 474 | switch (bpc) { |
474 | case 0: | 475 | case 0: |
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0fbd36f3d4e9..ea103ccdf4bd 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "cypress_dpm.h" | 29 | #include "cypress_dpm.h" |
30 | #include "btc_dpm.h" | 30 | #include "btc_dpm.h" |
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | #include <linux/seq_file.h> | ||
32 | 33 | ||
33 | #define MC_CG_ARB_FREQ_F0 0x0a | 34 | #define MC_CG_ARB_FREQ_F0 0x0a |
34 | #define MC_CG_ARB_FREQ_F1 0x0b | 35 | #define MC_CG_ARB_FREQ_F1 0x0b |
@@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev) | |||
2756 | r600_free_extended_power_table(rdev); | 2757 | r600_free_extended_power_table(rdev); |
2757 | } | 2758 | } |
2758 | 2759 | ||
2760 | void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | ||
2761 | struct seq_file *m) | ||
2762 | { | ||
2763 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | ||
2764 | struct radeon_ps *rps = &eg_pi->current_rps; | ||
2765 | struct rv7xx_ps *ps = rv770_get_ps(rps); | ||
2766 | struct rv7xx_pl *pl; | ||
2767 | u32 current_index = | ||
2768 | (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> | ||
2769 | CURRENT_PROFILE_INDEX_SHIFT; | ||
2770 | |||
2771 | if (current_index > 2) { | ||
2772 | seq_printf(m, "invalid dpm profile %d\n", current_index); | ||
2773 | } else { | ||
2774 | if (current_index == 0) | ||
2775 | pl = &ps->low; | ||
2776 | else if (current_index == 1) | ||
2777 | pl = &ps->medium; | ||
2778 | else /* current_index == 2 */ | ||
2779 | pl = &ps->high; | ||
2780 | seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); | ||
2781 | if (rdev->family >= CHIP_CEDAR) { | ||
2782 | seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", | ||
2783 | current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); | ||
2784 | } else { | ||
2785 | seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n", | ||
2786 | current_index, pl->sclk, pl->mclk, pl->vddc); | ||
2787 | } | ||
2788 | } | ||
2789 | } | ||
2790 | |||
2759 | u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) | 2791 | u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) |
2760 | { | 2792 | { |
2761 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 2793 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h index 29e32de7e025..9c65be2d55a9 100644 --- a/drivers/gpu/drm/radeon/btcd.h +++ b/drivers/gpu/drm/radeon/btcd.h | |||
@@ -44,6 +44,10 @@ | |||
44 | # define DYN_SPREAD_SPECTRUM_EN (1 << 23) | 44 | # define DYN_SPREAD_SPECTRUM_EN (1 << 23) |
45 | # define AC_DC_SW (1 << 24) | 45 | # define AC_DC_SW (1 << 24) |
46 | 46 | ||
47 | #define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c | ||
48 | # define CURRENT_PROFILE_INDEX_MASK (0xf << 4) | ||
49 | # define CURRENT_PROFILE_INDEX_SHIFT 4 | ||
50 | |||
47 | #define CG_BIF_REQ_AND_RSP 0x7f4 | 51 | #define CG_BIF_REQ_AND_RSP 0x7f4 |
48 | #define CG_CLIENT_REQ(x) ((x) << 0) | 52 | #define CG_CLIENT_REQ(x) ((x) << 0) |
49 | #define CG_CLIENT_REQ_MASK (0xff << 0) | 53 | #define CG_CLIENT_REQ_MASK (0xff << 0) |
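btcd.h gains TARGET_AND_CURRENT_PROFILE_INDEX plus a mask/shift pair, which the new btc_dpm debugfs helper uses to pull the current profile index out of the register. The mask-then-shift idiom in isolation, as a tiny standalone sketch (register value invented for the example):

    #include <stdio.h>
    #include <stdint.h>

    #define PROFILE_INDEX_MASK  (0xf << 4)
    #define PROFILE_INDEX_SHIFT 4

    static unsigned decode_profile_index(uint32_t reg)
    {
        return (reg & PROFILE_INDEX_MASK) >> PROFILE_INDEX_SHIFT;
    }

    int main(void)
    {
        /* 0x25 -> field value 2: bits [7:4] hold the index */
        printf("index=%u\n", decode_profile_index(0x25));
        return 0;
    }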
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f2b9e21ce4da..5623e7542d99 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -1680,7 +1680,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) | |||
1680 | case RADEON_HPD_6: | 1680 | case RADEON_HPD_6: |
1681 | if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) | 1681 | if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) |
1682 | connected = true; | 1682 | connected = true; |
1683 | break; | 1683 | break; |
1684 | default: | 1684 | default: |
1685 | break; | 1685 | break; |
1686 | } | 1686 | } |
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index b6e01d5d2cce..351db361239d 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c | |||
@@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev) | |||
1223 | 1223 | ||
1224 | int kv_dpm_late_enable(struct radeon_device *rdev) | 1224 | int kv_dpm_late_enable(struct radeon_device *rdev) |
1225 | { | 1225 | { |
1226 | int ret; | 1226 | int ret = 0; |
1227 | 1227 | ||
1228 | if (rdev->irq.installed && | 1228 | if (rdev->irq.installed && |
1229 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1229 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index c351226ecb31..ca814276b075 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
@@ -2588,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev, | |||
2588 | if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) | 2588 | if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) |
2589 | enable_sq_ramping = false; | 2589 | enable_sq_ramping = false; |
2590 | 2590 | ||
2591 | if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) | 2591 | if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) |
2592 | enable_sq_ramping = false; | 2592 | enable_sq_ramping = false; |
2593 | 2593 | ||
2594 | for (i = 0; i < state->performance_level_count; i++) { | 2594 | for (i = 0; i < state->performance_level_count; i++) { |
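The first ni_dpm.c hunk fixes an inverted test: SQ ramping must be disabled when the configured LTI ratio does not fit in the register field, i.e. when it is greater than the field maximum, not less-or-equal. A one-function sketch of the fits-in-field check (field width hypothetical):

    #include <stdio.h>

    #define LTI_RATIO_FIELD_MAX 0x7f    /* hypothetical field width */

    /* A value is programmable only if it fits in the masked field. */
    static int fits_in_field(unsigned value, unsigned field_max)
    {
        return value <= field_max;
    }

    int main(void)
    {
        printf("%d %d\n", fits_in_field(0x40, LTI_RATIO_FIELD_MAX),
               fits_in_field(0x100, LTI_RATIO_FIELD_MAX));
        return 0;
    }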
@@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev, | |||
3945 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | 3945 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
3946 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 3946 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
3947 | struct ni_ps *ps = ni_get_ps(rps); | 3947 | struct ni_ps *ps = ni_get_ps(rps); |
3948 | u16 vddc; | ||
3949 | struct rv7xx_pl *pl = &ps->performance_levels[index]; | 3948 | struct rv7xx_pl *pl = &ps->performance_levels[index]; |
3950 | 3949 | ||
3951 | ps->performance_level_count = index + 1; | 3950 | ps->performance_level_count = index + 1; |
@@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev, | |||
3961 | 3960 | ||
3962 | /* patch up vddc if necessary */ | 3961 | /* patch up vddc if necessary */ |
3963 | if (pl->vddc == 0xff01) { | 3962 | if (pl->vddc == 0xff01) { |
3964 | if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) | 3963 | if (pi->max_vddc) |
3965 | pl->vddc = vddc; | 3964 | pl->vddc = pi->max_vddc; |
3966 | } | 3965 | } |
3967 | 3966 | ||
3968 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { | 3967 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { |
@@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev, | |||
4322 | void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 4321 | void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
4323 | struct seq_file *m) | 4322 | struct seq_file *m) |
4324 | { | 4323 | { |
4325 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 4324 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
4325 | struct radeon_ps *rps = &eg_pi->current_rps; | ||
4326 | struct ni_ps *ps = ni_get_ps(rps); | 4326 | struct ni_ps *ps = ni_get_ps(rps); |
4327 | struct rv7xx_pl *pl; | 4327 | struct rv7xx_pl *pl; |
4328 | u32 current_index = | 4328 | u32 current_index = |
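Reviewer note on the ni_dpm.c hunks above: the SQ ramping parameters must fit inside their register fields, so ramping has to be disabled when a value exceeds (MASK >> SHIFT); the old '<=' disabled it in exactly the wrong case. The other two hunks reuse the cached max_vddc and the driver's own current_rps copy. Below is a compilable sketch of the field-fit check, assuming an illustrative 7-bit mask rather than the Cayman register layout.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative mask/shift values, not the Cayman register definitions. */
#define LTI_RATIO_SHIFT  0
#define LTI_RATIO_MASK   (0x7f << LTI_RATIO_SHIFT)

/* A programmed value is only usable when it fits in its register field,
 * i.e. value <= (MASK >> SHIFT). */
static bool fits_in_field(unsigned int value)
{
    return value <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT);
}

int main(void)
{
    unsigned int ratio = 150;   /* larger than the 7-bit field can hold */
    bool enable_sq_ramping = true;

    if (ratio > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))    /* fixed sense */
        enable_sq_ramping = false;

    printf("fits=%d enable=%d\n", fits_in_field(ratio), enable_sq_ramping);
    return 0;
}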
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 56140b4e5bb2..cdbc4171fe73 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -3991,6 +3991,10 @@ restart_ih: | |||
3991 | break; | 3991 | break; |
3992 | } | 3992 | } |
3993 | break; | 3993 | break; |
3994 | case 124: /* UVD */ | ||
3995 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); | ||
3996 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); | ||
3997 | break; | ||
3994 | case 176: /* CP_INT in ring buffer */ | 3998 | case 176: /* CP_INT in ring buffer */ |
3995 | case 177: /* CP_INT in IB1 */ | 3999 | case 177: /* CP_INT in IB1 */ |
3996 | case 178: /* CP_INT in IB2 */ | 4000 | case 178: /* CP_INT in IB2 */ |
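Reviewer note on the r600.c hunk above: interrupt source 124 (UVD) previously fell through to the default case, so UVD fences were never processed from the IH ring. A small sketch of the dispatch pattern, with made-up source IDs standing in for the hardware's:

#include <stdio.h>

/* Illustrative source IDs; 124 is the UVD source the hunk handles. */
enum { SRC_UVD = 124, SRC_CP_RING = 176 };

static void process_fences(const char *ring) { printf("fence work on %s\n", ring); }

/* The interrupt handler dispatches on the source ID and now also drains
 * UVD fences instead of ignoring source 124. */
static void handle_irq(int src_id, unsigned int src_data)
{
    switch (src_id) {
    case SRC_UVD:
        printf("IH: UVD int: 0x%08x\n", src_data);
        process_fences("uvd");
        break;
    case SRC_CP_RING:
        process_fences("gfx");
        break;
    default:
        break;
    }
}

int main(void) { handle_irq(SRC_UVD, 0x1); return 0; }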
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 4a8ac1cd6b4c..024db37b1832 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -135,6 +135,9 @@ extern int radeon_hard_reset; | |||
135 | /* R600+ */ | 135 | /* R600+ */ |
136 | #define R600_RING_TYPE_UVD_INDEX 5 | 136 | #define R600_RING_TYPE_UVD_INDEX 5 |
137 | 137 | ||
138 | /* number of hw syncs before falling back on blocking */ | ||
139 | #define RADEON_NUM_SYNCS 4 | ||
140 | |||
138 | /* hardcode those limit for now */ | 141 | /* hardcode those limit for now */ |
139 | #define RADEON_VA_IB_OFFSET (1 << 20) | 142 | #define RADEON_VA_IB_OFFSET (1 << 20) |
140 | #define RADEON_VA_RESERVED_SIZE (8 << 20) | 143 | #define RADEON_VA_RESERVED_SIZE (8 << 20) |
@@ -554,7 +557,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, | |||
554 | /* | 557 | /* |
555 | * Semaphores. | 558 | * Semaphores. |
556 | */ | 559 | */ |
557 | /* everything here is constant */ | ||
558 | struct radeon_semaphore { | 560 | struct radeon_semaphore { |
559 | struct radeon_sa_bo *sa_bo; | 561 | struct radeon_sa_bo *sa_bo; |
560 | signed waiters; | 562 | signed waiters; |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index f74db43346fd..dda02bfc10a4 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = { | |||
1555 | .get_sclk = &btc_dpm_get_sclk, | 1555 | .get_sclk = &btc_dpm_get_sclk, |
1556 | .get_mclk = &btc_dpm_get_mclk, | 1556 | .get_mclk = &btc_dpm_get_mclk, |
1557 | .print_power_state = &rv770_dpm_print_power_state, | 1557 | .print_power_state = &rv770_dpm_print_power_state, |
1558 | .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level, | 1558 | .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level, |
1559 | .force_performance_level = &rv770_dpm_force_performance_level, | 1559 | .force_performance_level = &rv770_dpm_force_performance_level, |
1560 | .vblank_too_short = &btc_dpm_vblank_too_short, | 1560 | .vblank_too_short = &btc_dpm_vblank_too_short, |
1561 | }, | 1561 | }, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index b3bc433eed4c..ae637cfda783 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev); | |||
551 | u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low); | 551 | u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low); |
552 | u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); | 552 | u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); |
553 | bool btc_dpm_vblank_too_short(struct radeon_device *rdev); | 553 | bool btc_dpm_vblank_too_short(struct radeon_device *rdev); |
554 | void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | ||
555 | struct seq_file *m); | ||
554 | int sumo_dpm_init(struct radeon_device *rdev); | 556 | int sumo_dpm_init(struct radeon_device *rdev); |
555 | int sumo_dpm_enable(struct radeon_device *rdev); | 557 | int sumo_dpm_enable(struct radeon_device *rdev); |
556 | int sumo_dpm_late_enable(struct radeon_device *rdev); | 558 | int sumo_dpm_late_enable(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d680608f6f5b..fbd8b930f2be 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -571,6 +571,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index) | |||
571 | radeon_crtc->max_cursor_width = CURSOR_WIDTH; | 571 | radeon_crtc->max_cursor_width = CURSOR_WIDTH; |
572 | radeon_crtc->max_cursor_height = CURSOR_HEIGHT; | 572 | radeon_crtc->max_cursor_height = CURSOR_HEIGHT; |
573 | } | 573 | } |
574 | dev->mode_config.cursor_width = radeon_crtc->max_cursor_width; | ||
575 | dev->mode_config.cursor_height = radeon_crtc->max_cursor_height; | ||
574 | 576 | ||
575 | #if 0 | 577 | #if 0 |
576 | radeon_crtc->mode_set.crtc = &radeon_crtc->base; | 578 | radeon_crtc->mode_set.crtc = &radeon_crtc->base; |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 1b783f0e6d3a..15e44a7281ab 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, | |||
139 | } | 139 | } |
140 | 140 | ||
141 | /* 64 dwords should be enough for fence too */ | 141 | /* 64 dwords should be enough for fence too */ |
142 | r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8); | 142 | r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8); |
143 | if (r) { | 143 | if (r) { |
144 | dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); | 144 | dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); |
145 | return r; | 145 | return r; |
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 2b42aa1914f2..9006b32d5eed 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c | |||
@@ -34,14 +34,15 @@ | |||
34 | int radeon_semaphore_create(struct radeon_device *rdev, | 34 | int radeon_semaphore_create(struct radeon_device *rdev, |
35 | struct radeon_semaphore **semaphore) | 35 | struct radeon_semaphore **semaphore) |
36 | { | 36 | { |
37 | uint32_t *cpu_addr; | ||
37 | int i, r; | 38 | int i, r; |
38 | 39 | ||
39 | *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); | 40 | *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); |
40 | if (*semaphore == NULL) { | 41 | if (*semaphore == NULL) { |
41 | return -ENOMEM; | 42 | return -ENOMEM; |
42 | } | 43 | } |
43 | r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, | 44 | r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, |
44 | &(*semaphore)->sa_bo, 8, 8, true); | 45 | 8 * RADEON_NUM_SYNCS, 8, true); |
45 | if (r) { | 46 | if (r) { |
46 | kfree(*semaphore); | 47 | kfree(*semaphore); |
47 | *semaphore = NULL; | 48 | *semaphore = NULL; |
@@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev, | |||
49 | } | 50 | } |
50 | (*semaphore)->waiters = 0; | 51 | (*semaphore)->waiters = 0; |
51 | (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); | 52 | (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); |
52 | *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; | 53 | |
54 | cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo); | ||
55 | for (i = 0; i < RADEON_NUM_SYNCS; ++i) | ||
56 | cpu_addr[i] = 0; | ||
53 | 57 | ||
54 | for (i = 0; i < RADEON_NUM_RINGS; ++i) | 58 | for (i = 0; i < RADEON_NUM_RINGS; ++i) |
55 | (*semaphore)->sync_to[i] = NULL; | 59 | (*semaphore)->sync_to[i] = NULL; |
@@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, | |||
125 | struct radeon_semaphore *semaphore, | 129 | struct radeon_semaphore *semaphore, |
126 | int ring) | 130 | int ring) |
127 | { | 131 | { |
132 | unsigned count = 0; | ||
128 | int i, r; | 133 | int i, r; |
129 | 134 | ||
130 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 135 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
@@ -140,6 +145,12 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, | |||
140 | return -EINVAL; | 145 | return -EINVAL; |
141 | } | 146 | } |
142 | 147 | ||
148 | if (++count > RADEON_NUM_SYNCS) { | ||
149 | /* not enough room, wait manually */ | ||
150 | radeon_fence_wait_locked(fence); | ||
151 | continue; | ||
152 | } | ||
153 | |||
143 | /* allocate enough space for sync command */ | 154 | /* allocate enough space for sync command */ |
144 | r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); | 155 | r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); |
145 | if (r) { | 156 | if (r) { |
@@ -164,6 +175,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, | |||
164 | 175 | ||
165 | radeon_ring_commit(rdev, &rdev->ring[i]); | 176 | radeon_ring_commit(rdev, &rdev->ring[i]); |
166 | radeon_fence_note_sync(fence, ring); | 177 | radeon_fence_note_sync(fence, ring); |
178 | |||
179 | semaphore->gpu_addr += 8; | ||
167 | } | 180 | } |
168 | 181 | ||
169 | return 0; | 182 | return 0; |
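Reviewer note on the radeon_semaphore.c hunks above: the semaphore object now carries RADEON_NUM_SYNCS 8-byte slots, all zeroed at creation, and radeon_semaphore_sync_rings() falls back to a blocking fence wait once more rings need syncing than there are slots, advancing gpu_addr by 8 for each emitted sync. A standalone sketch of that "bounded hardware syncs, then block" idea, assuming illustrative ring/fence stand-ins rather than the driver's structures:

#include <stdio.h>
#include <stdint.h>

#define NUM_RINGS 6
#define NUM_SYNCS 4     /* hardware semaphore slots before falling back */

/* Illustrative stand-in: which rings still have a fence to sync against. */
static int fence_pending[NUM_RINGS] = { 1, 1, 1, 1, 1, 1 };

static void fence_wait_blocking(int ring) { printf("blocking wait on ring %d\n", ring); }
static void emit_hw_sync(int ring, uint64_t gpu_addr)
{
    printf("hw semaphore for ring %d at 0x%llx\n", ring,
           (unsigned long long)gpu_addr);
}

/* Use one 8-byte semaphore slot per synced ring; once the preallocated
 * slots run out, wait on the fence directly instead. */
static void sync_rings(uint64_t sem_gpu_addr)
{
    unsigned int count = 0;
    int i;

    for (i = 0; i < NUM_RINGS; i++) {
        if (!fence_pending[i])
            continue;

        if (++count > NUM_SYNCS) {
            fence_wait_blocking(i);     /* not enough slots left */
            continue;
        }

        emit_hw_sync(i, sem_gpu_addr);
        sem_gpu_addr += 8;      /* advance to the next 8-byte slot */
    }
}

int main(void) { sync_rings(0x1000); return 0; }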
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 80c595aba359..b5f63f5e22a3 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
@@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, | |||
2174 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 2174 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
2175 | struct rv7xx_ps *ps = rv770_get_ps(rps); | 2175 | struct rv7xx_ps *ps = rv770_get_ps(rps); |
2176 | u32 sclk, mclk; | 2176 | u32 sclk, mclk; |
2177 | u16 vddc; | ||
2178 | struct rv7xx_pl *pl; | 2177 | struct rv7xx_pl *pl; |
2179 | 2178 | ||
2180 | switch (index) { | 2179 | switch (index) { |
@@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, | |||
2214 | 2213 | ||
2215 | /* patch up vddc if necessary */ | 2214 | /* patch up vddc if necessary */ |
2216 | if (pl->vddc == 0xff01) { | 2215 | if (pl->vddc == 0xff01) { |
2217 | if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) | 2216 | if (pi->max_vddc) |
2218 | pl->vddc = vddc; | 2217 | pl->vddc = pi->max_vddc; |
2219 | } | 2218 | } |
2220 | 2219 | ||
2221 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { | 2220 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { |
@@ -2527,14 +2526,7 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low) | |||
2527 | bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) | 2526 | bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) |
2528 | { | 2527 | { |
2529 | u32 vblank_time = r600_dpm_get_vblank_time(rdev); | 2528 | u32 vblank_time = r600_dpm_get_vblank_time(rdev); |
2530 | u32 switch_limit = 300; | 2529 | u32 switch_limit = 200; /* 300 */ |
2531 | |||
2532 | /* quirks */ | ||
2533 | /* ASUS K70AF */ | ||
2534 | if ((rdev->pdev->device == 0x9553) && | ||
2535 | (rdev->pdev->subsystem_vendor == 0x1043) && | ||
2536 | (rdev->pdev->subsystem_device == 0x1c42)) | ||
2537 | switch_limit = 200; | ||
2538 | 2530 | ||
2539 | /* RV770 */ | 2531 | /* RV770 */ |
2540 | /* mclk switching doesn't seem to work reliably on desktop RV770s */ | 2532 | /* mclk switching doesn't seem to work reliably on desktop RV770s */ |
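Reviewer note on the rv770_dpm.c hunks above: the 0xff01 sentinel in a power level is now patched from the already-parsed pi->max_vddc instead of re-querying the ATOM tables, and the mclk-switch vblank limit drops to 200 for all boards, which also makes the ASUS K70AF quirk unnecessary. A minimal sketch of the sentinel patch-up, with a hypothetical voltage value:

#include <stdio.h>
#include <stdint.h>

/* Illustrative power-level struct; 0xff01 is the "use max vddc" sentinel
 * handled by the hunk. */
struct perf_level {
    uint16_t vddc;
};

static void patch_vddc(struct perf_level *pl, uint16_t cached_max_vddc)
{
    /* Rely on the already-parsed maximum voltage instead of re-querying
     * the BIOS tables for every power level. */
    if (pl->vddc == 0xff01 && cached_max_vddc)
        pl->vddc = cached_max_vddc;
}

int main(void)
{
    struct perf_level pl = { .vddc = 0xff01 };

    patch_vddc(&pl, 1100);  /* hypothetical max vddc */
    printf("vddc = %u\n", pl.vddc);
    return 0;
}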
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 09ec4f6c53bb..83578324e5d1 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -6338,6 +6338,10 @@ restart_ih: | |||
6338 | break; | 6338 | break; |
6339 | } | 6339 | } |
6340 | break; | 6340 | break; |
6341 | case 124: /* UVD */ | ||
6342 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); | ||
6343 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); | ||
6344 | break; | ||
6341 | case 146: | 6345 | case 146: |
6342 | case 147: | 6346 | case 147: |
6343 | addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); | 6347 | addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 0471501338fb..0a2f5b4bca43 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -2395,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev, | |||
2395 | if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) | 2395 | if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) |
2396 | enable_sq_ramping = false; | 2396 | enable_sq_ramping = false; |
2397 | 2397 | ||
2398 | if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) | 2398 | if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) |
2399 | enable_sq_ramping = false; | 2399 | enable_sq_ramping = false; |
2400 | 2400 | ||
2401 | for (i = 0; i < state->performance_level_count; i++) { | 2401 | for (i = 0; i < state->performance_level_count; i++) { |
@@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev) | |||
6472 | void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 6472 | void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
6473 | struct seq_file *m) | 6473 | struct seq_file *m) |
6474 | { | 6474 | { |
6475 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 6475 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
6476 | struct radeon_ps *rps = &eg_pi->current_rps; | ||
6476 | struct ni_ps *ps = ni_get_ps(rps); | 6477 | struct ni_ps *ps = ni_get_ps(rps); |
6477 | struct rv7xx_pl *pl; | 6478 | struct rv7xx_pl *pl; |
6478 | u32 current_index = | 6479 | u32 current_index = |
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index f121efe12dc5..8b47b3cd0357 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c | |||
@@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev | |||
1807 | struct seq_file *m) | 1807 | struct seq_file *m) |
1808 | { | 1808 | { |
1809 | struct sumo_power_info *pi = sumo_get_pi(rdev); | 1809 | struct sumo_power_info *pi = sumo_get_pi(rdev); |
1810 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 1810 | struct radeon_ps *rps = &pi->current_rps; |
1811 | struct sumo_ps *ps = sumo_get_ps(rps); | 1811 | struct sumo_ps *ps = sumo_get_ps(rps); |
1812 | struct sumo_pl *pl; | 1812 | struct sumo_pl *pl; |
1813 | u32 current_index = | 1813 | u32 current_index = |
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 2d447192d6f7..2da0e17eb960 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c | |||
@@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev, | |||
1926 | void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 1926 | void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
1927 | struct seq_file *m) | 1927 | struct seq_file *m) |
1928 | { | 1928 | { |
1929 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 1929 | struct trinity_power_info *pi = trinity_get_pi(rdev); |
1930 | struct radeon_ps *rps = &pi->current_rps; | ||
1930 | struct trinity_ps *ps = trinity_get_ps(rps); | 1931 | struct trinity_ps *ps = trinity_get_ps(rps); |
1931 | struct trinity_pl *pl; | 1932 | struct trinity_pl *pl; |
1932 | u32 current_index = | 1933 | u32 current_index = |
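Reviewer note on the debugfs hunks in ni_dpm.c, si_dpm.c, sumo_dpm.c and trinity_dpm.c above: the performance-level printout now reads the power state from the per-ASIC current_rps copy kept in the driver's private info rather than from the core rdev->pm.dpm.current_ps pointer. A toy sketch of that indirection, with deliberately simplified structures:

#include <stdio.h>

/* Illustrative power-state bookkeeping, not the radeon structures. */
struct power_state {
    unsigned int sclk;
};

struct asic_power_info {
    struct power_state current_rps;     /* per-ASIC cached copy */
};

static void debugfs_print(const struct asic_power_info *pi)
{
    const struct power_state *rps = &pi->current_rps;

    printf("current sclk: %u\n", rps->sclk);
}

int main(void)
{
    struct asic_power_info pi = { .current_rps = { .sclk = 800 } };

    debugfs_print(&pi);
    return 0;
}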
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index 824550db3fed..d1771004cb52 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c | |||
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, | |||
57 | radeon_ring_write(ring, 0); | 57 | radeon_ring_write(ring, 0); |
58 | radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); | 58 | radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); |
59 | radeon_ring_write(ring, 2); | 59 | radeon_ring_write(ring, 2); |
60 | return; | ||
61 | } | 60 | } |
62 | 61 | ||
63 | /** | 62 | /** |
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 3302f99e7497..764be36397fd 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c | |||
@@ -126,6 +126,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, | |||
126 | agp_be->ttm.func = &ttm_agp_func; | 126 | agp_be->ttm.func = &ttm_agp_func; |
127 | 127 | ||
128 | if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { | 128 | if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { |
129 | kfree(agp_be); | ||
129 | return NULL; | 130 | return NULL; |
130 | } | 131 | } |
131 | 132 | ||
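Reviewer note on the ttm_agp_backend.c hunk above: the backend object was leaked whenever ttm_tt_init() failed after the allocation had already succeeded; the added kfree() closes that error path. A standalone sketch of the allocate/init/cleanup-on-failure pattern, using plain calloc/free in place of the kernel allocators:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative backend object standing in for the AGP TTM backend. */
struct backend {
    int initialized;
};

static int backend_init(struct backend *be)
{
    be->initialized = 1;
    return -1;      /* simulate an init failure */
}

/* If the init step fails after the allocation succeeded, the object must
 * be freed before returning NULL, otherwise it leaks on every failure. */
static struct backend *backend_create(void)
{
    struct backend *be = calloc(1, sizeof(*be));

    if (!be)
        return NULL;

    if (backend_init(be)) {
        free(be);   /* the missing cleanup the hunk adds */
        return NULL;
    }

    return be;
}

int main(void)
{
    printf("create -> %p\n", (void *)backend_create());
    return 0;
}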
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h index b645647b7776..bb594c11605e 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h | |||
@@ -1223,9 +1223,19 @@ typedef enum { | |||
1223 | #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 | 1223 | #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 |
1224 | 1224 | ||
1225 | #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 | 1225 | #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 |
1226 | 1226 | #define SVGA_3D_CMD_GB_SCREEN_DMA 1131 | |
1227 | #define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132 | ||
1228 | #define SVGA_3D_CMD_GB_MOB_FENCE 1133 | ||
1229 | #define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134 | ||
1227 | #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 | 1230 | #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 |
1228 | #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 | 1231 | #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 |
1232 | #define SVGA_3D_CMD_NOP_ERROR 1137 | ||
1233 | |||
1234 | #define SVGA_3D_CMD_RESERVED1 1138 | ||
1235 | #define SVGA_3D_CMD_RESERVED2 1139 | ||
1236 | #define SVGA_3D_CMD_RESERVED3 1140 | ||
1237 | #define SVGA_3D_CMD_RESERVED4 1141 | ||
1238 | #define SVGA_3D_CMD_RESERVED5 1142 | ||
1229 | 1239 | ||
1230 | #define SVGA_3D_CMD_MAX 1142 | 1240 | #define SVGA_3D_CMD_MAX 1142 |
1231 | #define SVGA_3D_CMD_FUTURE_MAX 3000 | 1241 | #define SVGA_3D_CMD_FUTURE_MAX 3000 |
@@ -1973,8 +1983,7 @@ struct { | |||
1973 | uint32 sizeInBytes; | 1983 | uint32 sizeInBytes; |
1974 | uint32 validSizeInBytes; | 1984 | uint32 validSizeInBytes; |
1975 | SVGAMobFormat ptDepth; | 1985 | SVGAMobFormat ptDepth; |
1976 | } | 1986 | } __packed |
1977 | __attribute__((__packed__)) | ||
1978 | SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ | 1987 | SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ |
1979 | 1988 | ||
1980 | typedef | 1989 | typedef |
@@ -1984,15 +1993,13 @@ struct { | |||
1984 | uint32 sizeInBytes; | 1993 | uint32 sizeInBytes; |
1985 | uint32 validSizeInBytes; | 1994 | uint32 validSizeInBytes; |
1986 | SVGAMobFormat ptDepth; | 1995 | SVGAMobFormat ptDepth; |
1987 | } | 1996 | } __packed |
1988 | __attribute__((__packed__)) | ||
1989 | SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ | 1997 | SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ |
1990 | 1998 | ||
1991 | typedef | 1999 | typedef |
1992 | struct { | 2000 | struct { |
1993 | SVGAOTableType type; | 2001 | SVGAOTableType type; |
1994 | } | 2002 | } __packed |
1995 | __attribute__((__packed__)) | ||
1996 | SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ | 2003 | SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ |
1997 | 2004 | ||
1998 | /* | 2005 | /* |
@@ -2005,8 +2012,7 @@ struct SVGA3dCmdDefineGBMob { | |||
2005 | SVGAMobFormat ptDepth; | 2012 | SVGAMobFormat ptDepth; |
2006 | PPN base; | 2013 | PPN base; |
2007 | uint32 sizeInBytes; | 2014 | uint32 sizeInBytes; |
2008 | } | 2015 | } __packed |
2009 | __attribute__((__packed__)) | ||
2010 | SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ | 2016 | SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ |
2011 | 2017 | ||
2012 | 2018 | ||
@@ -2017,8 +2023,7 @@ SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ | |||
2017 | typedef | 2023 | typedef |
2018 | struct SVGA3dCmdDestroyGBMob { | 2024 | struct SVGA3dCmdDestroyGBMob { |
2019 | SVGAMobId mobid; | 2025 | SVGAMobId mobid; |
2020 | } | 2026 | } __packed |
2021 | __attribute__((__packed__)) | ||
2022 | SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ | 2027 | SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ |
2023 | 2028 | ||
2024 | /* | 2029 | /* |
@@ -2031,8 +2036,7 @@ struct SVGA3dCmdRedefineGBMob { | |||
2031 | SVGAMobFormat ptDepth; | 2036 | SVGAMobFormat ptDepth; |
2032 | PPN base; | 2037 | PPN base; |
2033 | uint32 sizeInBytes; | 2038 | uint32 sizeInBytes; |
2034 | } | 2039 | } __packed |
2035 | __attribute__((__packed__)) | ||
2036 | SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ | 2040 | SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ |
2037 | 2041 | ||
2038 | /* | 2042 | /* |
@@ -2045,8 +2049,7 @@ struct SVGA3dCmdDefineGBMob64 { | |||
2045 | SVGAMobFormat ptDepth; | 2049 | SVGAMobFormat ptDepth; |
2046 | PPN64 base; | 2050 | PPN64 base; |
2047 | uint32 sizeInBytes; | 2051 | uint32 sizeInBytes; |
2048 | } | 2052 | } __packed |
2049 | __attribute__((__packed__)) | ||
2050 | SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ | 2053 | SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ |
2051 | 2054 | ||
2052 | /* | 2055 | /* |
@@ -2059,8 +2062,7 @@ struct SVGA3dCmdRedefineGBMob64 { | |||
2059 | SVGAMobFormat ptDepth; | 2062 | SVGAMobFormat ptDepth; |
2060 | PPN64 base; | 2063 | PPN64 base; |
2061 | uint32 sizeInBytes; | 2064 | uint32 sizeInBytes; |
2062 | } | 2065 | } __packed |
2063 | __attribute__((__packed__)) | ||
2064 | SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ | 2066 | SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ |
2065 | 2067 | ||
2066 | /* | 2068 | /* |
@@ -2070,8 +2072,7 @@ SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ | |||
2070 | typedef | 2072 | typedef |
2071 | struct SVGA3dCmdUpdateGBMobMapping { | 2073 | struct SVGA3dCmdUpdateGBMobMapping { |
2072 | SVGAMobId mobid; | 2074 | SVGAMobId mobid; |
2073 | } | 2075 | } __packed |
2074 | __attribute__((__packed__)) | ||
2075 | SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ | 2076 | SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ |
2076 | 2077 | ||
2077 | /* | 2078 | /* |
@@ -2087,7 +2088,8 @@ struct SVGA3dCmdDefineGBSurface { | |||
2087 | uint32 multisampleCount; | 2088 | uint32 multisampleCount; |
2088 | SVGA3dTextureFilter autogenFilter; | 2089 | SVGA3dTextureFilter autogenFilter; |
2089 | SVGA3dSize size; | 2090 | SVGA3dSize size; |
2090 | } SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ | 2091 | } __packed |
2092 | SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ | ||
2091 | 2093 | ||
2092 | /* | 2094 | /* |
2093 | * Destroy a guest-backed surface. | 2095 | * Destroy a guest-backed surface. |
@@ -2096,7 +2098,8 @@ struct SVGA3dCmdDefineGBSurface { | |||
2096 | typedef | 2098 | typedef |
2097 | struct SVGA3dCmdDestroyGBSurface { | 2099 | struct SVGA3dCmdDestroyGBSurface { |
2098 | uint32 sid; | 2100 | uint32 sid; |
2099 | } SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ | 2101 | } __packed |
2102 | SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ | ||
2100 | 2103 | ||
2101 | /* | 2104 | /* |
2102 | * Bind a guest-backed surface to an object. | 2105 | * Bind a guest-backed surface to an object. |
@@ -2106,7 +2109,8 @@ typedef | |||
2106 | struct SVGA3dCmdBindGBSurface { | 2109 | struct SVGA3dCmdBindGBSurface { |
2107 | uint32 sid; | 2110 | uint32 sid; |
2108 | SVGAMobId mobid; | 2111 | SVGAMobId mobid; |
2109 | } SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ | 2112 | } __packed |
2113 | SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ | ||
2110 | 2114 | ||
2111 | /* | 2115 | /* |
2112 | * Conditionally bind a mob to a guest backed surface if testMobid | 2116 | * Conditionally bind a mob to a guest backed surface if testMobid |
@@ -2123,7 +2127,7 @@ struct{ | |||
2123 | SVGAMobId testMobid; | 2127 | SVGAMobId testMobid; |
2124 | SVGAMobId mobid; | 2128 | SVGAMobId mobid; |
2125 | uint32 flags; | 2129 | uint32 flags; |
2126 | } | 2130 | } __packed |
2127 | SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ | 2131 | SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ |
2128 | 2132 | ||
2129 | /* | 2133 | /* |
@@ -2135,7 +2139,8 @@ typedef | |||
2135 | struct SVGA3dCmdUpdateGBImage { | 2139 | struct SVGA3dCmdUpdateGBImage { |
2136 | SVGA3dSurfaceImageId image; | 2140 | SVGA3dSurfaceImageId image; |
2137 | SVGA3dBox box; | 2141 | SVGA3dBox box; |
2138 | } SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ | 2142 | } __packed |
2143 | SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ | ||
2139 | 2144 | ||
2140 | /* | 2145 | /* |
2141 | * Update an entire guest-backed surface. | 2146 | * Update an entire guest-backed surface. |
@@ -2145,7 +2150,8 @@ struct SVGA3dCmdUpdateGBImage { | |||
2145 | typedef | 2150 | typedef |
2146 | struct SVGA3dCmdUpdateGBSurface { | 2151 | struct SVGA3dCmdUpdateGBSurface { |
2147 | uint32 sid; | 2152 | uint32 sid; |
2148 | } SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ | 2153 | } __packed |
2154 | SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ | ||
2149 | 2155 | ||
2150 | /* | 2156 | /* |
2151 | * Readback an image in a guest-backed surface. | 2157 | * Readback an image in a guest-backed surface. |
@@ -2155,7 +2161,8 @@ struct SVGA3dCmdUpdateGBSurface { | |||
2155 | typedef | 2161 | typedef |
2156 | struct SVGA3dCmdReadbackGBImage { | 2162 | struct SVGA3dCmdReadbackGBImage { |
2157 | SVGA3dSurfaceImageId image; | 2163 | SVGA3dSurfaceImageId image; |
2158 | } SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ | 2164 | } __packed |
2165 | SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ | ||
2159 | 2166 | ||
2160 | /* | 2167 | /* |
2161 | * Readback an entire guest-backed surface. | 2168 | * Readback an entire guest-backed surface. |
@@ -2165,7 +2172,8 @@ struct SVGA3dCmdReadbackGBImage { | |||
2165 | typedef | 2172 | typedef |
2166 | struct SVGA3dCmdReadbackGBSurface { | 2173 | struct SVGA3dCmdReadbackGBSurface { |
2167 | uint32 sid; | 2174 | uint32 sid; |
2168 | } SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ | 2175 | } __packed |
2176 | SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ | ||
2169 | 2177 | ||
2170 | /* | 2178 | /* |
2171 | * Readback a sub rect of an image in a guest-backed surface. After | 2179 | * Readback a sub rect of an image in a guest-backed surface. After |
@@ -2179,7 +2187,7 @@ struct SVGA3dCmdReadbackGBImagePartial { | |||
2179 | SVGA3dSurfaceImageId image; | 2187 | SVGA3dSurfaceImageId image; |
2180 | SVGA3dBox box; | 2188 | SVGA3dBox box; |
2181 | uint32 invertBox; | 2189 | uint32 invertBox; |
2182 | } | 2190 | } __packed |
2183 | SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ | 2191 | SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ |
2184 | 2192 | ||
2185 | /* | 2193 | /* |
@@ -2190,7 +2198,8 @@ SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ | |||
2190 | typedef | 2198 | typedef |
2191 | struct SVGA3dCmdInvalidateGBImage { | 2199 | struct SVGA3dCmdInvalidateGBImage { |
2192 | SVGA3dSurfaceImageId image; | 2200 | SVGA3dSurfaceImageId image; |
2193 | } SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ | 2201 | } __packed |
2202 | SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ | ||
2194 | 2203 | ||
2195 | /* | 2204 | /* |
2196 | * Invalidate an entire guest-backed surface. | 2205 | * Invalidate an entire guest-backed surface. |
@@ -2200,7 +2209,8 @@ struct SVGA3dCmdInvalidateGBImage { | |||
2200 | typedef | 2209 | typedef |
2201 | struct SVGA3dCmdInvalidateGBSurface { | 2210 | struct SVGA3dCmdInvalidateGBSurface { |
2202 | uint32 sid; | 2211 | uint32 sid; |
2203 | } SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ | 2212 | } __packed |
2213 | SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ | ||
2204 | 2214 | ||
2205 | /* | 2215 | /* |
2206 | * Invalidate a sub rect of an image in a guest-backed surface. After | 2216 | * Invalidate a sub rect of an image in a guest-backed surface. After |
@@ -2214,7 +2224,7 @@ struct SVGA3dCmdInvalidateGBImagePartial { | |||
2214 | SVGA3dSurfaceImageId image; | 2224 | SVGA3dSurfaceImageId image; |
2215 | SVGA3dBox box; | 2225 | SVGA3dBox box; |
2216 | uint32 invertBox; | 2226 | uint32 invertBox; |
2217 | } | 2227 | } __packed |
2218 | SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ | 2228 | SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ |
2219 | 2229 | ||
2220 | /* | 2230 | /* |
@@ -2224,7 +2234,8 @@ SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ | |||
2224 | typedef | 2234 | typedef |
2225 | struct SVGA3dCmdDefineGBContext { | 2235 | struct SVGA3dCmdDefineGBContext { |
2226 | uint32 cid; | 2236 | uint32 cid; |
2227 | } SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ | 2237 | } __packed |
2238 | SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ | ||
2228 | 2239 | ||
2229 | /* | 2240 | /* |
2230 | * Destroy a guest-backed context. | 2241 | * Destroy a guest-backed context. |
@@ -2233,7 +2244,8 @@ struct SVGA3dCmdDefineGBContext { | |||
2233 | typedef | 2244 | typedef |
2234 | struct SVGA3dCmdDestroyGBContext { | 2245 | struct SVGA3dCmdDestroyGBContext { |
2235 | uint32 cid; | 2246 | uint32 cid; |
2236 | } SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ | 2247 | } __packed |
2248 | SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ | ||
2237 | 2249 | ||
2238 | /* | 2250 | /* |
2239 | * Bind a guest-backed context. | 2251 | * Bind a guest-backed context. |
@@ -2252,7 +2264,8 @@ struct SVGA3dCmdBindGBContext { | |||
2252 | uint32 cid; | 2264 | uint32 cid; |
2253 | SVGAMobId mobid; | 2265 | SVGAMobId mobid; |
2254 | uint32 validContents; | 2266 | uint32 validContents; |
2255 | } SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ | 2267 | } __packed |
2268 | SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ | ||
2256 | 2269 | ||
2257 | /* | 2270 | /* |
2258 | * Readback a guest-backed context. | 2271 | * Readback a guest-backed context. |
@@ -2262,7 +2275,8 @@ struct SVGA3dCmdBindGBContext { | |||
2262 | typedef | 2275 | typedef |
2263 | struct SVGA3dCmdReadbackGBContext { | 2276 | struct SVGA3dCmdReadbackGBContext { |
2264 | uint32 cid; | 2277 | uint32 cid; |
2265 | } SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ | 2278 | } __packed |
2279 | SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ | ||
2266 | 2280 | ||
2267 | /* | 2281 | /* |
2268 | * Invalidate a guest-backed context. | 2282 | * Invalidate a guest-backed context. |
@@ -2270,7 +2284,8 @@ struct SVGA3dCmdReadbackGBContext { | |||
2270 | typedef | 2284 | typedef |
2271 | struct SVGA3dCmdInvalidateGBContext { | 2285 | struct SVGA3dCmdInvalidateGBContext { |
2272 | uint32 cid; | 2286 | uint32 cid; |
2273 | } SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ | 2287 | } __packed |
2288 | SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ | ||
2274 | 2289 | ||
2275 | /* | 2290 | /* |
2276 | * Define a guest-backed shader. | 2291 | * Define a guest-backed shader. |
@@ -2281,7 +2296,8 @@ struct SVGA3dCmdDefineGBShader { | |||
2281 | uint32 shid; | 2296 | uint32 shid; |
2282 | SVGA3dShaderType type; | 2297 | SVGA3dShaderType type; |
2283 | uint32 sizeInBytes; | 2298 | uint32 sizeInBytes; |
2284 | } SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ | 2299 | } __packed |
2300 | SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ | ||
2285 | 2301 | ||
2286 | /* | 2302 | /* |
2287 | * Bind a guest-backed shader. | 2303 | * Bind a guest-backed shader. |
@@ -2291,7 +2307,8 @@ typedef struct SVGA3dCmdBindGBShader { | |||
2291 | uint32 shid; | 2307 | uint32 shid; |
2292 | SVGAMobId mobid; | 2308 | SVGAMobId mobid; |
2293 | uint32 offsetInBytes; | 2309 | uint32 offsetInBytes; |
2294 | } SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ | 2310 | } __packed |
2311 | SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ | ||
2295 | 2312 | ||
2296 | /* | 2313 | /* |
2297 | * Destroy a guest-backed shader. | 2314 | * Destroy a guest-backed shader. |
@@ -2299,7 +2316,8 @@ typedef struct SVGA3dCmdBindGBShader { | |||
2299 | 2316 | ||
2300 | typedef struct SVGA3dCmdDestroyGBShader { | 2317 | typedef struct SVGA3dCmdDestroyGBShader { |
2301 | uint32 shid; | 2318 | uint32 shid; |
2302 | } SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ | 2319 | } __packed |
2320 | SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ | ||
2303 | 2321 | ||
2304 | typedef | 2322 | typedef |
2305 | struct { | 2323 | struct { |
@@ -2314,14 +2332,16 @@ struct { | |||
2314 | * Note that FLOAT and INT constants are 4-dwords in length, while | 2332 | * Note that FLOAT and INT constants are 4-dwords in length, while |
2315 | * BOOL constants are 1-dword in length. | 2333 | * BOOL constants are 1-dword in length. |
2316 | */ | 2334 | */ |
2317 | } SVGA3dCmdSetGBShaderConstInline; | 2335 | } __packed |
2336 | SVGA3dCmdSetGBShaderConstInline; | ||
2318 | /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ | 2337 | /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ |
2319 | 2338 | ||
2320 | typedef | 2339 | typedef |
2321 | struct { | 2340 | struct { |
2322 | uint32 cid; | 2341 | uint32 cid; |
2323 | SVGA3dQueryType type; | 2342 | SVGA3dQueryType type; |
2324 | } SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ | 2343 | } __packed |
2344 | SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ | ||
2325 | 2345 | ||
2326 | typedef | 2346 | typedef |
2327 | struct { | 2347 | struct { |
@@ -2329,7 +2349,8 @@ struct { | |||
2329 | SVGA3dQueryType type; | 2349 | SVGA3dQueryType type; |
2330 | SVGAMobId mobid; | 2350 | SVGAMobId mobid; |
2331 | uint32 offset; | 2351 | uint32 offset; |
2332 | } SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ | 2352 | } __packed |
2353 | SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ | ||
2333 | 2354 | ||
2334 | 2355 | ||
2335 | /* | 2356 | /* |
@@ -2346,21 +2367,22 @@ struct { | |||
2346 | SVGA3dQueryType type; | 2367 | SVGA3dQueryType type; |
2347 | SVGAMobId mobid; | 2368 | SVGAMobId mobid; |
2348 | uint32 offset; | 2369 | uint32 offset; |
2349 | } SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ | 2370 | } __packed |
2371 | SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ | ||
2350 | 2372 | ||
2351 | typedef | 2373 | typedef |
2352 | struct { | 2374 | struct { |
2353 | SVGAMobId mobid; | 2375 | SVGAMobId mobid; |
2354 | uint32 fbOffset; | 2376 | uint32 fbOffset; |
2355 | uint32 initalized; | 2377 | uint32 initalized; |
2356 | } | 2378 | } __packed |
2357 | SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ | 2379 | SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ |
2358 | 2380 | ||
2359 | typedef | 2381 | typedef |
2360 | struct { | 2382 | struct { |
2361 | SVGAMobId mobid; | 2383 | SVGAMobId mobid; |
2362 | uint32 gartOffset; | 2384 | uint32 gartOffset; |
2363 | } | 2385 | } __packed |
2364 | SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ | 2386 | SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ |
2365 | 2387 | ||
2366 | 2388 | ||
@@ -2368,7 +2390,7 @@ typedef | |||
2368 | struct { | 2390 | struct { |
2369 | uint32 gartOffset; | 2391 | uint32 gartOffset; |
2370 | uint32 numPages; | 2392 | uint32 numPages; |
2371 | } | 2393 | } __packed |
2372 | SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ | 2394 | SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ |
2373 | 2395 | ||
2374 | 2396 | ||
@@ -2385,27 +2407,27 @@ struct { | |||
2385 | int32 xRoot; | 2407 | int32 xRoot; |
2386 | int32 yRoot; | 2408 | int32 yRoot; |
2387 | uint32 flags; | 2409 | uint32 flags; |
2388 | } | 2410 | } __packed |
2389 | SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ | 2411 | SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ |
2390 | 2412 | ||
2391 | typedef | 2413 | typedef |
2392 | struct { | 2414 | struct { |
2393 | uint32 stid; | 2415 | uint32 stid; |
2394 | } | 2416 | } __packed |
2395 | SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ | 2417 | SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ |
2396 | 2418 | ||
2397 | typedef | 2419 | typedef |
2398 | struct { | 2420 | struct { |
2399 | uint32 stid; | 2421 | uint32 stid; |
2400 | SVGA3dSurfaceImageId image; | 2422 | SVGA3dSurfaceImageId image; |
2401 | } | 2423 | } __packed |
2402 | SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ | 2424 | SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ |
2403 | 2425 | ||
2404 | typedef | 2426 | typedef |
2405 | struct { | 2427 | struct { |
2406 | uint32 stid; | 2428 | uint32 stid; |
2407 | SVGA3dBox box; | 2429 | SVGA3dBox box; |
2408 | } | 2430 | } __packed |
2409 | SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ | 2431 | SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ |
2410 | 2432 | ||
2411 | /* | 2433 | /* |
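Reviewer note on the svga3d_reg.h hunks above: besides the new command IDs, every command body that travels over the device FIFO is now marked __packed (the kernel shorthand for __attribute__((__packed__))), so the compiler cannot insert padding into a wire-format structure. A small self-contained sketch of the effect, with an illustrative command body rather than an SVGA one:

#include <stdio.h>
#include <stdint.h>

/* The kernel's <linux/compiler.h> provides __packed; defining it the same
 * way here keeps the sketch self-contained. */
#define __packed __attribute__((__packed__))

/* Wire-format structs must not contain compiler-inserted padding. */
typedef struct {
    uint32_t sid;
    uint64_t mobid;
} __packed cmd_bind_surface;

typedef struct {
    uint32_t sid;
    uint64_t mobid;
} cmd_bind_surface_unpacked;

int main(void)
{
    printf("packed=%zu unpacked=%zu\n",
           sizeof(cmd_bind_surface), sizeof(cmd_bind_surface_unpacked));
    return 0;
}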
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h index 8369c3ba10fe..ef3385096145 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h | |||
@@ -38,8 +38,11 @@ | |||
38 | 38 | ||
39 | #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) | 39 | #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) |
40 | #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) | 40 | #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) |
41 | #define min_t(type, x, y) ((x) < (y) ? (x) : (y)) | ||
41 | #define surf_size_struct SVGA3dSize | 42 | #define surf_size_struct SVGA3dSize |
42 | #define u32 uint32 | 43 | #define u32 uint32 |
44 | #define u64 uint64_t | ||
45 | #define U32_MAX ((u32)~0U) | ||
43 | 46 | ||
44 | #endif /* __KERNEL__ */ | 47 | #endif /* __KERNEL__ */ |
45 | 48 | ||
@@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = { | |||
704 | 707 | ||
705 | static inline u32 clamped_umul32(u32 a, u32 b) | 708 | static inline u32 clamped_umul32(u32 a, u32 b) |
706 | { | 709 | { |
707 | uint64_t tmp = (uint64_t) a*b; | 710 | u64 tmp = (u64) a*b; |
708 | return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; | 711 | return (tmp > (u64) U32_MAX) ? U32_MAX : tmp; |
709 | } | 712 | } |
710 | 713 | ||
711 | static inline const struct svga3d_surface_desc * | 714 | static inline const struct svga3d_surface_desc * |
@@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, | |||
834 | bool cubemap) | 837 | bool cubemap) |
835 | { | 838 | { |
836 | const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); | 839 | const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); |
837 | u32 total_size = 0; | 840 | u64 total_size = 0; |
838 | u32 mip; | 841 | u32 mip; |
839 | 842 | ||
840 | for (mip = 0; mip < num_mip_levels; mip++) { | 843 | for (mip = 0; mip < num_mip_levels; mip++) { |
@@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, | |||
847 | if (cubemap) | 850 | if (cubemap) |
848 | total_size *= SVGA3D_MAX_SURFACE_FACES; | 851 | total_size *= SVGA3D_MAX_SURFACE_FACES; |
849 | 852 | ||
850 | return total_size; | 853 | return (u32) min_t(u64, total_size, (u64) U32_MAX); |
851 | } | 854 | } |
852 | 855 | ||
853 | 856 | ||
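Reviewer note on the svga3d_surfacedefs.h hunks above: the surface size math now happens in 64 bits (u64, U32_MAX, min_t) and saturates at U32_MAX, so a large guest-supplied surface description cannot wrap the 32-bit total back around to a small value. A compilable sketch of the saturating multiply:

#include <stdio.h>
#include <stdint.h>

#define U32_MAX ((uint32_t)~0U)

/* Multiply in 64 bits and saturate at U32_MAX so an oversized input cannot
 * wrap around to a small 32-bit result. */
static uint32_t clamped_umul32(uint32_t a, uint32_t b)
{
    uint64_t tmp = (uint64_t)a * b;

    return tmp > U32_MAX ? U32_MAX : (uint32_t)tmp;
}

int main(void)
{
    /* 65536 * 65536 would wrap to 0 in 32-bit arithmetic. */
    printf("0x%x\n", clamped_umul32(65536, 65536));
    return 0;
}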
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h index 71defa4d2d75..11323dd5196f 100644 --- a/drivers/gpu/drm/vmwgfx/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga_reg.h | |||
@@ -169,10 +169,17 @@ enum { | |||
169 | SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ | 169 | SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ |
170 | SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ | 170 | SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ |
171 | SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ | 171 | SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ |
172 | SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ | ||
173 | SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ | ||
172 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ | 174 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ |
173 | SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ | 175 | SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ |
174 | SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ | 176 | SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ |
175 | SVGA_REG_TOP = 53, /* Must be 1 more than the last register */ | 177 | SVGA_REG_CMD_PREPEND_LOW = 53, |
178 | SVGA_REG_CMD_PREPEND_HIGH = 54, | ||
179 | SVGA_REG_SCREENTARGET_MAX_WIDTH = 55, | ||
180 | SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56, | ||
181 | SVGA_REG_MOB_MAX_SIZE = 57, | ||
182 | SVGA_REG_TOP = 58, /* Must be 1 more than the last register */ | ||
176 | 183 | ||
177 | SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ | 184 | SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ |
178 | /* Next 768 (== 256*3) registers exist for colormap */ | 185 | /* Next 768 (== 256*3) registers exist for colormap */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 9426c53fb483..1e80152674b5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c | |||
@@ -551,8 +551,7 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) | |||
551 | cmd->header.size = sizeof(cmd->body); | 551 | cmd->header.size = sizeof(cmd->body); |
552 | cmd->body.cid = bi->ctx->id; | 552 | cmd->body.cid = bi->ctx->id; |
553 | cmd->body.type = bi->i1.shader_type; | 553 | cmd->body.type = bi->i1.shader_type; |
554 | cmd->body.shid = | 554 | cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); |
555 | cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
556 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 555 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
557 | 556 | ||
558 | return 0; | 557 | return 0; |
@@ -585,8 +584,7 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, | |||
585 | cmd->header.size = sizeof(cmd->body); | 584 | cmd->header.size = sizeof(cmd->body); |
586 | cmd->body.cid = bi->ctx->id; | 585 | cmd->body.cid = bi->ctx->id; |
587 | cmd->body.type = bi->i1.rt_type; | 586 | cmd->body.type = bi->i1.rt_type; |
588 | cmd->body.target.sid = | 587 | cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); |
589 | cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
590 | cmd->body.target.face = 0; | 588 | cmd->body.target.face = 0; |
591 | cmd->body.target.mipmap = 0; | 589 | cmd->body.target.mipmap = 0; |
592 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 590 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
@@ -628,8 +626,7 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, | |||
628 | cmd->body.c.cid = bi->ctx->id; | 626 | cmd->body.c.cid = bi->ctx->id; |
629 | cmd->body.s1.stage = bi->i1.texture_stage; | 627 | cmd->body.s1.stage = bi->i1.texture_stage; |
630 | cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; | 628 | cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; |
631 | cmd->body.s1.value = | 629 | cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); |
632 | cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); | ||
633 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 630 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
634 | 631 | ||
635 | return 0; | 632 | return 0; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 3bdc0adc656d..0083cbf99edf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -667,6 +667,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
667 | dev_priv->memory_size = 512*1024*1024; | 667 | dev_priv->memory_size = 512*1024*1024; |
668 | } | 668 | } |
669 | dev_priv->max_mob_pages = 0; | 669 | dev_priv->max_mob_pages = 0; |
670 | dev_priv->max_mob_size = 0; | ||
670 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | 671 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { |
671 | uint64_t mem_size = | 672 | uint64_t mem_size = |
672 | vmw_read(dev_priv, | 673 | vmw_read(dev_priv, |
@@ -676,6 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
676 | dev_priv->prim_bb_mem = | 677 | dev_priv->prim_bb_mem = |
677 | vmw_read(dev_priv, | 678 | vmw_read(dev_priv, |
678 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); | 679 | SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); |
680 | dev_priv->max_mob_size = | ||
681 | vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); | ||
679 | } else | 682 | } else |
680 | dev_priv->prim_bb_mem = dev_priv->vram_size; | 683 | dev_priv->prim_bb_mem = dev_priv->vram_size; |
681 | 684 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index ecaa302a6154..9e4be1725985 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -386,6 +386,7 @@ struct vmw_private { | |||
386 | uint32_t max_gmr_ids; | 386 | uint32_t max_gmr_ids; |
387 | uint32_t max_gmr_pages; | 387 | uint32_t max_gmr_pages; |
388 | uint32_t max_mob_pages; | 388 | uint32_t max_mob_pages; |
389 | uint32_t max_mob_size; | ||
389 | uint32_t memory_size; | 390 | uint32_t memory_size; |
390 | bool has_gmr; | 391 | bool has_gmr; |
391 | bool has_mob; | 392 | bool has_mob; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 269b85cc875a..efb575a7996c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -602,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | |||
602 | { | 602 | { |
603 | struct vmw_cid_cmd { | 603 | struct vmw_cid_cmd { |
604 | SVGA3dCmdHeader header; | 604 | SVGA3dCmdHeader header; |
605 | __le32 cid; | 605 | uint32_t cid; |
606 | } *cmd; | 606 | } *cmd; |
607 | 607 | ||
608 | cmd = container_of(header, struct vmw_cid_cmd, header); | 608 | cmd = container_of(header, struct vmw_cid_cmd, header); |
@@ -1835,7 +1835,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, | |||
1835 | return 0; | 1835 | return 0; |
1836 | } | 1836 | } |
1837 | 1837 | ||
1838 | static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | 1838 | static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { |
1839 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, | 1839 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, |
1840 | false, false, false), | 1840 | false, false, false), |
1841 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, | 1841 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, |
@@ -2032,6 +2032,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
2032 | goto out_invalid; | 2032 | goto out_invalid; |
2033 | 2033 | ||
2034 | entry = &vmw_cmd_entries[cmd_id]; | 2034 | entry = &vmw_cmd_entries[cmd_id]; |
2035 | if (unlikely(!entry->func)) | ||
2036 | goto out_invalid; | ||
2037 | |||
2035 | if (unlikely(!entry->user_allow && !sw_context->kernel)) | 2038 | if (unlikely(!entry->user_allow && !sw_context->kernel)) |
2036 | goto out_privileged; | 2039 | goto out_privileged; |
2037 | 2040 | ||
@@ -2469,7 +2472,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, | |||
2469 | if (dev_priv->has_mob) { | 2472 | if (dev_priv->has_mob) { |
2470 | ret = vmw_rebind_contexts(sw_context); | 2473 | ret = vmw_rebind_contexts(sw_context); |
2471 | if (unlikely(ret != 0)) | 2474 | if (unlikely(ret != 0)) |
2472 | goto out_err; | 2475 | goto out_unlock_binding; |
2473 | } | 2476 | } |
2474 | 2477 | ||
2475 | cmd = vmw_fifo_reserve(dev_priv, command_size); | 2478 | cmd = vmw_fifo_reserve(dev_priv, command_size); |
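Reviewer note on the vmwgfx_execbuf.c hunks above: the cid field becomes a plain CPU-order uint32_t, the duplicate 'const' is dropped, command checking now rejects in-range command IDs whose table slot has no handler, and a context-rebind failure unwinds through out_unlock_binding instead of out_err. A sketch of the dispatch-table guard, with a tiny illustrative table:

#include <stdio.h>

#define CMD_MAX 4

/* Illustrative dispatch table: some command IDs have no handler. */
struct cmd_entry {
    int (*func)(void);
};

static int cmd_ok(void) { return 0; }

static const struct cmd_entry cmd_entries[CMD_MAX] = {
    [0] = { .func = cmd_ok },
    /* IDs 1..3 intentionally left without a handler. */
};

/* An in-range command ID is still invalid when its table slot has no
 * handler, so check entry->func before using the entry. */
static int check_cmd(unsigned int cmd_id)
{
    const struct cmd_entry *entry;

    if (cmd_id >= CMD_MAX)
        return -1;

    entry = &cmd_entries[cmd_id];
    if (!entry->func)
        return -1;

    return entry->func();
}

int main(void)
{
    printf("cmd 0 -> %d, cmd 2 -> %d\n", check_cmd(0), check_cmd(2));
    return 0;
}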
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index f9881f9e62bd..47b70949bf3a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -102,6 +102,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, | |||
102 | vmw_fp->gb_aware = true; | 102 | vmw_fp->gb_aware = true; |
103 | param->value = dev_priv->max_mob_pages * PAGE_SIZE; | 103 | param->value = dev_priv->max_mob_pages * PAGE_SIZE; |
104 | break; | 104 | break; |
105 | case DRM_VMW_PARAM_MAX_MOB_SIZE: | ||
106 | param->value = dev_priv->max_mob_size; | ||
107 | break; | ||
105 | default: | 108 | default: |
106 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | 109 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", |
107 | param->param); | 110 | param->param); |
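Reviewer note on the vmwgfx_drv.c/vmwgfx_ioctl.c pair above: when the device advertises guest-backed objects, the driver reads SVGA_REG_MOB_MAX_SIZE into dev_priv->max_mob_size and exposes it through the new DRM_VMW_PARAM_MAX_MOB_SIZE parameter. A toy sketch of that probe-then-report flow, with a hypothetical register value instead of a real device read:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative device-private state mirroring the new max_mob_size field. */
struct dev_priv {
    bool has_gb_objects;
    uint32_t max_mob_size;
};

/* Stand-in for the capability-gated register read added in vmwgfx_drv.c. */
static void probe_limits(struct dev_priv *dev)
{
    if (dev->has_gb_objects)
        dev->max_mob_size = 256 * 1024 * 1024;  /* hypothetical value */
    else
        dev->max_mob_size = 0;
}

/* Stand-in for the new GETPARAM case: report the limit to userspace. */
static uint64_t getparam_max_mob_size(const struct dev_priv *dev)
{
    return dev->max_mob_size;
}

int main(void)
{
    struct dev_priv dev = { .has_gb_objects = true };

    probe_limits(&dev);
    printf("max mob size: %llu\n",
           (unsigned long long)getparam_max_mob_size(&dev));
    return 0;
}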
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 217d941b8176..ee3856578a12 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
@@ -371,13 +371,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, | |||
371 | TTM_REF_USAGE); | 371 | TTM_REF_USAGE); |
372 | } | 372 | } |
373 | 373 | ||
374 | int vmw_shader_alloc(struct vmw_private *dev_priv, | 374 | static int vmw_shader_alloc(struct vmw_private *dev_priv, |
375 | struct vmw_dma_buffer *buffer, | 375 | struct vmw_dma_buffer *buffer, |
376 | size_t shader_size, | 376 | size_t shader_size, |
377 | size_t offset, | 377 | size_t offset, |
378 | SVGA3dShaderType shader_type, | 378 | SVGA3dShaderType shader_type, |
379 | struct ttm_object_file *tfile, | 379 | struct ttm_object_file *tfile, |
380 | u32 *handle) | 380 | u32 *handle) |
381 | { | 381 | { |
382 | struct vmw_user_shader *ushader; | 382 | struct vmw_user_shader *ushader; |
383 | struct vmw_resource *res, *tmp; | 383 | struct vmw_resource *res, *tmp; |
@@ -779,6 +779,8 @@ vmw_compat_shader_man_create(struct vmw_private *dev_priv) | |||
779 | int ret; | 779 | int ret; |
780 | 780 | ||
781 | man = kzalloc(sizeof(*man), GFP_KERNEL); | 781 | man = kzalloc(sizeof(*man), GFP_KERNEL); |
782 | if (man == NULL) | ||
783 | return ERR_PTR(-ENOMEM); | ||
782 | 784 | ||
783 | man->dev_priv = dev_priv; | 785 | man->dev_priv = dev_priv; |
784 | INIT_LIST_HEAD(&man->list); | 786 | INIT_LIST_HEAD(&man->list); |
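Reviewer note on the vmwgfx_shader.c hunks above: vmw_shader_alloc() becomes static, and vmw_compat_shader_man_create() now checks the kzalloc() result before touching it, returning ERR_PTR(-ENOMEM) on failure. A standalone sketch of the missing-NULL-check fix, mapping the kernel's ERR_PTR convention onto plain NULL plus errno purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Illustrative manager object; the real code returns ERR_PTR(-ENOMEM),
 * the sketch just maps allocation failure to NULL and errno. */
struct shader_man {
    int dummy;
};

static struct shader_man *man_create(void)
{
    struct shader_man *man = calloc(1, sizeof(*man));

    /* The check the hunk adds: bail out instead of dereferencing NULL. */
    if (man == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    man->dummy = 1;
    return man;
}

int main(void)
{
    struct shader_man *man = man_create();

    printf("created: %s\n", man ? "yes" : "no");
    free(man);
    return 0;
}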
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 497558127bb3..f822fd2a1ada 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c | |||
@@ -469,6 +469,9 @@ static const struct hid_device_id apple_devices[] = { | |||
469 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, | 469 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, |
470 | USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), | 470 | USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), |
471 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, | 471 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, |
472 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, | ||
473 | USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS), | ||
474 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, | ||
472 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), | 475 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), |
473 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, | 476 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, |
474 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), | 477 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 3bfac3accd22..cc32a6f96c64 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1679,6 +1679,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1679 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, | 1679 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, |
1680 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, | 1680 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, |
1681 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, | 1681 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, |
1682 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) }, | ||
1682 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, | 1683 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, |
1683 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, | 1684 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, |
1684 | { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, | 1685 | { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, |
@@ -1779,6 +1780,8 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1779 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, | 1780 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, |
1780 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, | 1781 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, |
1781 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, | 1782 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, |
1783 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) }, | ||
1784 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) }, | ||
1782 | { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, | 1785 | { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, |
1783 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, | 1786 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, |
1784 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, | 1787 | { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, |
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index 8fae6d1414cc..c24908f14934 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c | |||
@@ -157,6 +157,7 @@ struct mousevsc_dev { | |||
157 | u32 report_desc_size; | 157 | u32 report_desc_size; |
158 | struct hv_input_dev_info hid_dev_info; | 158 | struct hv_input_dev_info hid_dev_info; |
159 | struct hid_device *hid_device; | 159 | struct hid_device *hid_device; |
160 | u8 input_buf[HID_MAX_BUFFER_SIZE]; | ||
160 | }; | 161 | }; |
161 | 162 | ||
162 | 163 | ||
@@ -256,6 +257,7 @@ static void mousevsc_on_receive(struct hv_device *device, | |||
256 | struct synthhid_msg *hid_msg; | 257 | struct synthhid_msg *hid_msg; |
257 | struct mousevsc_dev *input_dev = hv_get_drvdata(device); | 258 | struct mousevsc_dev *input_dev = hv_get_drvdata(device); |
258 | struct synthhid_input_report *input_report; | 259 | struct synthhid_input_report *input_report; |
260 | size_t len; | ||
259 | 261 | ||
260 | pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet + | 262 | pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet + |
261 | (packet->offset8 << 3)); | 263 | (packet->offset8 << 3)); |
@@ -300,9 +302,12 @@ static void mousevsc_on_receive(struct hv_device *device, | |||
300 | (struct synthhid_input_report *)pipe_msg->data; | 302 | (struct synthhid_input_report *)pipe_msg->data; |
301 | if (!input_dev->init_complete) | 303 | if (!input_dev->init_complete) |
302 | break; | 304 | break; |
303 | hid_input_report(input_dev->hid_device, | 305 | |
304 | HID_INPUT_REPORT, input_report->buffer, | 306 | len = min(input_report->header.size, |
305 | input_report->header.size, 1); | 307 | (u32)sizeof(input_dev->input_buf)); |
308 | memcpy(input_dev->input_buf, input_report->buffer, len); | ||
309 | hid_input_report(input_dev->hid_device, HID_INPUT_REPORT, | ||
310 | input_dev->input_buf, len, 1); | ||
306 | break; | 311 | break; |
307 | default: | 312 | default: |
308 | pr_err("unsupported hid msg type - type %d len %d", | 313 | pr_err("unsupported hid msg type - type %d len %d", |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5a5248f2cc07..22f28d6b33a8 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -135,6 +135,7 @@ | |||
135 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b | 135 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b |
136 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 | 136 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 |
137 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 | 137 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 |
138 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 | ||
138 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 | 139 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 |
139 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 | 140 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 |
140 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 | 141 | #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 |
@@ -240,6 +241,7 @@ | |||
240 | 241 | ||
241 | #define USB_VENDOR_ID_CYGNAL 0x10c4 | 242 | #define USB_VENDOR_ID_CYGNAL 0x10c4 |
242 | #define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a | 243 | #define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a |
244 | #define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9 | ||
243 | 245 | ||
244 | #define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244 | 246 | #define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244 |
245 | 247 | ||
@@ -451,6 +453,9 @@ | |||
451 | #define USB_VENDOR_ID_INTEL_1 0x8087 | 453 | #define USB_VENDOR_ID_INTEL_1 0x8087 |
452 | #define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa | 454 | #define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa |
453 | 455 | ||
456 | #define USB_VENDOR_ID_STM_0 0x0483 | ||
457 | #define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1 | ||
458 | |||
454 | #define USB_VENDOR_ID_ION 0x15e4 | 459 | #define USB_VENDOR_ID_ION 0x15e4 |
455 | #define USB_DEVICE_ID_ICADE 0x0132 | 460 | #define USB_DEVICE_ID_ICADE 0x0132 |
456 | 461 | ||
@@ -619,6 +624,8 @@ | |||
619 | #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 | 624 | #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 |
620 | #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 | 625 | #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 |
621 | #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c | 626 | #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c |
627 | #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 | ||
628 | #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 | ||
622 | 629 | ||
623 | #define USB_VENDOR_ID_MOJO 0x8282 | 630 | #define USB_VENDOR_ID_MOJO 0x8282 |
624 | #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 | 631 | #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 |
@@ -644,6 +651,7 @@ | |||
644 | 651 | ||
645 | #define USB_VENDOR_ID_NEXIO 0x1870 | 652 | #define USB_VENDOR_ID_NEXIO 0x1870 |
646 | #define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d | 653 | #define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d |
654 | #define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110 | ||
647 | 655 | ||
648 | #define USB_VENDOR_ID_NEXTWINDOW 0x1926 | 656 | #define USB_VENDOR_ID_NEXTWINDOW 0x1926 |
649 | #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 | 657 | #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003 |
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index d50e7313b171..a713e6211419 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c | |||
@@ -1178,7 +1178,7 @@ static void hidinput_led_worker(struct work_struct *work) | |||
1178 | 1178 | ||
1179 | /* fall back to generic raw-output-report */ | 1179 | /* fall back to generic raw-output-report */ |
1180 | len = ((report->size - 1) >> 3) + 1 + (report->id > 0); | 1180 | len = ((report->size - 1) >> 3) + 1 + (report->id > 0); |
1181 | buf = kmalloc(len, GFP_KERNEL); | 1181 | buf = hid_alloc_report_buf(report, GFP_KERNEL); |
1182 | if (!buf) | 1182 | if (!buf) |
1183 | return; | 1183 | return; |
1184 | 1184 | ||
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index c6ef6eed3091..404a3a8a82f1 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c | |||
@@ -208,6 +208,10 @@ static const struct hid_device_id ms_devices[] = { | |||
208 | .driver_data = MS_NOGET }, | 208 | .driver_data = MS_NOGET }, |
209 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), | 209 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), |
210 | .driver_data = MS_DUPLICATE_USAGES }, | 210 | .driver_data = MS_DUPLICATE_USAGES }, |
211 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), | ||
212 | .driver_data = 0 }, | ||
213 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), | ||
214 | .driver_data = 0 }, | ||
211 | 215 | ||
212 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), | 216 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), |
213 | .driver_data = MS_PRESENTER }, | 217 | .driver_data = MS_PRESENTER }, |
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index f134d73beca1..221d503f1c24 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c | |||
@@ -1166,6 +1166,11 @@ static const struct hid_device_id mt_devices[] = { | |||
1166 | MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, | 1166 | MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, |
1167 | USB_DEVICE_ID_MULTITOUCH_3200) }, | 1167 | USB_DEVICE_ID_MULTITOUCH_3200) }, |
1168 | 1168 | ||
1169 | /* FocalTech Panels */ | ||
1170 | { .driver_data = MT_CLS_SERIAL, | ||
1171 | MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL, | ||
1172 | USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) }, | ||
1173 | |||
1169 | /* GeneralTouch panel */ | 1174 | /* GeneralTouch panel */ |
1170 | { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, | 1175 | { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, |
1171 | MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, | 1176 | MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, |
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 46f4480035bc..9c22e14c57f0 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c | |||
@@ -665,6 +665,9 @@ static const struct hid_device_id sensor_hub_devices[] = { | |||
665 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1, | 665 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1, |
666 | USB_DEVICE_ID_INTEL_HID_SENSOR), | 666 | USB_DEVICE_ID_INTEL_HID_SENSOR), |
667 | .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, | 667 | .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, |
668 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0, | ||
669 | USB_DEVICE_ID_STM_HID_SENSOR), | ||
670 | .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, | ||
668 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, | 671 | { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, |
669 | HID_ANY_ID) }, | 672 | HID_ANY_ID) }, |
670 | { } | 673 | { } |
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index d1f81f52481a..42eebd14de1f 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
@@ -582,7 +582,7 @@ static void i2c_hid_request(struct hid_device *hid, struct hid_report *rep, | |||
582 | int ret; | 582 | int ret; |
583 | int len = i2c_hid_get_report_length(rep) - 2; | 583 | int len = i2c_hid_get_report_length(rep) - 2; |
584 | 584 | ||
585 | buf = kzalloc(len, GFP_KERNEL); | 585 | buf = hid_alloc_report_buf(rep, GFP_KERNEL); |
586 | if (!buf) | 586 | if (!buf) |
587 | return; | 587 | return; |
588 | 588 | ||
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 175ec0afb70c..dbd83878ff99 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -74,6 +74,7 @@ static const struct hid_blacklist { | |||
74 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, | 74 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, |
75 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, | 75 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, |
76 | { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, | 76 | { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, |
77 | { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, | ||
77 | { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, | 78 | { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, |
78 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, | 79 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, |
79 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, | 80 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, |
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index af6edf9b1936..f2d7bf90c9fe 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c | |||
@@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, | |||
67 | int ret = 0; | 67 | int ret = 0; |
68 | struct vmbus_channel_initiate_contact *msg; | 68 | struct vmbus_channel_initiate_contact *msg; |
69 | unsigned long flags; | 69 | unsigned long flags; |
70 | int t; | ||
71 | 70 | ||
72 | init_completion(&msginfo->waitevent); | 71 | init_completion(&msginfo->waitevent); |
73 | 72 | ||
@@ -78,6 +77,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, | |||
78 | msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); | 77 | msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); |
79 | msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); | 78 | msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); |
80 | msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); | 79 | msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); |
80 | if (version == VERSION_WIN8) | ||
81 | msg->target_vcpu = hv_context.vp_index[smp_processor_id()]; | ||
81 | 82 | ||
82 | /* | 83 | /* |
83 | * Add to list before we send the request since we may | 84 | * Add to list before we send the request since we may |
@@ -100,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, | |||
100 | } | 101 | } |
101 | 102 | ||
102 | /* Wait for the connection response */ | 103 | /* Wait for the connection response */ |
103 | t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ); | 104 | wait_for_completion(&msginfo->waitevent); |
104 | if (t == 0) { | ||
105 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, | ||
106 | flags); | ||
107 | list_del(&msginfo->msglistentry); | ||
108 | spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, | ||
109 | flags); | ||
110 | return -ETIMEDOUT; | ||
111 | } | ||
112 | 105 | ||
113 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); | 106 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); |
114 | list_del(&msginfo->msglistentry); | 107 | list_del(&msginfo->msglistentry); |
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 8c23203915af..8a17f01e8672 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c | |||
@@ -145,7 +145,7 @@ struct ntc_data { | |||
145 | static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) | 145 | static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) |
146 | { | 146 | { |
147 | struct iio_channel *channel = pdata->chan; | 147 | struct iio_channel *channel = pdata->chan; |
148 | unsigned int result; | 148 | s64 result; |
149 | int val, ret; | 149 | int val, ret; |
150 | 150 | ||
151 | ret = iio_read_channel_raw(channel, &val); | 151 | ret = iio_read_channel_raw(channel, &val); |
@@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) | |||
155 | } | 155 | } |
156 | 156 | ||
157 | /* unit: mV */ | 157 | /* unit: mV */ |
158 | result = pdata->pullup_uv * val; | 158 | result = pdata->pullup_uv * (s64) val; |
159 | result >>= 12; | 159 | result >>= 12; |
160 | 160 | ||
161 | return result; | 161 | return (int)result; |
162 | } | 162 | } |
163 | 163 | ||
164 | static const struct of_device_id ntc_match[] = { | 164 | static const struct of_device_id ntc_match[] = { |
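
The ntc_thermistor fix widens the pullup_uv * val product to 64 bits before the right shift, since a 32-bit intermediate can wrap for plausible inputs. A small stand-alone sketch of the difference (the example values are assumptions, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t pullup_uv = 3300000; /* 3.3 V pull-up, in microvolts */
    int val = 4000;               /* raw 12-bit ADC reading */

    /* 32-bit intermediate: 3300000 * 4000 = 13.2e9 does not fit and wraps. */
    unsigned int wrapped = pullup_uv * val;

    /* 64-bit intermediate, as in the fixed driver code. */
    int64_t wide = (int64_t)pullup_uv * val;

    printf("wrapped 32-bit result >> 12: %u\n", wrapped >> 12);
    printf("correct 64-bit result >> 12: %lld\n", (long long)(wide >> 12));
    return 0;
}
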
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index b8c5187b9ee0..d52d84937ad3 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c | |||
@@ -97,7 +97,6 @@ enum { | |||
97 | enum { | 97 | enum { |
98 | MV64XXX_I2C_ACTION_INVALID, | 98 | MV64XXX_I2C_ACTION_INVALID, |
99 | MV64XXX_I2C_ACTION_CONTINUE, | 99 | MV64XXX_I2C_ACTION_CONTINUE, |
100 | MV64XXX_I2C_ACTION_OFFLOAD_SEND_START, | ||
101 | MV64XXX_I2C_ACTION_SEND_START, | 100 | MV64XXX_I2C_ACTION_SEND_START, |
102 | MV64XXX_I2C_ACTION_SEND_RESTART, | 101 | MV64XXX_I2C_ACTION_SEND_RESTART, |
103 | MV64XXX_I2C_ACTION_OFFLOAD_RESTART, | 102 | MV64XXX_I2C_ACTION_OFFLOAD_RESTART, |
@@ -204,6 +203,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data) | |||
204 | unsigned long ctrl_reg; | 203 | unsigned long ctrl_reg; |
205 | struct i2c_msg *msg = drv_data->msgs; | 204 | struct i2c_msg *msg = drv_data->msgs; |
206 | 205 | ||
206 | if (!drv_data->offload_enabled) | ||
207 | return -EOPNOTSUPP; | ||
208 | |||
207 | drv_data->msg = msg; | 209 | drv_data->msg = msg; |
208 | drv_data->byte_posn = 0; | 210 | drv_data->byte_posn = 0; |
209 | drv_data->bytes_left = msg->len; | 211 | drv_data->bytes_left = msg->len; |
@@ -433,8 +435,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) | |||
433 | 435 | ||
434 | drv_data->msgs++; | 436 | drv_data->msgs++; |
435 | drv_data->num_msgs--; | 437 | drv_data->num_msgs--; |
436 | if (!(drv_data->offload_enabled && | 438 | if (mv64xxx_i2c_offload_msg(drv_data) < 0) { |
437 | mv64xxx_i2c_offload_msg(drv_data))) { | ||
438 | drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START; | 439 | drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START; |
439 | writel(drv_data->cntl_bits, | 440 | writel(drv_data->cntl_bits, |
440 | drv_data->reg_base + drv_data->reg_offsets.control); | 441 | drv_data->reg_base + drv_data->reg_offsets.control); |
@@ -458,15 +459,14 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) | |||
458 | drv_data->reg_base + drv_data->reg_offsets.control); | 459 | drv_data->reg_base + drv_data->reg_offsets.control); |
459 | break; | 460 | break; |
460 | 461 | ||
461 | case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START: | ||
462 | if (!mv64xxx_i2c_offload_msg(drv_data)) | ||
463 | break; | ||
464 | else | ||
465 | drv_data->action = MV64XXX_I2C_ACTION_SEND_START; | ||
466 | /* FALLTHRU */ | ||
467 | case MV64XXX_I2C_ACTION_SEND_START: | 462 | case MV64XXX_I2C_ACTION_SEND_START: |
468 | writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START, | 463 | /* Can we offload this msg ? */ |
469 | drv_data->reg_base + drv_data->reg_offsets.control); | 464 | if (mv64xxx_i2c_offload_msg(drv_data) < 0) { |
465 | /* No, switch to standard path */ | ||
466 | mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs); | ||
467 | writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START, | ||
468 | drv_data->reg_base + drv_data->reg_offsets.control); | ||
469 | } | ||
470 | break; | 470 | break; |
471 | 471 | ||
472 | case MV64XXX_I2C_ACTION_SEND_ADDR_1: | 472 | case MV64XXX_I2C_ACTION_SEND_ADDR_1: |
@@ -625,15 +625,10 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg, | |||
625 | unsigned long flags; | 625 | unsigned long flags; |
626 | 626 | ||
627 | spin_lock_irqsave(&drv_data->lock, flags); | 627 | spin_lock_irqsave(&drv_data->lock, flags); |
628 | if (drv_data->offload_enabled) { | ||
629 | drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START; | ||
630 | drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; | ||
631 | } else { | ||
632 | mv64xxx_i2c_prepare_for_io(drv_data, msg); | ||
633 | 628 | ||
634 | drv_data->action = MV64XXX_I2C_ACTION_SEND_START; | 629 | drv_data->action = MV64XXX_I2C_ACTION_SEND_START; |
635 | drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; | 630 | drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; |
636 | } | 631 | |
637 | drv_data->send_stop = is_last; | 632 | drv_data->send_stop = is_last; |
638 | drv_data->block = 1; | 633 | drv_data->block = 1; |
639 | mv64xxx_i2c_do_action(drv_data); | 634 | mv64xxx_i2c_do_action(drv_data); |
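
After this rework, mv64xxx_i2c_offload_msg() itself reports -EOPNOTSUPP when offload is unavailable, and the SEND_START path falls back to the standard programmed-I/O start. A generic sketch of that try-then-fall-back shape (all names below are placeholders, not the driver's symbols):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool offload_enabled;

/* Returns 0 if the message was handed to the offload engine,
 * or a negative errno when the caller must use the standard path. */
static int try_offload(const char *msg)
{
    if (!offload_enabled)
        return -EOPNOTSUPP;
    printf("offloaded: %s\n", msg);
    return 0;
}

static void send_start(const char *msg)
{
    if (try_offload(msg) < 0)
        printf("standard path: %s\n", msg); /* fallback, as in the hunk */
}

int main(void)
{
    offload_enabled = false;
    send_start("msg A"); /* takes the fallback */
    offload_enabled = true;
    send_start("msg B"); /* offloaded */
    return 0;
}
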
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 3bec9220df04..bfec313492b3 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c | |||
@@ -447,14 +447,14 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = { | |||
447 | { }, | 447 | { }, |
448 | }; | 448 | }; |
449 | 449 | ||
450 | #define BMA180_CHANNEL(_index) { \ | 450 | #define BMA180_CHANNEL(_axis) { \ |
451 | .type = IIO_ACCEL, \ | 451 | .type = IIO_ACCEL, \ |
452 | .indexed = 1, \ | 452 | .modified = 1, \ |
453 | .channel = (_index), \ | 453 | .channel2 = IIO_MOD_##_axis, \ |
454 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ | 454 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ |
455 | BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \ | 455 | BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \ |
456 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ | 456 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ |
457 | .scan_index = (_index), \ | 457 | .scan_index = AXIS_##_axis, \ |
458 | .scan_type = { \ | 458 | .scan_type = { \ |
459 | .sign = 's', \ | 459 | .sign = 's', \ |
460 | .realbits = 14, \ | 460 | .realbits = 14, \ |
@@ -465,10 +465,10 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = { | |||
465 | } | 465 | } |
466 | 466 | ||
467 | static const struct iio_chan_spec bma180_channels[] = { | 467 | static const struct iio_chan_spec bma180_channels[] = { |
468 | BMA180_CHANNEL(AXIS_X), | 468 | BMA180_CHANNEL(X), |
469 | BMA180_CHANNEL(AXIS_Y), | 469 | BMA180_CHANNEL(Y), |
470 | BMA180_CHANNEL(AXIS_Z), | 470 | BMA180_CHANNEL(Z), |
471 | IIO_CHAN_SOFT_TIMESTAMP(4), | 471 | IIO_CHAN_SOFT_TIMESTAMP(3), |
472 | }; | 472 | }; |
473 | 473 | ||
474 | static irqreturn_t bma180_trigger_handler(int irq, void *p) | 474 | static irqreturn_t bma180_trigger_handler(int irq, void *p) |
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index e283f2f2ee2f..360259266d4f 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c | |||
@@ -1560,7 +1560,7 @@ static int max1363_probe(struct i2c_client *client, | |||
1560 | st->client = client; | 1560 | st->client = client; |
1561 | 1561 | ||
1562 | st->vref_uv = st->chip_info->int_vref_mv * 1000; | 1562 | st->vref_uv = st->chip_info->int_vref_mv * 1000; |
1563 | vref = devm_regulator_get(&client->dev, "vref"); | 1563 | vref = devm_regulator_get_optional(&client->dev, "vref"); |
1564 | if (!IS_ERR(vref)) { | 1564 | if (!IS_ERR(vref)) { |
1565 | int vref_uv; | 1565 | int vref_uv; |
1566 | 1566 | ||
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h index 2f8f9d632386..0916bf6b6c31 100644 --- a/drivers/iio/imu/adis16400.h +++ b/drivers/iio/imu/adis16400.h | |||
@@ -189,6 +189,7 @@ enum { | |||
189 | ADIS16300_SCAN_INCLI_X, | 189 | ADIS16300_SCAN_INCLI_X, |
190 | ADIS16300_SCAN_INCLI_Y, | 190 | ADIS16300_SCAN_INCLI_Y, |
191 | ADIS16400_SCAN_ADC, | 191 | ADIS16400_SCAN_ADC, |
192 | ADIS16400_SCAN_TIMESTAMP, | ||
192 | }; | 193 | }; |
193 | 194 | ||
194 | #ifdef CONFIG_IIO_BUFFER | 195 | #ifdef CONFIG_IIO_BUFFER |
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index 368660dfe135..7c582f7ae34e 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c | |||
@@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = { | |||
632 | ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14), | 632 | ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14), |
633 | ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12), | 633 | ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12), |
634 | ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12), | 634 | ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12), |
635 | IIO_CHAN_SOFT_TIMESTAMP(12) | 635 | IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), |
636 | }; | 636 | }; |
637 | 637 | ||
638 | static const struct iio_chan_spec adis16448_channels[] = { | 638 | static const struct iio_chan_spec adis16448_channels[] = { |
@@ -659,7 +659,7 @@ static const struct iio_chan_spec adis16448_channels[] = { | |||
659 | }, | 659 | }, |
660 | }, | 660 | }, |
661 | ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), | 661 | ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), |
662 | IIO_CHAN_SOFT_TIMESTAMP(11) | 662 | IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), |
663 | }; | 663 | }; |
664 | 664 | ||
665 | static const struct iio_chan_spec adis16350_channels[] = { | 665 | static const struct iio_chan_spec adis16350_channels[] = { |
@@ -677,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = { | |||
677 | ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12), | 677 | ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12), |
678 | ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12), | 678 | ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12), |
679 | ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12), | 679 | ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12), |
680 | IIO_CHAN_SOFT_TIMESTAMP(11) | 680 | IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), |
681 | }; | 681 | }; |
682 | 682 | ||
683 | static const struct iio_chan_spec adis16300_channels[] = { | 683 | static const struct iio_chan_spec adis16300_channels[] = { |
@@ -690,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = { | |||
690 | ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12), | 690 | ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12), |
691 | ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13), | 691 | ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13), |
692 | ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13), | 692 | ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13), |
693 | IIO_CHAN_SOFT_TIMESTAMP(14) | 693 | IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), |
694 | }; | 694 | }; |
695 | 695 | ||
696 | static const struct iio_chan_spec adis16334_channels[] = { | 696 | static const struct iio_chan_spec adis16334_channels[] = { |
@@ -701,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = { | |||
701 | ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14), | 701 | ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14), |
702 | ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14), | 702 | ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14), |
703 | ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12), | 703 | ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12), |
704 | IIO_CHAN_SOFT_TIMESTAMP(8) | 704 | IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), |
705 | }; | 705 | }; |
706 | 706 | ||
707 | static struct attribute *adis16400_attributes[] = { | 707 | static struct attribute *adis16400_attributes[] = { |
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c index 3d8110157f2d..94daa9fc1247 100644 --- a/drivers/iio/light/tsl2563.c +++ b/drivers/iio/light/tsl2563.c | |||
@@ -460,10 +460,14 @@ static int tsl2563_write_raw(struct iio_dev *indio_dev, | |||
460 | { | 460 | { |
461 | struct tsl2563_chip *chip = iio_priv(indio_dev); | 461 | struct tsl2563_chip *chip = iio_priv(indio_dev); |
462 | 462 | ||
463 | if (chan->channel == IIO_MOD_LIGHT_BOTH) | 463 | if (mask != IIO_CHAN_INFO_CALIBSCALE) |
464 | return -EINVAL; | ||
465 | if (chan->channel2 == IIO_MOD_LIGHT_BOTH) | ||
464 | chip->calib0 = calib_from_sysfs(val); | 466 | chip->calib0 = calib_from_sysfs(val); |
465 | else | 467 | else if (chan->channel2 == IIO_MOD_LIGHT_IR) |
466 | chip->calib1 = calib_from_sysfs(val); | 468 | chip->calib1 = calib_from_sysfs(val); |
469 | else | ||
470 | return -EINVAL; | ||
467 | 471 | ||
468 | return 0; | 472 | return 0; |
469 | } | 473 | } |
@@ -472,14 +476,14 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev, | |||
472 | struct iio_chan_spec const *chan, | 476 | struct iio_chan_spec const *chan, |
473 | int *val, | 477 | int *val, |
474 | int *val2, | 478 | int *val2, |
475 | long m) | 479 | long mask) |
476 | { | 480 | { |
477 | int ret = -EINVAL; | 481 | int ret = -EINVAL; |
478 | u32 calib0, calib1; | 482 | u32 calib0, calib1; |
479 | struct tsl2563_chip *chip = iio_priv(indio_dev); | 483 | struct tsl2563_chip *chip = iio_priv(indio_dev); |
480 | 484 | ||
481 | mutex_lock(&chip->lock); | 485 | mutex_lock(&chip->lock); |
482 | switch (m) { | 486 | switch (mask) { |
483 | case IIO_CHAN_INFO_RAW: | 487 | case IIO_CHAN_INFO_RAW: |
484 | case IIO_CHAN_INFO_PROCESSED: | 488 | case IIO_CHAN_INFO_PROCESSED: |
485 | switch (chan->type) { | 489 | switch (chan->type) { |
@@ -498,7 +502,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev, | |||
498 | ret = tsl2563_get_adc(chip); | 502 | ret = tsl2563_get_adc(chip); |
499 | if (ret) | 503 | if (ret) |
500 | goto error_ret; | 504 | goto error_ret; |
501 | if (chan->channel == 0) | 505 | if (chan->channel2 == IIO_MOD_LIGHT_BOTH) |
502 | *val = chip->data0; | 506 | *val = chip->data0; |
503 | else | 507 | else |
504 | *val = chip->data1; | 508 | *val = chip->data1; |
@@ -510,7 +514,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev, | |||
510 | break; | 514 | break; |
511 | 515 | ||
512 | case IIO_CHAN_INFO_CALIBSCALE: | 516 | case IIO_CHAN_INFO_CALIBSCALE: |
513 | if (chan->channel == 0) | 517 | if (chan->channel2 == IIO_MOD_LIGHT_BOTH) |
514 | *val = calib_to_sysfs(chip->calib0); | 518 | *val = calib_to_sysfs(chip->calib0); |
515 | else | 519 | else |
516 | *val = calib_to_sysfs(chip->calib1); | 520 | *val = calib_to_sysfs(chip->calib1); |
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index ff284e5afd95..05423543f89d 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c | |||
@@ -85,6 +85,7 @@ | |||
85 | #define AK8975_MAX_CONVERSION_TIMEOUT 500 | 85 | #define AK8975_MAX_CONVERSION_TIMEOUT 500 |
86 | #define AK8975_CONVERSION_DONE_POLL_TIME 10 | 86 | #define AK8975_CONVERSION_DONE_POLL_TIME 10 |
87 | #define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000) | 87 | #define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000) |
88 | #define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256) | ||
88 | 89 | ||
89 | /* | 90 | /* |
90 | * Per-instance context data for the device. | 91 | * Per-instance context data for the device. |
@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client) | |||
265 | * | 266 | * |
266 | * Since 1uT = 0.01 gauss, our final scale factor becomes: | 267 | * Since 1uT = 0.01 gauss, our final scale factor becomes: |
267 | * | 268 | * |
268 | * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100 | 269 | * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100 |
269 | * Hadj = H * ((ASA + 128) * 30 / 256 | 270 | * Hadj = H * ((ASA + 128) * 0.003) / 256 |
270 | * | 271 | * |
271 | * Since ASA doesn't change, we cache the resultant scale factor into the | 272 | * Since ASA doesn't change, we cache the resultant scale factor into the |
272 | * device context in ak8975_setup(). | 273 | * device context in ak8975_setup(). |
273 | */ | 274 | */ |
274 | data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8; | 275 | data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]); |
275 | data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8; | 276 | data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]); |
276 | data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8; | 277 | data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]); |
277 | 278 | ||
278 | return 0; | 279 | return 0; |
279 | } | 280 | } |
@@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev, | |||
428 | case IIO_CHAN_INFO_RAW: | 429 | case IIO_CHAN_INFO_RAW: |
429 | return ak8975_read_axis(indio_dev, chan->address, val); | 430 | return ak8975_read_axis(indio_dev, chan->address, val); |
430 | case IIO_CHAN_INFO_SCALE: | 431 | case IIO_CHAN_INFO_SCALE: |
431 | *val = data->raw_to_gauss[chan->address]; | 432 | *val = 0; |
432 | return IIO_VAL_INT; | 433 | *val2 = data->raw_to_gauss[chan->address]; |
434 | return IIO_VAL_INT_PLUS_MICRO; | ||
433 | } | 435 | } |
434 | return -EINVAL; | 436 | return -EINVAL; |
435 | } | 437 | } |
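
The ak8975 change reports the scale as an integer/micro pair (IIO_VAL_INT_PLUS_MICRO), so RAW_TO_GAUSS() now produces millionths rather than a truncated integer. A quick arithmetic check (the macro is copied from the hunk above; the ASA value is just an example):

#include <stdio.h>

/* Scale factor in micro-units, as defined in the patch above. */
#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)

int main(void)
{
    int asa = 128;                  /* mid-range sensitivity adjustment */
    int micro = RAW_TO_GAUSS(asa);

    /* Reported to user space as val = 0, val2 = micro, i.e. 0.003000 here. */
    printf("scale = 0.%06d\n", micro);
    return 0;
}
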
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c index 4b65b6d3bdb1..f66955fb3509 100644 --- a/drivers/iio/magnetometer/mag3110.c +++ b/drivers/iio/magnetometer/mag3110.c | |||
@@ -106,7 +106,7 @@ static ssize_t mag3110_show_int_plus_micros(char *buf, | |||
106 | 106 | ||
107 | while (n-- > 0) | 107 | while (n-- > 0) |
108 | len += scnprintf(buf + len, PAGE_SIZE - len, | 108 | len += scnprintf(buf + len, PAGE_SIZE - len, |
109 | "%d.%d ", vals[n][0], vals[n][1]); | 109 | "%d.%06d ", vals[n][0], vals[n][1]); |
110 | 110 | ||
111 | /* replace trailing space by newline */ | 111 | /* replace trailing space by newline */ |
112 | buf[len - 1] = '\n'; | 112 | buf[len - 1] = '\n'; |
@@ -154,6 +154,9 @@ static int mag3110_read_raw(struct iio_dev *indio_dev, | |||
154 | 154 | ||
155 | switch (mask) { | 155 | switch (mask) { |
156 | case IIO_CHAN_INFO_RAW: | 156 | case IIO_CHAN_INFO_RAW: |
157 | if (iio_buffer_enabled(indio_dev)) | ||
158 | return -EBUSY; | ||
159 | |||
157 | switch (chan->type) { | 160 | switch (chan->type) { |
158 | case IIO_MAGN: /* in 0.1 uT / LSB */ | 161 | case IIO_MAGN: /* in 0.1 uT / LSB */ |
159 | ret = mag3110_read(data, buffer); | 162 | ret = mag3110_read(data, buffer); |
@@ -199,6 +202,9 @@ static int mag3110_write_raw(struct iio_dev *indio_dev, | |||
199 | struct mag3110_data *data = iio_priv(indio_dev); | 202 | struct mag3110_data *data = iio_priv(indio_dev); |
200 | int rate; | 203 | int rate; |
201 | 204 | ||
205 | if (iio_buffer_enabled(indio_dev)) | ||
206 | return -EBUSY; | ||
207 | |||
202 | switch (mask) { | 208 | switch (mask) { |
203 | case IIO_CHAN_INFO_SAMP_FREQ: | 209 | case IIO_CHAN_INFO_SAMP_FREQ: |
204 | rate = mag3110_get_samp_freq_index(data, val, val2); | 210 | rate = mag3110_get_samp_freq_index(data, val, val2); |
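
The mag3110 format fix zero-pads the fractional part to six digits; without %06d an int-plus-micro pair such as (0, 7812) prints as 0.7812 instead of 0.007812. A tiny illustration (the numbers are examples only):

#include <stdio.h>

int main(void)
{
    /* An IIO int-plus-micro pair: integer part 0, fractional part 7812. */
    int val = 0, val2 = 7812;

    printf("old format: %d.%d\n", val, val2);   /* misreads as 0.7812  */
    printf("new format: %d.%06d\n", val, val2); /* correct:   0.007812 */
    return 0;
}
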
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c index d53cf519f42a..00400c352c1a 100644 --- a/drivers/infiniband/hw/amso1100/c2.c +++ b/drivers/infiniband/hw/amso1100/c2.c | |||
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) | |||
1082 | 1082 | ||
1083 | /* Initialize network device */ | 1083 | /* Initialize network device */ |
1084 | if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) { | 1084 | if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) { |
1085 | ret = -ENOMEM; | ||
1085 | iounmap(mmio_regs); | 1086 | iounmap(mmio_regs); |
1086 | goto bail4; | 1087 | goto bail4; |
1087 | } | 1088 | } |
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) | |||
1151 | goto bail10; | 1152 | goto bail10; |
1152 | } | 1153 | } |
1153 | 1154 | ||
1154 | if (c2_register_device(c2dev)) | 1155 | ret = c2_register_device(c2dev); |
1156 | if (ret) | ||
1155 | goto bail10; | 1157 | goto bail10; |
1156 | 1158 | ||
1157 | return 0; | 1159 | return 0; |
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c index b7c986990053..d2a6d961344b 100644 --- a/drivers/infiniband/hw/amso1100/c2_rnic.c +++ b/drivers/infiniband/hw/amso1100/c2_rnic.c | |||
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev) | |||
576 | goto bail4; | 576 | goto bail4; |
577 | 577 | ||
578 | /* Initialize the cached adapter limits */ | 578 | /* Initialize the cached adapter limits */ |
579 | if (c2_rnic_query(c2dev, &c2dev->props)) | 579 | err = c2_rnic_query(c2dev, &c2dev->props); |
580 | if (err) | ||
580 | goto bail5; | 581 | goto bail5; |
581 | 582 | ||
582 | /* Initialize the PD pool */ | 583 | /* Initialize the PD pool */ |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 45126879ad28..d286bdebe2ab 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) | |||
3352 | goto free_dst; | 3352 | goto free_dst; |
3353 | } | 3353 | } |
3354 | 3354 | ||
3355 | neigh_release(neigh); | ||
3355 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; | 3356 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; |
3356 | rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; | 3357 | rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; |
3357 | window = (__force u16) htons((__force u16)tcph->window); | 3358 | window = (__force u16) htons((__force u16)tcph->window); |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index c2702f549f10..e81c5547e647 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -347,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, | |||
347 | props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? | 347 | props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? |
348 | IB_WIDTH_4X : IB_WIDTH_1X; | 348 | IB_WIDTH_4X : IB_WIDTH_1X; |
349 | props->active_speed = IB_SPEED_QDR; | 349 | props->active_speed = IB_SPEED_QDR; |
350 | props->port_cap_flags = IB_PORT_CM_SUP; | 350 | props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS; |
351 | props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; | 351 | props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; |
352 | props->max_msg_sz = mdev->dev->caps.max_msg_sz; | 352 | props->max_msg_sz = mdev->dev->caps.max_msg_sz; |
353 | props->pkey_tbl_len = 1; | 353 | props->pkey_tbl_len = 1; |
@@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = { | |||
1357 | &dev_attr_board_id | 1357 | &dev_attr_board_id |
1358 | }; | 1358 | }; |
1359 | 1359 | ||
1360 | static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, | ||
1361 | struct net_device *dev) | ||
1362 | { | ||
1363 | memcpy(eui, dev->dev_addr, 3); | ||
1364 | memcpy(eui + 5, dev->dev_addr + 3, 3); | ||
1365 | if (vlan_id < 0x1000) { | ||
1366 | eui[3] = vlan_id >> 8; | ||
1367 | eui[4] = vlan_id & 0xff; | ||
1368 | } else { | ||
1369 | eui[3] = 0xff; | ||
1370 | eui[4] = 0xfe; | ||
1371 | } | ||
1372 | eui[0] ^= 2; | ||
1373 | } | ||
1374 | |||
1360 | static void update_gids_task(struct work_struct *work) | 1375 | static void update_gids_task(struct work_struct *work) |
1361 | { | 1376 | { |
1362 | struct update_gid_work *gw = container_of(work, struct update_gid_work, work); | 1377 | struct update_gid_work *gw = container_of(work, struct update_gid_work, work); |
@@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work) | |||
1393 | struct mlx4_cmd_mailbox *mailbox; | 1408 | struct mlx4_cmd_mailbox *mailbox; |
1394 | union ib_gid *gids; | 1409 | union ib_gid *gids; |
1395 | int err; | 1410 | int err; |
1396 | int i; | ||
1397 | struct mlx4_dev *dev = gw->dev->dev; | 1411 | struct mlx4_dev *dev = gw->dev->dev; |
1398 | 1412 | ||
1399 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 1413 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
@@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work) | |||
1405 | gids = mailbox->buf; | 1419 | gids = mailbox->buf; |
1406 | memcpy(gids, gw->gids, sizeof(gw->gids)); | 1420 | memcpy(gids, gw->gids, sizeof(gw->gids)); |
1407 | 1421 | ||
1408 | for (i = 1; i < gw->dev->num_ports + 1; i++) { | 1422 | if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) == |
1409 | if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) == | 1423 | IB_LINK_LAYER_ETHERNET) { |
1410 | IB_LINK_LAYER_ETHERNET) { | 1424 | err = mlx4_cmd(dev, mailbox->dma, |
1411 | err = mlx4_cmd(dev, mailbox->dma, | 1425 | MLX4_SET_PORT_GID_TABLE << 8 | gw->port, |
1412 | MLX4_SET_PORT_GID_TABLE << 8 | i, | 1426 | 1, MLX4_CMD_SET_PORT, |
1413 | 1, MLX4_CMD_SET_PORT, | 1427 | MLX4_CMD_TIME_CLASS_B, |
1414 | MLX4_CMD_TIME_CLASS_B, | 1428 | MLX4_CMD_WRAPPED); |
1415 | MLX4_CMD_WRAPPED); | 1429 | if (err) |
1416 | if (err) | 1430 | pr_warn(KERN_WARNING |
1417 | pr_warn(KERN_WARNING | 1431 | "set port %d command failed\n", gw->port); |
1418 | "set port %d command failed\n", i); | ||
1419 | } | ||
1420 | } | 1432 | } |
1421 | 1433 | ||
1422 | mlx4_free_cmd_mailbox(dev, mailbox); | 1434 | mlx4_free_cmd_mailbox(dev, mailbox); |
@@ -1425,7 +1437,8 @@ free: | |||
1425 | } | 1437 | } |
1426 | 1438 | ||
1427 | static int update_gid_table(struct mlx4_ib_dev *dev, int port, | 1439 | static int update_gid_table(struct mlx4_ib_dev *dev, int port, |
1428 | union ib_gid *gid, int clear) | 1440 | union ib_gid *gid, int clear, |
1441 | int default_gid) | ||
1429 | { | 1442 | { |
1430 | struct update_gid_work *work; | 1443 | struct update_gid_work *work; |
1431 | int i; | 1444 | int i; |
@@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port, | |||
1434 | int found = -1; | 1447 | int found = -1; |
1435 | int max_gids; | 1448 | int max_gids; |
1436 | 1449 | ||
1437 | max_gids = dev->dev->caps.gid_table_len[port]; | 1450 | if (default_gid) { |
1438 | for (i = 0; i < max_gids; ++i) { | 1451 | free = 0; |
1439 | if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid, | 1452 | } else { |
1440 | sizeof(*gid))) | 1453 | max_gids = dev->dev->caps.gid_table_len[port]; |
1441 | found = i; | 1454 | for (i = 1; i < max_gids; ++i) { |
1442 | 1455 | if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid, | |
1443 | if (clear) { | ||
1444 | if (found >= 0) { | ||
1445 | need_update = 1; | ||
1446 | dev->iboe.gid_table[port - 1][found] = zgid; | ||
1447 | break; | ||
1448 | } | ||
1449 | } else { | ||
1450 | if (found >= 0) | ||
1451 | break; | ||
1452 | |||
1453 | if (free < 0 && | ||
1454 | !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, | ||
1455 | sizeof(*gid))) | 1456 | sizeof(*gid))) |
1456 | free = i; | 1457 | found = i; |
1458 | |||
1459 | if (clear) { | ||
1460 | if (found >= 0) { | ||
1461 | need_update = 1; | ||
1462 | dev->iboe.gid_table[port - 1][found] = | ||
1463 | zgid; | ||
1464 | break; | ||
1465 | } | ||
1466 | } else { | ||
1467 | if (found >= 0) | ||
1468 | break; | ||
1469 | |||
1470 | if (free < 0 && | ||
1471 | !memcmp(&dev->iboe.gid_table[port - 1][i], | ||
1472 | &zgid, sizeof(*gid))) | ||
1473 | free = i; | ||
1474 | } | ||
1457 | } | 1475 | } |
1458 | } | 1476 | } |
1459 | 1477 | ||
@@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port, | |||
1478 | return 0; | 1496 | return 0; |
1479 | } | 1497 | } |
1480 | 1498 | ||
1481 | static int reset_gid_table(struct mlx4_ib_dev *dev) | 1499 | static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid) |
1482 | { | 1500 | { |
1483 | struct update_gid_work *work; | 1501 | gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); |
1502 | mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev); | ||
1503 | } | ||
1504 | |||
1484 | 1505 | ||
1506 | static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port) | ||
1507 | { | ||
1508 | struct update_gid_work *work; | ||
1485 | 1509 | ||
1486 | work = kzalloc(sizeof(*work), GFP_ATOMIC); | 1510 | work = kzalloc(sizeof(*work), GFP_ATOMIC); |
1487 | if (!work) | 1511 | if (!work) |
1488 | return -ENOMEM; | 1512 | return -ENOMEM; |
1489 | memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table)); | 1513 | |
1514 | memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids)); | ||
1490 | memset(work->gids, 0, sizeof(work->gids)); | 1515 | memset(work->gids, 0, sizeof(work->gids)); |
1491 | INIT_WORK(&work->work, reset_gids_task); | 1516 | INIT_WORK(&work->work, reset_gids_task); |
1492 | work->dev = dev; | 1517 | work->dev = dev; |
1518 | work->port = port; | ||
1493 | queue_work(wq, &work->work); | 1519 | queue_work(wq, &work->work); |
1494 | return 0; | 1520 | return 0; |
1495 | } | 1521 | } |
@@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, | |||
1502 | struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ? | 1528 | struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ? |
1503 | rdma_vlan_dev_real_dev(event_netdev) : | 1529 | rdma_vlan_dev_real_dev(event_netdev) : |
1504 | event_netdev; | 1530 | event_netdev; |
1531 | union ib_gid default_gid; | ||
1532 | |||
1533 | mlx4_make_default_gid(real_dev, &default_gid); | ||
1534 | |||
1535 | if (!memcmp(gid, &default_gid, sizeof(*gid))) | ||
1536 | return 0; | ||
1505 | 1537 | ||
1506 | if (event != NETDEV_DOWN && event != NETDEV_UP) | 1538 | if (event != NETDEV_DOWN && event != NETDEV_UP) |
1507 | return 0; | 1539 | return 0; |
@@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, | |||
1520 | (!netif_is_bond_master(real_dev) && | 1552 | (!netif_is_bond_master(real_dev) && |
1521 | (real_dev == iboe->netdevs[port - 1]))) | 1553 | (real_dev == iboe->netdevs[port - 1]))) |
1522 | update_gid_table(ibdev, port, gid, | 1554 | update_gid_table(ibdev, port, gid, |
1523 | event == NETDEV_DOWN); | 1555 | event == NETDEV_DOWN, 0); |
1524 | 1556 | ||
1525 | spin_unlock(&iboe->lock); | 1557 | spin_unlock(&iboe->lock); |
1526 | return 0; | 1558 | return 0; |
@@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev, | |||
1536 | rdma_vlan_dev_real_dev(dev) : dev; | 1568 | rdma_vlan_dev_real_dev(dev) : dev; |
1537 | 1569 | ||
1538 | iboe = &ibdev->iboe; | 1570 | iboe = &ibdev->iboe; |
1539 | spin_lock(&iboe->lock); | ||
1540 | 1571 | ||
1541 | for (port = 1; port <= MLX4_MAX_PORTS; ++port) | 1572 | for (port = 1; port <= MLX4_MAX_PORTS; ++port) |
1542 | if ((netif_is_bond_master(real_dev) && | 1573 | if ((netif_is_bond_master(real_dev) && |
@@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev, | |||
1545 | (real_dev == iboe->netdevs[port - 1]))) | 1576 | (real_dev == iboe->netdevs[port - 1]))) |
1546 | break; | 1577 | break; |
1547 | 1578 | ||
1548 | spin_unlock(&iboe->lock); | ||
1549 | |||
1550 | if ((port == 0) || (port > MLX4_MAX_PORTS)) | 1579 | if ((port == 0) || (port > MLX4_MAX_PORTS)) |
1551 | return 0; | 1580 | return 0; |
1552 | else | 1581 | else |
@@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, | |||
1607 | /*ifa->ifa_address;*/ | 1636 | /*ifa->ifa_address;*/ |
1608 | ipv6_addr_set_v4mapped(ifa->ifa_address, | 1637 | ipv6_addr_set_v4mapped(ifa->ifa_address, |
1609 | (struct in6_addr *)&gid); | 1638 | (struct in6_addr *)&gid); |
1610 | update_gid_table(ibdev, port, &gid, 0); | 1639 | update_gid_table(ibdev, port, &gid, 0, 0); |
1611 | } | 1640 | } |
1612 | endfor_ifa(in_dev); | 1641 | endfor_ifa(in_dev); |
1613 | in_dev_put(in_dev); | 1642 | in_dev_put(in_dev); |
@@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, | |||
1619 | read_lock_bh(&in6_dev->lock); | 1648 | read_lock_bh(&in6_dev->lock); |
1620 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { | 1649 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { |
1621 | pgid = (union ib_gid *)&ifp->addr; | 1650 | pgid = (union ib_gid *)&ifp->addr; |
1622 | update_gid_table(ibdev, port, pgid, 0); | 1651 | update_gid_table(ibdev, port, pgid, 0, 0); |
1623 | } | 1652 | } |
1624 | read_unlock_bh(&in6_dev->lock); | 1653 | read_unlock_bh(&in6_dev->lock); |
1625 | in6_dev_put(in6_dev); | 1654 | in6_dev_put(in6_dev); |
@@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, | |||
1627 | #endif | 1656 | #endif |
1628 | } | 1657 | } |
1629 | 1658 | ||
1659 | static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev, | ||
1660 | struct net_device *dev, u8 port) | ||
1661 | { | ||
1662 | union ib_gid gid; | ||
1663 | mlx4_make_default_gid(dev, &gid); | ||
1664 | update_gid_table(ibdev, port, &gid, 0, 1); | ||
1665 | } | ||
1666 | |||
1630 | static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) | 1667 | static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) |
1631 | { | 1668 | { |
1632 | struct net_device *dev; | 1669 | struct net_device *dev; |
1670 | struct mlx4_ib_iboe *iboe = &ibdev->iboe; | ||
1671 | int i; | ||
1633 | 1672 | ||
1634 | if (reset_gid_table(ibdev)) | 1673 | for (i = 1; i <= ibdev->num_ports; ++i) |
1635 | return -1; | 1674 | if (reset_gid_table(ibdev, i)) |
1675 | return -1; | ||
1636 | 1676 | ||
1637 | read_lock(&dev_base_lock); | 1677 | read_lock(&dev_base_lock); |
1678 | spin_lock(&iboe->lock); | ||
1638 | 1679 | ||
1639 | for_each_netdev(&init_net, dev) { | 1680 | for_each_netdev(&init_net, dev) { |
1640 | u8 port = mlx4_ib_get_dev_port(dev, ibdev); | 1681 | u8 port = mlx4_ib_get_dev_port(dev, ibdev); |
@@ -1642,6 +1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) | |||
1642 | mlx4_ib_get_dev_addr(dev, ibdev, port); | 1683 | mlx4_ib_get_dev_addr(dev, ibdev, port); |
1643 | } | 1684 | } |
1644 | 1685 | ||
1686 | spin_unlock(&iboe->lock); | ||
1645 | read_unlock(&dev_base_lock); | 1687 | read_unlock(&dev_base_lock); |
1646 | 1688 | ||
1647 | return 0; | 1689 | return 0; |
@@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) | |||
1656 | 1698 | ||
1657 | spin_lock(&iboe->lock); | 1699 | spin_lock(&iboe->lock); |
1658 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { | 1700 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { |
1701 | enum ib_port_state port_state = IB_PORT_NOP; | ||
1659 | struct net_device *old_master = iboe->masters[port - 1]; | 1702 | struct net_device *old_master = iboe->masters[port - 1]; |
1703 | struct net_device *curr_netdev; | ||
1660 | struct net_device *curr_master; | 1704 | struct net_device *curr_master; |
1705 | |||
1661 | iboe->netdevs[port - 1] = | 1706 | iboe->netdevs[port - 1] = |
1662 | mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); | 1707 | mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); |
1708 | if (iboe->netdevs[port - 1]) | ||
1709 | mlx4_ib_set_default_gid(ibdev, | ||
1710 | iboe->netdevs[port - 1], port); | ||
1711 | curr_netdev = iboe->netdevs[port - 1]; | ||
1663 | 1712 | ||
1664 | if (iboe->netdevs[port - 1] && | 1713 | if (iboe->netdevs[port - 1] && |
1665 | netif_is_bond_slave(iboe->netdevs[port - 1])) { | 1714 | netif_is_bond_slave(iboe->netdevs[port - 1])) { |
1666 | rtnl_lock(); | ||
1667 | iboe->masters[port - 1] = netdev_master_upper_dev_get( | 1715 | iboe->masters[port - 1] = netdev_master_upper_dev_get( |
1668 | iboe->netdevs[port - 1]); | 1716 | iboe->netdevs[port - 1]); |
1669 | rtnl_unlock(); | 1717 | } else { |
1718 | iboe->masters[port - 1] = NULL; | ||
1670 | } | 1719 | } |
1671 | curr_master = iboe->masters[port - 1]; | 1720 | curr_master = iboe->masters[port - 1]; |
1672 | 1721 | ||
1722 | if (curr_netdev) { | ||
1723 | port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? | ||
1724 | IB_PORT_ACTIVE : IB_PORT_DOWN; | ||
1725 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | ||
1726 | } else { | ||
1727 | reset_gid_table(ibdev, port); | ||
1728 | } | ||
1729 | /* if using bonding/team and a slave port is down, we don't want the bond IP | ||
1730 | * based gids in the table since flows that select port by gid may get | ||
1731 | * the down port. | ||
1732 | */ | ||
1733 | if (curr_master && (port_state == IB_PORT_DOWN)) { | ||
1734 | reset_gid_table(ibdev, port); | ||
1735 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | ||
1736 | } | ||
1673 | /* if bonding is used it is possible that we add it to masters | 1737 | /* if bonding is used it is possible that we add it to masters |
1674 | only after IP address is assigned to the net bonding | 1738 | * only after IP address is assigned to the net bonding |
1675 | interface */ | 1739 | * interface. |
1676 | if (curr_master && (old_master != curr_master)) | 1740 | */ |
1741 | if (curr_master && (old_master != curr_master)) { | ||
1742 | reset_gid_table(ibdev, port); | ||
1743 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | ||
1677 | mlx4_ib_get_dev_addr(curr_master, ibdev, port); | 1744 | mlx4_ib_get_dev_addr(curr_master, ibdev, port); |
1745 | } | ||
1746 | |||
1747 | if (!curr_master && (old_master != curr_master)) { | ||
1748 | reset_gid_table(ibdev, port); | ||
1749 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | ||
1750 | mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); | ||
1751 | } | ||
1678 | } | 1752 | } |
1679 | 1753 | ||
1680 | spin_unlock(&iboe->lock); | 1754 | spin_unlock(&iboe->lock); |
@@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
1810 | int i, j; | 1884 | int i, j; |
1811 | int err; | 1885 | int err; |
1812 | struct mlx4_ib_iboe *iboe; | 1886 | struct mlx4_ib_iboe *iboe; |
1887 | int ib_num_ports = 0; | ||
1813 | 1888 | ||
1814 | pr_info_once("%s", mlx4_ib_version); | 1889 | pr_info_once("%s", mlx4_ib_version); |
1815 | 1890 | ||
@@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
1985 | ibdev->counters[i] = -1; | 2060 | ibdev->counters[i] = -1; |
1986 | } | 2061 | } |
1987 | 2062 | ||
2063 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) | ||
2064 | ib_num_ports++; | ||
2065 | |||
1988 | spin_lock_init(&ibdev->sm_lock); | 2066 | spin_lock_init(&ibdev->sm_lock); |
1989 | mutex_init(&ibdev->cap_mask_mutex); | 2067 | mutex_init(&ibdev->cap_mask_mutex); |
1990 | 2068 | ||
1991 | if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { | 2069 | if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && |
2070 | ib_num_ports) { | ||
1992 | ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; | 2071 | ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; |
1993 | err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, | 2072 | err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, |
1994 | MLX4_IB_UC_STEER_QPN_ALIGN, | 2073 | MLX4_IB_UC_STEER_QPN_ALIGN, |
@@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2051 | } | 2130 | } |
2052 | } | 2131 | } |
2053 | #endif | 2132 | #endif |
2133 | for (i = 1 ; i <= ibdev->num_ports ; ++i) | ||
2134 | reset_gid_table(ibdev, i); | ||
2135 | rtnl_lock(); | ||
2054 | mlx4_ib_scan_netdevs(ibdev); | 2136 | mlx4_ib_scan_netdevs(ibdev); |
2137 | rtnl_unlock(); | ||
2055 | mlx4_ib_init_gid_table(ibdev); | 2138 | mlx4_ib_init_gid_table(ibdev); |
2056 | } | 2139 | } |
2057 | 2140 | ||
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig index 8e6aebfaf8a4..10df386c6344 100644 --- a/drivers/infiniband/hw/mlx5/Kconfig +++ b/drivers/infiniband/hw/mlx5/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config MLX5_INFINIBAND | 1 | config MLX5_INFINIBAND |
2 | tristate "Mellanox Connect-IB HCA support" | 2 | tristate "Mellanox Connect-IB HCA support" |
3 | depends on NETDEVICES && ETHERNET && PCI && X86 | 3 | depends on NETDEVICES && ETHERNET && PCI |
4 | select NET_VENDOR_MELLANOX | 4 | select NET_VENDOR_MELLANOX |
5 | select MLX5_CORE | 5 | select MLX5_CORE |
6 | ---help--- | 6 | ---help--- |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 9660d093f8cf..aa03e732b6a8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
261 | props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | | 261 | props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | |
262 | IB_DEVICE_PORT_ACTIVE_EVENT | | 262 | IB_DEVICE_PORT_ACTIVE_EVENT | |
263 | IB_DEVICE_SYS_IMAGE_GUID | | 263 | IB_DEVICE_SYS_IMAGE_GUID | |
264 | IB_DEVICE_RC_RNR_NAK_GEN | | 264 | IB_DEVICE_RC_RNR_NAK_GEN; |
265 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; | ||
266 | flags = dev->mdev.caps.flags; | 265 | flags = dev->mdev.caps.flags; |
267 | if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) | 266 | if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) |
268 | props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; | 267 | props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; |
@@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
536 | struct ib_udata *udata) | 535 | struct ib_udata *udata) |
537 | { | 536 | { |
538 | struct mlx5_ib_dev *dev = to_mdev(ibdev); | 537 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
539 | struct mlx5_ib_alloc_ucontext_req req; | 538 | struct mlx5_ib_alloc_ucontext_req_v2 req; |
540 | struct mlx5_ib_alloc_ucontext_resp resp; | 539 | struct mlx5_ib_alloc_ucontext_resp resp; |
541 | struct mlx5_ib_ucontext *context; | 540 | struct mlx5_ib_ucontext *context; |
542 | struct mlx5_uuar_info *uuari; | 541 | struct mlx5_uuar_info *uuari; |
543 | struct mlx5_uar *uars; | 542 | struct mlx5_uar *uars; |
544 | int gross_uuars; | 543 | int gross_uuars; |
545 | int num_uars; | 544 | int num_uars; |
545 | int ver; | ||
546 | int uuarn; | 546 | int uuarn; |
547 | int err; | 547 | int err; |
548 | int i; | 548 | int i; |
549 | int reqlen; | ||
549 | 550 | ||
550 | if (!dev->ib_active) | 551 | if (!dev->ib_active) |
551 | return ERR_PTR(-EAGAIN); | 552 | return ERR_PTR(-EAGAIN); |
552 | 553 | ||
553 | err = ib_copy_from_udata(&req, udata, sizeof(req)); | 554 | memset(&req, 0, sizeof(req)); |
555 | reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr); | ||
556 | if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) | ||
557 | ver = 0; | ||
558 | else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2)) | ||
559 | ver = 2; | ||
560 | else | ||
561 | return ERR_PTR(-EINVAL); | ||
562 | |||
563 | err = ib_copy_from_udata(&req, udata, reqlen); | ||
554 | if (err) | 564 | if (err) |
555 | return ERR_PTR(err); | 565 | return ERR_PTR(err); |
556 | 566 | ||
567 | if (req.flags || req.reserved) | ||
568 | return ERR_PTR(-EINVAL); | ||
569 | |||
557 | if (req.total_num_uuars > MLX5_MAX_UUARS) | 570 | if (req.total_num_uuars > MLX5_MAX_UUARS) |
558 | return ERR_PTR(-ENOMEM); | 571 | return ERR_PTR(-ENOMEM); |
559 | 572 | ||
@@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
626 | if (err) | 639 | if (err) |
627 | goto out_uars; | 640 | goto out_uars; |
628 | 641 | ||
642 | uuari->ver = ver; | ||
629 | uuari->num_low_latency_uuars = req.num_low_latency_uuars; | 643 | uuari->num_low_latency_uuars = req.num_low_latency_uuars; |
630 | uuari->uars = uars; | 644 | uuari->uars = uars; |
631 | uuari->num_uars = num_uars; | 645 | uuari->num_uars = num_uars; |
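
The mlx5 alloc_ucontext change selects the user ABI version from the size of the request that user space passed in, accepting either the original layout or the new _v2 layout with flags/reserved fields. A compact sketch of that size-based versioning (the struct layouts mirror the hunk; the handling around them is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct req_v1 { uint32_t total_num_uuars, num_low_latency_uuars; };
struct req_v2 { uint32_t total_num_uuars, num_low_latency_uuars, flags, reserved; };

/* Pick the ABI version from the number of bytes the caller provided. */
static int parse_req(const void *data, size_t inlen, struct req_v2 *req, int *ver)
{
    memset(req, 0, sizeof(*req));
    if (inlen == sizeof(struct req_v1))
        *ver = 0;
    else if (inlen == sizeof(struct req_v2))
        *ver = 2;
    else
        return -1;              /* unknown layout: reject, as the driver does */
    memcpy(req, data, inlen);   /* v1 callers leave flags/reserved zeroed */
    return 0;
}

int main(void)
{
    struct req_v1 old = { 16, 4 };
    struct req_v2 req;
    int ver;

    if (parse_req(&old, sizeof(old), &req, &ver) == 0)
        printf("ver=%d uuars=%u flags=%u\n", ver,
               (unsigned)req.total_num_uuars, (unsigned)req.flags);
    return 0;
}
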
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ae37fb9bf262..7dfe8a1c84cf 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type) | |||
216 | 216 | ||
217 | case IB_QPT_UC: | 217 | case IB_QPT_UC: |
218 | size += sizeof(struct mlx5_wqe_ctrl_seg) + | 218 | size += sizeof(struct mlx5_wqe_ctrl_seg) + |
219 | sizeof(struct mlx5_wqe_raddr_seg); | 219 | sizeof(struct mlx5_wqe_raddr_seg) + |
220 | sizeof(struct mlx5_wqe_umr_ctrl_seg) + | ||
221 | sizeof(struct mlx5_mkey_seg); | ||
220 | break; | 222 | break; |
221 | 223 | ||
222 | case IB_QPT_UD: | 224 | case IB_QPT_UD: |
@@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari, | |||
428 | break; | 430 | break; |
429 | 431 | ||
430 | case MLX5_IB_LATENCY_CLASS_MEDIUM: | 432 | case MLX5_IB_LATENCY_CLASS_MEDIUM: |
431 | uuarn = alloc_med_class_uuar(uuari); | 433 | if (uuari->ver < 2) |
434 | uuarn = -ENOMEM; | ||
435 | else | ||
436 | uuarn = alloc_med_class_uuar(uuari); | ||
432 | break; | 437 | break; |
433 | 438 | ||
434 | case MLX5_IB_LATENCY_CLASS_HIGH: | 439 | case MLX5_IB_LATENCY_CLASS_HIGH: |
435 | uuarn = alloc_high_class_uuar(uuari); | 440 | if (uuari->ver < 2) |
441 | uuarn = -ENOMEM; | ||
442 | else | ||
443 | uuarn = alloc_high_class_uuar(uuari); | ||
436 | break; | 444 | break; |
437 | 445 | ||
438 | case MLX5_IB_LATENCY_CLASS_FAST_PATH: | 446 | case MLX5_IB_LATENCY_CLASS_FAST_PATH: |
@@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, | |||
657 | int err; | 665 | int err; |
658 | 666 | ||
659 | uuari = &dev->mdev.priv.uuari; | 667 | uuari = &dev->mdev.priv.uuari; |
660 | if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) | 668 | if (init_attr->create_flags) |
661 | qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; | 669 | return -EINVAL; |
662 | 670 | ||
663 | if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) | 671 | if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) |
664 | lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; | 672 | lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; |
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h index 32a2a5dfc523..0f4f8e42a17f 100644 --- a/drivers/infiniband/hw/mlx5/user.h +++ b/drivers/infiniband/hw/mlx5/user.h | |||
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req { | |||
62 | __u32 num_low_latency_uuars; | 62 | __u32 num_low_latency_uuars; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | struct mlx5_ib_alloc_ucontext_req_v2 { | ||
66 | __u32 total_num_uuars; | ||
67 | __u32 num_low_latency_uuars; | ||
68 | __u32 flags; | ||
69 | __u32 reserved; | ||
70 | }; | ||
71 | |||
65 | struct mlx5_ib_alloc_ucontext_resp { | 72 | struct mlx5_ib_alloc_ucontext_resp { |
66 | __u32 qp_tab_size; | 73 | __u32 qp_tab_size; |
67 | __u32 bf_reg_size; | 74 | __u32 bf_reg_size; |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 429141078eec..353c7b05a90a 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) | |||
675 | INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); | 675 | INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); |
676 | 676 | ||
677 | /* Initialize network devices */ | 677 | /* Initialize network devices */ |
678 | if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) | 678 | netdev = nes_netdev_init(nesdev, mmio_regs); |
679 | if (netdev == NULL) { | ||
680 | ret = -ENOMEM; | ||
679 | goto bail7; | 681 | goto bail7; |
682 | } | ||
680 | 683 | ||
681 | /* Register network device */ | 684 | /* Register network device */ |
682 | ret = register_netdev(netdev); | 685 | ret = register_netdev(netdev); |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 2ca86ca818bd..1a8a945efa60 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c | |||
@@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev, | |||
127 | 127 | ||
128 | is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; | 128 | is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; |
129 | if (is_vlan) | 129 | if (is_vlan) |
130 | netdev = vlan_dev_real_dev(netdev); | 130 | netdev = rdma_vlan_dev_real_dev(netdev); |
131 | 131 | ||
132 | rcu_read_lock(); | 132 | rcu_read_lock(); |
133 | list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { | 133 | list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index aa92f40c9d50..e0cc201be41a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev, | |||
176 | props->port_cap_flags = | 176 | props->port_cap_flags = |
177 | IB_PORT_CM_SUP | | 177 | IB_PORT_CM_SUP | |
178 | IB_PORT_REINIT_SUP | | 178 | IB_PORT_REINIT_SUP | |
179 | IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP; | 179 | IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS; |
180 | props->gid_tbl_len = OCRDMA_MAX_SGID; | 180 | props->gid_tbl_len = OCRDMA_MAX_SGID; |
181 | props->pkey_tbl_len = 1; | 181 | props->pkey_tbl_len = 1; |
182 | props->bad_pkey_cntr = 0; | 182 | props->bad_pkey_cntr = 0; |
@@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp, | |||
1416 | OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> | 1416 | OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> |
1417 | OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; | 1417 | OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; |
1418 | qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & | 1418 | qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & |
1419 | OCRDMA_QP_PARAMS_SQ_PSN_MASK) >> | 1419 | OCRDMA_QP_PARAMS_TCLASS_MASK) >> |
1420 | OCRDMA_QP_PARAMS_TCLASS_SHIFT; | 1420 | OCRDMA_QP_PARAMS_TCLASS_SHIFT; |
1421 | 1421 | ||
1422 | qp_attr->ah_attr.ah_flags = IB_AH_GRH; | 1422 | qp_attr->ah_attr.ah_flags = IB_AH_GRH; |
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 5bfc02f450e6..d1bd21319d7d 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | |||
2395 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | 2395 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); |
2396 | qib_write_kreg(dd, kr_scratch, 0ULL); | 2396 | qib_write_kreg(dd, kr_scratch, 0ULL); |
2397 | 2397 | ||
2398 | /* ensure previous Tx parameters are not still forced */ | ||
2399 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | ||
2400 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
2401 | reset_tx_deemphasis_override)); | ||
2402 | |||
2398 | if (qib_compat_ddr_negotiate) { | 2403 | if (qib_compat_ddr_negotiate) { |
2399 | ppd->cpspec->ibdeltainprog = 1; | 2404 | ppd->cpspec->ibdeltainprog = 1; |
2400 | ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, | 2405 | ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c index 7ecc6061f1f4..f8dfd76be89f 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c | |||
@@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow, | |||
629 | { | 629 | { |
630 | enum usnic_transport_type trans_type = qp_flow->trans_type; | 630 | enum usnic_transport_type trans_type = qp_flow->trans_type; |
631 | int err; | 631 | int err; |
632 | uint16_t port_num = 0; | ||
632 | 633 | ||
633 | switch (trans_type) { | 634 | switch (trans_type) { |
634 | case USNIC_TRANSPORT_ROCE_CUSTOM: | 635 | case USNIC_TRANSPORT_ROCE_CUSTOM: |
@@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow, | |||
637 | case USNIC_TRANSPORT_IPV4_UDP: | 638 | case USNIC_TRANSPORT_IPV4_UDP: |
638 | err = usnic_transport_sock_get_addr(qp_flow->udp.sock, | 639 | err = usnic_transport_sock_get_addr(qp_flow->udp.sock, |
639 | NULL, NULL, | 640 | NULL, NULL, |
640 | (uint16_t *) id); | 641 | &port_num); |
641 | if (err) | 642 | if (err) |
642 | return err; | 643 | return err; |
644 | /* | ||
645 | * Copy port_num to stack first and then to *id, | ||
646 | * so that the short to int cast works for little | ||
647 | * and big endian systems. | ||
648 | */ | ||
649 | *id = port_num; | ||
643 | break; | 650 | break; |
644 | default: | 651 | default: |
645 | usnic_err("Unsupported transport %u\n", trans_type); | 652 | usnic_err("Unsupported transport %u\n", trans_type); |
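In the usnic hunk above, *id is an int, but the old code passed it to usnic_transport_sock_get_addr() as a uint16_t pointer, so only two of its bytes were written; which two depends on host byte order, and on big-endian machines the port landed in the upper half of the id. Fetching into a real uint16_t and assigning lets ordinary integer conversion widen it the same way everywhere (the cast was also a strict-aliasing violation, for what it's worth). A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

static void get_port(uint16_t *out) { *out = 0x1234; }  /* stands in for the sock helper */

int main(void)
{
	int id = 0;
	uint16_t port;

	/* Old pattern: writes 2 of id's 4 bytes; on big-endian those are the
	 * high-order bytes, so id becomes 0x12340000 instead of 0x1234. */
	get_port((uint16_t *)&id);
	printf("via cast:       0x%08x\n", (unsigned)id);

	/* New pattern: read into a u16, then widen by assignment. */
	id = 0;
	get_port(&port);
	id = port;
	printf("via assignment: 0x%08x\n", (unsigned)id);
	return 0;
}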
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 538822684d5b..334f34b1cd46 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc, | |||
610 | ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, | 610 | ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, |
611 | ISER_HEADERS_LEN, DMA_TO_DEVICE); | 611 | ISER_HEADERS_LEN, DMA_TO_DEVICE); |
612 | kmem_cache_free(ig.desc_cache, tx_desc); | 612 | kmem_cache_free(ig.desc_cache, tx_desc); |
613 | tx_desc = NULL; | ||
613 | } | 614 | } |
614 | 615 | ||
615 | atomic_dec(&ib_conn->post_send_buf_count); | 616 | atomic_dec(&ib_conn->post_send_buf_count); |
616 | 617 | ||
617 | if (tx_desc->type == ISCSI_TX_CONTROL) { | 618 | if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) { |
618 | /* this arithmetic is legal by libiscsi dd_data allocation */ | 619 | /* this arithmetic is legal by libiscsi dd_data allocation */ |
619 | task = (void *) ((long)(void *)tx_desc - | 620 | task = (void *) ((long)(void *)tx_desc - |
620 | sizeof(struct iscsi_task)); | 621 | sizeof(struct iscsi_task)); |
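The iser_snd_completion hunk frees the descriptor on the login path and previously went on to read tx_desc->type from the freed memory; the fix clears the pointer right after kmem_cache_free() and re-checks it before the dereference. The same defensive shape in a minimal, self-contained form (types are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct desc { int type; };

static void complete_tx(struct desc *d, int allocated_from_cache)
{
	if (allocated_from_cache) {
		free(d);        /* stands in for kmem_cache_free() */
		d = NULL;       /* drop the stale pointer immediately */
	}

	if (d && d->type == 1)  /* later users must tolerate the NULL */
		puts("control-type completion");
}

int main(void)
{
	struct desc *d = malloc(sizeof(*d));
	if (!d)
		return 1;
	d->type = 1;
	complete_tx(d, 1);      /* freed above: the guard keeps this safe */
	return 0;
}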
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index afe95674008b..ca37edef2791 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id) | |||
652 | /* getting here when the state is UP means that the conn is being * | 652 | /* getting here when the state is UP means that the conn is being * |
653 | * terminated asynchronously from the iSCSI layer's perspective. */ | 653 | * terminated asynchronously from the iSCSI layer's perspective. */ |
654 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, | 654 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, |
655 | ISER_CONN_TERMINATING)) | 655 | ISER_CONN_TERMINATING)){ |
656 | iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, | 656 | if (ib_conn->iser_conn) |
657 | ISCSI_ERR_CONN_FAILED); | 657 | iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, |
658 | ISCSI_ERR_CONN_FAILED); | ||
659 | else | ||
660 | iser_err("iscsi_iser connection isn't bound\n"); | ||
661 | } | ||
658 | 662 | ||
659 | /* Complete the termination process if no posts are pending */ | 663 | /* Complete the termination process if no posts are pending */ |
660 | if (ib_conn->post_recv_buf_count == 0 && | 664 | if (ib_conn->post_recv_buf_count == 0 && |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 2b161be3c1a3..d18d08a076e8 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -453,6 +453,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) | |||
453 | if (ret) { | 453 | if (ret) { |
454 | pr_err("Failed to create fastreg descriptor err=%d\n", | 454 | pr_err("Failed to create fastreg descriptor err=%d\n", |
455 | ret); | 455 | ret); |
456 | kfree(fr_desc); | ||
456 | goto err; | 457 | goto err; |
457 | } | 458 | } |
458 | 459 | ||
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 520a7e5a490b..0e537d8d0e47 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -3666,9 +3666,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size( | |||
3666 | unsigned long val; | 3666 | unsigned long val; |
3667 | int ret; | 3667 | int ret; |
3668 | 3668 | ||
3669 | ret = strict_strtoul(page, 0, &val); | 3669 | ret = kstrtoul(page, 0, &val); |
3670 | if (ret < 0) { | 3670 | if (ret < 0) { |
3671 | pr_err("strict_strtoul() failed with ret: %d\n", ret); | 3671 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
3672 | return -EINVAL; | 3672 | return -EINVAL; |
3673 | } | 3673 | } |
3674 | if (val > MAX_SRPT_RDMA_SIZE) { | 3674 | if (val > MAX_SRPT_RDMA_SIZE) { |
@@ -3706,9 +3706,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size( | |||
3706 | unsigned long val; | 3706 | unsigned long val; |
3707 | int ret; | 3707 | int ret; |
3708 | 3708 | ||
3709 | ret = strict_strtoul(page, 0, &val); | 3709 | ret = kstrtoul(page, 0, &val); |
3710 | if (ret < 0) { | 3710 | if (ret < 0) { |
3711 | pr_err("strict_strtoul() failed with ret: %d\n", ret); | 3711 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
3712 | return -EINVAL; | 3712 | return -EINVAL; |
3713 | } | 3713 | } |
3714 | if (val > MAX_SRPT_RSP_SIZE) { | 3714 | if (val > MAX_SRPT_RSP_SIZE) { |
@@ -3746,9 +3746,9 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size( | |||
3746 | unsigned long val; | 3746 | unsigned long val; |
3747 | int ret; | 3747 | int ret; |
3748 | 3748 | ||
3749 | ret = strict_strtoul(page, 0, &val); | 3749 | ret = kstrtoul(page, 0, &val); |
3750 | if (ret < 0) { | 3750 | if (ret < 0) { |
3751 | pr_err("strict_strtoul() failed with ret: %d\n", ret); | 3751 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
3752 | return -EINVAL; | 3752 | return -EINVAL; |
3753 | } | 3753 | } |
3754 | if (val > MAX_SRPT_SRQ_SIZE) { | 3754 | if (val > MAX_SRPT_SRQ_SIZE) { |
@@ -3793,7 +3793,7 @@ static ssize_t srpt_tpg_store_enable( | |||
3793 | unsigned long tmp; | 3793 | unsigned long tmp; |
3794 | int ret; | 3794 | int ret; |
3795 | 3795 | ||
3796 | ret = strict_strtoul(page, 0, &tmp); | 3796 | ret = kstrtoul(page, 0, &tmp); |
3797 | if (ret < 0) { | 3797 | if (ret < 0) { |
3798 | printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); | 3798 | printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); |
3799 | return -EINVAL; | 3799 | return -EINVAL; |
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 0c707e4f4eaf..a4c7306ff43d 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); | |||
210 | #define GC_MARK_RECLAIMABLE 0 | 210 | #define GC_MARK_RECLAIMABLE 0 |
211 | #define GC_MARK_DIRTY 1 | 211 | #define GC_MARK_DIRTY 1 |
212 | #define GC_MARK_METADATA 2 | 212 | #define GC_MARK_METADATA 2 |
213 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); | 213 | #define GC_SECTORS_USED_SIZE 13 |
214 | #define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE)) | ||
215 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE); | ||
214 | BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); | 216 | BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); |
215 | 217 | ||
216 | #include "journal.h" | 218 | #include "journal.h" |
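GC_SECTORS_USED spans GC_SECTORS_USED_SIZE = 13 bits starting at bit 2, so its largest representable value is 2^13 - 1 = 8191, which is exactly what ~(~0ULL << 13) expands to; the clamp in btree.c further down previously used (1 << 14) - 1 = 16383, which does not fit in the field and could bleed into the neighbouring GC_MOVE bit at offset 15. A compile-time check of that arithmetic:

#include <assert.h>

#define GC_SECTORS_USED_SIZE 13
#define MAX_GC_SECTORS_USED  (~(~0ULL << GC_SECTORS_USED_SIZE))

static_assert(MAX_GC_SECTORS_USED == 8191, "a 13-bit field tops out at 2^13 - 1");
static_assert((1 << 14) - 1 > MAX_GC_SECTORS_USED,
	      "the old clamp overflowed the field");

int main(void) { return 0; }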
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 4f6b5940e609..3f74b4b0747b 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c | |||
@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set) | |||
23 | for (k = i->start; k < bset_bkey_last(i); k = next) { | 23 | for (k = i->start; k < bset_bkey_last(i); k = next) { |
24 | next = bkey_next(k); | 24 | next = bkey_next(k); |
25 | 25 | ||
26 | printk(KERN_ERR "block %u key %zi/%u: ", set, | 26 | printk(KERN_ERR "block %u key %li/%u: ", set, |
27 | (uint64_t *) k - i->d, i->keys); | 27 | (uint64_t *) k - i->d, i->keys); |
28 | 28 | ||
29 | if (b->ops->key_dump) | 29 | if (b->ops->key_dump) |
@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, | |||
1185 | struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO, | 1185 | struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO, |
1186 | order); | 1186 | order); |
1187 | if (!out) { | 1187 | if (!out) { |
1188 | struct page *outp; | ||
1189 | |||
1188 | BUG_ON(order > state->page_order); | 1190 | BUG_ON(order > state->page_order); |
1189 | 1191 | ||
1190 | out = page_address(mempool_alloc(state->pool, GFP_NOIO)); | 1192 | outp = mempool_alloc(state->pool, GFP_NOIO); |
1193 | out = page_address(outp); | ||
1191 | used_mempool = true; | 1194 | used_mempool = true; |
1192 | order = state->page_order; | 1195 | order = state->page_order; |
1193 | } | 1196 | } |
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 98cc0a810a36..5f9c2a665ca5 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) | |||
1167 | /* guard against overflow */ | 1167 | /* guard against overflow */ |
1168 | SET_GC_SECTORS_USED(g, min_t(unsigned, | 1168 | SET_GC_SECTORS_USED(g, min_t(unsigned, |
1169 | GC_SECTORS_USED(g) + KEY_SIZE(k), | 1169 | GC_SECTORS_USED(g) + KEY_SIZE(k), |
1170 | (1 << 14) - 1)); | 1170 | MAX_GC_SECTORS_USED)); |
1171 | 1171 | ||
1172 | BUG_ON(!GC_SECTORS_USED(g)); | 1172 | BUG_ON(!GC_SECTORS_USED(g)); |
1173 | } | 1173 | } |
@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k, | |||
1805 | 1805 | ||
1806 | static size_t insert_u64s_remaining(struct btree *b) | 1806 | static size_t insert_u64s_remaining(struct btree *b) |
1807 | { | 1807 | { |
1808 | ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys); | 1808 | long ret = bch_btree_keys_u64s_remaining(&b->keys); |
1809 | 1809 | ||
1810 | /* | 1810 | /* |
1811 | * Might land in the middle of an existing extent and have to split it | 1811 | * Might land in the middle of an existing extent and have to split it |
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 72cd213f213f..5d5d031cf381 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl) | |||
353 | struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); | 353 | struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); |
354 | struct bio *bio = op->bio, *n; | 354 | struct bio *bio = op->bio, *n; |
355 | 355 | ||
356 | if (op->bypass) | ||
357 | return bch_data_invalidate(cl); | ||
358 | |||
359 | if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { | 356 | if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { |
360 | set_gc_sectors(op->c); | 357 | set_gc_sectors(op->c); |
361 | wake_up_gc(op->c); | 358 | wake_up_gc(op->c); |
362 | } | 359 | } |
363 | 360 | ||
361 | if (op->bypass) | ||
362 | return bch_data_invalidate(cl); | ||
363 | |||
364 | /* | 364 | /* |
365 | * Journal writes are marked REQ_FLUSH; if the original write was a | 365 | * Journal writes are marked REQ_FLUSH; if the original write was a |
366 | * flush, it'll wait on the journal write. | 366 | * flush, it'll wait on the journal write. |
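In bch_data_insert_start() the sectors_to_gc decrement now happens before the bypass early-return, so writes that only invalidate cached data still push the cache set toward its next garbage-collection run; previously the `return bch_data_invalidate(cl)` path skipped that accounting entirely. A small self-contained sketch of the reordered shape (names made up):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cache_set { atomic_int sectors_to_gc; };

static void wake_up_gc(struct cache_set *c) { (void)c; puts("wake gc"); }
static void invalidate(struct cache_set *c) { (void)c; puts("invalidate only"); }

static void data_insert_start(struct cache_set *c, int sectors, bool bypass)
{
	/* Account the sectors first, whatever path the request takes. */
	if (atomic_fetch_sub(&c->sectors_to_gc, sectors) - sectors < 0)
		wake_up_gc(c);

	if (bypass) {
		invalidate(c);   /* the early exit no longer skips the accounting */
		return;
	}

	puts("normal insert path");
}

int main(void)
{
	struct cache_set c = { 4 };
	data_insert_start(&c, 8, true);   /* bypassed, yet still triggers GC */
	return 0;
}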
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index c6ab69333a6d..d8458d477a12 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c | |||
@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b) | |||
416 | return MAP_CONTINUE; | 416 | return MAP_CONTINUE; |
417 | } | 417 | } |
418 | 418 | ||
419 | int bch_bset_print_stats(struct cache_set *c, char *buf) | 419 | static int bch_bset_print_stats(struct cache_set *c, char *buf) |
420 | { | 420 | { |
421 | struct bset_stats_op op; | 421 | struct bset_stats_op op; |
422 | int ret; | 422 | int ret; |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index fd3a2a14b587..4a6ca1cb2e78 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio) | |||
1953 | for (i = 0; i < conf->raid_disks * 2; i++) { | 1953 | for (i = 0; i < conf->raid_disks * 2; i++) { |
1954 | int j; | 1954 | int j; |
1955 | int size; | 1955 | int size; |
1956 | int uptodate; | ||
1956 | struct bio *b = r1_bio->bios[i]; | 1957 | struct bio *b = r1_bio->bios[i]; |
1957 | if (b->bi_end_io != end_sync_read) | 1958 | if (b->bi_end_io != end_sync_read) |
1958 | continue; | 1959 | continue; |
1959 | /* fixup the bio for reuse */ | 1960 | /* fixup the bio for reuse, but preserve BIO_UPTODATE */ |
1961 | uptodate = test_bit(BIO_UPTODATE, &b->bi_flags); | ||
1960 | bio_reset(b); | 1962 | bio_reset(b); |
1963 | if (!uptodate) | ||
1964 | clear_bit(BIO_UPTODATE, &b->bi_flags); | ||
1961 | b->bi_vcnt = vcnt; | 1965 | b->bi_vcnt = vcnt; |
1962 | b->bi_iter.bi_size = r1_bio->sectors << 9; | 1966 | b->bi_iter.bi_size = r1_bio->sectors << 9; |
1963 | b->bi_iter.bi_sector = r1_bio->sector + | 1967 | b->bi_iter.bi_sector = r1_bio->sector + |
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio) | |||
1990 | int j; | 1994 | int j; |
1991 | struct bio *pbio = r1_bio->bios[primary]; | 1995 | struct bio *pbio = r1_bio->bios[primary]; |
1992 | struct bio *sbio = r1_bio->bios[i]; | 1996 | struct bio *sbio = r1_bio->bios[i]; |
1997 | int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags); | ||
1993 | 1998 | ||
1994 | if (sbio->bi_end_io != end_sync_read) | 1999 | if (sbio->bi_end_io != end_sync_read) |
1995 | continue; | 2000 | continue; |
2001 | /* Now we can 'fixup' the BIO_UPTODATE flag */ | ||
2002 | set_bit(BIO_UPTODATE, &sbio->bi_flags); | ||
1996 | 2003 | ||
1997 | if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { | 2004 | if (uptodate) { |
1998 | for (j = vcnt; j-- ; ) { | 2005 | for (j = vcnt; j-- ; ) { |
1999 | struct page *p, *s; | 2006 | struct page *p, *s; |
2000 | p = pbio->bi_io_vec[j].bv_page; | 2007 | p = pbio->bi_io_vec[j].bv_page; |
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio) | |||
2009 | if (j >= 0) | 2016 | if (j >= 0) |
2010 | atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); | 2017 | atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); |
2011 | if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) | 2018 | if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) |
2012 | && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { | 2019 | && uptodate)) { |
2013 | /* No need to write to this device. */ | 2020 | /* No need to write to this device. */ |
2014 | sbio->bi_end_io = NULL; | 2021 | sbio->bi_end_io = NULL; |
2015 | rdev_dec_pending(conf->mirrors[i].rdev, mddev); | 2022 | rdev_dec_pending(conf->mirrors[i].rdev, mddev); |
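bio_reset() reinitializes bi_flags and (in this kernel generation, as far as I recall) hands the bio back flagged BIO_UPTODATE, so a failed resync read looked successful once the bio was recycled; the hunks above snapshot the bit before the reset, re-clear it when the read had actually failed, and compare against the snapshot instead of the post-reset flag. The pattern in isolation, with stand-in types:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define UPTODATE 0x1u

struct fake_bio { unsigned flags; int vcnt; };

static void fake_bio_reset(struct fake_bio *b)
{
	memset(b, 0, sizeof(*b));
	b->flags |= UPTODATE;                  /* a reset bio comes back "up to date" */
}

static bool reuse_for_resync(struct fake_bio *b, int vcnt)
{
	bool uptodate = b->flags & UPTODATE;   /* snapshot before the reset */

	fake_bio_reset(b);
	if (!uptodate)
		b->flags &= ~UPTODATE;         /* keep the failed read marked failed */
	b->vcnt = vcnt;

	return uptodate;                       /* compare against the snapshot */
}

int main(void)
{
	struct fake_bio b = { .flags = 0 };    /* a read that failed */
	printf("treated as up to date: %d\n", reuse_for_resync(&b, 8));
	return 0;
}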
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f1feadeb7bb2..16f5c21963db 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) | |||
5514 | return sectors * (raid_disks - conf->max_degraded); | 5514 | return sectors * (raid_disks - conf->max_degraded); |
5515 | } | 5515 | } |
5516 | 5516 | ||
5517 | static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) | ||
5518 | { | ||
5519 | safe_put_page(percpu->spare_page); | ||
5520 | kfree(percpu->scribble); | ||
5521 | percpu->spare_page = NULL; | ||
5522 | percpu->scribble = NULL; | ||
5523 | } | ||
5524 | |||
5525 | static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) | ||
5526 | { | ||
5527 | if (conf->level == 6 && !percpu->spare_page) | ||
5528 | percpu->spare_page = alloc_page(GFP_KERNEL); | ||
5529 | if (!percpu->scribble) | ||
5530 | percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); | ||
5531 | |||
5532 | if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { | ||
5533 | free_scratch_buffer(conf, percpu); | ||
5534 | return -ENOMEM; | ||
5535 | } | ||
5536 | |||
5537 | return 0; | ||
5538 | } | ||
5539 | |||
5517 | static void raid5_free_percpu(struct r5conf *conf) | 5540 | static void raid5_free_percpu(struct r5conf *conf) |
5518 | { | 5541 | { |
5519 | struct raid5_percpu *percpu; | ||
5520 | unsigned long cpu; | 5542 | unsigned long cpu; |
5521 | 5543 | ||
5522 | if (!conf->percpu) | 5544 | if (!conf->percpu) |
5523 | return; | 5545 | return; |
5524 | 5546 | ||
5525 | get_online_cpus(); | ||
5526 | for_each_possible_cpu(cpu) { | ||
5527 | percpu = per_cpu_ptr(conf->percpu, cpu); | ||
5528 | safe_put_page(percpu->spare_page); | ||
5529 | kfree(percpu->scribble); | ||
5530 | } | ||
5531 | #ifdef CONFIG_HOTPLUG_CPU | 5547 | #ifdef CONFIG_HOTPLUG_CPU |
5532 | unregister_cpu_notifier(&conf->cpu_notify); | 5548 | unregister_cpu_notifier(&conf->cpu_notify); |
5533 | #endif | 5549 | #endif |
5550 | |||
5551 | get_online_cpus(); | ||
5552 | for_each_possible_cpu(cpu) | ||
5553 | free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); | ||
5534 | put_online_cpus(); | 5554 | put_online_cpus(); |
5535 | 5555 | ||
5536 | free_percpu(conf->percpu); | 5556 | free_percpu(conf->percpu); |
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, | |||
5557 | switch (action) { | 5577 | switch (action) { |
5558 | case CPU_UP_PREPARE: | 5578 | case CPU_UP_PREPARE: |
5559 | case CPU_UP_PREPARE_FROZEN: | 5579 | case CPU_UP_PREPARE_FROZEN: |
5560 | if (conf->level == 6 && !percpu->spare_page) | 5580 | if (alloc_scratch_buffer(conf, percpu)) { |
5561 | percpu->spare_page = alloc_page(GFP_KERNEL); | ||
5562 | if (!percpu->scribble) | ||
5563 | percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); | ||
5564 | |||
5565 | if (!percpu->scribble || | ||
5566 | (conf->level == 6 && !percpu->spare_page)) { | ||
5567 | safe_put_page(percpu->spare_page); | ||
5568 | kfree(percpu->scribble); | ||
5569 | pr_err("%s: failed memory allocation for cpu%ld\n", | 5581 | pr_err("%s: failed memory allocation for cpu%ld\n", |
5570 | __func__, cpu); | 5582 | __func__, cpu); |
5571 | return notifier_from_errno(-ENOMEM); | 5583 | return notifier_from_errno(-ENOMEM); |
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, | |||
5573 | break; | 5585 | break; |
5574 | case CPU_DEAD: | 5586 | case CPU_DEAD: |
5575 | case CPU_DEAD_FROZEN: | 5587 | case CPU_DEAD_FROZEN: |
5576 | safe_put_page(percpu->spare_page); | 5588 | free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); |
5577 | kfree(percpu->scribble); | ||
5578 | percpu->spare_page = NULL; | ||
5579 | percpu->scribble = NULL; | ||
5580 | break; | 5589 | break; |
5581 | default: | 5590 | default: |
5582 | break; | 5591 | break; |
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, | |||
5588 | static int raid5_alloc_percpu(struct r5conf *conf) | 5597 | static int raid5_alloc_percpu(struct r5conf *conf) |
5589 | { | 5598 | { |
5590 | unsigned long cpu; | 5599 | unsigned long cpu; |
5591 | struct page *spare_page; | 5600 | int err = 0; |
5592 | struct raid5_percpu __percpu *allcpus; | ||
5593 | void *scribble; | ||
5594 | int err; | ||
5595 | 5601 | ||
5596 | allcpus = alloc_percpu(struct raid5_percpu); | 5602 | conf->percpu = alloc_percpu(struct raid5_percpu); |
5597 | if (!allcpus) | 5603 | if (!conf->percpu) |
5598 | return -ENOMEM; | 5604 | return -ENOMEM; |
5599 | conf->percpu = allcpus; | 5605 | |
5606 | #ifdef CONFIG_HOTPLUG_CPU | ||
5607 | conf->cpu_notify.notifier_call = raid456_cpu_notify; | ||
5608 | conf->cpu_notify.priority = 0; | ||
5609 | err = register_cpu_notifier(&conf->cpu_notify); | ||
5610 | if (err) | ||
5611 | return err; | ||
5612 | #endif | ||
5600 | 5613 | ||
5601 | get_online_cpus(); | 5614 | get_online_cpus(); |
5602 | err = 0; | ||
5603 | for_each_present_cpu(cpu) { | 5615 | for_each_present_cpu(cpu) { |
5604 | if (conf->level == 6) { | 5616 | err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); |
5605 | spare_page = alloc_page(GFP_KERNEL); | 5617 | if (err) { |
5606 | if (!spare_page) { | 5618 | pr_err("%s: failed memory allocation for cpu%ld\n", |
5607 | err = -ENOMEM; | 5619 | __func__, cpu); |
5608 | break; | ||
5609 | } | ||
5610 | per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; | ||
5611 | } | ||
5612 | scribble = kmalloc(conf->scribble_len, GFP_KERNEL); | ||
5613 | if (!scribble) { | ||
5614 | err = -ENOMEM; | ||
5615 | break; | 5620 | break; |
5616 | } | 5621 | } |
5617 | per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; | ||
5618 | } | 5622 | } |
5619 | #ifdef CONFIG_HOTPLUG_CPU | ||
5620 | conf->cpu_notify.notifier_call = raid456_cpu_notify; | ||
5621 | conf->cpu_notify.priority = 0; | ||
5622 | if (err == 0) | ||
5623 | err = register_cpu_notifier(&conf->cpu_notify); | ||
5624 | #endif | ||
5625 | put_online_cpus(); | 5623 | put_online_cpus(); |
5626 | 5624 | ||
5627 | return err; | 5625 | return err; |
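The raid5 rework funnels every path that sets up or tears down the per-CPU spare page and scribble buffer through alloc_scratch_buffer()/free_scratch_buffer(), and it registers the hotplug notifier before walking the present CPUs so a failure part-way through still unwinds through the same helper. The core shape, reduced to plain C with illustrative names:

#include <stdlib.h>

struct scratch { void *spare_page; void *scribble; };

/* One teardown helper shared by the error, hot-unplug and free paths. */
static void scratch_free(struct scratch *p)
{
	free(p->spare_page);
	free(p->scribble);
	p->spare_page = NULL;
	p->scribble = NULL;
}

/* One allocation helper shared by the init and CPU-hotplug "up" paths. */
static int scratch_alloc(struct scratch *p, size_t scribble_len, int need_spare)
{
	if (need_spare && !p->spare_page)
		p->spare_page = malloc(4096);
	if (!p->scribble)
		p->scribble = malloc(scribble_len);

	if (!p->scribble || (need_spare && !p->spare_page)) {
		scratch_free(p);          /* undo a partial allocation */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct scratch s = { 0 };
	int err = scratch_alloc(&s, 4096, 1);
	scratch_free(&s);
	return err;
}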
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index 8f8a6b327cdb..2c2c9cc75231 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c | |||
@@ -787,6 +787,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) | |||
787 | if (rc != 0) { | 787 | if (rc != 0) { |
788 | dev_err(&pci_dev->dev, | 788 | dev_err(&pci_dev->dev, |
789 | "[%s] genwqe_user_vmap rc=%d\n", __func__, rc); | 789 | "[%s] genwqe_user_vmap rc=%d\n", __func__, rc); |
790 | kfree(dma_map); | ||
790 | return rc; | 791 | return rc; |
791 | } | 792 | } |
792 | 793 | ||
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 1ee2b9492a82..9b809cfc2899 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
@@ -908,7 +908,6 @@ void mei_cl_all_disconnect(struct mei_device *dev) | |||
908 | list_for_each_entry_safe(cl, next, &dev->file_list, link) { | 908 | list_for_each_entry_safe(cl, next, &dev->file_list, link) { |
909 | cl->state = MEI_FILE_DISCONNECTED; | 909 | cl->state = MEI_FILE_DISCONNECTED; |
910 | cl->mei_flow_ctrl_creds = 0; | 910 | cl->mei_flow_ctrl_creds = 0; |
911 | cl->read_cb = NULL; | ||
912 | cl->timer_count = 0; | 911 | cl->timer_count = 0; |
913 | } | 912 | } |
914 | } | 913 | } |
@@ -942,8 +941,16 @@ void mei_cl_all_wakeup(struct mei_device *dev) | |||
942 | void mei_cl_all_write_clear(struct mei_device *dev) | 941 | void mei_cl_all_write_clear(struct mei_device *dev) |
943 | { | 942 | { |
944 | struct mei_cl_cb *cb, *next; | 943 | struct mei_cl_cb *cb, *next; |
944 | struct list_head *list; | ||
945 | 945 | ||
946 | list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { | 946 | list = &dev->write_list.list; |
947 | list_for_each_entry_safe(cb, next, list, list) { | ||
948 | list_del(&cb->list); | ||
949 | mei_io_cb_free(cb); | ||
950 | } | ||
951 | |||
952 | list = &dev->write_waiting_list.list; | ||
953 | list_for_each_entry_safe(cb, next, list, list) { | ||
947 | list_del(&cb->list); | 954 | list_del(&cb->list); |
948 | mei_io_cb_free(cb); | 955 | mei_io_cb_free(cb); |
949 | } | 956 | } |
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c index 752ff873f891..7e1ef0ebbb80 100644 --- a/drivers/misc/mic/host/mic_virtio.c +++ b/drivers/misc/mic/host/mic_virtio.c | |||
@@ -156,7 +156,8 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov, | |||
156 | static int _mic_virtio_copy(struct mic_vdev *mvdev, | 156 | static int _mic_virtio_copy(struct mic_vdev *mvdev, |
157 | struct mic_copy_desc *copy) | 157 | struct mic_copy_desc *copy) |
158 | { | 158 | { |
159 | int ret = 0, iovcnt = copy->iovcnt; | 159 | int ret = 0; |
160 | u32 iovcnt = copy->iovcnt; | ||
160 | struct iovec iov; | 161 | struct iovec iov; |
161 | struct iovec __user *u_iov = copy->iov; | 162 | struct iovec __user *u_iov = copy->iov; |
162 | void __user *ubuf = NULL; | 163 | void __user *ubuf = NULL; |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 357bbc54fe4b..3e049c13429c 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, | |||
197 | struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; | 197 | struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; |
198 | 198 | ||
199 | if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) | 199 | if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) |
200 | limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; | 200 | limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; |
201 | 201 | ||
202 | mq->card = card; | 202 | mq->card = card; |
203 | mq->queue = blk_init_queue(mmc_request_fn, lock); | 203 | mq->queue = blk_init_queue(mmc_request_fn, lock); |
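dma_max_pfn() returns an unsigned long, and on 32-bit machines whose DMA-capable memory sits at or above 4 GiB the subsequent `<< PAGE_SHIFT` overflows before the result ever reaches the u64 limit variable; casting the PFN to u64 first, as the hunk does, keeps the full byte address. A short demonstration of the wrap (numbers chosen to show it on a 32-bit unsigned long):

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* PFN of a page a little above the 4 GiB mark. */
	unsigned long max_pfn = 0x140000UL;

	/* With a 32-bit unsigned long this wraps to 0x40000000 ... */
	unsigned long narrow = max_pfn << PAGE_SHIFT;

	/* ... while widening first preserves the intended 0x140000000. */
	uint64_t wide = (uint64_t)max_pfn << PAGE_SHIFT;

	printf("narrow: 0x%lx\nwide:   0x%" PRIx64 "\n", narrow, wide);
	return 0;
}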
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f342278539d5..494b888a6568 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -139,7 +139,7 @@ config MACVTAP | |||
139 | This adds a specialized tap character device driver that is based | 139 | This adds a specialized tap character device driver that is based |
140 | on the MAC-VLAN network interface, called macvtap. A macvtap device | 140 | on the MAC-VLAN network interface, called macvtap. A macvtap device |
141 | can be added in the same way as a macvlan device, using 'type | 141 | can be added in the same way as a macvlan device, using 'type |
142 | macvlan', and then be accessed through the tap user space interface. | 142 | macvtap', and then be accessed through the tap user space interface. |
143 | 143 | ||
144 | To compile this driver as a module, choose M here: the module | 144 | To compile this driver as a module, choose M here: the module |
145 | will be called macvtap. | 145 | will be called macvtap. |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index e9edd8473df6..e362ff720e6b 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -1797,8 +1797,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) | |||
1797 | BOND_AD_INFO(bond).agg_select_timer = timeout; | 1797 | BOND_AD_INFO(bond).agg_select_timer = timeout; |
1798 | } | 1798 | } |
1799 | 1799 | ||
1800 | static u16 aggregator_identifier; | ||
1801 | |||
1802 | /** | 1800 | /** |
1803 | * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures | 1801 | * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures |
1804 | * @bond: bonding struct to work on | 1802 | * @bond: bonding struct to work on |
@@ -1812,7 +1810,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution) | |||
1812 | if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr), | 1810 | if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr), |
1813 | bond->dev->dev_addr)) { | 1811 | bond->dev->dev_addr)) { |
1814 | 1812 | ||
1815 | aggregator_identifier = 0; | 1813 | BOND_AD_INFO(bond).aggregator_identifier = 0; |
1816 | 1814 | ||
1817 | BOND_AD_INFO(bond).system.sys_priority = 0xFFFF; | 1815 | BOND_AD_INFO(bond).system.sys_priority = 0xFFFF; |
1818 | BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr); | 1816 | BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr); |
@@ -1881,7 +1879,7 @@ void bond_3ad_bind_slave(struct slave *slave) | |||
1881 | ad_initialize_agg(aggregator); | 1879 | ad_initialize_agg(aggregator); |
1882 | 1880 | ||
1883 | aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr); | 1881 | aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr); |
1884 | aggregator->aggregator_identifier = (++aggregator_identifier); | 1882 | aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier; |
1885 | aggregator->slave = slave; | 1883 | aggregator->slave = slave; |
1886 | aggregator->is_active = 0; | 1884 | aggregator->is_active = 0; |
1887 | aggregator->num_of_ports = 0; | 1885 | aggregator->num_of_ports = 0; |
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 3b97fe487dca..bb03b1df2f3e 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h | |||
@@ -253,6 +253,7 @@ struct ad_system { | |||
253 | struct ad_bond_info { | 253 | struct ad_bond_info { |
254 | struct ad_system system; /* 802.3ad system structure */ | 254 | struct ad_system system; /* 802.3ad system structure */ |
255 | u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */ | 255 | u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */ |
256 | u16 aggregator_identifier; | ||
256 | }; | 257 | }; |
257 | 258 | ||
258 | struct ad_slave_info { | 259 | struct ad_slave_info { |
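Before this change aggregator_identifier was a single file-scope static shared by every bond, and bond_3ad_initialize() zeroed it, so initializing one bond restarted the sequence another bond was still drawing from and duplicate aggregator IDs became possible; moving the counter into ad_bond_info makes the sequence per bond. Reduced to its essence (names illustrative):

/* Per-instance counter instead of one static shared across all bonds. */
struct ad_bond_info_like { unsigned short aggregator_identifier; };
struct bond_like         { struct ad_bond_info_like ad_info; };

static unsigned short next_aggregator_id(struct bond_like *bond)
{
	return ++bond->ad_info.aggregator_identifier;   /* private sequence */
}

int main(void)
{
	struct bond_like bond0 = { { 0 } }, bond1 = { { 0 } };
	/* Each bond now numbers its aggregators independently. */
	return next_aggregator_id(&bond0) == 1 &&
	       next_aggregator_id(&bond1) == 1 ? 0 : 1;
}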
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ac4a1b88115e..afae7cab5cf6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1538,9 +1538,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1538 | bond_set_carrier(bond); | 1538 | bond_set_carrier(bond); |
1539 | 1539 | ||
1540 | if (USES_PRIMARY(bond->params.mode)) { | 1540 | if (USES_PRIMARY(bond->params.mode)) { |
1541 | block_netpoll_tx(); | ||
1541 | write_lock_bh(&bond->curr_slave_lock); | 1542 | write_lock_bh(&bond->curr_slave_lock); |
1542 | bond_select_active_slave(bond); | 1543 | bond_select_active_slave(bond); |
1543 | write_unlock_bh(&bond->curr_slave_lock); | 1544 | write_unlock_bh(&bond->curr_slave_lock); |
1545 | unblock_netpoll_tx(); | ||
1544 | } | 1546 | } |
1545 | 1547 | ||
1546 | pr_info("%s: Enslaving %s as %s interface with %s link\n", | 1548 | pr_info("%s: Enslaving %s as %s interface with %s link\n", |
@@ -1566,10 +1568,12 @@ err_detach: | |||
1566 | if (bond->primary_slave == new_slave) | 1568 | if (bond->primary_slave == new_slave) |
1567 | bond->primary_slave = NULL; | 1569 | bond->primary_slave = NULL; |
1568 | if (bond->curr_active_slave == new_slave) { | 1570 | if (bond->curr_active_slave == new_slave) { |
1571 | block_netpoll_tx(); | ||
1569 | write_lock_bh(&bond->curr_slave_lock); | 1572 | write_lock_bh(&bond->curr_slave_lock); |
1570 | bond_change_active_slave(bond, NULL); | 1573 | bond_change_active_slave(bond, NULL); |
1571 | bond_select_active_slave(bond); | 1574 | bond_select_active_slave(bond); |
1572 | write_unlock_bh(&bond->curr_slave_lock); | 1575 | write_unlock_bh(&bond->curr_slave_lock); |
1576 | unblock_netpoll_tx(); | ||
1573 | } | 1577 | } |
1574 | slave_disable_netpoll(new_slave); | 1578 | slave_disable_netpoll(new_slave); |
1575 | 1579 | ||
@@ -2858,9 +2862,12 @@ static int bond_slave_netdev_event(unsigned long event, | |||
2858 | pr_info("%s: Primary slave changed to %s, reselecting active slave\n", | 2862 | pr_info("%s: Primary slave changed to %s, reselecting active slave\n", |
2859 | bond->dev->name, | 2863 | bond->dev->name, |
2860 | bond->primary_slave ? slave_dev->name : "none"); | 2864 | bond->primary_slave ? slave_dev->name : "none"); |
2865 | |||
2866 | block_netpoll_tx(); | ||
2861 | write_lock_bh(&bond->curr_slave_lock); | 2867 | write_lock_bh(&bond->curr_slave_lock); |
2862 | bond_select_active_slave(bond); | 2868 | bond_select_active_slave(bond); |
2863 | write_unlock_bh(&bond->curr_slave_lock); | 2869 | write_unlock_bh(&bond->curr_slave_lock); |
2870 | unblock_netpoll_tx(); | ||
2864 | break; | 2871 | break; |
2865 | case NETDEV_FEAT_CHANGE: | 2872 | case NETDEV_FEAT_CHANGE: |
2866 | bond_compute_features(bond); | 2873 | bond_compute_features(bond); |
@@ -3683,7 +3690,7 @@ static inline int bond_slave_override(struct bonding *bond, | |||
3683 | 3690 | ||
3684 | 3691 | ||
3685 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, | 3692 | static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, |
3686 | void *accel_priv) | 3693 | void *accel_priv, select_queue_fallback_t fallback) |
3687 | { | 3694 | { |
3688 | /* | 3695 | /* |
3689 | * This helper function exists to help dev_pick_tx get the correct | 3696 | * This helper function exists to help dev_pick_tx get the correct |
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 5f997b9af54d..23f365510b58 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/if.h> | 15 | #include <linux/if.h> |
16 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
17 | #include <linux/rwlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/rcupdate.h> | 18 | #include <linux/rcupdate.h> |
19 | #include <linux/ctype.h> | 19 | #include <linux/ctype.h> |
20 | #include <linux/inet.h> | 20 | #include <linux/inet.h> |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 6c859bba8b65..e77d11049747 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -473,6 +473,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev) | |||
473 | return err; | 473 | return err; |
474 | 474 | ||
475 | dev->nchannels = msg.u.cardinfo.nchannels; | 475 | dev->nchannels = msg.u.cardinfo.nchannels; |
476 | if (dev->nchannels > MAX_NET_DEVICES) | ||
477 | return -EINVAL; | ||
476 | 478 | ||
477 | return 0; | 479 | return 0; |
478 | } | 480 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 8363b9de5004..5ee13af78e53 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -1867,7 +1867,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | |||
1867 | } | 1867 | } |
1868 | 1868 | ||
1869 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, | 1869 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, |
1870 | void *accel_priv) | 1870 | void *accel_priv, select_queue_fallback_t fallback) |
1871 | { | 1871 | { |
1872 | struct bnx2x *bp = netdev_priv(dev); | 1872 | struct bnx2x *bp = netdev_priv(dev); |
1873 | 1873 | ||
@@ -1889,7 +1889,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
1889 | } | 1889 | } |
1890 | 1890 | ||
1891 | /* select a non-FCoE queue */ | 1891 | /* select a non-FCoE queue */ |
1892 | return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); | 1892 | return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); |
1893 | } | 1893 | } |
1894 | 1894 | ||
1895 | void bnx2x_set_num_queues(struct bnx2x *bp) | 1895 | void bnx2x_set_num_queues(struct bnx2x *bp) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 5135cc7f7b6f..ec02b15fba32 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | |||
@@ -496,7 +496,7 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); | |||
496 | 496 | ||
497 | /* select_queue callback */ | 497 | /* select_queue callback */ |
498 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, | 498 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, |
499 | void *accel_priv); | 499 | void *accel_priv, select_queue_fallback_t fallback); |
500 | 500 | ||
501 | static inline void bnx2x_update_rx_prod(struct bnx2x *bp, | 501 | static inline void bnx2x_update_rx_prod(struct bnx2x *bp, |
502 | struct bnx2x_fastpath *fp, | 502 | struct bnx2x_fastpath *fp, |
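The accel_priv/fallback churn in bnx2x, ixgbe, lantiq_etop, mlx4_en and bonding all comes from the same ndo_select_queue() prototype change in this merge window: the core now hands the driver its default queue picker as a select_queue_fallback_t instead of the driver calling __netdev_pick_tx() directly, and the driver folds that pick into its own ring range. A compilable sketch of the new callback shape with opaque stand-in types (the typedef is quoted from memory of that era's netdevice.h, so treat it as approximate):

#include <stdint.h>

typedef uint16_t u16;
struct net_device;              /* opaque stand-ins; enough to compile */
struct sk_buff;

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb);

/* Updated callback shape: constrain the core's default pick to the
 * driver's own data rings instead of calling __netdev_pick_tx(). */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv,
				select_queue_fallback_t fallback)
{
	(void)accel_priv;
	return fallback(dev, skb) % 8;   /* e.g. 8 ordinary TX rings */
}

int main(void) { (void)example_select_queue; return 0; }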
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index add05f14b38b..1642de78aac8 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c | |||
@@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_dev *pdev) | |||
1939 | pci_iounmap(pdev, tp->base_addr); | 1939 | pci_iounmap(pdev, tp->base_addr); |
1940 | free_netdev (dev); | 1940 | free_netdev (dev); |
1941 | pci_release_regions (pdev); | 1941 | pci_release_regions (pdev); |
1942 | pci_disable_device(pdev); | ||
1942 | 1943 | ||
1943 | /* pci_power_off (pdev, -1); */ | 1944 | /* pci_power_off (pdev, -1); */ |
1944 | } | 1945 | } |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d4782b42401b..903362a7b584 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -1778,8 +1778,6 @@ fec_enet_open(struct net_device *ndev) | |||
1778 | struct fec_enet_private *fep = netdev_priv(ndev); | 1778 | struct fec_enet_private *fep = netdev_priv(ndev); |
1779 | int ret; | 1779 | int ret; |
1780 | 1780 | ||
1781 | napi_enable(&fep->napi); | ||
1782 | |||
1783 | /* I should reset the ring buffers here, but I don't yet know | 1781 | /* I should reset the ring buffers here, but I don't yet know |
1784 | * a simple way to do that. | 1782 | * a simple way to do that. |
1785 | */ | 1783 | */ |
@@ -1794,6 +1792,8 @@ fec_enet_open(struct net_device *ndev) | |||
1794 | fec_enet_free_buffers(ndev); | 1792 | fec_enet_free_buffers(ndev); |
1795 | return ret; | 1793 | return ret; |
1796 | } | 1794 | } |
1795 | |||
1796 | napi_enable(&fep->napi); | ||
1797 | phy_start(fep->phy_dev); | 1797 | phy_start(fep->phy_dev); |
1798 | netif_start_queue(ndev); | 1798 | netif_start_queue(ndev); |
1799 | fep->opened = 1; | 1799 | fep->opened = 1; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6d4ada72dfd0..18076c4178b4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -6881,7 +6881,7 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) | |||
6881 | } | 6881 | } |
6882 | 6882 | ||
6883 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, | 6883 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, |
6884 | void *accel_priv) | 6884 | void *accel_priv, select_queue_fallback_t fallback) |
6885 | { | 6885 | { |
6886 | struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; | 6886 | struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; |
6887 | #ifdef IXGBE_FCOE | 6887 | #ifdef IXGBE_FCOE |
@@ -6907,7 +6907,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
6907 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | 6907 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) |
6908 | break; | 6908 | break; |
6909 | default: | 6909 | default: |
6910 | return __netdev_pick_tx(dev, skb); | 6910 | return fallback(dev, skb); |
6911 | } | 6911 | } |
6912 | 6912 | ||
6913 | f = &adapter->ring_feature[RING_F_FCOE]; | 6913 | f = &adapter->ring_feature[RING_F_FCOE]; |
@@ -6920,7 +6920,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
6920 | 6920 | ||
6921 | return txq + f->offset; | 6921 | return txq + f->offset; |
6922 | #else | 6922 | #else |
6923 | return __netdev_pick_tx(dev, skb); | 6923 | return fallback(dev, skb); |
6924 | #endif | 6924 | #endif |
6925 | } | 6925 | } |
6926 | 6926 | ||
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 8f9266c64c75..fd4b6aecf6ee 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c | |||
@@ -619,7 +619,7 @@ ltq_etop_set_multicast_list(struct net_device *dev) | |||
619 | 619 | ||
620 | static u16 | 620 | static u16 |
621 | ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, | 621 | ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, |
622 | void *accel_priv) | 622 | void *accel_priv, select_queue_fallback_t fallback) |
623 | { | 623 | { |
624 | /* we are currently only using the first queue */ | 624 | /* we are currently only using the first queue */ |
625 | return 0; | 625 | return 0; |
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 6300fd27f2db..68e6a6613e9a 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig | |||
@@ -43,12 +43,12 @@ config MVMDIO | |||
43 | This driver is used by the MV643XX_ETH and MVNETA drivers. | 43 | This driver is used by the MV643XX_ETH and MVNETA drivers. |
44 | 44 | ||
45 | config MVNETA | 45 | config MVNETA |
46 | tristate "Marvell Armada 370/XP network interface support" | 46 | tristate "Marvell Armada 370/38x/XP network interface support" |
47 | depends on MACH_ARMADA_370_XP | 47 | depends on PLAT_ORION |
48 | select MVMDIO | 48 | select MVMDIO |
49 | ---help--- | 49 | ---help--- |
50 | This driver supports the network interface units in the | 50 | This driver supports the network interface units in the |
51 | Marvell ARMADA XP and ARMADA 370 SoC family. | 51 | Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family. |
52 | 52 | ||
53 | Note that this driver is distinct from the mv643xx_eth | 53 | Note that this driver is distinct from the mv643xx_eth |
54 | driver, which should be used for the older Marvell SoCs | 54 | driver, which should be used for the older Marvell SoCs |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 8e8a7eb43a2c..13457032d15f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -629,7 +629,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk | |||
629 | } | 629 | } |
630 | 630 | ||
631 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, | 631 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, |
632 | void *accel_priv) | 632 | void *accel_priv, select_queue_fallback_t fallback) |
633 | { | 633 | { |
634 | struct mlx4_en_priv *priv = netdev_priv(dev); | 634 | struct mlx4_en_priv *priv = netdev_priv(dev); |
635 | u16 rings_p_up = priv->num_tx_rings_p_up; | 635 | u16 rings_p_up = priv->num_tx_rings_p_up; |
@@ -641,7 +641,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
641 | if (vlan_tx_tag_present(skb)) | 641 | if (vlan_tx_tag_present(skb)) |
642 | up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; | 642 | up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; |
643 | 643 | ||
644 | return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up; | 644 | return fallback(dev, skb) % rings_p_up + up * rings_p_up; |
645 | } | 645 | } |
646 | 646 | ||
647 | static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) | 647 | static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 3af04c3f42ea..9ca223bc90fc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -723,7 +723,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); | |||
723 | 723 | ||
724 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); | 724 | void mlx4_en_tx_irq(struct mlx4_cq *mcq); |
725 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, | 725 | u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, |
726 | void *accel_priv); | 726 | void *accel_priv, select_queue_fallback_t fallback); |
727 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); | 727 | netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); |
728 | 728 | ||
729 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | 729 | int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 157fe8df2c3e..8ff57e8e3e91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig | |||
@@ -4,5 +4,5 @@ | |||
4 | 4 | ||
5 | config MLX5_CORE | 5 | config MLX5_CORE |
6 | tristate | 6 | tristate |
7 | depends on PCI && X86 | 7 | depends on PCI |
8 | default n | 8 | default n |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index e2f202e3932f..f2d7c702c77f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
@@ -37,6 +37,17 @@ config DWMAC_SUNXI | |||
37 | stmmac device driver. This driver is used for A20/A31 | 37 | stmmac device driver. This driver is used for A20/A31 |
38 | GMAC ethernet controller. | 38 | GMAC ethernet controller. |
39 | 39 | ||
40 | config DWMAC_STI | ||
41 | bool "STi GMAC support" | ||
42 | depends on STMMAC_PLATFORM && ARCH_STI | ||
43 | default y | ||
44 | ---help--- | ||
45 | Support for ethernet controller on STi SOCs. | ||
46 | |||
47 | This selects STi SoC glue layer support for the stmmac | ||
48 | device driver. This driver is used on the STi series | ||
49 | SOCs GMAC ethernet controller. | ||
50 | |||
40 | config STMMAC_PCI | 51 | config STMMAC_PCI |
41 | bool "STMMAC PCI bus support" | 52 | bool "STMMAC PCI bus support" |
42 | depends on STMMAC_ETH && PCI | 53 | depends on STMMAC_ETH && PCI |
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index ecadecea79b2..dcef28775dad 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile | |||
@@ -2,6 +2,7 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o | |||
2 | stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o | 2 | stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o |
3 | stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o | 3 | stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o |
4 | stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o | 4 | stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o |
5 | stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o | ||
5 | stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ | 6 | stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ |
6 | chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ | 7 | chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ |
7 | dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ | 8 | dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c new file mode 100644 index 000000000000..552bbc17863c --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | |||
@@ -0,0 +1,330 @@ | |||
1 | /** | ||
2 | * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer | ||
3 | * | ||
4 | * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited | ||
5 | * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> | ||
6 | * | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/stmmac.h> | ||
18 | #include <linux/phy.h> | ||
19 | #include <linux/mfd/syscon.h> | ||
20 | #include <linux/regmap.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/of.h> | ||
23 | #include <linux/of_net.h> | ||
24 | |||
25 | /** | ||
26 | * STi GMAC glue logic. | ||
27 | * -------------------- | ||
28 | * | ||
29 | * _ | ||
30 | * | \ | ||
31 | * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK | ||
32 | * phyclk | |___________________________________________ | ||
33 | * | | | (phyclk-in) | ||
34 | * --------|1 / | | ||
35 | * int-clk |_ / | | ||
36 | * | _ | ||
37 | * | | \ | ||
38 | * |_______|1 \ ETH_SEL_TX_RETIME_CLK | ||
39 | * | |___________________________ | ||
40 | * | | (tx-retime-clk) | ||
41 | * _______|0 / | ||
42 | * | |_ / | ||
43 | * _ | | ||
44 | * | \ | | ||
45 | * --------|0 \ | | ||
46 | * clk_125 | |__| | ||
47 | * | | ETH_SEL_TXCLK_NOT_CLK125 | ||
48 | * --------|1 / | ||
49 | * txclk |_ / | ||
50 | * | ||
51 | * | ||
52 | * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII where PHY can | ||
53 | * generate 50MHz clock or MAC can generate it. | ||
54 | * This bit is configured by "st,ext-phyclk" property. | ||
55 | * | ||
56 | * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125Mhz | ||
57 | * clock either comes from clk-125 pin or txclk pin. This configuration is | ||
58 | * totally driven by the board wiring. This bit is configured by | ||
59 | * "st,tx-retime-src" property. | ||
60 | * | ||
61 | * TXCLK configuration is different for different phy interface modes | ||
62 | * and changes according to link speed in modes like RGMII. | ||
63 | * | ||
64 | * Below table summarizes the clock requirement and clock sources for | ||
65 | * supported phy interface modes with link speeds. | ||
66 | * ________________________________________________ | ||
67 | *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link | | ||
68 | * ------------------------------------------------ | ||
69 | *| MII | n/a | 25Mhz | | ||
70 | *| | | txclk | | ||
71 | * ------------------------------------------------ | ||
72 | *| GMII | 125Mhz | 25Mhz | | ||
73 | *| | clk-125/txclk | txclk | | ||
74 | * ------------------------------------------------ | ||
75 | *| RGMII | 125Mhz | 25Mhz | | ||
76 | *| | clk-125/txclk | clkgen | | ||
77 | * ------------------------------------------------ | ||
78 | *| RMII | n/a | 25Mhz | | ||
79 | *| | |clkgen/phyclk-in | | ||
80 | * ------------------------------------------------ | ||
81 | * | ||
82 | * The TX lines are always retimed with a clock, which can vary depending | ||
83 | * on the board configuration. The table below shows how these bits in the | ||
84 | * ethernet configuration register are set for each retime clock source. | ||
85 | * | ||
86 | *--------------------------------------------------------------- | ||
87 | * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125| | ||
88 | *--------------------------------------------------------------- | ||
89 | * txclk | 0 | n/a | 1 | | ||
90 | *--------------------------------------------------------------- | ||
91 | * ck_125| 0 | n/a | 0 | | ||
92 | *--------------------------------------------------------------- | ||
93 | * phyclk| 1 | 0 | n/a | | ||
94 | *--------------------------------------------------------------- | ||
95 | * clkgen| 1 | 1 | n/a | | ||
96 | *--------------------------------------------------------------- | ||
97 | */ | ||
98 | |||
99 | /* Register definition */ | ||
100 | |||
101 | /* 3 bits [8:6] | ||
102 | * [6:6] ETH_SEL_TXCLK_NOT_CLK125 | ||
103 | * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK | ||
104 | * [8:8] ETH_SEL_TX_RETIME_CLK | ||
105 | * | ||
106 | */ | ||
107 | |||
108 | #define TX_RETIME_SRC_MASK GENMASK(8, 6) | ||
109 | #define ETH_SEL_TX_RETIME_CLK BIT(8) | ||
110 | #define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) | ||
111 | #define ETH_SEL_TXCLK_NOT_CLK125 BIT(6) | ||
112 | |||
113 | #define ENMII_MASK GENMASK(5, 5) | ||
114 | #define ENMII BIT(5) | ||
115 | |||
116 | /** | ||
117 | * 3 bits [4:2] | ||
118 | * 000-GMII/MII | ||
119 | * 001-RGMII | ||
120 | * 010-SGMII | ||
121 | * 100-RMII | ||
122 | */ | ||
123 | #define MII_PHY_SEL_MASK GENMASK(4, 2) | ||
124 | #define ETH_PHY_SEL_RMII BIT(4) | ||
125 | #define ETH_PHY_SEL_SGMII BIT(3) | ||
126 | #define ETH_PHY_SEL_RGMII BIT(2) | ||
127 | #define ETH_PHY_SEL_GMII 0x0 | ||
128 | #define ETH_PHY_SEL_MII 0x0 | ||
129 | |||
130 | #define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \ | ||
131 | iface == PHY_INTERFACE_MODE_RGMII_ID || \ | ||
132 | iface == PHY_INTERFACE_MODE_RGMII_RXID || \ | ||
133 | iface == PHY_INTERFACE_MODE_RGMII_TXID) | ||
134 | |||
135 | #define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \ | ||
136 | iface == PHY_INTERFACE_MODE_GMII) | ||
137 | |||
138 | struct sti_dwmac { | ||
139 | int interface; | ||
140 | bool ext_phyclk; | ||
141 | bool is_tx_retime_src_clk_125; | ||
142 | struct clk *clk; | ||
143 | int reg; | ||
144 | struct device *dev; | ||
145 | struct regmap *regmap; | ||
146 | }; | ||
147 | |||
148 | static u32 phy_intf_sels[] = { | ||
149 | [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII, | ||
150 | [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII, | ||
151 | [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII, | ||
152 | [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII, | ||
153 | [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII, | ||
154 | [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII, | ||
155 | }; | ||
156 | |||
157 | enum { | ||
158 | TX_RETIME_SRC_NA = 0, | ||
159 | TX_RETIME_SRC_TXCLK = 1, | ||
160 | TX_RETIME_SRC_CLK_125, | ||
161 | TX_RETIME_SRC_PHYCLK, | ||
162 | TX_RETIME_SRC_CLKGEN, | ||
163 | }; | ||
164 | |||
165 | static const char *const tx_retime_srcs[] = { | ||
166 | [TX_RETIME_SRC_NA] = "", | ||
167 | [TX_RETIME_SRC_TXCLK] = "txclk", | ||
168 | [TX_RETIME_SRC_CLK_125] = "clk_125", | ||
169 | [TX_RETIME_SRC_PHYCLK] = "phyclk", | ||
170 | [TX_RETIME_SRC_CLKGEN] = "clkgen", | ||
171 | }; | ||
172 | |||
173 | static u32 tx_retime_val[] = { | ||
174 | [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125, | ||
175 | [TX_RETIME_SRC_CLK_125] = 0x0, | ||
176 | [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK, | ||
177 | [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK | | ||
178 | ETH_SEL_INTERNAL_NOTEXT_PHYCLK, | ||
179 | }; | ||
180 | |||
181 | static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd) | ||
182 | { | ||
183 | u32 src = 0, freq = 0; | ||
184 | |||
185 | if (spd == SPEED_100) { | ||
186 | if (dwmac->interface == PHY_INTERFACE_MODE_MII || | ||
187 | dwmac->interface == PHY_INTERFACE_MODE_GMII) { | ||
188 | src = TX_RETIME_SRC_TXCLK; | ||
189 | } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { | ||
190 | if (dwmac->ext_phyclk) { | ||
191 | src = TX_RETIME_SRC_PHYCLK; | ||
192 | } else { | ||
193 | src = TX_RETIME_SRC_CLKGEN; | ||
194 | freq = 50000000; | ||
195 | } | ||
196 | |||
197 | } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { | ||
198 | src = TX_RETIME_SRC_CLKGEN; | ||
199 | freq = 25000000; | ||
200 | } | ||
201 | |||
202 | if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk) | ||
203 | clk_set_rate(dwmac->clk, freq); | ||
204 | |||
205 | } else if (spd == SPEED_1000) { | ||
206 | if (dwmac->is_tx_retime_src_clk_125) | ||
207 | src = TX_RETIME_SRC_CLK_125; | ||
208 | else | ||
209 | src = TX_RETIME_SRC_TXCLK; | ||
210 | } | ||
211 | |||
212 | regmap_update_bits(dwmac->regmap, dwmac->reg, | ||
213 | TX_RETIME_SRC_MASK, tx_retime_val[src]); | ||
214 | } | ||
215 | |||
216 | static void sti_dwmac_exit(struct platform_device *pdev, void *priv) | ||
217 | { | ||
218 | struct sti_dwmac *dwmac = priv; | ||
219 | |||
220 | if (dwmac->clk) | ||
221 | clk_disable_unprepare(dwmac->clk); | ||
222 | } | ||
223 | |||
224 | static void sti_fix_mac_speed(void *priv, unsigned int spd) | ||
225 | { | ||
226 | struct sti_dwmac *dwmac = priv; | ||
227 | |||
228 | setup_retime_src(dwmac, spd); | ||
229 | |||
230 | return; | ||
231 | } | ||
232 | |||
233 | static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, | ||
234 | struct platform_device *pdev) | ||
235 | { | ||
236 | struct resource *res; | ||
237 | struct device *dev = &pdev->dev; | ||
238 | struct device_node *np = dev->of_node; | ||
239 | struct regmap *regmap; | ||
240 | int err; | ||
241 | |||
242 | if (!np) | ||
243 | return -EINVAL; | ||
244 | |||
245 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf"); | ||
246 | if (!res) | ||
247 | return -ENODATA; | ||
248 | |||
249 | regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); | ||
250 | if (IS_ERR(regmap)) | ||
251 | return PTR_ERR(regmap); | ||
252 | |||
253 | dwmac->dev = dev; | ||
254 | dwmac->interface = of_get_phy_mode(np); | ||
255 | dwmac->regmap = regmap; | ||
256 | dwmac->reg = res->start; | ||
257 | dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); | ||
258 | dwmac->is_tx_retime_src_clk_125 = false; | ||
259 | |||
260 | if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { | ||
261 | const char *rs; | ||
262 | |||
263 | err = of_property_read_string(np, "st,tx-retime-src", &rs); | ||
264 | if (err < 0) { | ||
265 | dev_err(dev, "st,tx-retime-src not specified\n"); | ||
266 | return err; | ||
267 | } | ||
268 | |||
269 | if (!strcasecmp(rs, "clk_125")) | ||
270 | dwmac->is_tx_retime_src_clk_125 = true; | ||
271 | } | ||
272 | |||
273 | dwmac->clk = devm_clk_get(dev, "sti-ethclk"); | ||
274 | |||
275 | if (IS_ERR(dwmac->clk)) | ||
276 | dwmac->clk = NULL; | ||
277 | |||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | static int sti_dwmac_init(struct platform_device *pdev, void *priv) | ||
282 | { | ||
283 | struct sti_dwmac *dwmac = priv; | ||
284 | struct regmap *regmap = dwmac->regmap; | ||
285 | int iface = dwmac->interface; | ||
286 | u32 reg = dwmac->reg; | ||
287 | u32 val, spd; | ||
288 | |||
289 | if (dwmac->clk) | ||
290 | clk_prepare_enable(dwmac->clk); | ||
291 | |||
292 | regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]); | ||
293 | |||
294 | val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; | ||
295 | regmap_update_bits(regmap, reg, ENMII_MASK, val); | ||
296 | |||
297 | if (IS_PHY_IF_MODE_GBIT(iface)) | ||
298 | spd = SPEED_1000; | ||
299 | else | ||
300 | spd = SPEED_100; | ||
301 | |||
302 | setup_retime_src(dwmac, spd); | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static void *sti_dwmac_setup(struct platform_device *pdev) | ||
308 | { | ||
309 | struct sti_dwmac *dwmac; | ||
310 | int ret; | ||
311 | |||
312 | dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); | ||
313 | if (!dwmac) | ||
314 | return ERR_PTR(-ENOMEM); | ||
315 | |||
316 | ret = sti_dwmac_parse_data(dwmac, pdev); | ||
317 | if (ret) { | ||
318 | dev_err(&pdev->dev, "Unable to parse OF data\n"); | ||
319 | return ERR_PTR(ret); | ||
320 | } | ||
321 | |||
322 | return dwmac; | ||
323 | } | ||
324 | |||
325 | const struct stmmac_of_data sti_gmac_data = { | ||
326 | .fix_mac_speed = sti_fix_mac_speed, | ||
327 | .setup = sti_dwmac_setup, | ||
328 | .init = sti_dwmac_init, | ||
329 | .exit = sti_dwmac_exit, | ||
330 | }; | ||
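The mux description and the two tables in the comment block above ultimately boil down to the three bits [8:6] of the syscon register. As a quick illustration, here is a small stand-alone C sketch (not kernel code; the ETH_SEL_* values are copied from the new file, everything else is hypothetical) that prints the retime bits for two of the configurations in the table:

#include <stdio.h>

/* Illustrative copies of the bit definitions from dwmac-sti.c */
#define ETH_SEL_TX_RETIME_CLK          (1u << 8)
#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK (1u << 7)
#define ETH_SEL_TXCLK_NOT_CLK125       (1u << 6)

enum retime_src { SRC_TXCLK, SRC_CLK_125, SRC_PHYCLK, SRC_CLKGEN };

/* Bits [8:6] for each retime source, per the table in the comment */
static unsigned int retime_bits(enum retime_src src)
{
	switch (src) {
	case SRC_TXCLK:   return ETH_SEL_TXCLK_NOT_CLK125;
	case SRC_CLK_125: return 0;
	case SRC_PHYCLK:  return ETH_SEL_TX_RETIME_CLK;
	case SRC_CLKGEN:  return ETH_SEL_TX_RETIME_CLK |
				 ETH_SEL_INTERNAL_NOTEXT_PHYCLK;
	}
	return 0;
}

int main(void)
{
	/* RGMII at 100 Mbit retimes from clkgen (25 MHz) -> 0x180 */
	printf("RGMII/100 : %#x\n", retime_bits(SRC_CLKGEN));
	/* gigabit with the board wired to clk-125 -> 0x0 */
	printf("GMII/1000 : %#x\n", retime_bits(SRC_CLK_125));
	return 0;
}

The choice between clk_125 and txclk on gigabit boards comes from the "st,tx-retime-src" DT property, as parsed in sti_dwmac_parse_data() above.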
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index d9af26ed58ee..f9e60d7918c4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -133,6 +133,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv); | |||
133 | #ifdef CONFIG_DWMAC_SUNXI | 133 | #ifdef CONFIG_DWMAC_SUNXI |
134 | extern const struct stmmac_of_data sun7i_gmac_data; | 134 | extern const struct stmmac_of_data sun7i_gmac_data; |
135 | #endif | 135 | #endif |
136 | #ifdef CONFIG_DWMAC_STI | ||
137 | extern const struct stmmac_of_data sti_gmac_data; | ||
138 | #endif | ||
136 | extern struct platform_driver stmmac_pltfr_driver; | 139 | extern struct platform_driver stmmac_pltfr_driver; |
137 | static inline int stmmac_register_platform(void) | 140 | static inline int stmmac_register_platform(void) |
138 | { | 141 | { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 5884a7d2063b..c61bc72b8e90 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -33,6 +33,11 @@ static const struct of_device_id stmmac_dt_ids[] = { | |||
33 | #ifdef CONFIG_DWMAC_SUNXI | 33 | #ifdef CONFIG_DWMAC_SUNXI |
34 | { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, | 34 | { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, |
35 | #endif | 35 | #endif |
36 | #ifdef CONFIG_DWMAC_STI | ||
37 | { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, | ||
38 | { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, | ||
38 | { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data}, | ||
40 | #endif | ||
36 | /* SoC specific glue layers should come before generic bindings */ | 41 | /* SoC specific glue layers should come before generic bindings */ |
37 | { .compatible = "st,spear600-gmac"}, | 42 | { .compatible = "st,spear600-gmac"}, |
38 | { .compatible = "snps,dwmac-3.610"}, | 43 | { .compatible = "snps,dwmac-3.610"}, |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 39d12535c3c6..0b6a2802d51b 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -554,7 +554,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) | |||
554 | * common for both the interface as the interface shares | 554 | * common for both the interface as the interface shares |
555 | * the same hardware resource. | 555 | * the same hardware resource. |
556 | */ | 556 | */ |
557 | for (i = 0; i <= priv->data.slaves; i++) | 557 | for (i = 0; i < priv->data.slaves; i++) |
558 | if (priv->slaves[i].ndev->flags & IFF_PROMISC) | 558 | if (priv->slaves[i].ndev->flags & IFF_PROMISC) |
559 | flag = true; | 559 | flag = true; |
560 | 560 | ||
@@ -578,7 +578,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) | |||
578 | unsigned long timeout = jiffies + HZ; | 578 | unsigned long timeout = jiffies + HZ; |
579 | 579 | ||
580 | /* Disable Learn for all ports */ | 580 | /* Disable Learn for all ports */ |
581 | for (i = 0; i <= priv->data.slaves; i++) { | 581 | for (i = 0; i < priv->data.slaves; i++) { |
582 | cpsw_ale_control_set(ale, i, | 582 | cpsw_ale_control_set(ale, i, |
583 | ALE_PORT_NOLEARN, 1); | 583 | ALE_PORT_NOLEARN, 1); |
584 | cpsw_ale_control_set(ale, i, | 584 | cpsw_ale_control_set(ale, i, |
@@ -606,7 +606,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) | |||
606 | cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); | 606 | cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); |
607 | 607 | ||
608 | /* Enable Learn for all ports */ | 608 | /* Enable Learn for all ports */ |
609 | for (i = 0; i <= priv->data.slaves; i++) { | 609 | for (i = 0; i < priv->data.slaves; i++) { |
610 | cpsw_ale_control_set(ale, i, | 610 | cpsw_ale_control_set(ale, i, |
611 | ALE_PORT_NOLEARN, 0); | 611 | ALE_PORT_NOLEARN, 0); |
612 | cpsw_ale_control_set(ale, i, | 612 | cpsw_ale_control_set(ale, i, |
@@ -1892,6 +1892,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
1892 | memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); | 1892 | memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); |
1893 | 1893 | ||
1894 | slave_data->phy_if = of_get_phy_mode(slave_node); | 1894 | slave_data->phy_if = of_get_phy_mode(slave_node); |
1895 | if (slave_data->phy_if < 0) { | ||
1896 | pr_err("Missing or malformed slave[%d] phy-mode property\n", | ||
1897 | i); | ||
1898 | return slave_data->phy_if; | ||
1899 | } | ||
1895 | 1900 | ||
1896 | if (data->dual_emac) { | 1901 | if (data->dual_emac) { |
1897 | if (of_property_read_u32(slave_node, "dual_emac_res_vlan", | 1902 | if (of_property_read_u32(slave_node, "dual_emac_res_vlan", |
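Two things change in cpsw above: the promiscuous/ALE loops now use "< priv->data.slaves" instead of "<=" (the slaves array has exactly data.slaves entries, so "<=" walked one element past the end), and a missing or malformed phy-mode property is now reported instead of silently accepted. A tiny stand-alone sketch of the off-by-one (array size is hypothetical):

#include <stdio.h>

struct slave { int promisc; };

int main(void)
{
	struct slave slaves[2] = { { 0 }, { 1 } };	/* data.slaves == 2 */
	int nslaves = 2, flagged = 0;

	/* "i <= nslaves" would also read slaves[2], which does not exist */
	for (int i = 0; i < nslaves; i++)
		if (slaves[i].promisc)
			flagged++;

	printf("slaves in promiscuous mode: %d\n", flagged);
	return 0;
}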
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 023237a65720..17503da9f7a5 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c | |||
@@ -2071,7 +2071,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | |||
2071 | 2071 | ||
2072 | /* Return subqueue id on this core (one per core). */ | 2072 | /* Return subqueue id on this core (one per core). */ |
2073 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, | 2073 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, |
2074 | void *accel_priv) | 2074 | void *accel_priv, select_queue_fallback_t fallback) |
2075 | { | 2075 | { |
2076 | return smp_processor_id(); | 2076 | return smp_processor_id(); |
2077 | } | 2077 | } |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 1ec65feebb9e..4bfdf8c7ada0 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
27 | #include <linux/of_mdio.h> | 27 | #include <linux/of_mdio.h> |
28 | #include <linux/of_platform.h> | 28 | #include <linux/of_platform.h> |
29 | #include <linux/of_irq.h> | ||
29 | #include <linux/of_address.h> | 30 | #include <linux/of_address.h> |
30 | #include <linux/skbuff.h> | 31 | #include <linux/skbuff.h> |
31 | #include <linux/spinlock.h> | 32 | #include <linux/spinlock.h> |
@@ -600,7 +601,8 @@ static void axienet_start_xmit_done(struct net_device *ndev) | |||
600 | size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; | 601 | size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; |
601 | packets++; | 602 | packets++; |
602 | 603 | ||
603 | lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM; | 604 | ++lp->tx_bd_ci; |
605 | lp->tx_bd_ci %= TX_BD_NUM; | ||
604 | cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; | 606 | cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; |
605 | status = cur_p->status; | 607 | status = cur_p->status; |
606 | } | 608 | } |
@@ -686,7 +688,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
686 | skb_headlen(skb), DMA_TO_DEVICE); | 688 | skb_headlen(skb), DMA_TO_DEVICE); |
687 | 689 | ||
688 | for (ii = 0; ii < num_frag; ii++) { | 690 | for (ii = 0; ii < num_frag; ii++) { |
689 | lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; | 691 | ++lp->tx_bd_tail; |
692 | lp->tx_bd_tail %= TX_BD_NUM; | ||
690 | cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; | 693 | cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; |
691 | frag = &skb_shinfo(skb)->frags[ii]; | 694 | frag = &skb_shinfo(skb)->frags[ii]; |
692 | cur_p->phys = dma_map_single(ndev->dev.parent, | 695 | cur_p->phys = dma_map_single(ndev->dev.parent, |
@@ -702,7 +705,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
702 | tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; | 705 | tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; |
703 | /* Start the transfer */ | 706 | /* Start the transfer */ |
704 | axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); | 707 | axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); |
705 | lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; | 708 | ++lp->tx_bd_tail; |
709 | lp->tx_bd_tail %= TX_BD_NUM; | ||
706 | 710 | ||
707 | return NETDEV_TX_OK; | 711 | return NETDEV_TX_OK; |
708 | } | 712 | } |
@@ -774,7 +778,8 @@ static void axienet_recv(struct net_device *ndev) | |||
774 | cur_p->status = 0; | 778 | cur_p->status = 0; |
775 | cur_p->sw_id_offset = (u32) new_skb; | 779 | cur_p->sw_id_offset = (u32) new_skb; |
776 | 780 | ||
777 | lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM; | 781 | ++lp->rx_bd_ci; |
782 | lp->rx_bd_ci %= RX_BD_NUM; | ||
778 | cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; | 783 | cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; |
779 | } | 784 | } |
780 | 785 | ||
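The axienet hunks above split statements of the form "lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;" into a separate increment and modulo. The original form writes the same object twice without an intervening sequence point, which is undefined behaviour in C; the two-statement version is the well-defined ring-buffer wrap. A minimal sketch with a hypothetical ring size:

#include <stdio.h>

#define BD_NUM 4	/* hypothetical descriptor ring size */

int main(void)
{
	unsigned int ci = 0;

	for (int step = 0; step < 6; step++) {
		/* ci = ++ci % BD_NUM;   <- undefined: two unsequenced writes */
		++ci;
		ci %= BD_NUM;		/* well-defined wrap-around */
		printf("descriptor index: %u\n", ci);
	}
	return 0;
}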
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1eadc136a372..bcd2df2f406a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -88,8 +88,12 @@ static int netvsc_open(struct net_device *net) | |||
88 | { | 88 | { |
89 | struct net_device_context *net_device_ctx = netdev_priv(net); | 89 | struct net_device_context *net_device_ctx = netdev_priv(net); |
90 | struct hv_device *device_obj = net_device_ctx->device_ctx; | 90 | struct hv_device *device_obj = net_device_ctx->device_ctx; |
91 | struct netvsc_device *nvdev; | ||
92 | struct rndis_device *rdev; | ||
91 | int ret = 0; | 93 | int ret = 0; |
92 | 94 | ||
95 | netif_carrier_off(net); | ||
96 | |||
93 | /* Open up the device */ | 97 | /* Open up the device */ |
94 | ret = rndis_filter_open(device_obj); | 98 | ret = rndis_filter_open(device_obj); |
95 | if (ret != 0) { | 99 | if (ret != 0) { |
@@ -99,6 +103,11 @@ static int netvsc_open(struct net_device *net) | |||
99 | 103 | ||
100 | netif_start_queue(net); | 104 | netif_start_queue(net); |
101 | 105 | ||
106 | nvdev = hv_get_drvdata(device_obj); | ||
107 | rdev = nvdev->extension; | ||
108 | if (!rdev->link_state) | ||
109 | netif_carrier_on(net); | ||
110 | |||
102 | return ret; | 111 | return ret; |
103 | } | 112 | } |
104 | 113 | ||
@@ -229,23 +238,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, | |||
229 | struct net_device *net; | 238 | struct net_device *net; |
230 | struct net_device_context *ndev_ctx; | 239 | struct net_device_context *ndev_ctx; |
231 | struct netvsc_device *net_device; | 240 | struct netvsc_device *net_device; |
241 | struct rndis_device *rdev; | ||
232 | 242 | ||
233 | net_device = hv_get_drvdata(device_obj); | 243 | net_device = hv_get_drvdata(device_obj); |
244 | rdev = net_device->extension; | ||
245 | |||
246 | rdev->link_state = status != 1; | ||
247 | |||
234 | net = net_device->ndev; | 248 | net = net_device->ndev; |
235 | 249 | ||
236 | if (!net) { | 250 | if (!net || net->reg_state != NETREG_REGISTERED) |
237 | netdev_err(net, "got link status but net device " | ||
238 | "not initialized yet\n"); | ||
239 | return; | 251 | return; |
240 | } | ||
241 | 252 | ||
253 | ndev_ctx = netdev_priv(net); | ||
242 | if (status == 1) { | 254 | if (status == 1) { |
243 | netif_carrier_on(net); | ||
244 | ndev_ctx = netdev_priv(net); | ||
245 | schedule_delayed_work(&ndev_ctx->dwork, 0); | 255 | schedule_delayed_work(&ndev_ctx->dwork, 0); |
246 | schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); | 256 | schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); |
247 | } else { | 257 | } else { |
248 | netif_carrier_off(net); | 258 | schedule_delayed_work(&ndev_ctx->dwork, 0); |
249 | } | 259 | } |
250 | } | 260 | } |
251 | 261 | ||
@@ -388,17 +398,35 @@ static const struct net_device_ops device_ops = { | |||
388 | * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add | 398 | * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add |
389 | * another netif_notify_peers() into a delayed work, otherwise GARP packet | 399 | * another netif_notify_peers() into a delayed work, otherwise GARP packet |
390 | * will not be sent after quick migration, and cause network disconnection. | 400 | * will not be sent after quick migration, and cause network disconnection. |
401 | * Also, we update the carrier status here. | ||
391 | */ | 402 | */ |
392 | static void netvsc_send_garp(struct work_struct *w) | 403 | static void netvsc_link_change(struct work_struct *w) |
393 | { | 404 | { |
394 | struct net_device_context *ndev_ctx; | 405 | struct net_device_context *ndev_ctx; |
395 | struct net_device *net; | 406 | struct net_device *net; |
396 | struct netvsc_device *net_device; | 407 | struct netvsc_device *net_device; |
408 | struct rndis_device *rdev; | ||
409 | bool notify; | ||
410 | |||
411 | rtnl_lock(); | ||
397 | 412 | ||
398 | ndev_ctx = container_of(w, struct net_device_context, dwork.work); | 413 | ndev_ctx = container_of(w, struct net_device_context, dwork.work); |
399 | net_device = hv_get_drvdata(ndev_ctx->device_ctx); | 414 | net_device = hv_get_drvdata(ndev_ctx->device_ctx); |
415 | rdev = net_device->extension; | ||
400 | net = net_device->ndev; | 416 | net = net_device->ndev; |
401 | netdev_notify_peers(net); | 417 | |
418 | if (rdev->link_state) { | ||
419 | netif_carrier_off(net); | ||
420 | notify = false; | ||
421 | } else { | ||
422 | netif_carrier_on(net); | ||
423 | notify = true; | ||
424 | } | ||
425 | |||
426 | rtnl_unlock(); | ||
427 | |||
428 | if (notify) | ||
429 | netdev_notify_peers(net); | ||
402 | } | 430 | } |
403 | 431 | ||
404 | 432 | ||
@@ -414,13 +442,10 @@ static int netvsc_probe(struct hv_device *dev, | |||
414 | if (!net) | 442 | if (!net) |
415 | return -ENOMEM; | 443 | return -ENOMEM; |
416 | 444 | ||
417 | /* Set initial state */ | ||
418 | netif_carrier_off(net); | ||
419 | |||
420 | net_device_ctx = netdev_priv(net); | 445 | net_device_ctx = netdev_priv(net); |
421 | net_device_ctx->device_ctx = dev; | 446 | net_device_ctx->device_ctx = dev; |
422 | hv_set_drvdata(dev, net); | 447 | hv_set_drvdata(dev, net); |
423 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); | 448 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); |
424 | INIT_WORK(&net_device_ctx->work, do_set_multicast); | 449 | INIT_WORK(&net_device_ctx->work, do_set_multicast); |
425 | 450 | ||
426 | net->netdev_ops = &device_ops; | 451 | net->netdev_ops = &device_ops; |
@@ -443,8 +468,6 @@ static int netvsc_probe(struct hv_device *dev, | |||
443 | } | 468 | } |
444 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); | 469 | memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); |
445 | 470 | ||
446 | netif_carrier_on(net); | ||
447 | |||
448 | ret = register_netdev(net); | 471 | ret = register_netdev(net); |
449 | if (ret != 0) { | 472 | if (ret != 0) { |
450 | pr_err("Unable to register netdev.\n"); | 473 | pr_err("Unable to register netdev.\n"); |
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c index 177441afeb96..24b6dddd7f2f 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c | |||
@@ -522,7 +522,6 @@ static void irtty_close(struct tty_struct *tty) | |||
522 | sirdev_put_instance(priv->dev); | 522 | sirdev_put_instance(priv->dev); |
523 | 523 | ||
524 | /* Stop tty */ | 524 | /* Stop tty */ |
525 | irtty_stop_receiver(tty, TRUE); | ||
526 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | 525 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
527 | if (tty->ops->stop) | 526 | if (tty->ops->stop) |
528 | tty->ops->stop(tty); | 527 | tty->ops->stop(tty); |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 25685e3eb472..44227c25a276 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -872,14 +872,15 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
872 | dev->priv_flags |= IFF_MACVLAN; | 872 | dev->priv_flags |= IFF_MACVLAN; |
873 | err = netdev_upper_dev_link(lowerdev, dev); | 873 | err = netdev_upper_dev_link(lowerdev, dev); |
874 | if (err) | 874 | if (err) |
875 | goto destroy_port; | 875 | goto unregister_netdev; |
876 | |||
877 | 876 | ||
878 | list_add_tail_rcu(&vlan->list, &port->vlans); | 877 | list_add_tail_rcu(&vlan->list, &port->vlans); |
879 | netif_stacked_transfer_operstate(lowerdev, dev); | 878 | netif_stacked_transfer_operstate(lowerdev, dev); |
880 | 879 | ||
881 | return 0; | 880 | return 0; |
882 | 881 | ||
882 | unregister_netdev: | ||
883 | unregister_netdevice(dev); | ||
883 | destroy_port: | 884 | destroy_port: |
884 | port->count -= 1; | 885 | port->count -= 1; |
885 | if (!port->count) | 886 | if (!port->count) |
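The macvlan fix above is about unwinding in the right order: once the netdevice has been registered, a later failure (here netdev_upper_dev_link()) must first unregister the device and only then drop the port refcount, so the error labels undo the setup steps in reverse order. A generic stand-alone sketch of the pattern (all helpers are hypothetical stubs):

#include <stdio.h>

/* Hypothetical stand-ins for the real setup/teardown steps */
static int  setup_port(void)     { puts("port created");     return 0; }
static int  register_dev(void)   { puts("dev registered");   return 0; }
static int  link_to_lower(void)  { puts("linking failed");   return -1; }
static void unregister_dev(void) { puts("dev unregistered"); }
static void destroy_port(void)   { puts("port destroyed");   }

static int my_newlink(void)
{
	int err;

	err = setup_port();
	if (err)
		return err;

	err = register_dev();
	if (err)
		goto destroy_port_out;

	err = link_to_lower();
	if (err)
		goto unregister_out;	/* undo the most recent step first */

	return 0;

unregister_out:
	unregister_dev();
destroy_port_out:
	destroy_port();
	return err;
}

int main(void)
{
	return my_newlink() ? 1 : 0;
}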
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 9414fa272160..98e7cbf720a5 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -1006,11 +1006,6 @@ static int dp83640_probe(struct phy_device *phydev) | |||
1006 | } else | 1006 | } else |
1007 | list_add_tail(&dp83640->list, &clock->phylist); | 1007 | list_add_tail(&dp83640->list, &clock->phylist); |
1008 | 1008 | ||
1009 | if (clock->chosen && !list_empty(&clock->phylist)) | ||
1010 | recalibrate(clock); | ||
1011 | else | ||
1012 | enable_broadcast(dp83640->phydev, clock->page, 1); | ||
1013 | |||
1014 | dp83640_clock_put(clock); | 1009 | dp83640_clock_put(clock); |
1015 | return 0; | 1010 | return 0; |
1016 | 1011 | ||
@@ -1063,6 +1058,14 @@ static void dp83640_remove(struct phy_device *phydev) | |||
1063 | 1058 | ||
1064 | static int dp83640_config_init(struct phy_device *phydev) | 1059 | static int dp83640_config_init(struct phy_device *phydev) |
1065 | { | 1060 | { |
1061 | struct dp83640_private *dp83640 = phydev->priv; | ||
1062 | struct dp83640_clock *clock = dp83640->clock; | ||
1063 | |||
1064 | if (clock->chosen && !list_empty(&clock->phylist)) | ||
1065 | recalibrate(clock); | ||
1066 | else | ||
1067 | enable_broadcast(phydev, clock->page, 1); | ||
1068 | |||
1066 | enable_status_frames(phydev, true); | 1069 | enable_status_frames(phydev, true); |
1067 | ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); | 1070 | ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); |
1068 | return 0; | 1071 | return 0; |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index adb46de7c90d..aea92f02401b 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1642,7 +1642,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1642 | } | 1642 | } |
1643 | 1643 | ||
1644 | static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, | 1644 | static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, |
1645 | void *accel_priv) | 1645 | void *accel_priv, select_queue_fallback_t fallback) |
1646 | { | 1646 | { |
1647 | /* | 1647 | /* |
1648 | * This helper function exists to help dev_pick_tx get the correct | 1648 | * This helper function exists to help dev_pick_tx get the correct |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 44c4db8450f0..8fe9cb7d0f72 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -366,7 +366,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) | |||
366 | * hope the rxq no. may help here. | 366 | * hope the rxq no. may help here. |
367 | */ | 367 | */ |
368 | static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, | 368 | static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, |
369 | void *accel_priv) | 369 | void *accel_priv, select_queue_fallback_t fallback) |
370 | { | 370 | { |
371 | struct tun_struct *tun = netdev_priv(dev); | 371 | struct tun_struct *tun = netdev_priv(dev); |
372 | struct tun_flow_entry *e; | 372 | struct tun_flow_entry *e; |
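The tilegx, team and tun hunks above (and the mwifiex one later in this merge) all adapt to the new ndo_select_queue() prototype, which gains a select_queue_fallback_t argument so a driver can hand queue selection back to the core. A hedged sketch of a callback with no policy of its own that simply defers ("my_select_queue" is hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   void *accel_priv, select_queue_fallback_t fallback)
{
	/* no driver-specific policy: use the core's default selection */
	return fallback(dev, skb);
}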
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 409499fdb157..7e7269fd3707 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -296,7 +296,6 @@ config USB_NET_SR9800 | |||
296 | tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices" | 296 | tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices" |
297 | depends on USB_USBNET | 297 | depends on USB_USBNET |
298 | select CRC32 | 298 | select CRC32 |
299 | default y | ||
300 | ---help--- | 299 | ---help--- |
301 | Say Y if you want to use one of the following 100Mbps USB Ethernet | 300 | Say Y if you want to use one of the following 100Mbps USB Ethernet |
302 | device based on the CoreChip-sz SR9800 chip. | 301 | device based on the CoreChip-sz SR9800 chip. |
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 9765a7d4766d..5d194093f3e1 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c | |||
@@ -917,7 +917,8 @@ static const struct driver_info ax88178_info = { | |||
917 | .status = asix_status, | 917 | .status = asix_status, |
918 | .link_reset = ax88178_link_reset, | 918 | .link_reset = ax88178_link_reset, |
919 | .reset = ax88178_reset, | 919 | .reset = ax88178_reset, |
920 | .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR, | 920 | .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | |
921 | FLAG_MULTI_PACKET, | ||
921 | .rx_fixup = asix_rx_fixup_common, | 922 | .rx_fixup = asix_rx_fixup_common, |
922 | .tx_fixup = asix_tx_fixup, | 923 | .tx_fixup = asix_tx_fixup, |
923 | }; | 924 | }; |
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index d6f64dad05bc..955df81a4358 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c | |||
@@ -1118,6 +1118,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
1118 | u16 hdr_off; | 1118 | u16 hdr_off; |
1119 | u32 *pkt_hdr; | 1119 | u32 *pkt_hdr; |
1120 | 1120 | ||
1121 | /* This check is no longer done by usbnet */ | ||
1122 | if (skb->len < dev->net->hard_header_len) | ||
1123 | return 0; | ||
1124 | |||
1121 | skb_trim(skb, skb->len - 4); | 1125 | skb_trim(skb, skb->len - 4); |
1122 | memcpy(&rx_hdr, skb_tail_pointer(skb), 4); | 1126 | memcpy(&rx_hdr, skb_tail_pointer(skb), 4); |
1123 | le32_to_cpus(&rx_hdr); | 1127 | le32_to_cpus(&rx_hdr); |
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c index e4a8a93fbaf7..1cc24e6f23e2 100644 --- a/drivers/net/usb/gl620a.c +++ b/drivers/net/usb/gl620a.c | |||
@@ -84,6 +84,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
84 | u32 size; | 84 | u32 size; |
85 | u32 count; | 85 | u32 count; |
86 | 86 | ||
87 | /* This check is no longer done by usbnet */ | ||
88 | if (skb->len < dev->net->hard_header_len) | ||
89 | return 0; | ||
90 | |||
87 | header = (struct gl_header *) skb->data; | 91 | header = (struct gl_header *) skb->data; |
88 | 92 | ||
89 | // get the packet count of the received skb | 93 | // get the packet count of the received skb |
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index a305a7b2dae6..82d844a8ebd0 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c | |||
@@ -526,8 +526,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
526 | { | 526 | { |
527 | u8 status; | 527 | u8 status; |
528 | 528 | ||
529 | if (skb->len == 0) { | 529 | /* This check is no longer done by usbnet */ |
530 | dev_err(&dev->udev->dev, "unexpected empty rx frame\n"); | 530 | if (skb->len < dev->net->hard_header_len) { |
531 | dev_err(&dev->udev->dev, "unexpected tiny rx frame\n"); | ||
531 | return 0; | 532 | return 0; |
532 | } | 533 | } |
533 | 534 | ||
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c index 0a85d9227775..4cbdb1307f3e 100644 --- a/drivers/net/usb/net1080.c +++ b/drivers/net/usb/net1080.c | |||
@@ -364,6 +364,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
364 | struct nc_trailer *trailer; | 364 | struct nc_trailer *trailer; |
365 | u16 hdr_len, packet_len; | 365 | u16 hdr_len, packet_len; |
366 | 366 | ||
367 | /* This check is no longer done by usbnet */ | ||
368 | if (skb->len < dev->net->hard_header_len) | ||
369 | return 0; | ||
370 | |||
367 | if (!(skb->len & 0x01)) { | 371 | if (!(skb->len & 0x01)) { |
368 | netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n", | 372 | netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n", |
369 | skb->len, dev->net->hard_header_len, dev->hard_mtu, | 373 | skb->len, dev->net->hard_header_len, dev->hard_mtu, |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index ff5c87128ffe..313cb6cd4848 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
80 | { | 80 | { |
81 | __be16 proto; | 81 | __be16 proto; |
82 | 82 | ||
83 | /* usbnet rx_complete guarantees that skb->len is at least | 83 | /* This check is no longer done by usbnet */ |
84 | * hard_header_len, so we can inspect the dest address without | 84 | if (skb->len < dev->net->hard_header_len) |
85 | * checking skb->len | 85 | return 0; |
86 | */ | 86 | |
87 | switch (skb->data[0] & 0xf0) { | 87 | switch (skb->data[0] & 0xf0) { |
88 | case 0x40: | 88 | case 0x40: |
89 | proto = htons(ETH_P_IP); | 89 | proto = htons(ETH_P_IP); |
@@ -732,6 +732,7 @@ static const struct usb_device_id products[] = { | |||
732 | {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ | 732 | {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ |
733 | {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ | 733 | {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ |
734 | {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ | 734 | {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ |
735 | {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ | ||
735 | 736 | ||
736 | /* 4. Gobi 1000 devices */ | 737 | /* 4. Gobi 1000 devices */ |
737 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ | 738 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index a48bc0f20c1a..524a47a28120 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
@@ -492,6 +492,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind); | |||
492 | */ | 492 | */ |
493 | int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | 493 | int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) |
494 | { | 494 | { |
495 | /* This check is no longer done by usbnet */ | ||
496 | if (skb->len < dev->net->hard_header_len) | ||
497 | return 0; | ||
498 | |||
495 | /* peripheral may have batched packets to us... */ | 499 | /* peripheral may have batched packets to us... */ |
496 | while (likely(skb->len)) { | 500 | while (likely(skb->len)) { |
497 | struct rndis_data_hdr *hdr = (void *)skb->data; | 501 | struct rndis_data_hdr *hdr = (void *)skb->data; |
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index f17b9e02dd34..d9e7892262fa 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
@@ -2106,6 +2106,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb, | |||
2106 | 2106 | ||
2107 | static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | 2107 | static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) |
2108 | { | 2108 | { |
2109 | /* This check is no longer done by usbnet */ | ||
2110 | if (skb->len < dev->net->hard_header_len) | ||
2111 | return 0; | ||
2112 | |||
2109 | while (skb->len > 0) { | 2113 | while (skb->len > 0) { |
2110 | u32 rx_cmd_a, rx_cmd_b, align_count, size; | 2114 | u32 rx_cmd_a, rx_cmd_b, align_count, size; |
2111 | struct sk_buff *ax_skb; | 2115 | struct sk_buff *ax_skb; |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 8dd54a0f7b29..424db65e4396 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -1723,6 +1723,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb) | |||
1723 | 1723 | ||
1724 | static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | 1724 | static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) |
1725 | { | 1725 | { |
1726 | /* This check is no longer done by usbnet */ | ||
1727 | if (skb->len < dev->net->hard_header_len) | ||
1728 | return 0; | ||
1729 | |||
1726 | while (skb->len > 0) { | 1730 | while (skb->len > 0) { |
1727 | u32 header, align_count; | 1731 | u32 header, align_count; |
1728 | struct sk_buff *ax_skb; | 1732 | struct sk_buff *ax_skb; |
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 4175eb9fdeca..b94a0fbb8b3b 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c | |||
@@ -63,6 +63,10 @@ static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
63 | { | 63 | { |
64 | int offset = 0; | 64 | int offset = 0; |
65 | 65 | ||
66 | /* This check is no longer done by usbnet */ | ||
67 | if (skb->len < dev->net->hard_header_len) | ||
68 | return 0; | ||
69 | |||
66 | while (offset + sizeof(u32) < skb->len) { | 70 | while (offset + sizeof(u32) < skb->len) { |
67 | struct sk_buff *sr_skb; | 71 | struct sk_buff *sr_skb; |
68 | u16 size; | 72 | u16 size; |
@@ -823,7 +827,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf) | |||
823 | dev->rx_urb_size = | 827 | dev->rx_urb_size = |
824 | SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size; | 828 | SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size; |
825 | } | 829 | } |
826 | netdev_dbg(dev->net, "%s : setting rx_urb_size with : %ld\n", __func__, | 830 | netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__, |
827 | dev->rx_urb_size); | 831 | dev->rx_urb_size); |
828 | return 0; | 832 | return 0; |
829 | 833 | ||
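Besides the short-frame guard, the sr9800 hunk also corrects a format string: dev->rx_urb_size is a size_t, and the portable printf conversion for size_t is %zu, not %ld (which only happens to work where size_t and long have the same width). A minimal illustration with a made-up value:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t rx_urb_size = 2048;

	/* %zu is the correct conversion for size_t on every platform */
	printf("setting rx_urb_size with : %zu\n", rx_urb_size);
	return 0;
}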
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 4671da755e7b..dd10d5817d2a 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -542,17 +542,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb) | |||
542 | } | 542 | } |
543 | // else network stack removes extra byte if we forced a short packet | 543 | // else network stack removes extra byte if we forced a short packet |
544 | 544 | ||
545 | if (skb->len) { | 545 | /* all data was already cloned from skb inside the driver */ |
546 | /* all data was already cloned from skb inside the driver */ | 546 | if (dev->driver_info->flags & FLAG_MULTI_PACKET) |
547 | if (dev->driver_info->flags & FLAG_MULTI_PACKET) | 547 | goto done; |
548 | dev_kfree_skb_any(skb); | 548 | |
549 | else | 549 | if (skb->len < ETH_HLEN) { |
550 | usbnet_skb_return(dev, skb); | 550 | dev->net->stats.rx_errors++; |
551 | dev->net->stats.rx_length_errors++; | ||
552 | netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); | ||
553 | } else { | ||
554 | usbnet_skb_return(dev, skb); | ||
551 | return; | 555 | return; |
552 | } | 556 | } |
553 | 557 | ||
554 | netif_dbg(dev, rx_err, dev->net, "drop\n"); | ||
555 | dev->net->stats.rx_errors++; | ||
556 | done: | 558 | done: |
557 | skb_queue_tail(&dev->done, skb); | 559 | skb_queue_tail(&dev->done, skb); |
558 | } | 560 | } |
@@ -574,13 +576,6 @@ static void rx_complete (struct urb *urb) | |||
574 | switch (urb_status) { | 576 | switch (urb_status) { |
575 | /* success */ | 577 | /* success */ |
576 | case 0: | 578 | case 0: |
577 | if (skb->len < dev->net->hard_header_len) { | ||
578 | state = rx_cleanup; | ||
579 | dev->net->stats.rx_errors++; | ||
580 | dev->net->stats.rx_length_errors++; | ||
581 | netif_dbg(dev, rx_err, dev->net, | ||
582 | "rx length %d\n", skb->len); | ||
583 | } | ||
584 | break; | 579 | break; |
585 | 580 | ||
586 | /* stalls need manual reset. this is rare ... except that | 581 | /* stalls need manual reset. this is rare ... except that |
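The usbnet change above drops the generic skb->len < hard_header_len check from rx_complete() and keeps only an ETH_HLEN sanity check for drivers without FLAG_MULTI_PACKET, so any rx_fixup() that parses its own framing must now reject short frames itself; that is what the per-driver hunks earlier in this merge add. A sketch of the guard a minidriver callback now needs ("my_rx_fixup" is hypothetical; the return convention is usbnet's, where 0 means drop the frame):

#include <linux/skbuff.h>
#include <linux/usb/usbnet.h>

static int my_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	/* This check is no longer done by usbnet */
	if (skb->len < dev->net->hard_header_len)
		return 0;	/* too short: have usbnet drop the frame */

	/* ... parse the driver-specific rx header here ... */
	return 1;		/* frame accepted */
}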
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index d6bc7cb61bfb..1a2973b7acf2 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c | |||
@@ -110,7 +110,7 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) | |||
110 | ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20)); | 110 | ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20)); |
111 | 111 | ||
112 | if (ah->ah_version == AR5K_AR5210) { | 112 | if (ah->ah_version == AR5K_AR5210) { |
113 | srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf; | 113 | srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf; |
114 | ret = (u16)ath5k_hw_bitswap(srev, 4) + 1; | 114 | ret = (u16)ath5k_hw_bitswap(srev, 4) + 1; |
115 | } else { | 115 | } else { |
116 | srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff; | 116 | srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff; |
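The ath5k one-liner is an operator-precedence fix: the shift was being applied to the register offset passed to the read (AR5K_PHY(256) >> 28) rather than to the value read back, so the wrong register was read and the revision bits were never shifted down. A stand-alone sketch of the difference (register map and returned value are made up):

#include <stdio.h>

#define PHY_BASE 0x9800
#define PHY(n)   (PHY_BASE + ((n) << 2))	/* stand-in for AR5K_PHY() */

static unsigned int reg_read(unsigned int addr)
{
	/* pretend the chip holds revision 3 in bits [31:28] of PHY(256) */
	return (addr == PHY(256)) ? 0x30000000u : 0;
}

int main(void)
{
	unsigned int wrong = reg_read(PHY(256) >> 28) & 0xf;   /* shifts the address */
	unsigned int right = (reg_read(PHY(256)) >> 28) & 0xf; /* shifts the value   */

	printf("wrong=%u right=%u\n", wrong, right);
	return 0;
}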
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c index aa7ad3a7a69b..4e5c0f8c9496 100644 --- a/drivers/net/wireless/hostap/hostap_proc.c +++ b/drivers/net/wireless/hostap/hostap_proc.c | |||
@@ -496,7 +496,7 @@ void hostap_init_proc(local_info_t *local) | |||
496 | 496 | ||
497 | void hostap_remove_proc(local_info_t *local) | 497 | void hostap_remove_proc(local_info_t *local) |
498 | { | 498 | { |
499 | remove_proc_subtree(local->ddev->name, hostap_proc); | 499 | proc_remove(local->proc); |
500 | } | 500 | } |
501 | 501 | ||
502 | 502 | ||
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index c24d1d3d55f6..73086c1629ca 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c | |||
@@ -696,6 +696,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
696 | return ret; | 696 | return ret; |
697 | } | 697 | } |
698 | 698 | ||
699 | static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) | ||
700 | { | ||
701 | if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) | ||
702 | return false; | ||
703 | return true; | ||
704 | } | ||
705 | |||
706 | static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) | ||
707 | { | ||
708 | if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) | ||
709 | return false; | ||
710 | if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) | ||
711 | return true; | ||
712 | |||
713 | /* disabled by default */ | ||
714 | return false; | ||
715 | } | ||
716 | |||
699 | static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, | 717 | static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, |
700 | struct ieee80211_vif *vif, | 718 | struct ieee80211_vif *vif, |
701 | enum ieee80211_ampdu_mlme_action action, | 719 | enum ieee80211_ampdu_mlme_action action, |
@@ -717,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, | |||
717 | 735 | ||
718 | switch (action) { | 736 | switch (action) { |
719 | case IEEE80211_AMPDU_RX_START: | 737 | case IEEE80211_AMPDU_RX_START: |
720 | if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) | 738 | if (!iwl_enable_rx_ampdu(priv->cfg)) |
721 | break; | 739 | break; |
722 | IWL_DEBUG_HT(priv, "start Rx\n"); | 740 | IWL_DEBUG_HT(priv, "start Rx\n"); |
723 | ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); | 741 | ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); |
@@ -729,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, | |||
729 | case IEEE80211_AMPDU_TX_START: | 747 | case IEEE80211_AMPDU_TX_START: |
730 | if (!priv->trans->ops->txq_enable) | 748 | if (!priv->trans->ops->txq_enable) |
731 | break; | 749 | break; |
732 | if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) | 750 | if (!iwl_enable_tx_ampdu(priv->cfg)) |
733 | break; | 751 | break; |
734 | IWL_DEBUG_HT(priv, "start Tx\n"); | 752 | IWL_DEBUG_HT(priv, "start Tx\n"); |
735 | ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); | 753 | ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index c3728163be46..75103554cd63 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c | |||
@@ -1286,7 +1286,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO); | |||
1286 | MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); | 1286 | MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); |
1287 | module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); | 1287 | module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); |
1288 | MODULE_PARM_DESC(11n_disable, | 1288 | MODULE_PARM_DESC(11n_disable, |
1289 | "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); | 1289 | "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); |
1290 | module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, | 1290 | module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, |
1291 | int, S_IRUGO); | 1291 | int, S_IRUGO); |
1292 | MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); | 1292 | MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h index 0a84ade7edac..b29075c3da8e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h | |||
@@ -79,9 +79,12 @@ enum iwl_power_level { | |||
79 | IWL_POWER_NUM | 79 | IWL_POWER_NUM |
80 | }; | 80 | }; |
81 | 81 | ||
82 | #define IWL_DISABLE_HT_ALL BIT(0) | 82 | enum iwl_disable_11n { |
83 | #define IWL_DISABLE_HT_TXAGG BIT(1) | 83 | IWL_DISABLE_HT_ALL = BIT(0), |
84 | #define IWL_DISABLE_HT_RXAGG BIT(2) | 84 | IWL_DISABLE_HT_TXAGG = BIT(1), |
85 | IWL_DISABLE_HT_RXAGG = BIT(2), | ||
86 | IWL_ENABLE_HT_TXAGG = BIT(3), | ||
87 | }; | ||
85 | 88 | ||
86 | /** | 89 | /** |
87 | * struct iwl_mod_params | 90 | * struct iwl_mod_params |
@@ -90,7 +93,7 @@ enum iwl_power_level { | |||
90 | * | 93 | * |
91 | * @sw_crypto: using hardware encryption, default = 0 | 94 | * @sw_crypto: using hardware encryption, default = 0 |
92 | * @disable_11n: disable 11n capabilities, default = 0, | 95 | * @disable_11n: disable 11n capabilities, default = 0, |
93 | * use IWL_DISABLE_HT_* constants | 96 | * use IWL_[DIS,EN]ABLE_HT_* constants |
94 | * @amsdu_size_8K: enable 8K amsdu size, default = 0 | 97 | * @amsdu_size_8K: enable 8K amsdu size, default = 0 |
95 | * @restart_fw: restart firmware, default = 1 | 98 | * @restart_fw: restart firmware, default = 1 |
96 | * @wd_disable: enable stuck queue check, default = 0 | 99 | * @wd_disable: enable stuck queue check, default = 0 |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 6bf9766e5982..c35b8661b395 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -328,6 +328,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, | |||
328 | ieee80211_free_txskb(hw, skb); | 328 | ieee80211_free_txskb(hw, skb); |
329 | } | 329 | } |
330 | 330 | ||
331 | static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) | ||
332 | { | ||
333 | if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) | ||
334 | return false; | ||
335 | return true; | ||
336 | } | ||
337 | |||
338 | static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) | ||
339 | { | ||
340 | if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) | ||
341 | return false; | ||
342 | if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) | ||
343 | return true; | ||
344 | |||
345 | /* enabled by default */ | ||
346 | return true; | ||
347 | } | ||
348 | |||
331 | static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, | 349 | static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, |
332 | struct ieee80211_vif *vif, | 350 | struct ieee80211_vif *vif, |
333 | enum ieee80211_ampdu_mlme_action action, | 351 | enum ieee80211_ampdu_mlme_action action, |
@@ -347,7 +365,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, | |||
347 | 365 | ||
348 | switch (action) { | 366 | switch (action) { |
349 | case IEEE80211_AMPDU_RX_START: | 367 | case IEEE80211_AMPDU_RX_START: |
350 | if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) { | 368 | if (!iwl_enable_rx_ampdu(mvm->cfg)) { |
351 | ret = -EINVAL; | 369 | ret = -EINVAL; |
352 | break; | 370 | break; |
353 | } | 371 | } |
@@ -357,7 +375,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, | |||
357 | ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); | 375 | ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); |
358 | break; | 376 | break; |
359 | case IEEE80211_AMPDU_TX_START: | 377 | case IEEE80211_AMPDU_TX_START: |
360 | if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) { | 378 | if (!iwl_enable_tx_ampdu(mvm->cfg)) { |
361 | ret = -EINVAL; | 379 | ret = -EINVAL; |
362 | break; | 380 | break; |
363 | } | 381 | } |
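Taken together, the iwlwifi hunks turn the 11n_disable module parameter into a small truth table: bit 1 always disables TX aggregation, while the new bit 3 opts back in on dvm hardware, where TX aggregation is now off by default; on mvm it stays on by default. A simplified stand-alone sketch of the two helpers (bit values copied from the new enum, function names hypothetical):

#include <stdio.h>
#include <stdbool.h>

#define HT_TXAGG_OFF (1u << 1)	/* IWL_DISABLE_HT_TXAGG */
#define HT_TXAGG_ON  (1u << 3)	/* IWL_ENABLE_HT_TXAGG  */

static bool dvm_tx_ampdu(unsigned int disable_11n)	/* off by default */
{
	if (disable_11n & HT_TXAGG_OFF)
		return false;
	return disable_11n & HT_TXAGG_ON;
}

static bool mvm_tx_ampdu(unsigned int disable_11n)	/* on by default */
{
	return !(disable_11n & HT_TXAGG_OFF);
}

int main(void)
{
	unsigned int vals[] = { 0, 2, 8 };	/* 11n_disable=0, 2, 8 */

	for (int i = 0; i < 3; i++)
		printf("11n_disable=%u  dvm:%d  mvm:%d\n",
		       vals[i], dvm_tx_ampdu(vals[i]), mvm_tx_ampdu(vals[i]));
	return 0;
}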
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 4d79761b9c87..9d3d2758ec35 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c | |||
@@ -748,7 +748,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) | |||
748 | 748 | ||
749 | static u16 | 749 | static u16 |
750 | mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, | 750 | mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, |
751 | void *accel_priv) | 751 | void *accel_priv, select_queue_fallback_t fallback) |
752 | { | 752 | { |
753 | skb->priority = cfg80211_classify8021d(skb, NULL); | 753 | skb->priority = cfg80211_classify8021d(skb, NULL); |
754 | return mwifiex_1d_to_wmm_queue[skb->priority]; | 754 | return mwifiex_1d_to_wmm_queue[skb->priority]; |
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h index 56aee067f324..a6ad79f61bf9 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h +++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #ifndef RTL8187_H | 15 | #ifndef RTL8187_H |
16 | #define RTL8187_H | 16 | #define RTL8187_H |
17 | 17 | ||
18 | #include <linux/cache.h> | ||
19 | |||
18 | #include "rtl818x.h" | 20 | #include "rtl818x.h" |
19 | #include "leds.h" | 21 | #include "leds.h" |
20 | 22 | ||
@@ -139,7 +141,10 @@ struct rtl8187_priv { | |||
139 | u8 aifsn[4]; | 141 | u8 aifsn[4]; |
140 | u8 rfkill_mask; | 142 | u8 rfkill_mask; |
141 | struct { | 143 | struct { |
142 | __le64 buf; | 144 | union { |
145 | __le64 buf; | ||
146 | u8 dummy1[L1_CACHE_BYTES]; | ||
147 | } ____cacheline_aligned; | ||
143 | struct sk_buff_head queue; | 148 | struct sk_buff_head queue; |
144 | } b_tx_status; /* This queue is used by both -b and non-b devices */ | 149 | } b_tx_status; /* This queue is used by both -b and non-b devices */ |
145 | struct mutex io_mutex; | 150 | struct mutex io_mutex; |
@@ -147,7 +152,8 @@ struct rtl8187_priv { | |||
147 | u8 bits8; | 152 | u8 bits8; |
148 | __le16 bits16; | 153 | __le16 bits16; |
149 | __le32 bits32; | 154 | __le32 bits32; |
150 | } *io_dmabuf; | 155 | u8 dummy2[L1_CACHE_BYTES]; |
156 | } *io_dmabuf ____cacheline_aligned; | ||
151 | bool rfkill_off; | 157 | bool rfkill_off; |
152 | u16 seqno; | 158 | u16 seqno; |
153 | }; | 159 | }; |
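The rtl8187 change pads each USB I/O buffer out to a full cache line (and aligns it) so that no other field of the private struct shares a cache line with memory handed to the USB/DMA layer; on non-coherent platforms such sharing can corrupt neighbouring fields during cache maintenance. A minimal kernel-style sketch of the idiom (struct and field names are hypothetical):

#include <linux/cache.h>
#include <linux/types.h>

struct my_priv {
	struct {
		union {
			__le64 buf;		/* DMA-able scratch word  */
			u8 pad[L1_CACHE_BYTES];	/* fill the cache line    */
		} ____cacheline_aligned;
		int users;	/* guaranteed to start on the next line */
	} scratch;
};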
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index deedae3c5449..d1c0191a195b 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c | |||
@@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw) | |||
48 | 48 | ||
49 | /*<2> Enable Adapter */ | 49 | /*<2> Enable Adapter */ |
50 | if (rtlpriv->cfg->ops->hw_init(hw)) | 50 | if (rtlpriv->cfg->ops->hw_init(hw)) |
51 | return 1; | 51 | return false; |
52 | RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); | 52 | RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); |
53 | 53 | ||
54 | /*<3> Enable Interrupt */ | 54 | /*<3> Enable Interrupt */ |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index a82b30a1996c..2eb0b38384dd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | |||
@@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw) | |||
937 | bool is92c; | 937 | bool is92c; |
938 | int err; | 938 | int err; |
939 | u8 tmp_u1b; | 939 | u8 tmp_u1b; |
940 | unsigned long flags; | ||
940 | 941 | ||
941 | rtlpci->being_init_adapter = true; | 942 | rtlpci->being_init_adapter = true; |
943 | |||
944 | /* Since this function can take a very long time (up to 350 ms) | ||
945 | * and can be called with irqs disabled, reenable the irqs | ||
946 | * to let the other devices continue being serviced. | ||
947 | * | ||
948 | * It is safe doing so since our own interrupts will only be enabled | ||
949 | * in a subsequent step. | ||
950 | */ | ||
951 | local_save_flags(flags); | ||
952 | local_irq_enable(); | ||
953 | |||
942 | rtlpriv->intf_ops->disable_aspm(hw); | 954 | rtlpriv->intf_ops->disable_aspm(hw); |
943 | rtstatus = _rtl92ce_init_mac(hw); | 955 | rtstatus = _rtl92ce_init_mac(hw); |
944 | if (!rtstatus) { | 956 | if (!rtstatus) { |
945 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); | 957 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); |
946 | err = 1; | 958 | err = 1; |
947 | return err; | 959 | goto exit; |
948 | } | 960 | } |
949 | 961 | ||
950 | err = rtl92c_download_fw(hw); | 962 | err = rtl92c_download_fw(hw); |
@@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw) | |||
952 | RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, | 964 | RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, |
953 | "Failed to download FW. Init HW without FW now..\n"); | 965 | "Failed to download FW. Init HW without FW now..\n"); |
954 | err = 1; | 966 | err = 1; |
955 | return err; | 967 | goto exit; |
956 | } | 968 | } |
957 | 969 | ||
958 | rtlhal->last_hmeboxnum = 0; | 970 | rtlhal->last_hmeboxnum = 0; |
@@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw) | |||
1032 | RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); | 1044 | RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); |
1033 | } | 1045 | } |
1034 | rtl92c_dm_init(hw); | 1046 | rtl92c_dm_init(hw); |
1047 | exit: | ||
1048 | local_irq_restore(flags); | ||
1035 | rtlpci->being_init_adapter = false; | 1049 | rtlpci->being_init_adapter = false; |
1036 | return err; | 1050 | return err; |
1037 | } | 1051 | } |
diff --git a/drivers/of/address.c b/drivers/of/address.c index d3dd41c840f1..1a54f1ffaadb 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c | |||
@@ -99,11 +99,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr) | |||
99 | static int of_bus_pci_match(struct device_node *np) | 99 | static int of_bus_pci_match(struct device_node *np) |
100 | { | 100 | { |
101 | /* | 101 | /* |
102 | * "pciex" is PCI Express | ||
102 | * "vci" is for the /chaos bridge on 1st-gen PCI powermacs | 103 | * "vci" is for the /chaos bridge on 1st-gen PCI powermacs |
103 | * "ht" is hypertransport | 104 | * "ht" is hypertransport |
104 | */ | 105 | */ |
105 | return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") || | 106 | return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") || |
106 | !strcmp(np->type, "ht"); | 107 | !strcmp(np->type, "vci") || !strcmp(np->type, "ht"); |
107 | } | 108 | } |
108 | 109 | ||
109 | static void of_bus_pci_count_cells(struct device_node *np, | 110 | static void of_bus_pci_count_cells(struct device_node *np, |
diff --git a/drivers/of/base.c b/drivers/of/base.c index ff85450d5683..10b51106c854 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -730,46 +730,64 @@ out: | |||
730 | } | 730 | } |
731 | EXPORT_SYMBOL(of_find_node_with_property); | 731 | EXPORT_SYMBOL(of_find_node_with_property); |
732 | 732 | ||
733 | static | 733 | static const struct of_device_id * |
734 | const struct of_device_id *__of_match_node(const struct of_device_id *matches, | 734 | of_match_compatible(const struct of_device_id *matches, |
735 | const struct device_node *node) | 735 | const struct device_node *node) |
736 | { | 736 | { |
737 | const char *cp; | 737 | const char *cp; |
738 | int cplen, l; | 738 | int cplen, l; |
739 | 739 | const struct of_device_id *m; | |
740 | if (!matches) | ||
741 | return NULL; | ||
742 | 740 | ||
743 | cp = __of_get_property(node, "compatible", &cplen); | 741 | cp = __of_get_property(node, "compatible", &cplen); |
744 | do { | 742 | while (cp && (cplen > 0)) { |
745 | const struct of_device_id *m = matches; | 743 | m = matches; |
746 | |||
747 | /* Check against matches with current compatible string */ | ||
748 | while (m->name[0] || m->type[0] || m->compatible[0]) { | 744 | while (m->name[0] || m->type[0] || m->compatible[0]) { |
749 | int match = 1; | 745 | /* Only match for the entries without type and name */ |
750 | if (m->name[0]) | 746 | if (m->name[0] || m->type[0] || |
751 | match &= node->name | 747 | of_compat_cmp(m->compatible, cp, |
752 | && !strcmp(m->name, node->name); | 748 | strlen(m->compatible))) |
753 | if (m->type[0]) | 749 | m++; |
754 | match &= node->type | 750 | else |
755 | && !strcmp(m->type, node->type); | ||
756 | if (m->compatible[0]) | ||
757 | match &= cp | ||
758 | && !of_compat_cmp(m->compatible, cp, | ||
759 | strlen(m->compatible)); | ||
760 | if (match) | ||
761 | return m; | 751 | return m; |
762 | m++; | ||
763 | } | 752 | } |
764 | 753 | ||
765 | /* Get node's next compatible string */ | 754 | /* Get node's next compatible string */ |
766 | if (cp) { | 755 | l = strlen(cp) + 1; |
767 | l = strlen(cp) + 1; | 756 | cp += l; |
768 | cp += l; | 757 | cplen -= l; |
769 | cplen -= l; | 758 | } |
770 | } | 759 | |
771 | } while (cp && (cplen > 0)); | 760 | return NULL; |
761 | } | ||
762 | |||
763 | static | ||
764 | const struct of_device_id *__of_match_node(const struct of_device_id *matches, | ||
765 | const struct device_node *node) | ||
766 | { | ||
767 | const struct of_device_id *m; | ||
772 | 768 | ||
769 | if (!matches) | ||
770 | return NULL; | ||
771 | |||
772 | m = of_match_compatible(matches, node); | ||
773 | if (m) | ||
774 | return m; | ||
775 | |||
776 | while (matches->name[0] || matches->type[0] || matches->compatible[0]) { | ||
777 | int match = 1; | ||
778 | if (matches->name[0]) | ||
779 | match &= node->name | ||
780 | && !strcmp(matches->name, node->name); | ||
781 | if (matches->type[0]) | ||
782 | match &= node->type | ||
783 | && !strcmp(matches->type, node->type); | ||
784 | if (matches->compatible[0]) | ||
785 | match &= __of_device_is_compatible(node, | ||
786 | matches->compatible); | ||
787 | if (match) | ||
788 | return matches; | ||
789 | matches++; | ||
790 | } | ||
773 | return NULL; | 791 | return NULL; |
774 | } | 792 | } |
775 | 793 | ||
@@ -778,10 +796,12 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches, | |||
778 | * @matches: array of of device match structures to search in | 796 | * @matches: array of of device match structures to search in |
779 | * @node: the of device structure to match against | 797 | * @node: the of device structure to match against |
780 | * | 798 | * |
781 | * Low level utility function used by device matching. Matching order | 799 | * Low level utility function used by device matching. We have two ways |
782 | * is to compare each of the node's compatibles with all given matches | 800 | * of matching: |
783 | * first. This implies node's compatible is sorted from specific to | 801 | * - Try to find the best compatible match by comparing each compatible |
784 | * generic while matches can be in any order. | 802 | * string of the device node with all the given matches, in turn. |
803 | * - If the above method fails, then try to match the compatible by using | ||
804 | * __of_device_is_compatible() besides the match in type and name. | ||
785 | */ | 805 | */ |
786 | const struct of_device_id *of_match_node(const struct of_device_id *matches, | 806 | const struct of_device_id *of_match_node(const struct of_device_id *matches, |
787 | const struct device_node *node) | 807 | const struct device_node *node) |
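The of_match_node() rework above matches in two passes: first each of the node's "compatible" strings, in the order the node lists them (conventionally most specific first), against compatible-only table entries; only if that finds nothing does it fall back to the old name/type/compatible test. A plain-C toy model of that order; the struct layout and helpers are simplified stand-ins, not the kernel's:

```c
#include <stdio.h>
#include <string.h>

struct match { const char *name, *type, *compatible; };

static const struct match *match_node(const struct match *m, size_t n,
                                      const char *node_name, const char *node_type,
                                      const char *const *compat, size_t ncompat)
{
    /* Pass 1: walk the node's compatible strings in order and look for a
     * table entry that matches by compatible alone. */
    for (size_t c = 0; c < ncompat; c++)
        for (size_t i = 0; i < n; i++)
            if (!m[i].name && !m[i].type && m[i].compatible &&
                !strcmp(m[i].compatible, compat[c]))
                return &m[i];

    /* Pass 2: original behaviour -- name, type and compatible all considered. */
    for (size_t i = 0; i < n; i++) {
        int ok = 1;

        if (m[i].name)
            ok &= node_name && !strcmp(m[i].name, node_name);
        if (m[i].type)
            ok &= node_type && !strcmp(m[i].type, node_type);
        if (m[i].compatible) {
            int hit = 0;

            for (size_t c = 0; c < ncompat; c++)
                hit |= !strcmp(m[i].compatible, compat[c]);
            ok &= hit;
        }
        if (ok)
            return &m[i];
    }
    return NULL;
}

int main(void)
{
    static const struct match table[] = {
        { NULL, NULL, "vendor,generic-ip" },
        { NULL, NULL, "vendor,soc-specific-ip" },
    };
    const char *compat[] = { "vendor,soc-specific-ip", "vendor,generic-ip" };
    const struct match *m = match_node(table, 2, "eth0", NULL, compat, 2);

    printf("matched: %s\n", m ? m->compatible : "(none)");
    return 0;
}
```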
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 875b7b6f0d2a..5b3c24f3cde5 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c | |||
@@ -24,7 +24,11 @@ MODULE_LICENSE("GPL"); | |||
24 | 24 | ||
25 | static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed) | 25 | static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed) |
26 | { | 26 | { |
27 | phydev->supported |= PHY_DEFAULT_FEATURES; | 27 | /* The default values for phydev->supported are provided by the PHY |
28 | * driver "features" member; we want to reset to sane defaults first | ||
29 | * before supporting higher speeds. | ||
30 | */ | ||
31 | phydev->supported &= PHY_DEFAULT_FEATURES; | ||
28 | 32 | ||
29 | switch (max_speed) { | 33 | switch (max_speed) { |
30 | default: | 34 | default: |
@@ -44,7 +48,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi | |||
44 | { | 48 | { |
45 | struct phy_device *phy; | 49 | struct phy_device *phy; |
46 | bool is_c45; | 50 | bool is_c45; |
47 | int rc, prev_irq; | 51 | int rc; |
48 | u32 max_speed = 0; | 52 | u32 max_speed = 0; |
49 | 53 | ||
50 | is_c45 = of_device_is_compatible(child, | 54 | is_c45 = of_device_is_compatible(child, |
@@ -54,12 +58,14 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi | |||
54 | if (!phy || IS_ERR(phy)) | 58 | if (!phy || IS_ERR(phy)) |
55 | return 1; | 59 | return 1; |
56 | 60 | ||
57 | if (mdio->irq) { | 61 | rc = irq_of_parse_and_map(child, 0); |
58 | prev_irq = mdio->irq[addr]; | 62 | if (rc > 0) { |
59 | mdio->irq[addr] = | 63 | phy->irq = rc; |
60 | irq_of_parse_and_map(child, 0); | 64 | if (mdio->irq) |
61 | if (!mdio->irq[addr]) | 65 | mdio->irq[addr] = rc; |
62 | mdio->irq[addr] = prev_irq; | 66 | } else { |
67 | if (mdio->irq) | ||
68 | phy->irq = mdio->irq[addr]; | ||
63 | } | 69 | } |
64 | 70 | ||
65 | /* Associate the OF node with the device structure so it | 71 | /* Associate the OF node with the device structure so it |
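The of_mdiobus_register_phy() hunk above changes where a PHY's interrupt comes from: a mapping found on the child DT node wins (and is also written back to the bus table); otherwise the PHY inherits the bus-wide entry for its address. A small stand-alone model of that fallback order; parse_child_irq() is an invented stand-in for irq_of_parse_and_map():

```c
#include <stdio.h>

#define PHY_POLL (-1)
#define PHY_MAX_ADDR 32

static int parse_child_irq(void) { return 0; }   /* 0 means "no DT mapping found" */

int main(void)
{
    int bus_irq[PHY_MAX_ADDR];
    int addr = 3, phy_irq;

    for (int i = 0; i < PHY_MAX_ADDR; i++)
        bus_irq[i] = PHY_POLL;
    bus_irq[addr] = 17;                  /* bus-level default for this address */

    int rc = parse_child_irq();
    if (rc > 0) {
        phy_irq = rc;                    /* per-node mapping wins ...          */
        bus_irq[addr] = rc;              /* ... and is recorded on the bus too */
    } else {
        phy_irq = bus_irq[addr];         /* fall back to the bus table         */
    }

    printf("phy irq = %d\n", phy_irq);
    return 0;
}
```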
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index e2a783fdb98f..7c7a388c85ab 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -730,6 +730,17 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot) | |||
730 | return (unsigned int)sta; | 730 | return (unsigned int)sta; |
731 | } | 731 | } |
732 | 732 | ||
733 | static inline bool device_status_valid(unsigned int sta) | ||
734 | { | ||
735 | /* | ||
736 | * ACPI spec says that _STA may return bit 0 clear with bit 3 set | ||
737 | * if the device is valid but does not require a device driver to be | ||
738 | * loaded (Section 6.3.7 of ACPI 5.0A). | ||
739 | */ | ||
740 | unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING; | ||
741 | return (sta & mask) == mask; | ||
742 | } | ||
743 | |||
733 | /** | 744 | /** |
734 | * trim_stale_devices - remove PCI devices that are not responding. | 745 | * trim_stale_devices - remove PCI devices that are not responding. |
735 | * @dev: PCI device to start walking the hierarchy from. | 746 | * @dev: PCI device to start walking the hierarchy from. |
@@ -745,7 +756,7 @@ static void trim_stale_devices(struct pci_dev *dev) | |||
745 | unsigned long long sta; | 756 | unsigned long long sta; |
746 | 757 | ||
747 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); | 758 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); |
748 | alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL) | 759 | alive = (ACPI_SUCCESS(status) && device_status_valid(sta)) |
749 | || acpiphp_no_hotplug(handle); | 760 | || acpiphp_no_hotplug(handle); |
750 | } | 761 | } |
751 | if (!alive) { | 762 | if (!alive) { |
@@ -792,7 +803,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) | |||
792 | mutex_lock(&slot->crit_sect); | 803 | mutex_lock(&slot->crit_sect); |
793 | if (slot_no_hotplug(slot)) { | 804 | if (slot_no_hotplug(slot)) { |
794 | ; /* do nothing */ | 805 | ; /* do nothing */ |
795 | } else if (get_slot_status(slot) == ACPI_STA_ALL) { | 806 | } else if (device_status_valid(get_slot_status(slot))) { |
796 | /* remove stale devices if any */ | 807 | /* remove stale devices if any */ |
797 | list_for_each_entry_safe_reverse(dev, tmp, | 808 | list_for_each_entry_safe_reverse(dev, tmp, |
798 | &bus->devices, bus_list) | 809 | &bus->devices, bus_list) |
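device_status_valid() above relaxes the old `sta == ACPI_STA_ALL` test to a mask check, so a device reporting itself enabled and functioning is kept even if other _STA bits are clear. A self-contained reproduction with the standard _STA bit values, exercising the case called out in the hunk's comment (bit 0 clear, bit 3 set):

```c
#include <stdbool.h>
#include <stdio.h>

#define ACPI_STA_DEVICE_PRESENT     0x01
#define ACPI_STA_DEVICE_ENABLED     0x02
#define ACPI_STA_DEVICE_UI          0x04
#define ACPI_STA_DEVICE_FUNCTIONING 0x08
#define ACPI_STA_ALL                0x0F

static bool device_status_valid(unsigned int sta)
{
    unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;

    return (sta & mask) == mask;
}

int main(void)
{
    /* Enabled and functioning, but "present" (bit 0) clear. */
    unsigned int sta = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;

    printf("old test (sta == ACPI_STA_ALL): %d\n", sta == ACPI_STA_ALL);
    printf("new test (device_status_valid): %d\n", device_status_valid(sta));
    return 0;
}
```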
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 645c867c1257..5f5b0f4be5be 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c | |||
@@ -162,6 +162,9 @@ int phy_init(struct phy *phy) | |||
162 | { | 162 | { |
163 | int ret; | 163 | int ret; |
164 | 164 | ||
165 | if (!phy) | ||
166 | return 0; | ||
167 | |||
165 | ret = phy_pm_runtime_get_sync(phy); | 168 | ret = phy_pm_runtime_get_sync(phy); |
166 | if (ret < 0 && ret != -ENOTSUPP) | 169 | if (ret < 0 && ret != -ENOTSUPP) |
167 | return ret; | 170 | return ret; |
@@ -187,6 +190,9 @@ int phy_exit(struct phy *phy) | |||
187 | { | 190 | { |
188 | int ret; | 191 | int ret; |
189 | 192 | ||
193 | if (!phy) | ||
194 | return 0; | ||
195 | |||
190 | ret = phy_pm_runtime_get_sync(phy); | 196 | ret = phy_pm_runtime_get_sync(phy); |
191 | if (ret < 0 && ret != -ENOTSUPP) | 197 | if (ret < 0 && ret != -ENOTSUPP) |
192 | return ret; | 198 | return ret; |
@@ -212,6 +218,9 @@ int phy_power_on(struct phy *phy) | |||
212 | { | 218 | { |
213 | int ret; | 219 | int ret; |
214 | 220 | ||
221 | if (!phy) | ||
222 | return 0; | ||
223 | |||
215 | ret = phy_pm_runtime_get_sync(phy); | 224 | ret = phy_pm_runtime_get_sync(phy); |
216 | if (ret < 0 && ret != -ENOTSUPP) | 225 | if (ret < 0 && ret != -ENOTSUPP) |
217 | return ret; | 226 | return ret; |
@@ -240,6 +249,9 @@ int phy_power_off(struct phy *phy) | |||
240 | { | 249 | { |
241 | int ret; | 250 | int ret; |
242 | 251 | ||
252 | if (!phy) | ||
253 | return 0; | ||
254 | |||
243 | mutex_lock(&phy->mutex); | 255 | mutex_lock(&phy->mutex); |
244 | if (phy->power_count == 1 && phy->ops->power_off) { | 256 | if (phy->power_count == 1 && phy->ops->power_off) { |
245 | ret = phy->ops->power_off(phy); | 257 | ret = phy->ops->power_off(phy); |
@@ -308,7 +320,7 @@ err0: | |||
308 | */ | 320 | */ |
309 | void phy_put(struct phy *phy) | 321 | void phy_put(struct phy *phy) |
310 | { | 322 | { |
311 | if (IS_ERR(phy)) | 323 | if (!phy || IS_ERR(phy)) |
312 | return; | 324 | return; |
313 | 325 | ||
314 | module_put(phy->ops->owner); | 326 | module_put(phy->ops->owner); |
@@ -328,6 +340,9 @@ void devm_phy_put(struct device *dev, struct phy *phy) | |||
328 | { | 340 | { |
329 | int r; | 341 | int r; |
330 | 342 | ||
343 | if (!phy) | ||
344 | return; | ||
345 | |||
331 | r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy); | 346 | r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy); |
332 | dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); | 347 | dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); |
333 | } | 348 | } |
@@ -411,6 +426,27 @@ struct phy *phy_get(struct device *dev, const char *string) | |||
411 | EXPORT_SYMBOL_GPL(phy_get); | 426 | EXPORT_SYMBOL_GPL(phy_get); |
412 | 427 | ||
413 | /** | 428 | /** |
429 | * phy_optional_get() - lookup and obtain a reference to an optional phy. | ||
430 | * @dev: device that requests this phy | ||
431 | * @string: the phy name as given in the dt data or the name of the controller | ||
432 | * port for non-dt case | ||
433 | * | ||
434 | * Returns the phy driver, after getting a refcount to it; or | ||
435 | * NULL if there is no such phy. The caller is responsible for | ||
436 | * calling phy_put() to release that count. | ||
437 | */ | ||
438 | struct phy *phy_optional_get(struct device *dev, const char *string) | ||
439 | { | ||
440 | struct phy *phy = phy_get(dev, string); | ||
441 | |||
442 | if (PTR_ERR(phy) == -ENODEV) | ||
443 | phy = NULL; | ||
444 | |||
445 | return phy; | ||
446 | } | ||
447 | EXPORT_SYMBOL_GPL(phy_optional_get); | ||
448 | |||
449 | /** | ||
414 | * devm_phy_get() - lookup and obtain a reference to a phy. | 450 | * devm_phy_get() - lookup and obtain a reference to a phy. |
415 | * @dev: device that requests this phy | 451 | * @dev: device that requests this phy |
416 | * @string: the phy name as given in the dt data or phy device name | 452 | * @string: the phy name as given in the dt data or phy device name |
@@ -441,6 +477,30 @@ struct phy *devm_phy_get(struct device *dev, const char *string) | |||
441 | EXPORT_SYMBOL_GPL(devm_phy_get); | 477 | EXPORT_SYMBOL_GPL(devm_phy_get); |
442 | 478 | ||
443 | /** | 479 | /** |
480 | * devm_phy_optional_get() - lookup and obtain a reference to an optional phy. | ||
481 | * @dev: device that requests this phy | ||
482 | * @string: the phy name as given in the dt data or phy device name | ||
483 | * for non-dt case | ||
484 | * | ||
485 | * Gets the phy using phy_get(), and associates a device with it using | ||
486 | * devres. On driver detach, release function is invoked on the devres | ||
487 | * data, then the devres data is freed. This differs from devm_phy_get() in | ||
488 | * that if the phy does not exist, it is not considered an error and | ||
489 | * -ENODEV will not be returned. Instead the NULL phy is returned, | ||
490 | * which can be passed to all other phy consumer calls. | ||
491 | */ | ||
492 | struct phy *devm_phy_optional_get(struct device *dev, const char *string) | ||
493 | { | ||
494 | struct phy *phy = devm_phy_get(dev, string); | ||
495 | |||
496 | if (PTR_ERR(phy) == -ENODEV) | ||
497 | phy = NULL; | ||
498 | |||
499 | return phy; | ||
500 | } | ||
501 | EXPORT_SYMBOL_GPL(devm_phy_optional_get); | ||
502 | |||
503 | /** | ||
444 | * phy_create() - create a new phy | 504 | * phy_create() - create a new phy |
445 | * @dev: device that is creating the new phy | 505 | * @dev: device that is creating the new phy |
446 | * @ops: function pointers for performing phy operations | 506 | * @ops: function pointers for performing phy operations |
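phy_optional_get()/devm_phy_optional_get() above turn a missing PHY (-ENODEV) into a NULL handle, and the companion hunks make phy_init(), phy_exit(), phy_power_on() and phy_power_off() accept NULL and return 0. A schematic consumer probe path built on those semantics; kernel context only, not a standalone program, and the helper name plus the "usbphy" lookup string are placeholders:

```c
#include <linux/err.h>
#include <linux/phy/phy.h>

static int example_enable_optional_phy(struct device *dev)
{
    struct phy *phy;
    int ret;

    phy = devm_phy_optional_get(dev, "usbphy");
    if (IS_ERR(phy))                /* real lookup errors still propagate */
        return PTR_ERR(phy);

    ret = phy_init(phy);            /* no-op returning 0 when phy == NULL */
    if (ret)
        return ret;

    ret = phy_power_on(phy);
    if (ret) {
        phy_exit(phy);
        return ret;
    }

    return 0;                       /* no "if (phy)" checks needed in the driver */
}
```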
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c index 563174891c90..041f9b638d28 100644 --- a/drivers/power/ds2782_battery.c +++ b/drivers/power/ds2782_battery.c | |||
@@ -192,7 +192,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV) | |||
192 | 192 | ||
193 | /* | 193 | /* |
194 | * Voltage is measured in units of 1.22mV. The voltage is stored as | 194 | * Voltage is measured in units of 1.22mV. The voltage is stored as |
195 | * a 10-bit number plus sign, in the upper bits of a 16-bit register | 195 | * a 12-bit number plus sign, in the upper bits of a 16-bit register |
196 | */ | 196 | */ |
197 | err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw); | 197 | err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw); |
198 | if (err) | 198 | if (err) |
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c index 80edb7d8cb54..0b4cf9d63291 100644 --- a/drivers/power/isp1704_charger.c +++ b/drivers/power/isp1704_charger.c | |||
@@ -444,8 +444,6 @@ static int isp1704_charger_probe(struct platform_device *pdev) | |||
444 | ret = PTR_ERR(isp->phy); | 444 | ret = PTR_ERR(isp->phy); |
445 | goto fail0; | 445 | goto fail0; |
446 | } | 446 | } |
447 | if (!isp->phy) | ||
448 | goto fail0; | ||
449 | 447 | ||
450 | isp->dev = &pdev->dev; | 448 | isp->dev = &pdev->dev; |
451 | platform_set_drvdata(pdev, isp); | 449 | platform_set_drvdata(pdev, isp); |
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c index c7ff6d67f158..0fbac861080d 100644 --- a/drivers/power/max17040_battery.c +++ b/drivers/power/max17040_battery.c | |||
@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client) | |||
148 | { | 148 | { |
149 | struct max17040_chip *chip = i2c_get_clientdata(client); | 149 | struct max17040_chip *chip = i2c_get_clientdata(client); |
150 | 150 | ||
151 | if (chip->pdata->battery_online) | 151 | if (chip->pdata && chip->pdata->battery_online) |
152 | chip->online = chip->pdata->battery_online(); | 152 | chip->online = chip->pdata->battery_online(); |
153 | else | 153 | else |
154 | chip->online = 1; | 154 | chip->online = 1; |
@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client) | |||
158 | { | 158 | { |
159 | struct max17040_chip *chip = i2c_get_clientdata(client); | 159 | struct max17040_chip *chip = i2c_get_clientdata(client); |
160 | 160 | ||
161 | if (!chip->pdata->charger_online || !chip->pdata->charger_enable) { | 161 | if (!chip->pdata || !chip->pdata->charger_online |
162 | || !chip->pdata->charger_enable) { | ||
162 | chip->status = POWER_SUPPLY_STATUS_UNKNOWN; | 163 | chip->status = POWER_SUPPLY_STATUS_UNKNOWN; |
163 | return; | 164 | return; |
164 | } | 165 | } |
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c index 7f340206d329..b14ebdad5dd2 100644 --- a/drivers/regulator/da9055-regulator.c +++ b/drivers/regulator/da9055-regulator.c | |||
@@ -576,7 +576,9 @@ static int da9055_regulator_probe(struct platform_device *pdev) | |||
576 | /* Only LDO 5 and 6 has got the over current interrupt */ | 576 | /* Only LDO 5 and 6 has got the over current interrupt */ |
577 | if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) { | 577 | if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) { |
578 | irq = platform_get_irq_byname(pdev, "REGULATOR"); | 578 | irq = platform_get_irq_byname(pdev, "REGULATOR"); |
579 | irq = regmap_irq_get_virq(da9055->irq_data, irq); | 579 | if (irq < 0) |
580 | return irq; | ||
581 | |||
580 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, | 582 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, |
581 | da9055_ldo5_6_oc_irq, | 583 | da9055_ldo5_6_oc_irq, |
582 | IRQF_TRIGGER_HIGH | | 584 | IRQF_TRIGGER_HIGH | |
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c index b1078ba3f393..186df8785a91 100644 --- a/drivers/regulator/max14577.c +++ b/drivers/regulator/max14577.c | |||
@@ -168,10 +168,11 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) | |||
168 | MAX14577_REG_MAX); | 168 | MAX14577_REG_MAX); |
169 | if (ret < 0) { | 169 | if (ret < 0) { |
170 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); | 170 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); |
171 | return ret; | ||
172 | } | 171 | } |
173 | 172 | ||
174 | return 0; | 173 | of_node_put(np); |
174 | |||
175 | return ret; | ||
175 | } | 176 | } |
176 | 177 | ||
177 | static inline struct regulator_init_data *match_init_data(int index) | 178 | static inline struct regulator_init_data *match_init_data(int index) |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 9e80d61e5a3a..2eb97d7e8d12 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -2595,8 +2595,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, | |||
2595 | return -ENOMEM; | 2595 | return -ENOMEM; |
2596 | } | 2596 | } |
2597 | 2597 | ||
2598 | INIT_LIST_HEAD(&cmd->cmd_list); | ||
2599 | |||
2600 | memcpy(&cmd->atio, atio, sizeof(*atio)); | 2598 | memcpy(&cmd->atio, atio, sizeof(*atio)); |
2601 | cmd->state = QLA_TGT_STATE_NEW; | 2599 | cmd->state = QLA_TGT_STATE_NEW; |
2602 | cmd->tgt = vha->vha_tgt.qla_tgt; | 2600 | cmd->tgt = vha->vha_tgt.qla_tgt; |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 1d10eecad499..66e755cdde57 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
@@ -855,7 +855,6 @@ struct qla_tgt_cmd { | |||
855 | uint16_t loop_id; /* to save extra sess dereferences */ | 855 | uint16_t loop_id; /* to save extra sess dereferences */ |
856 | struct qla_tgt *tgt; /* to save extra sess dereferences */ | 856 | struct qla_tgt *tgt; /* to save extra sess dereferences */ |
857 | struct scsi_qla_host *vha; | 857 | struct scsi_qla_host *vha; |
858 | struct list_head cmd_list; | ||
859 | 858 | ||
860 | struct atio_from_isp atio; | 859 | struct atio_from_isp atio; |
861 | }; | 860 | }; |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 7bd7f0d5f050..62ec84b42e31 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) | |||
1684 | 1684 | ||
1685 | host_dev = scsi_get_device(shost); | 1685 | host_dev = scsi_get_device(shost); |
1686 | if (host_dev && host_dev->dma_mask) | 1686 | if (host_dev && host_dev->dma_mask) |
1687 | bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT; | 1687 | bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; |
1688 | 1688 | ||
1689 | return bounce_limit; | 1689 | return bounce_limit; |
1690 | } | 1690 | } |
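The scsi_calculate_bounce_limit() fix above is a shift-width bug: dma_max_pfn() returns an unsigned long, so on 32-bit builds the left shift by PAGE_SHIFT is done in 32-bit arithmetic and truncates before the result lands in the u64. Casting first widens the operand. A small userspace demonstration; the PFN value is made up:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    uint32_t max_pfn = 0x00ffffffu;                        /* ~64 GiB of 4 KiB pages */

    uint64_t wrong = (uint64_t)(max_pfn << PAGE_SHIFT);    /* shifted in 32 bits, truncates */
    uint64_t right = (uint64_t)max_pfn << PAGE_SHIFT;      /* shifted in 64 bits */

    printf("wrong: 0x%llx\n", (unsigned long long)wrong);
    printf("right: 0x%llx\n", (unsigned long long)right);
    return 0;
}
```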
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index ba9310bc9acb..581ee2a8856b 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -376,10 +376,10 @@ config SPI_PXA2XX_PCI | |||
376 | def_tristate SPI_PXA2XX && PCI | 376 | def_tristate SPI_PXA2XX && PCI |
377 | 377 | ||
378 | config SPI_RSPI | 378 | config SPI_RSPI |
379 | tristate "Renesas RSPI controller" | 379 | tristate "Renesas RSPI/QSPI controller" |
380 | depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE | 380 | depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE |
381 | help | 381 | help |
382 | SPI driver for Renesas RSPI blocks. | 382 | SPI driver for Renesas RSPI and QSPI blocks. |
383 | 383 | ||
384 | config SPI_S3C24XX | 384 | config SPI_S3C24XX |
385 | tristate "Samsung S3C24XX series SPI" | 385 | tristate "Samsung S3C24XX series SPI" |
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c index 50406306bc20..bae97ffec4b9 100644 --- a/drivers/spi/spi-nuc900.c +++ b/drivers/spi/spi-nuc900.c | |||
@@ -361,6 +361,8 @@ static int nuc900_spi_probe(struct platform_device *pdev) | |||
361 | init_completion(&hw->done); | 361 | init_completion(&hw->done); |
362 | 362 | ||
363 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 363 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
364 | if (hw->pdata->lsb) | ||
365 | master->mode_bits |= SPI_LSB_FIRST; | ||
364 | master->num_chipselect = hw->pdata->num_cs; | 366 | master->num_chipselect = hw->pdata->num_cs; |
365 | master->bus_num = hw->pdata->bus_num; | 367 | master->bus_num = hw->pdata->bus_num; |
366 | hw->bitbang.master = hw->master; | 368 | hw->bitbang.master = hw->master; |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 23756b0f9036..d0b28bba38be 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -755,9 +755,7 @@ static void spi_pump_messages(struct kthread_work *work) | |||
755 | ret = master->transfer_one_message(master, master->cur_msg); | 755 | ret = master->transfer_one_message(master, master->cur_msg); |
756 | if (ret) { | 756 | if (ret) { |
757 | dev_err(&master->dev, | 757 | dev_err(&master->dev, |
758 | "failed to transfer one message from queue: %d\n", ret); | 758 | "failed to transfer one message from queue\n"); |
759 | master->cur_msg->status = ret; | ||
760 | spi_finalize_current_message(master); | ||
761 | return; | 759 | return; |
762 | } | 760 | } |
763 | } | 761 | } |
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 23948f167012..713a97226787 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c | |||
@@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf, | |||
295 | 295 | ||
296 | /* If size is not set, or set to 0, always return EOF. */ | 296 | /* If size is not set, or set to 0, always return EOF. */ |
297 | if (asma->size == 0) | 297 | if (asma->size == 0) |
298 | goto out; | 298 | goto out_unlock; |
299 | 299 | ||
300 | if (!asma->file) { | 300 | if (!asma->file) { |
301 | ret = -EBADF; | 301 | ret = -EBADF; |
302 | goto out; | 302 | goto out_unlock; |
303 | } | 303 | } |
304 | 304 | ||
305 | ret = asma->file->f_op->read(asma->file, buf, len, pos); | 305 | mutex_unlock(&ashmem_mutex); |
306 | if (ret < 0) | ||
307 | goto out; | ||
308 | 306 | ||
309 | /** Update backing file pos, since f_ops->read() doesn't */ | 307 | /* |
310 | asma->file->f_pos = *pos; | 308 | * asma and asma->file are used outside the lock here. We assume |
309 | * once asma->file is set it will never be changed, and will not | ||
310 | * be destroyed until all references to the file are dropped and | ||
311 | * ashmem_release is called. | ||
312 | */ | ||
313 | ret = asma->file->f_op->read(asma->file, buf, len, pos); | ||
314 | if (ret >= 0) { | ||
315 | /** Update backing file pos, since f_ops->read() doesn't */ | ||
316 | asma->file->f_pos = *pos; | ||
317 | } | ||
318 | return ret; | ||
311 | 319 | ||
312 | out: | 320 | out_unlock: |
313 | mutex_unlock(&ashmem_mutex); | 321 | mutex_unlock(&ashmem_mutex); |
314 | return ret; | 322 | return ret; |
315 | } | 323 | } |
@@ -498,6 +506,7 @@ out: | |||
498 | 506 | ||
499 | static int set_name(struct ashmem_area *asma, void __user *name) | 507 | static int set_name(struct ashmem_area *asma, void __user *name) |
500 | { | 508 | { |
509 | int len; | ||
501 | int ret = 0; | 510 | int ret = 0; |
502 | char local_name[ASHMEM_NAME_LEN]; | 511 | char local_name[ASHMEM_NAME_LEN]; |
503 | 512 | ||
@@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name) | |||
510 | * variable that does not need protection and later copy the local | 519 | * variable that does not need protection and later copy the local |
511 | * variable to the structure member with lock held. | 520 | * variable to the structure member with lock held. |
512 | */ | 521 | */ |
513 | if (copy_from_user(local_name, name, ASHMEM_NAME_LEN)) | 522 | len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN); |
514 | return -EFAULT; | 523 | if (len < 0) |
515 | 524 | return len; | |
525 | if (len == ASHMEM_NAME_LEN) | ||
526 | local_name[ASHMEM_NAME_LEN - 1] = '\0'; | ||
516 | mutex_lock(&ashmem_mutex); | 527 | mutex_lock(&ashmem_mutex); |
517 | /* cannot change an existing mapping's name */ | 528 | /* cannot change an existing mapping's name */ |
518 | if (unlikely(asma->file)) { | 529 | if (unlikely(asma->file)) |
519 | ret = -EINVAL; | 530 | ret = -EINVAL; |
520 | goto out; | 531 | else |
521 | } | 532 | strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name); |
522 | memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, | ||
523 | local_name, ASHMEM_NAME_LEN); | ||
524 | asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0'; | ||
525 | out: | ||
526 | mutex_unlock(&ashmem_mutex); | ||
527 | 533 | ||
534 | mutex_unlock(&ashmem_mutex); | ||
528 | return ret; | 535 | return ret; |
529 | } | 536 | } |
530 | 537 | ||
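The set_name() rewrite above swaps copy_from_user() for a strncpy_from_user()-style copy and forces a terminating NUL only when the source filled the whole buffer. A userspace model of that logic; copy_name() imitates strncpy_from_user()'s return convention but never faults, unlike the real helper:

```c
#include <stdio.h>

#define ASHMEM_NAME_LEN 256

/* Returns the string length if it fit within count bytes, or count if the
 * source was truncated (in which case no terminator was copied). */
static long copy_name(char *dst, const char *src, long count)
{
    long i;

    for (i = 0; i < count; i++) {
        dst[i] = src[i];
        if (src[i] == '\0')
            return i;
    }
    return count;
}

int main(void)
{
    char local_name[ASHMEM_NAME_LEN];
    long len = copy_name(local_name, "a-reasonably-short-name", ASHMEM_NAME_LEN);

    if (len < 0)
        return 1;                                    /* the driver returns the fault code */
    if (len == ASHMEM_NAME_LEN)
        local_name[ASHMEM_NAME_LEN - 1] = '\0';      /* force termination on truncation */

    printf("len=%ld name=%s\n", len, local_name);
    return 0;
}
```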
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c index af6cd370b30f..ee3a7380e53b 100644 --- a/drivers/staging/android/ion/compat_ion.c +++ b/drivers/staging/android/ion/compat_ion.c | |||
@@ -35,9 +35,14 @@ struct compat_ion_custom_data { | |||
35 | compat_ulong_t arg; | 35 | compat_ulong_t arg; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | struct compat_ion_handle_data { | ||
39 | compat_int_t handle; | ||
40 | }; | ||
41 | |||
38 | #define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ | 42 | #define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ |
39 | struct compat_ion_allocation_data) | 43 | struct compat_ion_allocation_data) |
40 | #define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) | 44 | #define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \ |
45 | struct compat_ion_handle_data) | ||
41 | #define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ | 46 | #define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ |
42 | struct compat_ion_custom_data) | 47 | struct compat_ion_custom_data) |
43 | 48 | ||
@@ -64,6 +69,19 @@ static int compat_get_ion_allocation_data( | |||
64 | return err; | 69 | return err; |
65 | } | 70 | } |
66 | 71 | ||
72 | static int compat_get_ion_handle_data( | ||
73 | struct compat_ion_handle_data __user *data32, | ||
74 | struct ion_handle_data __user *data) | ||
75 | { | ||
76 | compat_int_t i; | ||
77 | int err; | ||
78 | |||
79 | err = get_user(i, &data32->handle); | ||
80 | err |= put_user(i, &data->handle); | ||
81 | |||
82 | return err; | ||
83 | } | ||
84 | |||
67 | static int compat_put_ion_allocation_data( | 85 | static int compat_put_ion_allocation_data( |
68 | struct compat_ion_allocation_data __user *data32, | 86 | struct compat_ion_allocation_data __user *data32, |
69 | struct ion_allocation_data __user *data) | 87 | struct ion_allocation_data __user *data) |
@@ -132,8 +150,8 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
132 | } | 150 | } |
133 | case COMPAT_ION_IOC_FREE: | 151 | case COMPAT_ION_IOC_FREE: |
134 | { | 152 | { |
135 | struct compat_ion_allocation_data __user *data32; | 153 | struct compat_ion_handle_data __user *data32; |
136 | struct ion_allocation_data __user *data; | 154 | struct ion_handle_data __user *data; |
137 | int err; | 155 | int err; |
138 | 156 | ||
139 | data32 = compat_ptr(arg); | 157 | data32 = compat_ptr(arg); |
@@ -141,7 +159,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
141 | if (data == NULL) | 159 | if (data == NULL) |
142 | return -EFAULT; | 160 | return -EFAULT; |
143 | 161 | ||
144 | err = compat_get_ion_allocation_data(data32, data); | 162 | err = compat_get_ion_handle_data(data32, data); |
145 | if (err) | 163 | if (err) |
146 | return err; | 164 | return err; |
147 | 165 | ||
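The COMPAT_ION_IOC_FREE path above previously reused the allocation-data structure and conversion helper, so the compat handler read a five-field structure from userspace even though the 32-bit caller had only passed a single handle. The new compat_ion_handle_data plus compat_get_ion_handle_data copy exactly one compat_int_t. The size difference, using simplified copies of the two layouts for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified copies of the two 32-bit ABI layouts, for size comparison only. */
struct compat_ion_handle_data {
    int32_t handle;
};

struct compat_ion_allocation_data {
    uint32_t len;
    uint32_t align;
    uint32_t heap_id_mask;
    uint32_t flags;
    int32_t  handle;
};

int main(void)
{
    printf("handle_data:     %zu bytes\n", sizeof(struct compat_ion_handle_data));
    printf("allocation_data: %zu bytes\n", sizeof(struct compat_ion_allocation_data));
    return 0;
}
```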
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c index 55b2002753f2..01cdc8aee898 100644 --- a/drivers/staging/android/ion/ion_dummy_driver.c +++ b/drivers/staging/android/ion/ion_dummy_driver.c | |||
@@ -17,9 +17,11 @@ | |||
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/init.h> | ||
20 | #include <linux/bootmem.h> | 21 | #include <linux/bootmem.h> |
21 | #include <linux/memblock.h> | 22 | #include <linux/memblock.h> |
22 | #include <linux/sizes.h> | 23 | #include <linux/sizes.h> |
24 | #include <linux/io.h> | ||
23 | #include "ion.h" | 25 | #include "ion.h" |
24 | #include "ion_priv.h" | 26 | #include "ion_priv.h" |
25 | 27 | ||
@@ -57,7 +59,7 @@ struct ion_platform_heap dummy_heaps[] = { | |||
57 | }; | 59 | }; |
58 | 60 | ||
59 | struct ion_platform_data dummy_ion_pdata = { | 61 | struct ion_platform_data dummy_ion_pdata = { |
60 | .nr = 4, | 62 | .nr = ARRAY_SIZE(dummy_heaps), |
61 | .heaps = dummy_heaps, | 63 | .heaps = dummy_heaps, |
62 | }; | 64 | }; |
63 | 65 | ||
@@ -69,7 +71,7 @@ static int __init ion_dummy_init(void) | |||
69 | heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, | 71 | heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, |
70 | GFP_KERNEL); | 72 | GFP_KERNEL); |
71 | if (!heaps) | 73 | if (!heaps) |
72 | return PTR_ERR(heaps); | 74 | return -ENOMEM; |
73 | 75 | ||
74 | 76 | ||
75 | /* Allocate a dummy carveout heap */ | 77 | /* Allocate a dummy carveout heap */ |
@@ -128,6 +130,7 @@ err: | |||
128 | } | 130 | } |
129 | return err; | 131 | return err; |
130 | } | 132 | } |
133 | device_initcall(ion_dummy_init); | ||
131 | 134 | ||
132 | static void __exit ion_dummy_exit(void) | 135 | static void __exit ion_dummy_exit(void) |
133 | { | 136 | { |
@@ -152,7 +155,4 @@ static void __exit ion_dummy_exit(void) | |||
152 | 155 | ||
153 | return; | 156 | return; |
154 | } | 157 | } |
155 | 158 | __exitcall(ion_dummy_exit); | |
156 | module_init(ion_dummy_init); | ||
157 | module_exit(ion_dummy_exit); | ||
158 | |||
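The ion_dummy_init() fix above replaces `return PTR_ERR(heaps)` with -ENOMEM: kzalloc() reports failure with NULL rather than an ERR_PTR, so PTR_ERR(NULL) evaluates to 0 and the old code returned "success" with no memory. A tiny model of the two error conventions; ERR_PTR()/PTR_ERR() are re-implemented here only for the demo:

```c
#include <stdio.h>

#define ENOMEM 12
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))

int main(void)
{
    void *from_alloc  = NULL;                /* how an allocator reports failure */
    void *from_errptr = ERR_PTR(-ENOMEM);    /* the "error pointer" convention   */

    printf("PTR_ERR(NULL)         = %ld  (reads as success)\n", PTR_ERR(from_alloc));
    printf("PTR_ERR(ERR_PTR(-12)) = %ld\n", PTR_ERR(from_errptr));
    printf("correct return here   = %d\n", -ENOMEM);
    return 0;
}
```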
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 296c74f98dc0..37e64d51394c 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c | |||
@@ -243,12 +243,12 @@ int ion_heap_init_deferred_free(struct ion_heap *heap) | |||
243 | init_waitqueue_head(&heap->waitqueue); | 243 | init_waitqueue_head(&heap->waitqueue); |
244 | heap->task = kthread_run(ion_heap_deferred_free, heap, | 244 | heap->task = kthread_run(ion_heap_deferred_free, heap, |
245 | "%s", heap->name); | 245 | "%s", heap->name); |
246 | sched_setscheduler(heap->task, SCHED_IDLE, ¶m); | ||
247 | if (IS_ERR(heap->task)) { | 246 | if (IS_ERR(heap->task)) { |
248 | pr_err("%s: creating thread for deferred free failed\n", | 247 | pr_err("%s: creating thread for deferred free failed\n", |
249 | __func__); | 248 | __func__); |
250 | return PTR_RET(heap->task); | 249 | return PTR_RET(heap->task); |
251 | } | 250 | } |
251 | sched_setscheduler(heap->task, SCHED_IDLE, ¶m); | ||
252 | return 0; | 252 | return 0; |
253 | } | 253 | } |
254 | 254 | ||
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h index d98673981cc4..fc2e4fccf69d 100644 --- a/drivers/staging/android/ion/ion_priv.h +++ b/drivers/staging/android/ion/ion_priv.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #ifndef _ION_PRIV_H | 17 | #ifndef _ION_PRIV_H |
18 | #define _ION_PRIV_H | 18 | #define _ION_PRIV_H |
19 | 19 | ||
20 | #include <linux/device.h> | ||
20 | #include <linux/dma-direction.h> | 21 | #include <linux/dma-direction.h> |
21 | #include <linux/kref.h> | 22 | #include <linux/kref.h> |
22 | #include <linux/mm_types.h> | 23 | #include <linux/mm_types.h> |
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 7f0729130d65..9849f3963e75 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c | |||
@@ -124,6 +124,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap, | |||
124 | 124 | ||
125 | info->page = page; | 125 | info->page = page; |
126 | info->order = orders[i]; | 126 | info->order = orders[i]; |
127 | INIT_LIST_HEAD(&info->list); | ||
127 | return info; | 128 | return info; |
128 | } | 129 | } |
129 | kfree(info); | 130 | kfree(info); |
@@ -145,12 +146,15 @@ static int ion_system_heap_allocate(struct ion_heap *heap, | |||
145 | struct list_head pages; | 146 | struct list_head pages; |
146 | struct page_info *info, *tmp_info; | 147 | struct page_info *info, *tmp_info; |
147 | int i = 0; | 148 | int i = 0; |
148 | long size_remaining = PAGE_ALIGN(size); | 149 | unsigned long size_remaining = PAGE_ALIGN(size); |
149 | unsigned int max_order = orders[0]; | 150 | unsigned int max_order = orders[0]; |
150 | 151 | ||
151 | if (align > PAGE_SIZE) | 152 | if (align > PAGE_SIZE) |
152 | return -EINVAL; | 153 | return -EINVAL; |
153 | 154 | ||
155 | if (size / PAGE_SIZE > totalram_pages / 2) | ||
156 | return -ENOMEM; | ||
157 | |||
154 | INIT_LIST_HEAD(&pages); | 158 | INIT_LIST_HEAD(&pages); |
155 | while (size_remaining > 0) { | 159 | while (size_remaining > 0) { |
156 | info = alloc_largest_available(sys_heap, buffer, size_remaining, | 160 | info = alloc_largest_available(sys_heap, buffer, size_remaining, |
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h index 585040be5f18..5aaf71d6974b 100644 --- a/drivers/staging/android/sw_sync.h +++ b/drivers/staging/android/sw_sync.h | |||
@@ -35,10 +35,27 @@ struct sw_sync_pt { | |||
35 | u32 value; | 35 | u32 value; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | #if IS_ENABLED(CONFIG_SW_SYNC) | ||
38 | struct sw_sync_timeline *sw_sync_timeline_create(const char *name); | 39 | struct sw_sync_timeline *sw_sync_timeline_create(const char *name); |
39 | void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); | 40 | void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); |
40 | 41 | ||
41 | struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); | 42 | struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); |
43 | #else | ||
44 | static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name) | ||
45 | { | ||
46 | return NULL; | ||
47 | } | ||
48 | |||
49 | static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) | ||
50 | { | ||
51 | } | ||
52 | |||
53 | static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, | ||
54 | u32 value) | ||
55 | { | ||
56 | return NULL; | ||
57 | } | ||
58 | #endif /* IS_ENABLED(CONFIG_SW_SYNC) */ | ||
42 | 59 | ||
43 | #endif /* __KERNEL __ */ | 60 | #endif /* __KERNEL __ */ |
44 | 61 | ||
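The sw_sync.h change above follows the usual pattern for optional kernel features: when CONFIG_SW_SYNC is disabled, static inline stubs with identical signatures keep every caller compiling without #ifdefs at the call sites. A self-contained imitation of the pattern; CONFIG_FEATURE and feature_do() are invented names:

```c
#include <stdio.h>

#define CONFIG_FEATURE 0            /* flip to 1 to build the real implementation */

#if CONFIG_FEATURE
static int feature_do(int x)
{
    return x * 2;                   /* the "real" code */
}
#else
static inline int feature_do(int x)
{
    (void)x;
    return 0;                       /* stub: callers still compile and run */
}
#endif

int main(void)
{
    printf("feature_do(21) = %d\n", feature_do(21));
    return 0;
}
```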
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index 38e5d3b5ed9b..3d05f662110b 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c | |||
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref) | |||
79 | container_of(kref, struct sync_timeline, kref); | 79 | container_of(kref, struct sync_timeline, kref); |
80 | unsigned long flags; | 80 | unsigned long flags; |
81 | 81 | ||
82 | if (obj->ops->release_obj) | ||
83 | obj->ops->release_obj(obj); | ||
84 | |||
85 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | 82 | spin_lock_irqsave(&sync_timeline_list_lock, flags); |
86 | list_del(&obj->sync_timeline_list); | 83 | list_del(&obj->sync_timeline_list); |
87 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | 84 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); |
88 | 85 | ||
86 | if (obj->ops->release_obj) | ||
87 | obj->ops->release_obj(obj); | ||
88 | |||
89 | kfree(obj); | 89 | kfree(obj); |
90 | } | 90 | } |
91 | 91 | ||
92 | void sync_timeline_destroy(struct sync_timeline *obj) | 92 | void sync_timeline_destroy(struct sync_timeline *obj) |
93 | { | 93 | { |
94 | obj->destroyed = true; | 94 | obj->destroyed = true; |
95 | smp_wmb(); | ||
95 | 96 | ||
96 | /* | 97 | /* |
97 | * If this is not the last reference, signal any children | 98 | * signal any children that their parent is going away. |
98 | * that their parent is going away. | ||
99 | */ | 99 | */ |
100 | sync_timeline_signal(obj); | ||
100 | 101 | ||
101 | if (!kref_put(&obj->kref, sync_timeline_free)) | 102 | kref_put(&obj->kref, sync_timeline_free); |
102 | sync_timeline_signal(obj); | ||
103 | } | 103 | } |
104 | EXPORT_SYMBOL(sync_timeline_destroy); | 104 | EXPORT_SYMBOL(sync_timeline_destroy); |
105 | 105 | ||
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c index 8dfdd2732bdc..95a2358267ba 100644 --- a/drivers/staging/bcm/Bcmnet.c +++ b/drivers/staging/bcm/Bcmnet.c | |||
@@ -40,7 +40,7 @@ static INT bcm_close(struct net_device *dev) | |||
40 | } | 40 | } |
41 | 41 | ||
42 | static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, | 42 | static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, |
43 | void *accel_priv) | 43 | void *accel_priv, select_queue_fallback_t fallback) |
44 | { | 44 | { |
45 | return ClassifyPacket(netdev_priv(dev), skb); | 45 | return ClassifyPacket(netdev_priv(dev), skb); |
46 | } | 46 | } |
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 246080316c90..5b15033a94bf 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c | |||
@@ -616,8 +616,6 @@ int comedi_auto_config(struct device *hardware_device, | |||
616 | ret = driver->auto_attach(dev, context); | 616 | ret = driver->auto_attach(dev, context); |
617 | if (ret >= 0) | 617 | if (ret >= 0) |
618 | ret = comedi_device_postconfig(dev); | 618 | ret = comedi_device_postconfig(dev); |
619 | if (ret < 0) | ||
620 | comedi_device_detach(dev); | ||
621 | mutex_unlock(&dev->mutex); | 619 | mutex_unlock(&dev->mutex); |
622 | 620 | ||
623 | if (ret < 0) { | 621 | if (ret < 0) { |
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index 593676cf706a..d9ad2c0fdda2 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c | |||
@@ -494,6 +494,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev, | |||
494 | struct comedi_insn *insn, unsigned int *data) | 494 | struct comedi_insn *insn, unsigned int *data) |
495 | { | 495 | { |
496 | struct pci1710_private *devpriv = dev->private; | 496 | struct pci1710_private *devpriv = dev->private; |
497 | unsigned int val; | ||
497 | int n, chan, range, ofs; | 498 | int n, chan, range, ofs; |
498 | 499 | ||
499 | chan = CR_CHAN(insn->chanspec); | 500 | chan = CR_CHAN(insn->chanspec); |
@@ -509,11 +510,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev, | |||
509 | outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF); | 510 | outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF); |
510 | ofs = PCI171x_DA1; | 511 | ofs = PCI171x_DA1; |
511 | } | 512 | } |
513 | val = devpriv->ao_data[chan]; | ||
512 | 514 | ||
513 | for (n = 0; n < insn->n; n++) | 515 | for (n = 0; n < insn->n; n++) { |
514 | outw(data[n], dev->iobase + ofs); | 516 | val = data[n]; |
517 | outw(val, dev->iobase + ofs); | ||
518 | } | ||
515 | 519 | ||
516 | devpriv->ao_data[chan] = data[n]; | 520 | devpriv->ao_data[chan] = val; |
517 | 521 | ||
518 | return n; | 522 | return n; |
519 | 523 | ||
@@ -679,6 +683,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev, | |||
679 | struct comedi_insn *insn, unsigned int *data) | 683 | struct comedi_insn *insn, unsigned int *data) |
680 | { | 684 | { |
681 | struct pci1710_private *devpriv = dev->private; | 685 | struct pci1710_private *devpriv = dev->private; |
686 | unsigned int val; | ||
682 | int n, rangereg, chan; | 687 | int n, rangereg, chan; |
683 | 688 | ||
684 | chan = CR_CHAN(insn->chanspec); | 689 | chan = CR_CHAN(insn->chanspec); |
@@ -688,13 +693,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev, | |||
688 | outb(rangereg, dev->iobase + PCI1720_RANGE); | 693 | outb(rangereg, dev->iobase + PCI1720_RANGE); |
689 | devpriv->da_ranges = rangereg; | 694 | devpriv->da_ranges = rangereg; |
690 | } | 695 | } |
696 | val = devpriv->ao_data[chan]; | ||
691 | 697 | ||
692 | for (n = 0; n < insn->n; n++) { | 698 | for (n = 0; n < insn->n; n++) { |
693 | outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1)); | 699 | val = data[n]; |
700 | outw(val, dev->iobase + PCI1720_DA0 + (chan << 1)); | ||
694 | outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */ | 701 | outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */ |
695 | } | 702 | } |
696 | 703 | ||
697 | devpriv->ao_data[chan] = data[n]; | 704 | devpriv->ao_data[chan] = val; |
698 | 705 | ||
699 | return n; | 706 | return n; |
700 | } | 707 | } |
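Both AO write fixes above address the same off-by-one: when `for (n = 0; n < insn->n; n++)` finishes, n equals insn->n, so the old `devpriv->ao_data[chan] = data[n]` read one element past the caller's buffer. Caching the last value written avoids the out-of-bounds access. A plain-C model:

```c
#include <stdio.h>

int main(void)
{
    unsigned int data[3] = { 10, 20, 30 };   /* the caller's sample buffer  */
    unsigned int count = 3;                  /* insn->n in the driver       */
    unsigned int n, val = 0;

    for (n = 0; n < count; n++)
        val = data[n];                       /* "write data[n] to hardware" */

    /* Old code: last = data[n];   <-- n == count here, one past the end. */
    printf("last value written: %u\n", val);
    return 0;
}
```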
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c index 3beeb1254152..88c60b6020c4 100644 --- a/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/drivers/staging/comedi/drivers/usbduxsigma.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/usb.h> | 48 | #include <linux/usb.h> |
49 | #include <linux/fcntl.h> | 49 | #include <linux/fcntl.h> |
50 | #include <linux/compiler.h> | 50 | #include <linux/compiler.h> |
51 | #include <asm/unaligned.h> | ||
51 | 52 | ||
52 | #include "comedi_fc.h" | 53 | #include "comedi_fc.h" |
53 | #include "../comedidev.h" | 54 | #include "../comedidev.h" |
@@ -792,7 +793,8 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev, | |||
792 | } | 793 | } |
793 | 794 | ||
794 | /* 32 bits big endian from the A/D converter */ | 795 | /* 32 bits big endian from the A/D converter */ |
795 | val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1))); | 796 | val = be32_to_cpu(get_unaligned((uint32_t |
797 | *)(devpriv->insn_buf + 1))); | ||
796 | val &= 0x00ffffff; /* strip status byte */ | 798 | val &= 0x00ffffff; /* strip status byte */ |
797 | val ^= 0x00800000; /* convert to unsigned */ | 799 | val ^= 0x00800000; /* convert to unsigned */ |
798 | 800 | ||
@@ -1357,7 +1359,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan) | |||
1357 | return ret; | 1359 | return ret; |
1358 | 1360 | ||
1359 | /* 32 bits big endian from the A/D converter */ | 1361 | /* 32 bits big endian from the A/D converter */ |
1360 | val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1))); | 1362 | val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1))); |
1361 | val &= 0x00ffffff; /* strip status byte */ | 1363 | val &= 0x00ffffff; /* strip status byte */ |
1362 | val ^= 0x00800000; /* convert to unsigned */ | 1364 | val ^= 0x00800000; /* convert to unsigned */ |
1363 | 1365 | ||
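usbduxsigma reads a 32-bit big-endian sample starting at insn_buf + 1, which is not 4-byte aligned; dereferencing it through a uint32_t pointer is what get_unaligned() avoids on strict-alignment CPUs. The portable userspace analogue is memcpy() plus an explicit byte-order conversion; the sample bytes below are made up:

```c
#include <arpa/inet.h>   /* ntohl() for the big-endian conversion */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* bytes [1..4]: big-endian word = status byte + 24-bit sample */
    uint8_t insn_buf[5] = { 0x00, 0x00, 0x12, 0x34, 0x56 };
    uint32_t val;

    memcpy(&val, insn_buf + 1, sizeof(val));   /* safe unaligned load       */
    val = ntohl(val);                          /* device data is big endian */
    val &= 0x00ffffff;                         /* strip status byte         */
    val ^= 0x00800000;                         /* convert to unsigned       */

    printf("sample = 0x%06x\n", val);
    return 0;
}
```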
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c index 1f61b89eca44..33ac7fb88cbd 100644 --- a/drivers/staging/dgrp/dgrp_net_ops.c +++ b/drivers/staging/dgrp/dgrp_net_ops.c | |||
@@ -2232,177 +2232,6 @@ done: | |||
2232 | return rtn; | 2232 | return rtn; |
2233 | } | 2233 | } |
2234 | 2234 | ||
2235 | /* | ||
2236 | * Common Packet Handling code | ||
2237 | */ | ||
2238 | |||
2239 | static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch, | ||
2240 | long dlen, long plen, int n1, u8 *dbuf) | ||
2241 | { | ||
2242 | char *error; | ||
2243 | long n; | ||
2244 | long remain; | ||
2245 | u8 *buf; | ||
2246 | u8 *b; | ||
2247 | |||
2248 | remain = nd->nd_remain; | ||
2249 | nd->nd_tx_work = 1; | ||
2250 | |||
2251 | /* | ||
2252 | * Otherwise data should appear only when we are | ||
2253 | * in the CS_READY state. | ||
2254 | */ | ||
2255 | |||
2256 | if (ch->ch_state < CS_READY) { | ||
2257 | error = "Data received before RWIN established"; | ||
2258 | nd->nd_remain = 0; | ||
2259 | nd->nd_state = NS_SEND_ERROR; | ||
2260 | nd->nd_error = error; | ||
2261 | } | ||
2262 | |||
2263 | /* | ||
2264 | * Assure that the data received is within the | ||
2265 | * allowable window. | ||
2266 | */ | ||
2267 | |||
2268 | n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff; | ||
2269 | |||
2270 | if (dlen > n) { | ||
2271 | error = "Receive data overrun"; | ||
2272 | nd->nd_remain = 0; | ||
2273 | nd->nd_state = NS_SEND_ERROR; | ||
2274 | nd->nd_error = error; | ||
2275 | } | ||
2276 | |||
2277 | /* | ||
2278 | * If we received 3 or less characters, | ||
2279 | * assume it is a human typing, and set RTIME | ||
2280 | * to 10 milliseconds. | ||
2281 | * | ||
2282 | * If we receive 10 or more characters, | ||
2283 | * assume its not a human typing, and set RTIME | ||
2284 | * to 100 milliseconds. | ||
2285 | */ | ||
2286 | |||
2287 | if (ch->ch_edelay != DGRP_RTIME) { | ||
2288 | if (ch->ch_rtime != ch->ch_edelay) { | ||
2289 | ch->ch_rtime = ch->ch_edelay; | ||
2290 | ch->ch_flag |= CH_PARAM; | ||
2291 | } | ||
2292 | } else if (dlen <= 3) { | ||
2293 | if (ch->ch_rtime != 10) { | ||
2294 | ch->ch_rtime = 10; | ||
2295 | ch->ch_flag |= CH_PARAM; | ||
2296 | } | ||
2297 | } else { | ||
2298 | if (ch->ch_rtime != DGRP_RTIME) { | ||
2299 | ch->ch_rtime = DGRP_RTIME; | ||
2300 | ch->ch_flag |= CH_PARAM; | ||
2301 | } | ||
2302 | } | ||
2303 | |||
2304 | /* | ||
2305 | * If a portion of the packet is outside the | ||
2306 | * buffer, shorten the effective length of the | ||
2307 | * data packet to be the amount of data received. | ||
2308 | */ | ||
2309 | |||
2310 | if (remain < plen) | ||
2311 | dlen -= plen - remain; | ||
2312 | |||
2313 | /* | ||
2314 | * Detect if receive flush is now complete. | ||
2315 | */ | ||
2316 | |||
2317 | if ((ch->ch_flag & CH_RX_FLUSH) != 0 && | ||
2318 | ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >= | ||
2319 | ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) { | ||
2320 | ch->ch_flag &= ~CH_RX_FLUSH; | ||
2321 | } | ||
2322 | |||
2323 | /* | ||
2324 | * If we are ready to receive, move the data into | ||
2325 | * the receive buffer. | ||
2326 | */ | ||
2327 | |||
2328 | ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff; | ||
2329 | |||
2330 | if (ch->ch_state == CS_READY && | ||
2331 | (ch->ch_tun.un_open_count != 0) && | ||
2332 | (ch->ch_tun.un_flag & UN_CLOSING) == 0 && | ||
2333 | (ch->ch_cflag & CF_CREAD) != 0 && | ||
2334 | (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 && | ||
2335 | (ch->ch_send & RR_RX_FLUSH) == 0) { | ||
2336 | |||
2337 | if (ch->ch_rin + dlen >= RBUF_MAX) { | ||
2338 | n = RBUF_MAX - ch->ch_rin; | ||
2339 | |||
2340 | memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n); | ||
2341 | |||
2342 | ch->ch_rin = 0; | ||
2343 | dbuf += n; | ||
2344 | dlen -= n; | ||
2345 | } | ||
2346 | |||
2347 | memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen); | ||
2348 | |||
2349 | ch->ch_rin += dlen; | ||
2350 | |||
2351 | |||
2352 | /* | ||
2353 | * If we are not in fastcook mode, or | ||
2354 | * if there is a fastcook thread | ||
2355 | * waiting for data, send the data to | ||
2356 | * the line discipline. | ||
2357 | */ | ||
2358 | |||
2359 | if ((ch->ch_flag & CH_FAST_READ) == 0 || | ||
2360 | ch->ch_inwait != 0) { | ||
2361 | dgrp_input(ch); | ||
2362 | } | ||
2363 | |||
2364 | /* | ||
2365 | * If there is a read thread waiting | ||
2366 | * in select, and we are in fastcook | ||
2367 | * mode, wake him up. | ||
2368 | */ | ||
2369 | |||
2370 | if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) && | ||
2371 | (ch->ch_flag & CH_FAST_READ) != 0) | ||
2372 | wake_up_interruptible(&ch->ch_tun.un_tty->read_wait); | ||
2373 | |||
2374 | /* | ||
2375 | * Wake any thread waiting in the | ||
2376 | * fastcook loop. | ||
2377 | */ | ||
2378 | |||
2379 | if ((ch->ch_flag & CH_INPUT) != 0) { | ||
2380 | ch->ch_flag &= ~CH_INPUT; | ||
2381 | wake_up_interruptible(&ch->ch_flag_wait); | ||
2382 | } | ||
2383 | } | ||
2384 | |||
2385 | /* | ||
2386 | * Fabricate and insert a data packet header to | ||
2387 | * preced the remaining data when it comes in. | ||
2388 | */ | ||
2389 | |||
2390 | if (remain < plen) { | ||
2391 | dlen = plen - remain; | ||
2392 | b = buf; | ||
2393 | |||
2394 | b[0] = 0x90 + n1; | ||
2395 | put_unaligned_be16(dlen, b + 1); | ||
2396 | |||
2397 | remain = 3; | ||
2398 | if (remain > 0 && b != buf) | ||
2399 | memcpy(buf, b, remain); | ||
2400 | |||
2401 | nd->nd_remain = remain; | ||
2402 | return; | ||
2403 | } | ||
2404 | } | ||
2405 | |||
2406 | /** | 2235 | /** |
2407 | * dgrp_receive() -- decode data packets received from the remote PortServer. | 2236 | * dgrp_receive() -- decode data packets received from the remote PortServer. |
2408 | * @nd: pointer to a node structure | 2237 | * @nd: pointer to a node structure |
@@ -2477,8 +2306,7 @@ static void dgrp_receive(struct nd_struct *nd) | |||
2477 | plen = dlen + 1; | 2306 | plen = dlen + 1; |
2478 | 2307 | ||
2479 | dbuf = b + 1; | 2308 | dbuf = b + 1; |
2480 | handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf); | 2309 | goto data; |
2481 | break; | ||
2482 | 2310 | ||
2483 | /* | 2311 | /* |
2484 | * Process 2-byte header data packet. | 2312 | * Process 2-byte header data packet. |
@@ -2492,8 +2320,7 @@ static void dgrp_receive(struct nd_struct *nd) | |||
2492 | plen = dlen + 2; | 2320 | plen = dlen + 2; |
2493 | 2321 | ||
2494 | dbuf = b + 2; | 2322 | dbuf = b + 2; |
2495 | handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf); | 2323 | goto data; |
2496 | break; | ||
2497 | 2324 | ||
2498 | /* | 2325 | /* |
2499 | * Process 3-byte header data packet. | 2326 | * Process 3-byte header data packet. |
@@ -2508,6 +2335,159 @@ static void dgrp_receive(struct nd_struct *nd) | |||
2508 | 2335 | ||
2509 | dbuf = b + 3; | 2336 | dbuf = b + 3; |
2510 | 2337 | ||
2338 | /* | ||
2339 | * Common packet handling code. | ||
2340 | */ | ||
2341 | |||
2342 | data: | ||
2343 | nd->nd_tx_work = 1; | ||
2344 | |||
2345 | /* | ||
2346 | * Otherwise data should appear only when we are | ||
2347 | * in the CS_READY state. | ||
2348 | */ | ||
2349 | |||
2350 | if (ch->ch_state < CS_READY) { | ||
2351 | error = "Data received before RWIN established"; | ||
2352 | goto prot_error; | ||
2353 | } | ||
2354 | |||
2355 | /* | ||
2356 | * Assure that the data received is within the | ||
2357 | * allowable window. | ||
2358 | */ | ||
2359 | |||
2360 | n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff; | ||
2361 | |||
2362 | if (dlen > n) { | ||
2363 | error = "Receive data overrun"; | ||
2364 | goto prot_error; | ||
2365 | } | ||
2366 | |||
2367 | /* | ||
2368 | * If we received 3 or less characters, | ||
2369 | * assume it is a human typing, and set RTIME | ||
2370 | * to 10 milliseconds. | ||
2371 | * | ||
2372 | * If we receive 10 or more characters, | ||
2373 | * assume it's not a human typing, and set RTIME | ||
2374 | * to 100 milliseconds. | ||
2375 | */ | ||
2376 | |||
2377 | if (ch->ch_edelay != DGRP_RTIME) { | ||
2378 | if (ch->ch_rtime != ch->ch_edelay) { | ||
2379 | ch->ch_rtime = ch->ch_edelay; | ||
2380 | ch->ch_flag |= CH_PARAM; | ||
2381 | } | ||
2382 | } else if (dlen <= 3) { | ||
2383 | if (ch->ch_rtime != 10) { | ||
2384 | ch->ch_rtime = 10; | ||
2385 | ch->ch_flag |= CH_PARAM; | ||
2386 | } | ||
2387 | } else { | ||
2388 | if (ch->ch_rtime != DGRP_RTIME) { | ||
2389 | ch->ch_rtime = DGRP_RTIME; | ||
2390 | ch->ch_flag |= CH_PARAM; | ||
2391 | } | ||
2392 | } | ||
2393 | |||
2394 | /* | ||
2395 | * If a portion of the packet is outside the | ||
2396 | * buffer, shorten the effective length of the | ||
2397 | * data packet to be the amount of data received. | ||
2398 | */ | ||
2399 | |||
2400 | if (remain < plen) | ||
2401 | dlen -= plen - remain; | ||
2402 | |||
2403 | /* | ||
2404 | * Detect if receive flush is now complete. | ||
2405 | */ | ||
2406 | |||
2407 | if ((ch->ch_flag & CH_RX_FLUSH) != 0 && | ||
2408 | ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >= | ||
2409 | ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) { | ||
2410 | ch->ch_flag &= ~CH_RX_FLUSH; | ||
2411 | } | ||
2412 | |||
2413 | /* | ||
2414 | * If we are ready to receive, move the data into | ||
2415 | * the receive buffer. | ||
2416 | */ | ||
2417 | |||
2418 | ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff; | ||
2419 | |||
2420 | if (ch->ch_state == CS_READY && | ||
2421 | (ch->ch_tun.un_open_count != 0) && | ||
2422 | (ch->ch_tun.un_flag & UN_CLOSING) == 0 && | ||
2423 | (ch->ch_cflag & CF_CREAD) != 0 && | ||
2424 | (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 && | ||
2425 | (ch->ch_send & RR_RX_FLUSH) == 0) { | ||
2426 | |||
2427 | if (ch->ch_rin + dlen >= RBUF_MAX) { | ||
2428 | n = RBUF_MAX - ch->ch_rin; | ||
2429 | |||
2430 | memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n); | ||
2431 | |||
2432 | ch->ch_rin = 0; | ||
2433 | dbuf += n; | ||
2434 | dlen -= n; | ||
2435 | } | ||
2436 | |||
2437 | memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen); | ||
2438 | |||
2439 | ch->ch_rin += dlen; | ||
2440 | |||
2441 | |||
2442 | /* | ||
2443 | * If we are not in fastcook mode, or | ||
2444 | * if there is a fastcook thread | ||
2445 | * waiting for data, send the data to | ||
2446 | * the line discipline. | ||
2447 | */ | ||
2448 | |||
2449 | if ((ch->ch_flag & CH_FAST_READ) == 0 || | ||
2450 | ch->ch_inwait != 0) { | ||
2451 | dgrp_input(ch); | ||
2452 | } | ||
2453 | |||
2454 | /* | ||
2455 | * If there is a read thread waiting | ||
2456 | * in select, and we are in fastcook | ||
2457 | * mode, wake him up. | ||
2458 | */ | ||
2459 | |||
2460 | if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) && | ||
2461 | (ch->ch_flag & CH_FAST_READ) != 0) | ||
2462 | wake_up_interruptible(&ch->ch_tun.un_tty->read_wait); | ||
2463 | |||
2464 | /* | ||
2465 | * Wake any thread waiting in the | ||
2466 | * fastcook loop. | ||
2467 | */ | ||
2468 | |||
2469 | if ((ch->ch_flag & CH_INPUT) != 0) { | ||
2470 | ch->ch_flag &= ~CH_INPUT; | ||
2471 | |||
2472 | wake_up_interruptible(&ch->ch_flag_wait); | ||
2473 | } | ||
2474 | } | ||
2475 | |||
2476 | /* | ||
2477 | * Fabricate and insert a data packet header to | ||
2478 | * precede the remaining data when it comes in. | ||
2479 | */ | ||
2480 | |||
2481 | if (remain < plen) { | ||
2482 | dlen = plen - remain; | ||
2483 | b = buf; | ||
2484 | |||
2485 | b[0] = 0x90 + n1; | ||
2486 | put_unaligned_be16(dlen, b + 1); | ||
2487 | |||
2488 | remain = 3; | ||
2489 | goto done; | ||
2490 | } | ||
2511 | break; | 2491 | break; |
2512 | 2492 | ||
2513 | /* | 2493 | /* |
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c index f8788bf0a7d3..cdeffe75496b 100644 --- a/drivers/staging/gdm72xx/gdm_usb.c +++ b/drivers/staging/gdm72xx/gdm_usb.c | |||
@@ -635,11 +635,14 @@ static int gdm_usb_probe(struct usb_interface *intf, | |||
635 | #endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ | 635 | #endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ |
636 | 636 | ||
637 | ret = register_wimax_device(phy_dev, &intf->dev); | 637 | ret = register_wimax_device(phy_dev, &intf->dev); |
638 | if (ret) | ||
639 | release_usb(udev); | ||
638 | 640 | ||
639 | out: | 641 | out: |
640 | if (ret) { | 642 | if (ret) { |
641 | kfree(phy_dev); | 643 | kfree(phy_dev); |
642 | kfree(udev); | 644 | kfree(udev); |
645 | usb_put_dev(usbdev); | ||
643 | } else { | 646 | } else { |
644 | usb_set_intfdata(intf, phy_dev); | 647 | usb_set_intfdata(intf, phy_dev); |
645 | } | 648 | } |
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h index 35154d60faf6..c9fedb79e3a2 100644 --- a/drivers/staging/iio/Documentation/iio_utils.h +++ b/drivers/staging/iio/Documentation/iio_utils.h | |||
@@ -77,7 +77,6 @@ struct iio_channel_info { | |||
77 | uint64_t mask; | 77 | uint64_t mask; |
78 | unsigned be; | 78 | unsigned be; |
79 | unsigned is_signed; | 79 | unsigned is_signed; |
80 | unsigned enabled; | ||
81 | unsigned location; | 80 | unsigned location; |
82 | }; | 81 | }; |
83 | 82 | ||
@@ -335,6 +334,7 @@ inline int build_channel_array(const char *device_dir, | |||
335 | while (ent = readdir(dp), ent != NULL) { | 334 | while (ent = readdir(dp), ent != NULL) { |
336 | if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"), | 335 | if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"), |
337 | "_en") == 0) { | 336 | "_en") == 0) { |
337 | int current_enabled = 0; | ||
338 | current = &(*ci_array)[count++]; | 338 | current = &(*ci_array)[count++]; |
339 | ret = asprintf(&filename, | 339 | ret = asprintf(&filename, |
340 | "%s/%s", scan_el_dir, ent->d_name); | 340 | "%s/%s", scan_el_dir, ent->d_name); |
@@ -350,10 +350,10 @@ inline int build_channel_array(const char *device_dir, | |||
350 | ret = -errno; | 350 | ret = -errno; |
351 | goto error_cleanup_array; | 351 | goto error_cleanup_array; |
352 | } | 352 | } |
353 | fscanf(sysfsfp, "%u", ¤t->enabled); | 353 | fscanf(sysfsfp, "%u", ¤t_enabled); |
354 | fclose(sysfsfp); | 354 | fclose(sysfsfp); |
355 | 355 | ||
356 | if (!current->enabled) { | 356 | if (!current_enabled) { |
357 | free(filename); | 357 | free(filename); |
358 | count--; | 358 | count--; |
359 | continue; | 359 | continue; |
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c index 5ea36410f716..5708ffc62aec 100644 --- a/drivers/staging/iio/adc/ad799x_core.c +++ b/drivers/staging/iio/adc/ad799x_core.c | |||
@@ -393,7 +393,7 @@ static const struct iio_event_spec ad799x_events[] = { | |||
393 | }, { | 393 | }, { |
394 | .type = IIO_EV_TYPE_THRESH, | 394 | .type = IIO_EV_TYPE_THRESH, |
395 | .dir = IIO_EV_DIR_FALLING, | 395 | .dir = IIO_EV_DIR_FALLING, |
396 | .mask_separate = BIT(IIO_EV_INFO_VALUE), | 396 | .mask_separate = BIT(IIO_EV_INFO_VALUE) | |
397 | BIT(IIO_EV_INFO_ENABLE), | 397 | BIT(IIO_EV_INFO_ENABLE), |
398 | }, { | 398 | }, { |
399 | .type = IIO_EV_TYPE_THRESH, | 399 | .type = IIO_EV_TYPE_THRESH, |
@@ -409,7 +409,13 @@ static const struct iio_event_spec ad799x_events[] = { | |||
409 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ | 409 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ |
410 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ | 410 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ |
411 | .scan_index = (_index), \ | 411 | .scan_index = (_index), \ |
412 | .scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \ | 412 | .scan_type = { \ |
413 | .sign = 'u', \ | ||
414 | .realbits = (_realbits), \ | ||
415 | .storagebits = 16, \ | ||
416 | .shift = 12 - (_realbits), \ | ||
417 | .endianness = IIO_BE, \ | ||
418 | }, \ | ||
413 | .event_spec = _ev_spec, \ | 419 | .event_spec = _ev_spec, \ |
414 | .num_event_specs = _num_ev_spec, \ | 420 | .num_event_specs = _num_ev_spec, \ |
415 | } | 421 | } |
@@ -588,7 +594,8 @@ static int ad799x_probe(struct i2c_client *client, | |||
588 | return 0; | 594 | return 0; |
589 | 595 | ||
590 | error_free_irq: | 596 | error_free_irq: |
591 | free_irq(client->irq, indio_dev); | 597 | if (client->irq > 0) |
598 | free_irq(client->irq, indio_dev); | ||
592 | error_cleanup_ring: | 599 | error_cleanup_ring: |
593 | ad799x_ring_cleanup(indio_dev); | 600 | ad799x_ring_cleanup(indio_dev); |
594 | error_disable_reg: | 601 | error_disable_reg: |
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index df71669bb60e..7fc66a6a6e36 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c | |||
@@ -1035,8 +1035,6 @@ SHOW_SCALE_AVAILABLE_ATTR(4); | |||
1035 | SHOW_SCALE_AVAILABLE_ATTR(5); | 1035 | SHOW_SCALE_AVAILABLE_ATTR(5); |
1036 | SHOW_SCALE_AVAILABLE_ATTR(6); | 1036 | SHOW_SCALE_AVAILABLE_ATTR(6); |
1037 | SHOW_SCALE_AVAILABLE_ATTR(7); | 1037 | SHOW_SCALE_AVAILABLE_ATTR(7); |
1038 | SHOW_SCALE_AVAILABLE_ATTR(8); | ||
1039 | SHOW_SCALE_AVAILABLE_ATTR(9); | ||
1040 | SHOW_SCALE_AVAILABLE_ATTR(10); | 1038 | SHOW_SCALE_AVAILABLE_ATTR(10); |
1041 | SHOW_SCALE_AVAILABLE_ATTR(11); | 1039 | SHOW_SCALE_AVAILABLE_ATTR(11); |
1042 | SHOW_SCALE_AVAILABLE_ATTR(12); | 1040 | SHOW_SCALE_AVAILABLE_ATTR(12); |
@@ -1053,8 +1051,6 @@ static struct attribute *mxs_lradc_attributes[] = { | |||
1053 | &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr, | 1051 | &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr, |
1054 | &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr, | 1052 | &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr, |
1055 | &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr, | 1053 | &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr, |
1056 | &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr, | ||
1057 | &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr, | ||
1058 | &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr, | 1054 | &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr, |
1059 | &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr, | 1055 | &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr, |
1060 | &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr, | 1056 | &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr, |
@@ -1613,7 +1609,7 @@ static int mxs_lradc_probe(struct platform_device *pdev) | |||
1613 | * of the array. | 1609 | * of the array. |
1614 | */ | 1610 | */ |
1615 | scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >> | 1611 | scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >> |
1616 | (iio->channels[i].scan_type.realbits - s); | 1612 | (LRADC_RESOLUTION - s); |
1617 | lradc->scale_avail[i][s].nano = | 1613 | lradc->scale_avail[i][s].nano = |
1618 | do_div(scale_uv, 100000000) * 10; | 1614 | do_div(scale_uv, 100000000) * 10; |
1619 | lradc->scale_avail[i][s].integer = scale_uv; | 1615 | lradc->scale_avail[i][s].integer = scale_uv; |
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 0a4298b744e6..2b96665da8a2 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c | |||
@@ -629,7 +629,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
629 | struct iio_buffer *buffer; | 629 | struct iio_buffer *buffer; |
630 | 630 | ||
631 | buffer = iio_kfifo_allocate(indio_dev); | 631 | buffer = iio_kfifo_allocate(indio_dev); |
632 | if (buffer) | 632 | if (!buffer) |
633 | return -ENOMEM; | 633 | return -ENOMEM; |
634 | 634 | ||
635 | iio_device_attach_buffer(indio_dev, buffer); | 635 | iio_device_attach_buffer(indio_dev, buffer); |
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 09ef5fb8bae6..236ed66f116a 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c | |||
@@ -88,9 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm) | |||
88 | 88 | ||
89 | imx_drm_device_put(); | 89 | imx_drm_device_put(); |
90 | 90 | ||
91 | drm_vblank_cleanup(imxdrm->drm); | 91 | drm_vblank_cleanup(drm); |
92 | drm_kms_helper_poll_fini(imxdrm->drm); | 92 | drm_kms_helper_poll_fini(drm); |
93 | drm_mode_config_cleanup(imxdrm->drm); | 93 | drm_mode_config_cleanup(drm); |
94 | 94 | ||
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
@@ -142,19 +142,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format); | |||
142 | 142 | ||
143 | int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) | 143 | int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) |
144 | { | 144 | { |
145 | return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); | 145 | return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); |
146 | } | 146 | } |
147 | EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get); | 147 | EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get); |
148 | 148 | ||
149 | void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc) | 149 | void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc) |
150 | { | 150 | { |
151 | drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); | 151 | drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); |
152 | } | 152 | } |
153 | EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put); | 153 | EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put); |
154 | 154 | ||
155 | void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc) | 155 | void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc) |
156 | { | 156 | { |
157 | drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); | 157 | drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); |
158 | } | 158 | } |
159 | EXPORT_SYMBOL_GPL(imx_drm_handle_vblank); | 159 | EXPORT_SYMBOL_GPL(imx_drm_handle_vblank); |
160 | 160 | ||
@@ -370,29 +370,6 @@ static void imx_drm_connector_unregister( | |||
370 | } | 370 | } |
371 | 371 | ||
372 | /* | 372 | /* |
373 | * register a crtc to the drm core | ||
374 | */ | ||
375 | static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) | ||
376 | { | ||
377 | struct imx_drm_device *imxdrm = __imx_drm_device(); | ||
378 | int ret; | ||
379 | |||
380 | ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); | ||
381 | if (ret) | ||
382 | return ret; | ||
383 | |||
384 | drm_crtc_helper_add(imx_drm_crtc->crtc, | ||
385 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); | ||
386 | |||
387 | drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, | ||
388 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); | ||
389 | |||
390 | drm_mode_group_reinit(imxdrm->drm); | ||
391 | |||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | /* | ||
396 | * Called by the CRTC driver when all CRTCs are registered. This | 373 | * Called by the CRTC driver when all CRTCs are registered. This |
397 | * puts all the pieces together and initializes the driver. | 374 | * puts all the pieces together and initializes the driver. |
398 | * Once this is called no more CRTCs can be registered since | 375 | * Once this is called no more CRTCs can be registered since |
@@ -424,15 +401,15 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) | |||
424 | 401 | ||
425 | mutex_lock(&imxdrm->mutex); | 402 | mutex_lock(&imxdrm->mutex); |
426 | 403 | ||
427 | drm_kms_helper_poll_init(imxdrm->drm); | 404 | drm_kms_helper_poll_init(drm); |
428 | 405 | ||
429 | /* setup the grouping for the legacy output */ | 406 | /* setup the grouping for the legacy output */ |
430 | ret = drm_mode_group_init_legacy_group(imxdrm->drm, | 407 | ret = drm_mode_group_init_legacy_group(drm, |
431 | &imxdrm->drm->primary->mode_group); | 408 | &drm->primary->mode_group); |
432 | if (ret) | 409 | if (ret) |
433 | goto err_kms; | 410 | goto err_kms; |
434 | 411 | ||
435 | ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); | 412 | ret = drm_vblank_init(drm, MAX_CRTC); |
436 | if (ret) | 413 | if (ret) |
437 | goto err_kms; | 414 | goto err_kms; |
438 | 415 | ||
@@ -441,7 +418,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) | |||
441 | * by drm timer once a current process gives up ownership of | 418 | * by drm timer once a current process gives up ownership of |
442 | * vblank event.(after drm_vblank_put function is called) | 419 | * vblank event.(after drm_vblank_put function is called) |
443 | */ | 420 | */ |
444 | imxdrm->drm->vblank_disable_allowed = true; | 421 | drm->vblank_disable_allowed = true; |
445 | 422 | ||
446 | if (!imx_drm_device_get()) { | 423 | if (!imx_drm_device_get()) { |
447 | ret = -EINVAL; | 424 | ret = -EINVAL; |
@@ -536,10 +513,18 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, | |||
536 | 513 | ||
537 | *new_crtc = imx_drm_crtc; | 514 | *new_crtc = imx_drm_crtc; |
538 | 515 | ||
539 | ret = imx_drm_crtc_register(imx_drm_crtc); | 516 | ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); |
540 | if (ret) | 517 | if (ret) |
541 | goto err_register; | 518 | goto err_register; |
542 | 519 | ||
520 | drm_crtc_helper_add(crtc, | ||
521 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); | ||
522 | |||
523 | drm_crtc_init(imxdrm->drm, crtc, | ||
524 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); | ||
525 | |||
526 | drm_mode_group_reinit(imxdrm->drm); | ||
527 | |||
543 | imx_drm_update_possible_crtcs(); | 528 | imx_drm_update_possible_crtcs(); |
544 | 529 | ||
545 | mutex_unlock(&imxdrm->mutex); | 530 | mutex_unlock(&imxdrm->mutex); |
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c index f3a1f5e2e492..62ce0e86f14b 100644 --- a/drivers/staging/imx-drm/imx-hdmi.c +++ b/drivers/staging/imx-drm/imx-hdmi.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/hdmi.h> | ||
19 | #include <linux/regmap.h> | 20 | #include <linux/regmap.h> |
20 | #include <linux/mfd/syscon.h> | 21 | #include <linux/mfd/syscon.h> |
21 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 22 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
@@ -52,11 +53,6 @@ enum hdmi_datamap { | |||
52 | YCbCr422_12B = 0x12, | 53 | YCbCr422_12B = 0x12, |
53 | }; | 54 | }; |
54 | 55 | ||
55 | enum hdmi_colorimetry { | ||
56 | ITU601, | ||
57 | ITU709, | ||
58 | }; | ||
59 | |||
60 | enum imx_hdmi_devtype { | 56 | enum imx_hdmi_devtype { |
61 | IMX6Q_HDMI, | 57 | IMX6Q_HDMI, |
62 | IMX6DL_HDMI, | 58 | IMX6DL_HDMI, |
@@ -489,12 +485,12 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi) | |||
489 | 485 | ||
490 | if (is_color_space_conversion(hdmi)) { | 486 | if (is_color_space_conversion(hdmi)) { |
491 | if (hdmi->hdmi_data.enc_out_format == RGB) { | 487 | if (hdmi->hdmi_data.enc_out_format == RGB) { |
492 | if (hdmi->hdmi_data.colorimetry == ITU601) | 488 | if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) |
493 | csc_coeff = &csc_coeff_rgb_out_eitu601; | 489 | csc_coeff = &csc_coeff_rgb_out_eitu601; |
494 | else | 490 | else |
495 | csc_coeff = &csc_coeff_rgb_out_eitu709; | 491 | csc_coeff = &csc_coeff_rgb_out_eitu709; |
496 | } else if (hdmi->hdmi_data.enc_in_format == RGB) { | 492 | } else if (hdmi->hdmi_data.enc_in_format == RGB) { |
497 | if (hdmi->hdmi_data.colorimetry == ITU601) | 493 | if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) |
498 | csc_coeff = &csc_coeff_rgb_in_eitu601; | 494 | csc_coeff = &csc_coeff_rgb_in_eitu601; |
499 | else | 495 | else |
500 | csc_coeff = &csc_coeff_rgb_in_eitu709; | 496 | csc_coeff = &csc_coeff_rgb_in_eitu709; |
@@ -1140,16 +1136,16 @@ static void hdmi_config_AVI(struct imx_hdmi *hdmi) | |||
1140 | /* Set up colorimetry */ | 1136 | /* Set up colorimetry */ |
1141 | if (hdmi->hdmi_data.enc_out_format == XVYCC444) { | 1137 | if (hdmi->hdmi_data.enc_out_format == XVYCC444) { |
1142 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO; | 1138 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO; |
1143 | if (hdmi->hdmi_data.colorimetry == ITU601) | 1139 | if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) |
1144 | ext_colorimetry = | 1140 | ext_colorimetry = |
1145 | HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; | 1141 | HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; |
1146 | else /* hdmi->hdmi_data.colorimetry == ITU709 */ | 1142 | else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/ |
1147 | ext_colorimetry = | 1143 | ext_colorimetry = |
1148 | HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709; | 1144 | HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709; |
1149 | } else if (hdmi->hdmi_data.enc_out_format != RGB) { | 1145 | } else if (hdmi->hdmi_data.enc_out_format != RGB) { |
1150 | if (hdmi->hdmi_data.colorimetry == ITU601) | 1146 | if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) |
1151 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE; | 1147 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE; |
1152 | else /* hdmi->hdmi_data.colorimetry == ITU709 */ | 1148 | else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/ |
1153 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR; | 1149 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR; |
1154 | ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; | 1150 | ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; |
1155 | } else { /* Carries no data */ | 1151 | } else { /* Carries no data */ |
@@ -1379,9 +1375,9 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode) | |||
1379 | (hdmi->vic == 21) || (hdmi->vic == 22) || | 1375 | (hdmi->vic == 21) || (hdmi->vic == 22) || |
1380 | (hdmi->vic == 2) || (hdmi->vic == 3) || | 1376 | (hdmi->vic == 2) || (hdmi->vic == 3) || |
1381 | (hdmi->vic == 17) || (hdmi->vic == 18)) | 1377 | (hdmi->vic == 17) || (hdmi->vic == 18)) |
1382 | hdmi->hdmi_data.colorimetry = ITU601; | 1378 | hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601; |
1383 | else | 1379 | else |
1384 | hdmi->hdmi_data.colorimetry = ITU709; | 1380 | hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709; |
1385 | 1381 | ||
1386 | if ((hdmi->vic == 10) || (hdmi->vic == 11) || | 1382 | if ((hdmi->vic == 10) || (hdmi->vic == 11) || |
1387 | (hdmi->vic == 12) || (hdmi->vic == 13) || | 1383 | (hdmi->vic == 12) || (hdmi->vic == 13) || |
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO index 22742d6d62a8..0a2b6cb3775e 100644 --- a/drivers/staging/lustre/TODO +++ b/drivers/staging/lustre/TODO | |||
@@ -9,5 +9,6 @@ | |||
9 | * Other minor misc cleanups... | 9 | * Other minor misc cleanups... |
10 | 10 | ||
11 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger | 11 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger |
12 | <andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing | 12 | <andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and |
13 | hpdd-discuss <hpdd-discuss@lists.01.org> would be great too. | 13 | Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org> |
14 | would be great too. | ||
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h index 596a15fc8996..037ae8a6d531 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h | |||
@@ -61,6 +61,8 @@ struct kuc_hdr { | |||
61 | __u16 kuc_msglen; /* Including header */ | 61 | __u16 kuc_msglen; /* Including header */ |
62 | } __attribute__((aligned(sizeof(__u64)))); | 62 | } __attribute__((aligned(sizeof(__u64)))); |
63 | 63 | ||
64 | #define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE) | ||
65 | |||
64 | #define KUC_MAGIC 0x191C /*Lustre9etLinC */ | 66 | #define KUC_MAGIC 0x191C /*Lustre9etLinC */ |
65 | #define KUC_FL_BLOCK 0x01 /* Wait for send */ | 67 | #define KUC_FL_BLOCK 0x01 /* Wait for send */ |
66 | 68 | ||
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h index d0d942ced01a..dddccca120c9 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h | |||
@@ -120,7 +120,7 @@ do { \ | |||
120 | do { \ | 120 | do { \ |
121 | LASSERT(!in_interrupt() || \ | 121 | LASSERT(!in_interrupt() || \ |
122 | ((size) <= LIBCFS_VMALLOC_SIZE && \ | 122 | ((size) <= LIBCFS_VMALLOC_SIZE && \ |
123 | ((mask) & GFP_ATOMIC)) != 0); \ | 123 | ((mask) & __GFP_WAIT) == 0)); \ |
124 | } while (0) | 124 | } while (0) |
125 | 125 | ||
126 | #define LIBCFS_ALLOC_POST(ptr, size) \ | 126 | #define LIBCFS_ALLOC_POST(ptr, size) \ |
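Note on the libcfs_private.h hunk: the allocation assertion now requires that a request made from interrupt context carries a mask that cannot sleep, i.e. __GFP_WAIT is clear, instead of testing that a GFP_ATOMIC bit is set. A one-line illustration of the corrected test (sketch only, for kernels of this vintage):

    #include <linux/gfp.h>
    #include <linux/types.h>

    /* True when the mask is usable from atomic/interrupt context:
     * __GFP_WAIT set would allow the allocator to sleep. */
    static inline bool gfp_usable_in_interrupt(gfp_t mask)
    {
            return (mask & __GFP_WAIT) == 0;
    }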
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 93648632ba26..6f58ead20393 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | |||
@@ -529,7 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr) | |||
529 | { | 529 | { |
530 | struct page *page; | 530 | struct page *page; |
531 | 531 | ||
532 | if (is_vmalloc_addr(vaddr)) { | 532 | if (is_vmalloc_addr((void *)vaddr)) { |
533 | page = vmalloc_to_page ((void *)vaddr); | 533 | page = vmalloc_to_page ((void *)vaddr); |
534 | LASSERT (page != NULL); | 534 | LASSERT (page != NULL); |
535 | return page; | 535 | return page; |
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index 68a4f52ec998..b7b53b579c85 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | |||
@@ -924,7 +924,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) | |||
924 | int | 924 | int |
925 | ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) | 925 | ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) |
926 | { | 926 | { |
927 | int mpflag = 0; | 927 | int mpflag = 1; |
928 | int type = lntmsg->msg_type; | 928 | int type = lntmsg->msg_type; |
929 | lnet_process_id_t target = lntmsg->msg_target; | 929 | lnet_process_id_t target = lntmsg->msg_target; |
930 | unsigned int payload_niov = lntmsg->msg_niov; | 930 | unsigned int payload_niov = lntmsg->msg_niov; |
@@ -993,8 +993,9 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) | |||
993 | 993 | ||
994 | /* The first fragment will be set later in pro_pack */ | 994 | /* The first fragment will be set later in pro_pack */ |
995 | rc = ksocknal_launch_packet(ni, tx, target); | 995 | rc = ksocknal_launch_packet(ni, tx, target); |
996 | if (lntmsg->msg_vmflush) | 996 | if (!mpflag) |
997 | cfs_memory_pressure_restore(mpflag); | 997 | cfs_memory_pressure_restore(mpflag); |
998 | |||
998 | if (rc == 0) | 999 | if (rc == 0) |
999 | return (0); | 1000 | return (0); |
1000 | 1001 | ||
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 6b6c0240e824..7893d83e131f 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h | |||
@@ -760,7 +760,8 @@ static inline void hsm_set_cl_error(int *flags, int error) | |||
760 | *flags |= (error << CLF_HSM_ERR_L); | 760 | *flags |= (error << CLF_HSM_ERR_L); |
761 | } | 761 | } |
762 | 762 | ||
763 | #define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec)) | 763 | #define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \ |
764 | sizeof(struct changelog_ext_rec)) | ||
764 | 765 | ||
765 | struct changelog_rec { | 766 | struct changelog_rec { |
766 | __u16 cr_namelen; | 767 | __u16 cr_namelen; |
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 22d0acc95bc5..52b7731bcc38 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c | |||
@@ -1086,7 +1086,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl) | |||
1086 | break; | 1086 | break; |
1087 | case Q_GETQUOTA: | 1087 | case Q_GETQUOTA: |
1088 | if (((type == USRQUOTA && | 1088 | if (((type == USRQUOTA && |
1089 | uid_eq(current_euid(), make_kuid(&init_user_ns, id))) || | 1089 | !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) || |
1090 | (type == GRPQUOTA && | 1090 | (type == GRPQUOTA && |
1091 | !in_egroup_p(make_kgid(&init_user_ns, id)))) && | 1091 | !in_egroup_p(make_kgid(&init_user_ns, id)))) && |
1092 | (!cfs_capable(CFS_CAP_SYS_ADMIN) || | 1092 | (!cfs_capable(CFS_CAP_SYS_ADMIN) || |
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index d1ad91c34ddc..83013927e131 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c | |||
@@ -1430,7 +1430,7 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags) | |||
1430 | { | 1430 | { |
1431 | struct kuc_hdr *lh = (struct kuc_hdr *)buf; | 1431 | struct kuc_hdr *lh = (struct kuc_hdr *)buf; |
1432 | 1432 | ||
1433 | LASSERT(len <= CR_MAXSIZE); | 1433 | LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE); |
1434 | 1434 | ||
1435 | lh->kuc_magic = KUC_MAGIC; | 1435 | lh->kuc_magic = KUC_MAGIC; |
1436 | lh->kuc_transport = KUC_TRANSPORT_CHANGELOG; | 1436 | lh->kuc_transport = KUC_TRANSPORT_CHANGELOG; |
@@ -1503,7 +1503,7 @@ static int mdc_changelog_send_thread(void *csdata) | |||
1503 | CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n", | 1503 | CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n", |
1504 | cs->cs_fp, cs->cs_startrec); | 1504 | cs->cs_fp, cs->cs_startrec); |
1505 | 1505 | ||
1506 | OBD_ALLOC(cs->cs_buf, CR_MAXSIZE); | 1506 | OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE); |
1507 | if (cs->cs_buf == NULL) | 1507 | if (cs->cs_buf == NULL) |
1508 | GOTO(out, rc = -ENOMEM); | 1508 | GOTO(out, rc = -ENOMEM); |
1509 | 1509 | ||
@@ -1540,7 +1540,7 @@ out: | |||
1540 | if (ctxt) | 1540 | if (ctxt) |
1541 | llog_ctxt_put(ctxt); | 1541 | llog_ctxt_put(ctxt); |
1542 | if (cs->cs_buf) | 1542 | if (cs->cs_buf) |
1543 | OBD_FREE(cs->cs_buf, CR_MAXSIZE); | 1543 | OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE); |
1544 | OBD_FREE_PTR(cs); | 1544 | OBD_FREE_PTR(cs); |
1545 | return rc; | 1545 | return rc; |
1546 | } | 1546 | } |
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index eedffed17e39..31b269a5fff7 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c | |||
@@ -307,7 +307,7 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb, | |||
307 | } | 307 | } |
308 | 308 | ||
309 | static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, | 309 | static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, |
310 | void *accel_priv) | 310 | void *accel_priv, select_queue_fallback_t fallback) |
311 | { | 311 | { |
312 | return (u16)smp_processor_id(); | 312 | return (u16)smp_processor_id(); |
313 | } | 313 | } |
@@ -892,6 +892,11 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv, | |||
892 | priv->mii_bus->write = xlr_mii_write; | 892 | priv->mii_bus->write = xlr_mii_write; |
893 | priv->mii_bus->parent = &pdev->dev; | 893 | priv->mii_bus->parent = &pdev->dev; |
894 | priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); | 894 | priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); |
895 | if (priv->mii_bus->irq == NULL) { | ||
896 | pr_err("irq alloc failed\n"); | ||
897 | mdiobus_free(priv->mii_bus); | ||
898 | return -ENOMEM; | ||
899 | } | ||
895 | priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq; | 900 | priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq; |
896 | 901 | ||
897 | /* Scan only the enabled address */ | 902 | /* Scan only the enabled address */ |
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c index 47e0a91238a1..5a001d9b4252 100644 --- a/drivers/staging/octeon-usb/octeon-hcd.c +++ b/drivers/staging/octeon-usb/octeon-hcd.c | |||
@@ -275,13 +275,6 @@ enum cvmx_usb_pipe_flags { | |||
275 | */ | 275 | */ |
276 | #define MAX_TRANSFER_PACKETS ((1<<10)-1) | 276 | #define MAX_TRANSFER_PACKETS ((1<<10)-1) |
277 | 277 | ||
278 | enum { | ||
279 | USB_CLOCK_TYPE_REF_12, | ||
280 | USB_CLOCK_TYPE_REF_24, | ||
281 | USB_CLOCK_TYPE_REF_48, | ||
282 | USB_CLOCK_TYPE_CRYSTAL_12, | ||
283 | }; | ||
284 | |||
285 | /** | 278 | /** |
286 | * Logical transactions may take numerous low level | 279 | * Logical transactions may take numerous low level |
287 | * transactions, especially when splits are concerned. This | 280 | * transactions, especially when splits are concerned. This |
@@ -471,19 +464,6 @@ struct octeon_hcd { | |||
471 | /* Returns the IO address to push/pop stuff data from the FIFOs */ | 464 | /* Returns the IO address to push/pop stuff data from the FIFOs */ |
472 | #define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000) | 465 | #define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000) |
473 | 466 | ||
474 | static int octeon_usb_get_clock_type(void) | ||
475 | { | ||
476 | switch (cvmx_sysinfo_get()->board_type) { | ||
477 | case CVMX_BOARD_TYPE_BBGW_REF: | ||
478 | case CVMX_BOARD_TYPE_LANAI2_A: | ||
479 | case CVMX_BOARD_TYPE_LANAI2_U: | ||
480 | case CVMX_BOARD_TYPE_LANAI2_G: | ||
481 | case CVMX_BOARD_TYPE_UBNT_E100: | ||
482 | return USB_CLOCK_TYPE_CRYSTAL_12; | ||
483 | } | ||
484 | return USB_CLOCK_TYPE_REF_48; | ||
485 | } | ||
486 | |||
487 | /** | 467 | /** |
488 | * Read a USB 32bit CSR. It performs the necessary address swizzle | 468 | * Read a USB 32bit CSR. It performs the necessary address swizzle |
489 | * for 32bit CSRs and logs the value in a readable format if | 469 | * for 32bit CSRs and logs the value in a readable format if |
@@ -582,37 +562,6 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe) | |||
582 | return 0; /* Data0 */ | 562 | return 0; /* Data0 */ |
583 | } | 563 | } |
584 | 564 | ||
585 | |||
586 | /** | ||
587 | * Return the number of USB ports supported by this Octeon | ||
588 | * chip. If the chip doesn't support USB, or is not supported | ||
589 | * by this API, a zero will be returned. Most Octeon chips | ||
590 | * support one usb port, but some support two ports. | ||
591 | * cvmx_usb_initialize() must be called on independent | ||
592 | * struct cvmx_usb_state. | ||
593 | * | ||
595 | * Returns: Number of ports, zero if usb isn't supported | ||
595 | */ | ||
596 | static int cvmx_usb_get_num_ports(void) | ||
597 | { | ||
598 | int arch_ports = 0; | ||
599 | |||
600 | if (OCTEON_IS_MODEL(OCTEON_CN56XX)) | ||
601 | arch_ports = 1; | ||
602 | else if (OCTEON_IS_MODEL(OCTEON_CN52XX)) | ||
603 | arch_ports = 2; | ||
604 | else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) | ||
605 | arch_ports = 1; | ||
606 | else if (OCTEON_IS_MODEL(OCTEON_CN31XX)) | ||
607 | arch_ports = 1; | ||
608 | else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) | ||
609 | arch_ports = 1; | ||
610 | else | ||
611 | arch_ports = 0; | ||
612 | |||
613 | return arch_ports; | ||
614 | } | ||
615 | |||
616 | /** | 565 | /** |
617 | * Initialize a USB port for use. This must be called before any | 566 | * Initialize a USB port for use. This must be called before any |
618 | * other access to the Octeon USB port is made. The port starts | 567 | * other access to the Octeon USB port is made. The port starts |
@@ -628,41 +577,16 @@ static int cvmx_usb_get_num_ports(void) | |||
628 | * Returns: 0 or a negative error code. | 577 | * Returns: 0 or a negative error code. |
629 | */ | 578 | */ |
630 | static int cvmx_usb_initialize(struct cvmx_usb_state *usb, | 579 | static int cvmx_usb_initialize(struct cvmx_usb_state *usb, |
631 | int usb_port_number) | 580 | int usb_port_number, |
581 | enum cvmx_usb_initialize_flags flags) | ||
632 | { | 582 | { |
633 | union cvmx_usbnx_clk_ctl usbn_clk_ctl; | 583 | union cvmx_usbnx_clk_ctl usbn_clk_ctl; |
634 | union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status; | 584 | union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status; |
635 | enum cvmx_usb_initialize_flags flags = 0; | ||
636 | int i; | 585 | int i; |
637 | 586 | ||
638 | /* At first allow 0-1 for the usb port number */ | 587 | /* At first allow 0-1 for the usb port number */ |
639 | if ((usb_port_number < 0) || (usb_port_number > 1)) | 588 | if ((usb_port_number < 0) || (usb_port_number > 1)) |
640 | return -EINVAL; | 589 | return -EINVAL; |
641 | /* For all chips except 52XX there is only one port */ | ||
642 | if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0)) | ||
643 | return -EINVAL; | ||
644 | /* Try to determine clock type automatically */ | ||
645 | if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) { | ||
646 | /* Only 12 MHZ crystals are supported */ | ||
647 | flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; | ||
648 | } else { | ||
649 | flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND; | ||
650 | |||
651 | switch (octeon_usb_get_clock_type()) { | ||
652 | case USB_CLOCK_TYPE_REF_12: | ||
653 | flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ; | ||
654 | break; | ||
655 | case USB_CLOCK_TYPE_REF_24: | ||
656 | flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ; | ||
657 | break; | ||
658 | case USB_CLOCK_TYPE_REF_48: | ||
659 | flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ; | ||
660 | break; | ||
661 | default: | ||
662 | return -EINVAL; | ||
663 | break; | ||
664 | } | ||
665 | } | ||
666 | 590 | ||
667 | memset(usb, 0, sizeof(*usb)); | 591 | memset(usb, 0, sizeof(*usb)); |
668 | usb->init_flags = flags; | 592 | usb->init_flags = flags; |
@@ -3431,7 +3355,6 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
3431 | return 0; | 3355 | return 0; |
3432 | } | 3356 | } |
3433 | 3357 | ||
3434 | |||
3435 | static const struct hc_driver octeon_hc_driver = { | 3358 | static const struct hc_driver octeon_hc_driver = { |
3436 | .description = "Octeon USB", | 3359 | .description = "Octeon USB", |
3437 | .product_desc = "Octeon Host Controller", | 3360 | .product_desc = "Octeon Host Controller", |
@@ -3448,15 +3371,74 @@ static const struct hc_driver octeon_hc_driver = { | |||
3448 | .hub_control = octeon_usb_hub_control, | 3371 | .hub_control = octeon_usb_hub_control, |
3449 | }; | 3372 | }; |
3450 | 3373 | ||
3451 | 3374 | static int octeon_usb_probe(struct platform_device *pdev) | |
3452 | static int octeon_usb_driver_probe(struct device *dev) | ||
3453 | { | 3375 | { |
3454 | int status; | 3376 | int status; |
3455 | int usb_num = to_platform_device(dev)->id; | 3377 | int initialize_flags; |
3456 | int irq = platform_get_irq(to_platform_device(dev), 0); | 3378 | int usb_num; |
3379 | struct resource *res_mem; | ||
3380 | struct device_node *usbn_node; | ||
3381 | int irq = platform_get_irq(pdev, 0); | ||
3382 | struct device *dev = &pdev->dev; | ||
3457 | struct octeon_hcd *priv; | 3383 | struct octeon_hcd *priv; |
3458 | struct usb_hcd *hcd; | 3384 | struct usb_hcd *hcd; |
3459 | unsigned long flags; | 3385 | unsigned long flags; |
3386 | u32 clock_rate = 48000000; | ||
3387 | bool is_crystal_clock = false; | ||
3388 | const char *clock_type; | ||
3389 | int i; | ||
3390 | |||
3391 | if (dev->of_node == NULL) { | ||
3392 | dev_err(dev, "Error: empty of_node\n"); | ||
3393 | return -ENXIO; | ||
3394 | } | ||
3395 | usbn_node = dev->of_node->parent; | ||
3396 | |||
3397 | i = of_property_read_u32(usbn_node, | ||
3398 | "refclk-frequency", &clock_rate); | ||
3399 | if (i) { | ||
3400 | dev_err(dev, "No USBN \"refclk-frequency\"\n"); | ||
3401 | return -ENXIO; | ||
3402 | } | ||
3403 | switch (clock_rate) { | ||
3404 | case 12000000: | ||
3405 | initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ; | ||
3406 | break; | ||
3407 | case 24000000: | ||
3408 | initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ; | ||
3409 | break; | ||
3410 | case 48000000: | ||
3411 | initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ; | ||
3412 | break; | ||
3413 | default: | ||
3414 | dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n", clock_rate); | ||
3415 | return -ENXIO; | ||
3416 | |||
3417 | } | ||
3418 | |||
3419 | i = of_property_read_string(usbn_node, | ||
3420 | "refclk-type", &clock_type); | ||
3421 | |||
3422 | if (!i && strcmp("crystal", clock_type) == 0) | ||
3423 | is_crystal_clock = true; | ||
3424 | |||
3425 | if (is_crystal_clock) | ||
3426 | initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; | ||
3427 | else | ||
3428 | initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND; | ||
3429 | |||
3430 | res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
3431 | if (res_mem == NULL) { | ||
3432 | dev_err(dev, "found no memory resource\n"); | ||
3433 | return -ENXIO; | ||
3434 | } | ||
3435 | usb_num = (res_mem->start >> 44) & 1; | ||
3436 | |||
3437 | if (irq < 0) { | ||
3438 | /* Defective device tree, but we know how to fix it. */ | ||
3439 | irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56; | ||
3440 | irq = irq_create_mapping(NULL, hwirq); | ||
3441 | } | ||
3460 | 3442 | ||
3461 | /* | 3443 | /* |
3462 | * Set the DMA mask to 64bits so we get buffers already translated for | 3444 | * Set the DMA mask to 64bits so we get buffers already translated for |
@@ -3465,6 +3447,26 @@ static int octeon_usb_driver_probe(struct device *dev) | |||
3465 | dev->coherent_dma_mask = ~0; | 3447 | dev->coherent_dma_mask = ~0; |
3466 | dev->dma_mask = &dev->coherent_dma_mask; | 3448 | dev->dma_mask = &dev->coherent_dma_mask; |
3467 | 3449 | ||
3450 | /* | ||
3451 | * Only cn52XX and cn56XX have DWC_OTG USB hardware and the | ||
3452 | * IOB priority registers. Under heavy network load USB | ||
3453 | * hardware can be starved by the IOB causing a crash. Give | ||
3454 | * it a priority boost if it has been waiting more than 400 | ||
3455 | * cycles to avoid this situation. | ||
3456 | * | ||
3457 | * Testing indicates that a cnt_val of 8192 is not sufficient, | ||
3458 | * but no failures are seen with 4096. We choose a value of | ||
3459 | * 400 to give a safety factor of 10. | ||
3460 | */ | ||
3461 | if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { | ||
3462 | union cvmx_iob_n2c_l2c_pri_cnt pri_cnt; | ||
3463 | |||
3464 | pri_cnt.u64 = 0; | ||
3465 | pri_cnt.s.cnt_enb = 1; | ||
3466 | pri_cnt.s.cnt_val = 400; | ||
3467 | cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64); | ||
3468 | } | ||
3469 | |||
3468 | hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev)); | 3470 | hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev)); |
3469 | if (!hcd) { | 3471 | if (!hcd) { |
3470 | dev_dbg(dev, "Failed to allocate memory for HCD\n"); | 3472 | dev_dbg(dev, "Failed to allocate memory for HCD\n"); |
@@ -3478,7 +3480,7 @@ static int octeon_usb_driver_probe(struct device *dev) | |||
3478 | tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv); | 3480 | tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv); |
3479 | INIT_LIST_HEAD(&priv->dequeue_list); | 3481 | INIT_LIST_HEAD(&priv->dequeue_list); |
3480 | 3482 | ||
3481 | status = cvmx_usb_initialize(&priv->usb, usb_num); | 3483 | status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags); |
3482 | if (status) { | 3484 | if (status) { |
3483 | dev_dbg(dev, "USB initialization failed with %d\n", status); | 3485 | dev_dbg(dev, "USB initialization failed with %d\n", status); |
3484 | kfree(hcd); | 3486 | kfree(hcd); |
@@ -3492,7 +3494,7 @@ static int octeon_usb_driver_probe(struct device *dev) | |||
3492 | cvmx_usb_poll(&priv->usb); | 3494 | cvmx_usb_poll(&priv->usb); |
3493 | spin_unlock_irqrestore(&priv->lock, flags); | 3495 | spin_unlock_irqrestore(&priv->lock, flags); |
3494 | 3496 | ||
3495 | status = usb_add_hcd(hcd, irq, IRQF_SHARED); | 3497 | status = usb_add_hcd(hcd, irq, 0); |
3496 | if (status) { | 3498 | if (status) { |
3497 | dev_dbg(dev, "USB add HCD failed with %d\n", status); | 3499 | dev_dbg(dev, "USB add HCD failed with %d\n", status); |
3498 | kfree(hcd); | 3500 | kfree(hcd); |
@@ -3500,14 +3502,15 @@ static int octeon_usb_driver_probe(struct device *dev) | |||
3500 | } | 3502 | } |
3501 | device_wakeup_enable(hcd->self.controller); | 3503 | device_wakeup_enable(hcd->self.controller); |
3502 | 3504 | ||
3503 | dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq); | 3505 | dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq); |
3504 | 3506 | ||
3505 | return 0; | 3507 | return 0; |
3506 | } | 3508 | } |
3507 | 3509 | ||
3508 | static int octeon_usb_driver_remove(struct device *dev) | 3510 | static int octeon_usb_remove(struct platform_device *pdev) |
3509 | { | 3511 | { |
3510 | int status; | 3512 | int status; |
3513 | struct device *dev = &pdev->dev; | ||
3511 | struct usb_hcd *hcd = dev_get_drvdata(dev); | 3514 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
3512 | struct octeon_hcd *priv = hcd_to_octeon(hcd); | 3515 | struct octeon_hcd *priv = hcd_to_octeon(hcd); |
3513 | unsigned long flags; | 3516 | unsigned long flags; |
@@ -3525,85 +3528,41 @@ static int octeon_usb_driver_remove(struct device *dev) | |||
3525 | return 0; | 3528 | return 0; |
3526 | } | 3529 | } |
3527 | 3530 | ||
3528 | static struct device_driver octeon_usb_driver = { | 3531 | static struct of_device_id octeon_usb_match[] = { |
3529 | .name = "OcteonUSB", | 3532 | { |
3530 | .bus = &platform_bus_type, | 3533 | .compatible = "cavium,octeon-5750-usbc", |
3531 | .probe = octeon_usb_driver_probe, | 3534 | }, |
3532 | .remove = octeon_usb_driver_remove, | 3535 | {}, |
3533 | }; | 3536 | }; |
3534 | 3537 | ||
3538 | static struct platform_driver octeon_usb_driver = { | ||
3539 | .driver = { | ||
3540 | .name = "OcteonUSB", | ||
3541 | .owner = THIS_MODULE, | ||
3542 | .of_match_table = octeon_usb_match, | ||
3543 | }, | ||
3544 | .probe = octeon_usb_probe, | ||
3545 | .remove = octeon_usb_remove, | ||
3546 | }; | ||
3535 | 3547 | ||
3536 | #define MAX_USB_PORTS 10 | 3548 | static int __init octeon_usb_driver_init(void) |
3537 | static struct platform_device *pdev_glob[MAX_USB_PORTS]; | ||
3538 | static int octeon_usb_registered; | ||
3539 | static int __init octeon_usb_module_init(void) | ||
3540 | { | 3549 | { |
3541 | int num_devices = cvmx_usb_get_num_ports(); | 3550 | if (usb_disabled()) |
3542 | int device; | 3551 | return 0; |
3543 | |||
3544 | if (usb_disabled() || num_devices == 0) | ||
3545 | return -ENODEV; | ||
3546 | |||
3547 | if (driver_register(&octeon_usb_driver)) | ||
3548 | return -ENOMEM; | ||
3549 | |||
3550 | octeon_usb_registered = 1; | ||
3551 | |||
3552 | /* | ||
3553 | * Only cn52XX and cn56XX have DWC_OTG USB hardware and the | ||
3554 | * IOB priority registers. Under heavy network load USB | ||
3555 | * hardware can be starved by the IOB causing a crash. Give | ||
3556 | * it a priority boost if it has been waiting more than 400 | ||
3557 | * cycles to avoid this situation. | ||
3558 | * | ||
3559 | * Testing indicates that a cnt_val of 8192 is not sufficient, | ||
3560 | * but no failures are seen with 4096. We choose a value of | ||
3561 | * 400 to give a safety factor of 10. | ||
3562 | */ | ||
3563 | if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { | ||
3564 | union cvmx_iob_n2c_l2c_pri_cnt pri_cnt; | ||
3565 | |||
3566 | pri_cnt.u64 = 0; | ||
3567 | pri_cnt.s.cnt_enb = 1; | ||
3568 | pri_cnt.s.cnt_val = 400; | ||
3569 | cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64); | ||
3570 | } | ||
3571 | |||
3572 | for (device = 0; device < num_devices; device++) { | ||
3573 | struct resource irq_resource; | ||
3574 | struct platform_device *pdev; | ||
3575 | memset(&irq_resource, 0, sizeof(irq_resource)); | ||
3576 | irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1; | ||
3577 | irq_resource.end = irq_resource.start; | ||
3578 | irq_resource.flags = IORESOURCE_IRQ; | ||
3579 | pdev = platform_device_register_simple((char *)octeon_usb_driver. name, device, &irq_resource, 1); | ||
3580 | if (IS_ERR(pdev)) { | ||
3581 | driver_unregister(&octeon_usb_driver); | ||
3582 | octeon_usb_registered = 0; | ||
3583 | return PTR_ERR(pdev); | ||
3584 | } | ||
3585 | if (device < MAX_USB_PORTS) | ||
3586 | pdev_glob[device] = pdev; | ||
3587 | 3552 | ||
3588 | } | 3553 | return platform_driver_register(&octeon_usb_driver); |
3589 | return 0; | ||
3590 | } | 3554 | } |
3555 | module_init(octeon_usb_driver_init); | ||
3591 | 3556 | ||
3592 | static void __exit octeon_usb_module_cleanup(void) | 3557 | static void __exit octeon_usb_driver_exit(void) |
3593 | { | 3558 | { |
3594 | int i; | 3559 | if (usb_disabled()) |
3560 | return; | ||
3595 | 3561 | ||
3596 | for (i = 0; i < MAX_USB_PORTS; i++) | 3562 | platform_driver_unregister(&octeon_usb_driver); |
3597 | if (pdev_glob[i]) { | ||
3598 | platform_device_unregister(pdev_glob[i]); | ||
3599 | pdev_glob[i] = NULL; | ||
3600 | } | ||
3601 | if (octeon_usb_registered) | ||
3602 | driver_unregister(&octeon_usb_driver); | ||
3603 | } | 3563 | } |
3564 | module_exit(octeon_usb_driver_exit); | ||
3604 | 3565 | ||
3605 | MODULE_LICENSE("GPL"); | 3566 | MODULE_LICENSE("GPL"); |
3606 | MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>"); | 3567 | MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>"); |
3607 | MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver."); | 3568 | MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver."); |
3608 | module_init(octeon_usb_module_init); | ||
3609 | module_exit(octeon_usb_module_cleanup); | ||
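Note on the octeon-hcd.c conversion above: the hand-rolled device_driver registration and the platform devices created at module init are replaced by an ordinary platform_driver bound through a devicetree match table. A bare-bones skeleton of that shape, with hypothetical names:

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static const struct of_device_id example_usbc_match[] = {
            { .compatible = "vendor,example-usbc" },    /* hypothetical compatible */
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, example_usbc_match);

    static int example_usbc_probe(struct platform_device *pdev)
    {
            /* read clock properties from pdev->dev.of_node->parent,
             * map the MEM resource, create and add the HCD */
            return 0;
    }

    static int example_usbc_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver example_usbc_driver = {
            .driver = {
                    .name = "example-usbc",
                    .owner = THIS_MODULE,
                    .of_match_table = example_usbc_match,
            },
            .probe = example_usbc_probe,
            .remove = example_usbc_remove,
    };
    module_platform_driver(example_usbc_driver);

    MODULE_LICENSE("GPL");

The driver itself keeps explicit module_init()/module_exit() wrappers rather than module_platform_driver() so it can return early when usb_disabled() is set.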
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c index cb060364dfe7..5d965cf06d59 100644 --- a/drivers/staging/ozwpan/ozproto.c +++ b/drivers/staging/ozwpan/ozproto.c | |||
@@ -668,8 +668,8 @@ void oz_binding_add(const char *net_dev) | |||
668 | if (binding) { | 668 | if (binding) { |
669 | binding->ptype.type = __constant_htons(OZ_ETHERTYPE); | 669 | binding->ptype.type = __constant_htons(OZ_ETHERTYPE); |
670 | binding->ptype.func = oz_pkt_recv; | 670 | binding->ptype.func = oz_pkt_recv; |
671 | memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN); | ||
672 | if (net_dev && *net_dev) { | 671 | if (net_dev && *net_dev) { |
672 | memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN); | ||
673 | oz_dbg(ON, "Adding binding: %s\n", net_dev); | 673 | oz_dbg(ON, "Adding binding: %s\n", net_dev); |
674 | binding->ptype.dev = | 674 | binding->ptype.dev = |
675 | dev_get_by_name(&init_net, net_dev); | 675 | dev_get_by_name(&init_net, net_dev); |
@@ -680,6 +680,7 @@ void oz_binding_add(const char *net_dev) | |||
680 | } | 680 | } |
681 | } else { | 681 | } else { |
682 | oz_dbg(ON, "Binding to all netcards\n"); | 682 | oz_dbg(ON, "Binding to all netcards\n"); |
683 | memset(binding->name, 0, OZ_MAX_BINDING_LEN); | ||
683 | binding->ptype.dev = NULL; | 684 | binding->ptype.dev = NULL; |
684 | } | 685 | } |
685 | if (binding) { | 686 | if (binding) { |
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c index 153ec61493ab..96df62f95b6b 100644 --- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c | |||
@@ -912,12 +912,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len) | |||
912 | unsigned char *pbuf; | 912 | unsigned char *pbuf; |
913 | u32 wpa_ielen = 0; | 913 | u32 wpa_ielen = 0; |
914 | u8 *pbssid = GetAddr3Ptr(pframe); | 914 | u8 *pbssid = GetAddr3Ptr(pframe); |
915 | u32 hidden_ssid = 0; | ||
916 | struct HT_info_element *pht_info = NULL; | 915 | struct HT_info_element *pht_info = NULL; |
917 | struct rtw_ieee80211_ht_cap *pht_cap = NULL; | 916 | struct rtw_ieee80211_ht_cap *pht_cap = NULL; |
918 | u32 bcn_channel; | 917 | u32 bcn_channel; |
919 | unsigned short ht_cap_info; | 918 | unsigned short ht_cap_info; |
920 | unsigned char ht_info_infos_0; | 919 | unsigned char ht_info_infos_0; |
920 | int ssid_len; | ||
921 | 921 | ||
922 | if (is_client_associated_to_ap(Adapter) == false) | 922 | if (is_client_associated_to_ap(Adapter) == false) |
923 | return true; | 923 | return true; |
@@ -999,21 +999,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len) | |||
999 | } | 999 | } |
1000 | 1000 | ||
1001 | /* checking SSID */ | 1001 | /* checking SSID */ |
1002 | ssid_len = 0; | ||
1002 | p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_); | 1003 | p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_); |
1003 | if (p == NULL) { | 1004 | if (p) { |
1004 | DBG_88E("%s marc: cannot find SSID for survey event\n", __func__); | 1005 | ssid_len = *(p + 1); |
1005 | hidden_ssid = true; | 1006 | if (ssid_len > NDIS_802_11_LENGTH_SSID) |
1006 | } else { | 1007 | ssid_len = 0; |
1007 | hidden_ssid = false; | ||
1008 | } | ||
1009 | |||
1010 | if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) { | ||
1011 | memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1)); | ||
1012 | bssid->Ssid.SsidLength = *(p + 1); | ||
1013 | } else { | ||
1014 | bssid->Ssid.SsidLength = 0; | ||
1015 | bssid->Ssid.Ssid[0] = '\0'; | ||
1016 | } | 1008 | } |
1009 | memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len); | ||
1010 | bssid->Ssid.SsidLength = ssid_len; | ||
1017 | 1011 | ||
1018 | RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d " | 1012 | RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d " |
1019 | "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid, | 1013 | "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid, |
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index dec992569476..4ad80ae1067f 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | |||
@@ -2500,7 +2500,7 @@ static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info | |||
2500 | ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n", | 2500 | ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n", |
2501 | poidparam->subcode, poidparam->len, len)); | 2501 | poidparam->subcode, poidparam->len, len)); |
2502 | 2502 | ||
2503 | if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) { | 2503 | if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) { |
2504 | RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n")); | 2504 | RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n")); |
2505 | ret = -EINVAL; | 2505 | ret = -EINVAL; |
2506 | goto _rtw_mp_ioctl_hdl_exit; | 2506 | goto _rtw_mp_ioctl_hdl_exit; |
@@ -3164,9 +3164,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev, | |||
3164 | u8 *p2pie; | 3164 | u8 *p2pie; |
3165 | uint p2pielen = 0, attr_contentlen = 0; | 3165 | uint p2pielen = 0, attr_contentlen = 0; |
3166 | u8 attr_content[100] = {0x00}; | 3166 | u8 attr_content[100] = {0x00}; |
3167 | 3167 | u8 go_devadd_str[17 + 12] = {}; | |
3168 | u8 go_devadd_str[17 + 10] = {0x00}; | ||
3169 | /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */ | ||
3170 | 3168 | ||
3171 | /* Commented by Albert 20121209 */ | 3169 | /* Commented by Albert 20121209 */ |
3172 | /* The input data is the GO's interface address which the application wants to know its device address. */ | 3170 | /* The input data is the GO's interface address which the application wants to know its device address. */ |
@@ -3223,12 +3221,12 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev, | |||
3223 | spin_unlock_bh(&pmlmepriv->scanned_queue.lock); | 3221 | spin_unlock_bh(&pmlmepriv->scanned_queue.lock); |
3224 | 3222 | ||
3225 | if (!blnMatch) | 3223 | if (!blnMatch) |
3226 | sprintf(go_devadd_str, "\n\ndev_add = NULL"); | 3224 | snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL"); |
3227 | else | 3225 | else |
3228 | sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X", | 3226 | snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X", |
3229 | attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]); | 3227 | attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]); |
3230 | 3228 | ||
3231 | if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17)) | 3229 | if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str))) |
3232 | return -EFAULT; | 3230 | return -EFAULT; |
3233 | return ret; | 3231 | return ret; |
3234 | } | 3232 | } |
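Note on the ioctl_linux.c hunks: the subcode check is now bounded by ARRAY_SIZE() of the handler table rather than a separate constant, and the fixed-size reply string is built with snprintf() and copied out with its true size. A compact sketch of both patterns (hypothetical names, not the driver's code):

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    static const void *example_handlers[16];    /* stands in for the ioctl handler table */

    static int example_report_go_addr(char __user *to, const u8 addr[6],
                                      unsigned int subcode)
    {
            char buf[17 + 12];      /* "\n\ndev_add =" + colon-separated MAC + NUL */

            if (subcode >= ARRAY_SIZE(example_handlers))    /* bound follows the table */
                    return -EINVAL;

            snprintf(buf, sizeof(buf), "\n\ndev_add =%pM", addr);
            if (copy_to_user(to, buf, sizeof(buf)))
                    return -EFAULT;
            return 0;
    }

snprintf() can never overrun buf[], and copy_to_user() moves exactly sizeof(buf) bytes, so the two sizes cannot drift apart the way sprintf() plus a hand-counted length could.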
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c index 68f98fa114d2..7c9ee58f47bb 100644 --- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c | |||
@@ -653,7 +653,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) | |||
653 | } | 653 | } |
654 | 654 | ||
655 | static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, | 655 | static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, |
656 | void *accel_priv) | 656 | void *accel_priv, select_queue_fallback_t fallback) |
657 | { | 657 | { |
658 | struct adapter *padapter = rtw_netdev_priv(dev); | 658 | struct adapter *padapter = rtw_netdev_priv(dev); |
659 | struct mlme_priv *pmlmepriv = &padapter->mlmepriv; | 659 | struct mlme_priv *pmlmepriv = &padapter->mlmepriv; |
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 0a341d6ec51f..a70dcef1419e 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c | |||
@@ -53,7 +53,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { | |||
53 | {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */ | 53 | {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */ |
54 | /*=== Customer ID ===*/ | 54 | /*=== Customer ID ===*/ |
55 | /****** 8188EUS ********/ | 55 | /****** 8188EUS ********/ |
56 | {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ | 56 | {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ |
57 | {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ | 57 | {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ |
58 | {} /* Terminating entry */ | 58 | {} /* Terminating entry */ |
59 | }; | 59 | }; |
diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig index 2aa5dac2f1df..abccc9dabd65 100644 --- a/drivers/staging/rtl8821ae/Kconfig +++ b/drivers/staging/rtl8821ae/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config R8821AE | 1 | config R8821AE |
2 | tristate "RealTek RTL8821AE Wireless LAN NIC driver" | 2 | tristate "RealTek RTL8821AE Wireless LAN NIC driver" |
3 | depends on PCI && WLAN | 3 | depends on PCI && WLAN && MAC80211 |
4 | depends on m | 4 | depends on m |
5 | select WIRELESS_EXT | 5 | select WIRELESS_EXT |
6 | select WEXT_PRIV | 6 | select WEXT_PRIV |
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h index cfe88a1efd55..76bef93ad70a 100644 --- a/drivers/staging/rtl8821ae/wifi.h +++ b/drivers/staging/rtl8821ae/wifi.h | |||
@@ -1414,7 +1414,7 @@ struct rtl_dm { | |||
1414 | 1414 | ||
1415 | 1415 | ||
1416 | /*88e tx power tracking*/ | 1416 | /*88e tx power tracking*/ |
1417 | u8 bb_swing_idx_ofdm[2]; | 1417 | u8 bb_swing_idx_ofdm[MAX_RF_PATH]; |
1418 | u8 bb_swing_idx_ofdm_current; | 1418 | u8 bb_swing_idx_ofdm_current; |
1419 | u8 bb_swing_idx_ofdm_base[MAX_RF_PATH]; | 1419 | u8 bb_swing_idx_ofdm_base[MAX_RF_PATH]; |
1420 | bool bb_swing_flag_Ofdm; | 1420 | bool bb_swing_flag_Ofdm; |
diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c index 3c8d28b771e0..81ff8522405c 100644 --- a/drivers/staging/usbip/userspace/libsrc/names.c +++ b/drivers/staging/usbip/userspace/libsrc/names.c | |||
@@ -169,14 +169,14 @@ static void *my_malloc(size_t size) | |||
169 | struct pool *p; | 169 | struct pool *p; |
170 | 170 | ||
171 | p = calloc(1, sizeof(struct pool)); | 171 | p = calloc(1, sizeof(struct pool)); |
172 | if (!p) { | 172 | if (!p) |
173 | free(p); | ||
174 | return NULL; | 173 | return NULL; |
175 | } | ||
176 | 174 | ||
177 | p->mem = calloc(1, size); | 175 | p->mem = calloc(1, size); |
178 | if (!p->mem) | 176 | if (!p->mem) { |
177 | free(p); | ||
179 | return NULL; | 178 | return NULL; |
179 | } | ||
180 | 180 | ||
181 | p->next = pool_head; | 181 | p->next = pool_head; |
182 | pool_head = p; | 182 | pool_head = p; |
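Note on the names.c hunk: the leak was on the second allocation, where a failed calloc() of the backing memory used to leave the pool struct itself behind (the removed free(p) in the first check was merely a no-op on NULL). The corrected pattern as a self-contained userspace C sketch, with hypothetical names:

    #include <stdlib.h>

    struct pool {
            void *mem;
            struct pool *next;
    };

    static struct pool *pool_alloc(size_t size)
    {
            struct pool *p = calloc(1, sizeof(*p));

            if (!p)
                    return NULL;            /* nothing allocated yet, nothing to free */

            p->mem = calloc(1, size);
            if (!p->mem) {
                    free(p);                /* release the outer struct before bailing out */
                    return NULL;
            }
            return p;
    }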
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c index 9b51586d11d9..0141bc34d5cc 100644 --- a/drivers/staging/usbip/vhci_sysfs.c +++ b/drivers/staging/usbip/vhci_sysfs.c | |||
@@ -149,7 +149,8 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed) | |||
149 | case USB_SPEED_WIRELESS: | 149 | case USB_SPEED_WIRELESS: |
150 | break; | 150 | break; |
151 | default: | 151 | default: |
152 | pr_err("speed %d\n", speed); | 152 | pr_err("Failed attach request for unsupported USB speed: %s\n", |
153 | usb_speed_string(speed)); | ||
153 | return -EINVAL; | 154 | return -EINVAL; |
154 | } | 155 | } |
155 | 156 | ||
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c index 4a1ddaf5e00f..187fc060de26 100644 --- a/drivers/staging/wlags49_h2/wl_wext.c +++ b/drivers/staging/wlags49_h2/wl_wext.c | |||
@@ -1061,7 +1061,7 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in | |||
1061 | goto out; | 1061 | goto out; |
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) { | 1064 | if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) { |
1065 | ret = -EINVAL; | 1065 | ret = -EINVAL; |
1066 | goto out; | 1066 | goto out; |
1067 | } | 1067 | } |
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index e048d6439f4a..cda4d80cfaef 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c | |||
@@ -507,7 +507,9 @@ int iscsit_handle_status_snack( | |||
507 | u32 last_statsn; | 507 | u32 last_statsn; |
508 | int found_cmd; | 508 | int found_cmd; |
509 | 509 | ||
510 | if (conn->exp_statsn > begrun) { | 510 | if (!begrun) { |
511 | begrun = conn->exp_statsn; | ||
512 | } else if (conn->exp_statsn > begrun) { | ||
511 | pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:" | 513 | pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:" |
512 | " 0x%08x but already got ExpStatSN: 0x%08x on CID:" | 514 | " 0x%08x but already got ExpStatSN: 0x%08x on CID:" |
513 | " %hu.\n", begrun, runlength, conn->exp_statsn, | 515 | " %hu.\n", begrun, runlength, conn->exp_statsn, |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 12da9b386169..c3d9df6aaf5f 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
@@ -500,7 +500,7 @@ static inline int core_alua_state_lba_dependent( | |||
500 | 500 | ||
501 | if (segment_mult) { | 501 | if (segment_mult) { |
502 | u64 tmp = lba; | 502 | u64 tmp = lba; |
503 | start_lba = sector_div(tmp, segment_size * segment_mult); | 503 | start_lba = do_div(tmp, segment_size * segment_mult); |
504 | 504 | ||
505 | last_lba = first_lba + segment_size - 1; | 505 | last_lba = first_lba + segment_size - 1; |
506 | if (start_lba >= first_lba && | 506 | if (start_lba >= first_lba && |
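do_div() is the kernel's 64-by-32 division helper: it replaces its first argument with the quotient and evaluates to the remainder, which is what start_lba needs here, the LBA's offset within a segment group. A plain C analogue of that contract (div64_rem is a made-up helper, not a kernel symbol):

#include <stdint.h>
#include <stdio.h>

/* Mimics the do_div() contract: *dividend becomes the quotient,
 * the return value is the remainder. */
static uint32_t div64_rem(uint64_t *dividend, uint32_t divisor)
{
    uint32_t rem = (uint32_t)(*dividend % divisor);
    *dividend /= divisor;
    return rem;
}

int main(void)
{
    uint64_t tmp = 1000003;                 /* an example LBA */
    uint32_t segment_size = 64, segment_mult = 8;

    uint32_t start_lba = div64_rem(&tmp, segment_size * segment_mult);
    printf("segment group %llu, offset %u\n",
           (unsigned long long)tmp, start_lba);
    return 0;
}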
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 2f5d77932c80..3013287a2aaa 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2009 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 2009 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
2010 | unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; | 2010 | unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; |
2011 | sense_reason_t ret = TCM_NO_SENSE; | 2011 | sense_reason_t ret = TCM_NO_SENSE; |
2012 | int pr_holder = 0; | 2012 | int pr_holder = 0, type; |
2013 | 2013 | ||
2014 | if (!se_sess || !se_lun) { | 2014 | if (!se_sess || !se_lun) { |
2015 | pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); | 2015 | pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); |
@@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2131 | ret = TCM_RESERVATION_CONFLICT; | 2131 | ret = TCM_RESERVATION_CONFLICT; |
2132 | goto out; | 2132 | goto out; |
2133 | } | 2133 | } |
2134 | type = pr_reg->pr_res_type; | ||
2134 | 2135 | ||
2135 | spin_lock(&pr_tmpl->registration_lock); | 2136 | spin_lock(&pr_tmpl->registration_lock); |
2136 | /* | 2137 | /* |
@@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2161 | * Release the calling I_T Nexus registration now.. | 2162 | * Release the calling I_T Nexus registration now.. |
2162 | */ | 2163 | */ |
2163 | __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); | 2164 | __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); |
2165 | pr_reg = NULL; | ||
2164 | 2166 | ||
2165 | /* | 2167 | /* |
2166 | * From spc4r17, section 5.7.11.3 Unregistering | 2168 | * From spc4r17, section 5.7.11.3 Unregistering |
@@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2174 | * RESERVATIONS RELEASED. | 2176 | * RESERVATIONS RELEASED. |
2175 | */ | 2177 | */ |
2176 | if (pr_holder && | 2178 | if (pr_holder && |
2177 | (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || | 2179 | (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || |
2178 | pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { | 2180 | type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { |
2179 | list_for_each_entry(pr_reg_p, | 2181 | list_for_each_entry(pr_reg_p, |
2180 | &pr_tmpl->registration_list, | 2182 | &pr_tmpl->registration_list, |
2181 | pr_reg_list) { | 2183 | pr_reg_list) { |
@@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2194 | ret = core_scsi3_update_and_write_aptpl(dev, aptpl); | 2196 | ret = core_scsi3_update_and_write_aptpl(dev, aptpl); |
2195 | 2197 | ||
2196 | out: | 2198 | out: |
2197 | core_scsi3_put_pr_reg(pr_reg); | 2199 | if (pr_reg) |
2200 | core_scsi3_put_pr_reg(pr_reg); | ||
2198 | return ret; | 2201 | return ret; |
2199 | } | 2202 | } |
2200 | 2203 | ||
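Taken together, the hunks above stop the REGISTER handler from touching a registration after __core_scsi3_free_registration() has released it: the reservation type is copied to a local beforehand, and pr_reg is cleared so the common exit path no longer drops a stale reference. The shape of that pattern in a standalone sketch (struct registration and the helper names are hypothetical):

#include <stdlib.h>

struct registration {
    int res_type;
    int refcount;
};

void free_registration(struct registration *reg)
{
    free(reg);
}

void put_registration(struct registration *reg)
{
    if (--reg->refcount == 0)
        free(reg);
}

/* Cache the field that is still needed, free the object, and NULL the
 * pointer so the shared exit path neither dereferences nor releases it
 * a second time. */
int unregister(struct registration *reg, int drop_registration)
{
    int type = reg->res_type;   /* read before the object may go away */

    if (drop_registration) {
        free_registration(reg);
        reg = NULL;
    }

    /* ... later decisions use 'type', never reg->res_type ... */

    if (reg)
        put_registration(reg);
    return type;
}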
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index fa3cae393e13..a4489444ffbc 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -1074,12 +1074,19 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, | |||
1074 | struct scatterlist *psg; | 1074 | struct scatterlist *psg; |
1075 | void *paddr, *addr; | 1075 | void *paddr, *addr; |
1076 | unsigned int i, len, left; | 1076 | unsigned int i, len, left; |
1077 | unsigned int offset = 0; | ||
1077 | 1078 | ||
1078 | left = sectors * dev->prot_length; | 1079 | left = sectors * dev->prot_length; |
1079 | 1080 | ||
1080 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { | 1081 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { |
1081 | 1082 | ||
1082 | len = min(psg->length, left); | 1083 | len = min(psg->length, left); |
1084 | if (offset >= sg->length) { | ||
1085 | sg = sg_next(sg); | ||
1086 | offset = 0; | ||
1087 | sg_off = sg->offset; | ||
1088 | } | ||
1089 | |||
1083 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; | 1090 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; |
1084 | addr = kmap_atomic(sg_page(sg)) + sg_off; | 1091 | addr = kmap_atomic(sg_page(sg)) + sg_off; |
1085 | 1092 | ||
@@ -1089,6 +1096,7 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, | |||
1089 | memcpy(addr, paddr, len); | 1096 | memcpy(addr, paddr, len); |
1090 | 1097 | ||
1091 | left -= len; | 1098 | left -= len; |
1099 | offset += len; | ||
1092 | kunmap_atomic(paddr); | 1100 | kunmap_atomic(paddr); |
1093 | kunmap_atomic(addr); | 1101 | kunmap_atomic(addr); |
1094 | } | 1102 | } |
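The new offset bookkeeping lets the protection-data copy spill across data scatterlist entries: once the bytes written reach the current entry's length, the walk advances to the next entry and resets to its base offset instead of writing past the end. Roughly the same walk over plain buffers (struct chunk stands in for struct scatterlist; names are illustrative):

#include <stddef.h>
#include <string.h>

/* Hypothetical stand-in for a scatterlist entry. */
struct chunk {
    char *buf;
    size_t length;
};

/* Copy 'left' bytes from src into a chain of destination chunks,
 * advancing to the next chunk whenever the current one fills up. */
void copy_into_chunks(struct chunk *dst, size_t ndst,
                      const char *src, size_t left)
{
    size_t offset = 0;      /* bytes already written into the current chunk */
    size_t di = 0;

    while (left && di < ndst) {
        if (offset >= dst[di].length) {     /* current chunk exhausted */
            di++;
            offset = 0;
            continue;
        }
        size_t len = dst[di].length - offset;
        if (len > left)
            len = left;
        memcpy(dst[di].buf + offset, src, len);
        src += len;
        offset += len;
        left -= len;
    }
}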
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 43c5ca9878bc..3bebc71ea033 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c | |||
@@ -440,8 +440,8 @@ check_scsi_name: | |||
440 | padding = ((-scsi_target_len) & 3); | 440 | padding = ((-scsi_target_len) & 3); |
441 | if (padding) | 441 | if (padding) |
442 | scsi_target_len += padding; | 442 | scsi_target_len += padding; |
443 | if (scsi_name_len > 256) | 443 | if (scsi_target_len > 256) |
444 | scsi_name_len = 256; | 444 | scsi_target_len = 256; |
445 | 445 | ||
446 | buf[off-1] = scsi_target_len; | 446 | buf[off-1] = scsi_target_len; |
447 | off += scsi_target_len; | 447 | off += scsi_target_len; |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index c50fd9f11aab..24b4f65d8777 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -669,9 +669,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) | |||
669 | return; | 669 | return; |
670 | } | 670 | } |
671 | 671 | ||
672 | if (!success) | ||
673 | cmd->transport_state |= CMD_T_FAILED; | ||
674 | |||
675 | /* | 672 | /* |
676 | * Check for case where an explicit ABORT_TASK has been received | 673 | * Check for case where an explicit ABORT_TASK has been received |
677 | * and transport_wait_for_tasks() will be waiting for completion.. | 674 | * and transport_wait_for_tasks() will be waiting for completion.. |
@@ -681,7 +678,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) | |||
681 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 678 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
682 | complete(&cmd->t_transport_stop_comp); | 679 | complete(&cmd->t_transport_stop_comp); |
683 | return; | 680 | return; |
684 | } else if (cmd->transport_state & CMD_T_FAILED) { | 681 | } else if (!success) { |
685 | INIT_WORK(&cmd->work, target_complete_failure_work); | 682 | INIT_WORK(&cmd->work, target_complete_failure_work); |
686 | } else { | 683 | } else { |
687 | INIT_WORK(&cmd->work, target_complete_ok_work); | 684 | INIT_WORK(&cmd->work, target_complete_ok_work); |
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c index 6496872e2e47..b01659bd4f7c 100644 --- a/drivers/tty/hvc/hvc_opal.c +++ b/drivers/tty/hvc/hvc_opal.c | |||
@@ -255,13 +255,7 @@ static int __init hvc_opal_init(void) | |||
255 | /* Register as a vio device to receive callbacks */ | 255 | /* Register as a vio device to receive callbacks */ |
256 | return platform_driver_register(&hvc_opal_driver); | 256 | return platform_driver_register(&hvc_opal_driver); |
257 | } | 257 | } |
258 | module_init(hvc_opal_init); | 258 | device_initcall(hvc_opal_init); |
259 | |||
260 | static void __exit hvc_opal_exit(void) | ||
261 | { | ||
262 | platform_driver_unregister(&hvc_opal_driver); | ||
263 | } | ||
264 | module_exit(hvc_opal_exit); | ||
265 | 259 | ||
266 | static void udbg_opal_putc(char c) | 260 | static void udbg_opal_putc(char c) |
267 | { | 261 | { |
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c index 0069bb86ba49..08c87920b74a 100644 --- a/drivers/tty/hvc/hvc_rtas.c +++ b/drivers/tty/hvc/hvc_rtas.c | |||
@@ -102,17 +102,7 @@ static int __init hvc_rtas_init(void) | |||
102 | 102 | ||
103 | return 0; | 103 | return 0; |
104 | } | 104 | } |
105 | module_init(hvc_rtas_init); | 105 | device_initcall(hvc_rtas_init); |
106 | |||
107 | /* This will tear down the tty portion of the driver */ | ||
108 | static void __exit hvc_rtas_exit(void) | ||
109 | { | ||
110 | /* Really the fun isn't over until the worker thread breaks down and | ||
111 | * the tty cleans up */ | ||
112 | if (hvc_rtas_dev) | ||
113 | hvc_remove(hvc_rtas_dev); | ||
114 | } | ||
115 | module_exit(hvc_rtas_exit); | ||
116 | 106 | ||
117 | /* This will happen prior to module init. There is no tty at this time? */ | 107 | /* This will happen prior to module init. There is no tty at this time? */ |
118 | static int __init hvc_rtas_console_init(void) | 108 | static int __init hvc_rtas_console_init(void) |
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c index 72228276fe31..9cf573d06a29 100644 --- a/drivers/tty/hvc/hvc_udbg.c +++ b/drivers/tty/hvc/hvc_udbg.c | |||
@@ -80,14 +80,7 @@ static int __init hvc_udbg_init(void) | |||
80 | 80 | ||
81 | return 0; | 81 | return 0; |
82 | } | 82 | } |
83 | module_init(hvc_udbg_init); | 83 | device_initcall(hvc_udbg_init); |
84 | |||
85 | static void __exit hvc_udbg_exit(void) | ||
86 | { | ||
87 | if (hvc_udbg_dev) | ||
88 | hvc_remove(hvc_udbg_dev); | ||
89 | } | ||
90 | module_exit(hvc_udbg_exit); | ||
91 | 84 | ||
92 | static int __init hvc_udbg_console_init(void) | 85 | static int __init hvc_udbg_console_init(void) |
93 | { | 86 | { |
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 636c9baad7a5..2dc2831840ca 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
@@ -561,18 +561,7 @@ static int __init xen_hvc_init(void) | |||
561 | #endif | 561 | #endif |
562 | return r; | 562 | return r; |
563 | } | 563 | } |
564 | 564 | device_initcall(xen_hvc_init); | |
565 | static void __exit xen_hvc_fini(void) | ||
566 | { | ||
567 | struct xencons_info *entry, *next; | ||
568 | |||
569 | if (list_empty(&xenconsoles)) | ||
570 | return; | ||
571 | |||
572 | list_for_each_entry_safe(entry, next, &xenconsoles, list) { | ||
573 | xen_console_remove(entry); | ||
574 | } | ||
575 | } | ||
576 | 565 | ||
577 | static int xen_cons_init(void) | 566 | static int xen_cons_init(void) |
578 | { | 567 | { |
@@ -598,10 +587,6 @@ static int xen_cons_init(void) | |||
598 | hvc_instantiate(HVC_COOKIE, 0, ops); | 587 | hvc_instantiate(HVC_COOKIE, 0, ops); |
599 | return 0; | 588 | return 0; |
600 | } | 589 | } |
601 | |||
602 | |||
603 | module_init(xen_hvc_init); | ||
604 | module_exit(xen_hvc_fini); | ||
605 | console_initcall(xen_cons_init); | 590 | console_initcall(xen_cons_init); |
606 | 591 | ||
607 | #ifdef CONFIG_EARLY_PRINTK | 592 | #ifdef CONFIG_EARLY_PRINTK |
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index f34461c5f14e..2ebe47b78a3e 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c | |||
@@ -1090,6 +1090,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen) | |||
1090 | { | 1090 | { |
1091 | unsigned int addr = 0; | 1091 | unsigned int addr = 0; |
1092 | unsigned int modem = 0; | 1092 | unsigned int modem = 0; |
1093 | unsigned int brk = 0; | ||
1093 | struct gsm_dlci *dlci; | 1094 | struct gsm_dlci *dlci; |
1094 | int len = clen; | 1095 | int len = clen; |
1095 | u8 *dp = data; | 1096 | u8 *dp = data; |
@@ -1116,6 +1117,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen) | |||
1116 | if (len == 0) | 1117 | if (len == 0) |
1117 | return; | 1118 | return; |
1118 | } | 1119 | } |
1120 | len--; | ||
1121 | if (len > 0) { | ||
1122 | while (gsm_read_ea(&brk, *dp++) == 0) { | ||
1123 | len--; | ||
1124 | if (len == 0) | ||
1125 | return; | ||
1126 | } | ||
1127 | modem <<= 7; | ||
1128 | modem |= (brk & 0x7f); | ||
1129 | } | ||
1119 | tty = tty_port_tty_get(&dlci->port); | 1130 | tty = tty_port_tty_get(&dlci->port); |
1120 | gsm_process_modem(tty, dlci, modem, clen); | 1131 | gsm_process_modem(tty, dlci, modem, clen); |
1121 | if (tty) { | 1132 | if (tty) { |
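The control octets here are EA (extension-bit) encoded, each octet contributing seven value bits with the low bit marking the final octet, and the added block consumes one more such field, the break signal, folding its low seven bits into the modem status word. A small stand-alone decoder written to the same convention (read_ea approximates the driver's gsm_read_ea; treat the exact bit layout as an assumption):

#include <stdint.h>
#include <stdio.h>

/* Accumulate one EA-encoded octet into *val.
 * Bits 7..1 carry data; bit 0 set means "last octet". */
static int read_ea(unsigned int *val, uint8_t c)
{
    *val <<= 7;
    *val |= c >> 1;
    return c & 0x01;
}

int main(void)
{
    /* Two-octet example: 0x40 has EA clear (more follows), 0x55 ends it. */
    const uint8_t field[] = { 0x40, 0x55 };
    unsigned int value = 0;
    size_t i = 0;

    while (read_ea(&value, field[i++]) == 0)
        ;   /* keep consuming until the EA bit is set */

    printf("decoded value: 0x%x\n", value);
    return 0;
}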
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index cb8017aa4434..d15624c1b751 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
@@ -817,8 +817,7 @@ static void process_echoes(struct tty_struct *tty) | |||
817 | struct n_tty_data *ldata = tty->disc_data; | 817 | struct n_tty_data *ldata = tty->disc_data; |
818 | size_t echoed; | 818 | size_t echoed; |
819 | 819 | ||
820 | if ((!L_ECHO(tty) && !L_ECHONL(tty)) || | 820 | if (ldata->echo_mark == ldata->echo_tail) |
821 | ldata->echo_mark == ldata->echo_tail) | ||
822 | return; | 821 | return; |
823 | 822 | ||
824 | mutex_lock(&ldata->output_lock); | 823 | mutex_lock(&ldata->output_lock); |
@@ -1244,7 +1243,8 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c) | |||
1244 | if (L_ECHO(tty)) { | 1243 | if (L_ECHO(tty)) { |
1245 | echo_char(c, tty); | 1244 | echo_char(c, tty); |
1246 | commit_echoes(tty); | 1245 | commit_echoes(tty); |
1247 | } | 1246 | } else |
1247 | process_echoes(tty); | ||
1248 | isig(signal, tty); | 1248 | isig(signal, tty); |
1249 | return; | 1249 | return; |
1250 | } | 1250 | } |
@@ -1274,7 +1274,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) | |||
1274 | if (I_IXON(tty)) { | 1274 | if (I_IXON(tty)) { |
1275 | if (c == START_CHAR(tty)) { | 1275 | if (c == START_CHAR(tty)) { |
1276 | start_tty(tty); | 1276 | start_tty(tty); |
1277 | commit_echoes(tty); | 1277 | process_echoes(tty); |
1278 | return 0; | 1278 | return 0; |
1279 | } | 1279 | } |
1280 | if (c == STOP_CHAR(tty)) { | 1280 | if (c == STOP_CHAR(tty)) { |
@@ -1820,8 +1820,10 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) | |||
1820 | * Fix tty hang when I_IXON(tty) is cleared, but the tty | 1820 | * Fix tty hang when I_IXON(tty) is cleared, but the tty |
1821 | * been stopped by STOP_CHAR(tty) before it. | 1821 | * been stopped by STOP_CHAR(tty) before it. |
1822 | */ | 1822 | */ |
1823 | if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) | 1823 | if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { |
1824 | start_tty(tty); | 1824 | start_tty(tty); |
1825 | process_echoes(tty); | ||
1826 | } | ||
1825 | 1827 | ||
1826 | /* The termios change make the tty ready for I/O */ | 1828 | /* The termios change make the tty ready for I/O */ |
1827 | if (waitqueue_active(&tty->write_wait)) | 1829 | if (waitqueue_active(&tty->write_wait)) |
@@ -1896,7 +1898,7 @@ err: | |||
1896 | static inline int input_available_p(struct tty_struct *tty, int poll) | 1898 | static inline int input_available_p(struct tty_struct *tty, int poll) |
1897 | { | 1899 | { |
1898 | struct n_tty_data *ldata = tty->disc_data; | 1900 | struct n_tty_data *ldata = tty->disc_data; |
1899 | int amt = poll && !TIME_CHAR(tty) ? MIN_CHAR(tty) : 1; | 1901 | int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1; |
1900 | 1902 | ||
1901 | if (ldata->icanon && !L_EXTPROC(tty)) { | 1903 | if (ldata->icanon && !L_EXTPROC(tty)) { |
1902 | if (ldata->canon_head != ldata->read_tail) | 1904 | if (ldata->canon_head != ldata->read_tail) |
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 61ecd709a722..69932b7556cf 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c | |||
@@ -2433,6 +2433,24 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, | |||
2433 | serial_dl_write(up, quot); | 2433 | serial_dl_write(up, quot); |
2434 | 2434 | ||
2435 | /* | 2435 | /* |
2436 | * XR17V35x UARTs have an extra fractional divisor register (DLD) | ||
2437 | * | ||
2438 | * We need to recalculate all of the registers, because DLM and DLL | ||
2439 | * are already rounded to a whole integer. | ||
2440 | * | ||
2441 | * When recalculating we use a 32x clock instead of a 16x clock to | ||
2442 | * allow 1-bit for rounding in the fractional part. | ||
2443 | */ | ||
2444 | if (up->port.type == PORT_XR17V35X) { | ||
2445 | unsigned int baud_x32 = (port->uartclk * 2) / baud; | ||
2446 | u16 quot = baud_x32 / 32; | ||
2447 | u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2); | ||
2448 | |||
2449 | serial_dl_write(up, quot); | ||
2450 | serial_port_out(port, 0x2, quot_frac & 0xf); | ||
2451 | } | ||
2452 | |||
2453 | /* | ||
2436 | * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR | 2454 | * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR |
2437 | * is written without DLAB set, this mode will be disabled. | 2455 | * is written without DLAB set, this mode will be disabled. |
2438 | */ | 2456 | */ |
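The DLD path recomputes the divisor at 32 times the baud rate so a spare bit is available for rounding: the integer part goes to DLL/DLM as before and the fractional part, in sixteenths, fills the 4-bit DLD register. The arithmetic on its own, with an illustrative clock and rate rather than values taken from real hardware:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)  (((x) + (d) / 2) / (d))

int main(void)
{
    unsigned int uartclk = 62500000;    /* example input clock */
    unsigned int baud = 115200;

    /* 32x oversampled divisor keeps one bit for rounding the fraction. */
    unsigned int baud_x32  = (uartclk * 2) / baud;
    unsigned int quot      = baud_x32 / 32;                     /* DLL/DLM */
    unsigned int quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2) & 0xf; /* DLD */

    printf("quot=%u frac=%u/16 -> divisor %.3f (ideal %.3f)\n",
           quot, quot_frac, quot + quot_frac / 16.0,
           uartclk / (16.0 * baud));
    return 0;
}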
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index faa64e646100..ed3113576740 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
@@ -391,7 +391,7 @@ static int dw8250_remove(struct platform_device *pdev) | |||
391 | return 0; | 391 | return 0; |
392 | } | 392 | } |
393 | 393 | ||
394 | #ifdef CONFIG_PM | 394 | #ifdef CONFIG_PM_SLEEP |
395 | static int dw8250_suspend(struct device *dev) | 395 | static int dw8250_suspend(struct device *dev) |
396 | { | 396 | { |
397 | struct dw8250_data *data = dev_get_drvdata(dev); | 397 | struct dw8250_data *data = dev_get_drvdata(dev); |
@@ -409,7 +409,7 @@ static int dw8250_resume(struct device *dev) | |||
409 | 409 | ||
410 | return 0; | 410 | return 0; |
411 | } | 411 | } |
412 | #endif /* CONFIG_PM */ | 412 | #endif /* CONFIG_PM_SLEEP */ |
413 | 413 | ||
414 | #ifdef CONFIG_PM_RUNTIME | 414 | #ifdef CONFIG_PM_RUNTIME |
415 | static int dw8250_runtime_suspend(struct device *dev) | 415 | static int dw8250_runtime_suspend(struct device *dev) |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 50228eed3b6f..0ff3e3624d4c 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -783,7 +783,8 @@ static int pci_netmos_9900_setup(struct serial_private *priv, | |||
783 | { | 783 | { |
784 | unsigned int bar; | 784 | unsigned int bar; |
785 | 785 | ||
786 | if ((priv->dev->subsystem_device & 0xff00) == 0x3000) { | 786 | if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) && |
787 | (priv->dev->subsystem_device & 0xff00) == 0x3000) { | ||
787 | /* netmos apparently orders BARs by datasheet layout, so serial | 788 | /* netmos apparently orders BARs by datasheet layout, so serial |
788 | * ports get BARs 0 and 3 (or 1 and 4 for memmapped) | 789 | * ports get BARs 0 and 3 (or 1 and 4 for memmapped) |
789 | */ | 790 | */ |
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index fa511ebab67c..77f035158d6c 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c | |||
@@ -738,9 +738,6 @@ static int serial_omap_startup(struct uart_port *port) | |||
738 | return retval; | 738 | return retval; |
739 | } | 739 | } |
740 | disable_irq(up->wakeirq); | 740 | disable_irq(up->wakeirq); |
741 | } else { | ||
742 | dev_info(up->port.dev, "no wakeirq for uart%d\n", | ||
743 | up->port.line); | ||
744 | } | 741 | } |
745 | 742 | ||
746 | dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line); | 743 | dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line); |
@@ -1604,8 +1601,11 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up, | |||
1604 | flags & SER_RS485_RTS_AFTER_SEND); | 1601 | flags & SER_RS485_RTS_AFTER_SEND); |
1605 | if (ret < 0) | 1602 | if (ret < 0) |
1606 | return ret; | 1603 | return ret; |
1607 | } else | 1604 | } else if (up->rts_gpio == -EPROBE_DEFER) { |
1605 | return -EPROBE_DEFER; | ||
1606 | } else { | ||
1608 | up->rts_gpio = -EINVAL; | 1607 | up->rts_gpio = -EINVAL; |
1608 | } | ||
1609 | 1609 | ||
1610 | if (of_property_read_u32_array(np, "rs485-rts-delay", | 1610 | if (of_property_read_u32_array(np, "rs485-rts-delay", |
1611 | rs485_delay, 2) == 0) { | 1611 | rs485_delay, 2) == 0) { |
@@ -1687,6 +1687,9 @@ static int serial_omap_probe(struct platform_device *pdev) | |||
1687 | up->port.iotype = UPIO_MEM; | 1687 | up->port.iotype = UPIO_MEM; |
1688 | up->port.irq = uartirq; | 1688 | up->port.irq = uartirq; |
1689 | up->wakeirq = wakeirq; | 1689 | up->wakeirq = wakeirq; |
1690 | if (!up->wakeirq) | ||
1691 | dev_info(up->port.dev, "no wakeirq for uart%d\n", | ||
1692 | up->port.line); | ||
1690 | 1693 | ||
1691 | up->port.regshift = 2; | 1694 | up->port.regshift = 2; |
1692 | up->port.fifosize = 64; | 1695 | up->port.fifosize = 64; |
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c index 49a2ffd101a7..b7bfe24d4ebc 100644 --- a/drivers/tty/serial/sirfsoc_uart.c +++ b/drivers/tty/serial/sirfsoc_uart.c | |||
@@ -542,8 +542,10 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param) | |||
542 | wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, | 542 | wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, |
543 | rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | | 543 | rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | |
544 | SIRFUART_IO_MODE); | 544 | SIRFUART_IO_MODE); |
545 | sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count); | ||
546 | spin_unlock_irqrestore(&sirfport->rx_lock, flags); | 545 | spin_unlock_irqrestore(&sirfport->rx_lock, flags); |
546 | spin_lock(&port->lock); | ||
547 | sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count); | ||
548 | spin_unlock(&port->lock); | ||
547 | if (sirfport->rx_io_count == 4) { | 549 | if (sirfport->rx_io_count == 4) { |
548 | spin_lock_irqsave(&sirfport->rx_lock, flags); | 550 | spin_lock_irqsave(&sirfport->rx_lock, flags); |
549 | sirfport->rx_io_count = 0; | 551 | sirfport->rx_io_count = 0; |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index c74a00ad7add..bd2715a9d8e5 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -1267,16 +1267,17 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p) | |||
1267 | * @p: output buffer of at least 7 bytes | 1267 | * @p: output buffer of at least 7 bytes |
1268 | * | 1268 | * |
1269 | * Generate a name from a driver reference and write it to the output | 1269 | * Generate a name from a driver reference and write it to the output |
1270 | * buffer. | 1270 | * buffer. Return the number of bytes written. |
1271 | * | 1271 | * |
1272 | * Locking: None | 1272 | * Locking: None |
1273 | */ | 1273 | */ |
1274 | static void tty_line_name(struct tty_driver *driver, int index, char *p) | 1274 | static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p) |
1275 | { | 1275 | { |
1276 | if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE) | 1276 | if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE) |
1277 | strcpy(p, driver->name); | 1277 | return sprintf(p, "%s", driver->name); |
1278 | else | 1278 | else |
1279 | sprintf(p, "%s%d", driver->name, index + driver->name_base); | 1279 | return sprintf(p, "%s%d", driver->name, |
1280 | index + driver->name_base); | ||
1280 | } | 1281 | } |
1281 | 1282 | ||
1282 | /** | 1283 | /** |
@@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev, | |||
3545 | if (i >= ARRAY_SIZE(cs)) | 3546 | if (i >= ARRAY_SIZE(cs)) |
3546 | break; | 3547 | break; |
3547 | } | 3548 | } |
3548 | while (i--) | 3549 | while (i--) { |
3549 | count += sprintf(buf + count, "%s%d%c", | 3550 | struct tty_driver *driver; |
3550 | cs[i]->name, cs[i]->index, i ? ' ':'\n'); | 3551 | const char *name = cs[i]->name; |
3552 | int index = cs[i]->index; | ||
3553 | |||
3554 | driver = cs[i]->device(cs[i], &index); | ||
3555 | if (driver) { | ||
3556 | count += tty_line_name(driver, index, buf + count); | ||
3557 | count += sprintf(buf + count, "%c", i ? ' ':'\n'); | ||
3558 | } else | ||
3559 | count += sprintf(buf + count, "%s%d%c", | ||
3560 | name, index, i ? ' ':'\n'); | ||
3561 | } | ||
3551 | console_unlock(); | 3562 | console_unlock(); |
3552 | 3563 | ||
3553 | return count; | 3564 | return count; |
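Returning the sprintf() count from tty_line_name() lets show_cons_active() emit the resolved tty name for each console and append every piece at the offset the previous write reported. The accumulate-the-return-value idiom in isolation (plain user-space C with a made-up name list):

#include <stdio.h>

/* Append each name to buf, separating with spaces and ending with a
 * newline, by accumulating the byte counts sprintf() reports. */
int main(void)
{
    const char *names[] = { "tty1", "ttyS0" };
    const int n = 2;
    char buf[64];
    int count = 0;

    for (int i = n - 1; i >= 0; i--) {
        count += sprintf(buf + count, "%s", names[i]);
        count += sprintf(buf + count, "%c", i ? ' ' : '\n');
    }
    fputs(buf, stdout);
    return 0;
}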
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 61b1137d7e56..23b5d32954bf 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c | |||
@@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar) | |||
1164 | scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, | 1164 | scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, |
1165 | vc->vc_screenbuf_size >> 1); | 1165 | vc->vc_screenbuf_size >> 1); |
1166 | set_origin(vc); | 1166 | set_origin(vc); |
1167 | if (CON_IS_VISIBLE(vc)) | ||
1168 | update_screen(vc); | ||
1167 | /* fall through */ | 1169 | /* fall through */ |
1168 | case 2: /* erase whole display */ | 1170 | case 2: /* erase whole display */ |
1169 | count = vc->vc_cols * vc->vc_rows; | 1171 | count = vc->vc_cols * vc->vc_rows; |
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 5d01558cef66..ab90a0156828 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
@@ -63,8 +63,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids, | |||
63 | dynid->id.idProduct = idProduct; | 63 | dynid->id.idProduct = idProduct; |
64 | dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE; | 64 | dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE; |
65 | if (fields > 2 && bInterfaceClass) { | 65 | if (fields > 2 && bInterfaceClass) { |
66 | if (bInterfaceClass > 255) | 66 | if (bInterfaceClass > 255) { |
67 | return -EINVAL; | 67 | retval = -EINVAL; |
68 | goto fail; | ||
69 | } | ||
68 | 70 | ||
69 | dynid->id.bInterfaceClass = (u8)bInterfaceClass; | 71 | dynid->id.bInterfaceClass = (u8)bInterfaceClass; |
70 | dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS; | 72 | dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS; |
@@ -73,17 +75,21 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids, | |||
73 | if (fields > 4) { | 75 | if (fields > 4) { |
74 | const struct usb_device_id *id = id_table; | 76 | const struct usb_device_id *id = id_table; |
75 | 77 | ||
76 | if (!id) | 78 | if (!id) { |
77 | return -ENODEV; | 79 | retval = -ENODEV; |
80 | goto fail; | ||
81 | } | ||
78 | 82 | ||
79 | for (; id->match_flags; id++) | 83 | for (; id->match_flags; id++) |
80 | if (id->idVendor == refVendor && id->idProduct == refProduct) | 84 | if (id->idVendor == refVendor && id->idProduct == refProduct) |
81 | break; | 85 | break; |
82 | 86 | ||
83 | if (id->match_flags) | 87 | if (id->match_flags) { |
84 | dynid->id.driver_info = id->driver_info; | 88 | dynid->id.driver_info = id->driver_info; |
85 | else | 89 | } else { |
86 | return -ENODEV; | 90 | retval = -ENODEV; |
91 | goto fail; | ||
92 | } | ||
87 | } | 93 | } |
88 | 94 | ||
89 | spin_lock(&dynids->lock); | 95 | spin_lock(&dynids->lock); |
@@ -95,6 +101,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids, | |||
95 | if (retval) | 101 | if (retval) |
96 | return retval; | 102 | return retval; |
97 | return count; | 103 | return count; |
104 | |||
105 | fail: | ||
106 | kfree(dynid); | ||
107 | return retval; | ||
98 | } | 108 | } |
99 | EXPORT_SYMBOL_GPL(usb_store_new_id); | 109 | EXPORT_SYMBOL_GPL(usb_store_new_id); |
100 | 110 | ||
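Each early return in usb_store_new_id() used to leak the freshly allocated dynid; routing every failure after the allocation through a single fail: label keeps the kfree() in one place. The same single-exit pattern as a standalone sketch (store_new_id, struct dyn_id and the list are illustrative, with plain error codes):

#include <stdlib.h>

struct dyn_id {
    unsigned int vendor, product, iface_class;
    struct dyn_id *next;
};

static struct dyn_id *dynid_list;   /* stands in for the driver's dynids list */

/* Every failure after the allocation jumps to the single label that
 * releases it, so no error path can leak the new entry. */
int store_new_id(unsigned int vendor, unsigned int product,
                 unsigned int iface_class)
{
    int retval = 0;
    struct dyn_id *dynid = calloc(1, sizeof(*dynid));

    if (!dynid)
        return -1;

    dynid->vendor = vendor;
    dynid->product = product;

    if (iface_class > 255) {    /* the real field is a u8 */
        retval = -1;
        goto fail;
    }
    dynid->iface_class = iface_class;

    /* success: ownership passes to the list */
    dynid->next = dynid_list;
    dynid_list = dynid;
    return retval;

fail:
    free(dynid);
    return retval;
}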
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 199aaea6bfe0..2518c3250750 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -1032,7 +1032,6 @@ static int register_root_hub(struct usb_hcd *hcd) | |||
1032 | dev_name(&usb_dev->dev), retval); | 1032 | dev_name(&usb_dev->dev), retval); |
1033 | return retval; | 1033 | return retval; |
1034 | } | 1034 | } |
1035 | usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev); | ||
1036 | } | 1035 | } |
1037 | 1036 | ||
1038 | retval = usb_new_device (usb_dev); | 1037 | retval = usb_new_device (usb_dev); |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index babba885978d..64ea21971be2 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -128,7 +128,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) | |||
128 | return usb_get_intfdata(hdev->actconfig->interface[0]); | 128 | return usb_get_intfdata(hdev->actconfig->interface[0]); |
129 | } | 129 | } |
130 | 130 | ||
131 | int usb_device_supports_lpm(struct usb_device *udev) | 131 | static int usb_device_supports_lpm(struct usb_device *udev) |
132 | { | 132 | { |
133 | /* USB 2.1 (and greater) devices indicate LPM support through | 133 | /* USB 2.1 (and greater) devices indicate LPM support through |
134 | * their USB 2.0 Extended Capabilities BOS descriptor. | 134 | * their USB 2.0 Extended Capabilities BOS descriptor. |
@@ -149,11 +149,6 @@ int usb_device_supports_lpm(struct usb_device *udev) | |||
149 | "Power management will be impacted.\n"); | 149 | "Power management will be impacted.\n"); |
150 | return 0; | 150 | return 0; |
151 | } | 151 | } |
152 | |||
153 | /* udev is root hub */ | ||
154 | if (!udev->parent) | ||
155 | return 1; | ||
156 | |||
157 | if (udev->parent->lpm_capable) | 152 | if (udev->parent->lpm_capable) |
158 | return 1; | 153 | return 1; |
159 | 154 | ||
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index c49383669cd8..823857767a16 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h | |||
@@ -35,7 +35,6 @@ extern int usb_get_device_descriptor(struct usb_device *dev, | |||
35 | unsigned int size); | 35 | unsigned int size); |
36 | extern int usb_get_bos_descriptor(struct usb_device *dev); | 36 | extern int usb_get_bos_descriptor(struct usb_device *dev); |
37 | extern void usb_release_bos_descriptor(struct usb_device *dev); | 37 | extern void usb_release_bos_descriptor(struct usb_device *dev); |
38 | extern int usb_device_supports_lpm(struct usb_device *udev); | ||
39 | extern char *usb_cache_string(struct usb_device *udev, int index); | 38 | extern char *usb_cache_string(struct usb_device *udev, int index); |
40 | extern int usb_set_configuration(struct usb_device *dev, int configuration); | 39 | extern int usb_set_configuration(struct usb_device *dev, int configuration); |
41 | extern int usb_choose_configuration(struct usb_device *udev); | 40 | extern int usb_choose_configuration(struct usb_device *udev); |
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index 8565d87f94b4..1d129884cc39 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c | |||
@@ -216,7 +216,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) | |||
216 | int retval = 0; | 216 | int retval = 0; |
217 | 217 | ||
218 | if (!select_phy) | 218 | if (!select_phy) |
219 | return -ENODEV; | 219 | return 0; |
220 | 220 | ||
221 | usbcfg = readl(hsotg->regs + GUSBCFG); | 221 | usbcfg = readl(hsotg->regs + GUSBCFG); |
222 | 222 | ||
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index f59484d43b35..4d918ed8d343 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c | |||
@@ -2565,25 +2565,14 @@ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd, | |||
2565 | struct usb_host_endpoint *ep) | 2565 | struct usb_host_endpoint *ep) |
2566 | { | 2566 | { |
2567 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 2567 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); |
2568 | int is_control = usb_endpoint_xfer_control(&ep->desc); | ||
2569 | int is_out = usb_endpoint_dir_out(&ep->desc); | ||
2570 | int epnum = usb_endpoint_num(&ep->desc); | ||
2571 | struct usb_device *udev; | ||
2572 | unsigned long flags; | 2568 | unsigned long flags; |
2573 | 2569 | ||
2574 | dev_dbg(hsotg->dev, | 2570 | dev_dbg(hsotg->dev, |
2575 | "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n", | 2571 | "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n", |
2576 | ep->desc.bEndpointAddress); | 2572 | ep->desc.bEndpointAddress); |
2577 | 2573 | ||
2578 | udev = to_usb_device(hsotg->dev); | ||
2579 | |||
2580 | spin_lock_irqsave(&hsotg->lock, flags); | 2574 | spin_lock_irqsave(&hsotg->lock, flags); |
2581 | |||
2582 | usb_settoggle(udev, epnum, is_out, 0); | ||
2583 | if (is_control) | ||
2584 | usb_settoggle(udev, epnum, !is_out, 0); | ||
2585 | dwc2_hcd_endpoint_reset(hsotg, ep); | 2575 | dwc2_hcd_endpoint_reset(hsotg, ep); |
2586 | |||
2587 | spin_unlock_irqrestore(&hsotg->lock, flags); | 2576 | spin_unlock_irqrestore(&hsotg->lock, flags); |
2588 | } | 2577 | } |
2589 | 2578 | ||
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index d01d0d3f2cf0..eaba547ce26b 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c | |||
@@ -124,6 +124,9 @@ static int dwc2_driver_probe(struct platform_device *dev) | |||
124 | int retval; | 124 | int retval; |
125 | int irq; | 125 | int irq; |
126 | 126 | ||
127 | if (usb_disabled()) | ||
128 | return -ENODEV; | ||
129 | |||
127 | match = of_match_device(dwc2_of_match_table, &dev->dev); | 130 | match = of_match_device(dwc2_of_match_table, &dev->dev); |
128 | if (match && match->data) { | 131 | if (match && match->data) { |
129 | params = match->data; | 132 | params = match->data; |
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index b016d38199f2..eb009a457fb5 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c | |||
@@ -203,12 +203,12 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num) | |||
203 | addr, (unsigned int)temp); | 203 | addr, (unsigned int)temp); |
204 | 204 | ||
205 | addr = &ir_set->erst_base; | 205 | addr = &ir_set->erst_base; |
206 | temp_64 = readq(addr); | 206 | temp_64 = xhci_read_64(xhci, addr); |
207 | xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n", | 207 | xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n", |
208 | addr, temp_64); | 208 | addr, temp_64); |
209 | 209 | ||
210 | addr = &ir_set->erst_dequeue; | 210 | addr = &ir_set->erst_dequeue; |
211 | temp_64 = readq(addr); | 211 | temp_64 = xhci_read_64(xhci, addr); |
212 | xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n", | 212 | xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n", |
213 | addr, temp_64); | 213 | addr, temp_64); |
214 | } | 214 | } |
@@ -412,7 +412,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) | |||
412 | { | 412 | { |
413 | u64 val; | 413 | u64 val; |
414 | 414 | ||
415 | val = readq(&xhci->op_regs->cmd_ring); | 415 | val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
416 | xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n", | 416 | xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n", |
417 | lower_32_bits(val)); | 417 | lower_32_bits(val)); |
418 | xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n", | 418 | xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n", |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 873c272b3ef5..bce4391a0e7d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -1958,7 +1958,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | |||
1958 | xhci_warn(xhci, "WARN something wrong with SW event ring " | 1958 | xhci_warn(xhci, "WARN something wrong with SW event ring " |
1959 | "dequeue ptr.\n"); | 1959 | "dequeue ptr.\n"); |
1960 | /* Update HC event ring dequeue pointer */ | 1960 | /* Update HC event ring dequeue pointer */ |
1961 | temp = readq(&xhci->ir_set->erst_dequeue); | 1961 | temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
1962 | temp &= ERST_PTR_MASK; | 1962 | temp &= ERST_PTR_MASK; |
1963 | /* Don't clear the EHB bit (which is RW1C) because | 1963 | /* Don't clear the EHB bit (which is RW1C) because |
1964 | * there might be more events to service. | 1964 | * there might be more events to service. |
@@ -1967,7 +1967,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | |||
1967 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 1967 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
1968 | "// Write event ring dequeue pointer, " | 1968 | "// Write event ring dequeue pointer, " |
1969 | "preserving EHB bit"); | 1969 | "preserving EHB bit"); |
1970 | writeq(((u64) deq & (u64) ~ERST_PTR_MASK) | temp, | 1970 | xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, |
1971 | &xhci->ir_set->erst_dequeue); | 1971 | &xhci->ir_set->erst_dequeue); |
1972 | } | 1972 | } |
1973 | 1973 | ||
@@ -2269,7 +2269,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2269 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 2269 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
2270 | "// Device context base array address = 0x%llx (DMA), %p (virt)", | 2270 | "// Device context base array address = 0x%llx (DMA), %p (virt)", |
2271 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); | 2271 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); |
2272 | writeq(dma, &xhci->op_regs->dcbaa_ptr); | 2272 | xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); |
2273 | 2273 | ||
2274 | /* | 2274 | /* |
2275 | * Initialize the ring segment pool. The ring must be a contiguous | 2275 | * Initialize the ring segment pool. The ring must be a contiguous |
@@ -2312,13 +2312,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2312 | (unsigned long long)xhci->cmd_ring->first_seg->dma); | 2312 | (unsigned long long)xhci->cmd_ring->first_seg->dma); |
2313 | 2313 | ||
2314 | /* Set the address in the Command Ring Control register */ | 2314 | /* Set the address in the Command Ring Control register */ |
2315 | val_64 = readq(&xhci->op_regs->cmd_ring); | 2315 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
2316 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | 2316 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | |
2317 | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | | 2317 | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | |
2318 | xhci->cmd_ring->cycle_state; | 2318 | xhci->cmd_ring->cycle_state; |
2319 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 2319 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
2320 | "// Setting command ring address to 0x%x", val); | 2320 | "// Setting command ring address to 0x%x", val); |
2321 | writeq(val_64, &xhci->op_regs->cmd_ring); | 2321 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); |
2322 | xhci_dbg_cmd_ptrs(xhci); | 2322 | xhci_dbg_cmd_ptrs(xhci); |
2323 | 2323 | ||
2324 | xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags); | 2324 | xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags); |
@@ -2396,10 +2396,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2396 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 2396 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
2397 | "// Set ERST base address for ir_set 0 = 0x%llx", | 2397 | "// Set ERST base address for ir_set 0 = 0x%llx", |
2398 | (unsigned long long)xhci->erst.erst_dma_addr); | 2398 | (unsigned long long)xhci->erst.erst_dma_addr); |
2399 | val_64 = readq(&xhci->ir_set->erst_base); | 2399 | val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); |
2400 | val_64 &= ERST_PTR_MASK; | 2400 | val_64 &= ERST_PTR_MASK; |
2401 | val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); | 2401 | val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); |
2402 | writeq(val_64, &xhci->ir_set->erst_base); | 2402 | xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); |
2403 | 2403 | ||
2404 | /* Set the event ring dequeue address */ | 2404 | /* Set the event ring dequeue address */ |
2405 | xhci_set_hc_event_deq(xhci); | 2405 | xhci_set_hc_event_deq(xhci); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 3c898c12a06b..04f986d9234f 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
142 | "QUIRK: Resetting on resume"); | 142 | "QUIRK: Resetting on resume"); |
143 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | 143 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
144 | } | 144 | } |
145 | if (pdev->vendor == PCI_VENDOR_ID_RENESAS && | ||
146 | pdev->device == 0x0015 && | ||
147 | pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG && | ||
148 | pdev->subsystem_device == 0xc0cd) | ||
149 | xhci->quirks |= XHCI_RESET_ON_RESUME; | ||
145 | if (pdev->vendor == PCI_VENDOR_ID_VIA) | 150 | if (pdev->vendor == PCI_VENDOR_ID_VIA) |
146 | xhci->quirks |= XHCI_RESET_ON_RESUME; | 151 | xhci->quirks |= XHCI_RESET_ON_RESUME; |
147 | } | 152 | } |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index a0b248c34526..0ed64eb68e48 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -307,13 +307,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) | |||
307 | return 0; | 307 | return 0; |
308 | } | 308 | } |
309 | 309 | ||
310 | temp_64 = readq(&xhci->op_regs->cmd_ring); | 310 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
311 | if (!(temp_64 & CMD_RING_RUNNING)) { | 311 | if (!(temp_64 & CMD_RING_RUNNING)) { |
312 | xhci_dbg(xhci, "Command ring had been stopped\n"); | 312 | xhci_dbg(xhci, "Command ring had been stopped\n"); |
313 | return 0; | 313 | return 0; |
314 | } | 314 | } |
315 | xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; | 315 | xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; |
316 | writeq(temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); | 316 | xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, |
317 | &xhci->op_regs->cmd_ring); | ||
317 | 318 | ||
318 | /* Section 4.6.1.2 of xHCI 1.0 spec says software should | 319 | /* Section 4.6.1.2 of xHCI 1.0 spec says software should |
319 | * time the completion od all xHCI commands, including | 320 | * time the completion od all xHCI commands, including |
@@ -2864,8 +2865,9 @@ hw_died: | |||
2864 | /* Clear the event handler busy flag (RW1C); | 2865 | /* Clear the event handler busy flag (RW1C); |
2865 | * the event ring should be empty. | 2866 | * the event ring should be empty. |
2866 | */ | 2867 | */ |
2867 | temp_64 = readq(&xhci->ir_set->erst_dequeue); | 2868 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
2868 | writeq(temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); | 2869 | xhci_write_64(xhci, temp_64 | ERST_EHB, |
2870 | &xhci->ir_set->erst_dequeue); | ||
2869 | spin_unlock(&xhci->lock); | 2871 | spin_unlock(&xhci->lock); |
2870 | 2872 | ||
2871 | return IRQ_HANDLED; | 2873 | return IRQ_HANDLED; |
@@ -2877,7 +2879,7 @@ hw_died: | |||
2877 | */ | 2879 | */ |
2878 | while (xhci_handle_event(xhci) > 0) {} | 2880 | while (xhci_handle_event(xhci) > 0) {} |
2879 | 2881 | ||
2880 | temp_64 = readq(&xhci->ir_set->erst_dequeue); | 2882 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
2881 | /* If necessary, update the HW's version of the event ring deq ptr. */ | 2883 | /* If necessary, update the HW's version of the event ring deq ptr. */ |
2882 | if (event_ring_deq != xhci->event_ring->dequeue) { | 2884 | if (event_ring_deq != xhci->event_ring->dequeue) { |
2883 | deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, | 2885 | deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, |
@@ -2892,7 +2894,7 @@ hw_died: | |||
2892 | 2894 | ||
2893 | /* Clear the event handler busy flag (RW1C); event ring is empty. */ | 2895 | /* Clear the event handler busy flag (RW1C); event ring is empty. */ |
2894 | temp_64 |= ERST_EHB; | 2896 | temp_64 |= ERST_EHB; |
2895 | writeq(temp_64, &xhci->ir_set->erst_dequeue); | 2897 | xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); |
2896 | 2898 | ||
2897 | spin_unlock(&xhci->lock); | 2899 | spin_unlock(&xhci->lock); |
2898 | 2900 | ||
@@ -2965,58 +2967,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
2965 | } | 2967 | } |
2966 | 2968 | ||
2967 | while (1) { | 2969 | while (1) { |
2968 | if (room_on_ring(xhci, ep_ring, num_trbs)) { | 2970 | if (room_on_ring(xhci, ep_ring, num_trbs)) |
2969 | union xhci_trb *trb = ep_ring->enqueue; | 2971 | break; |
2970 | unsigned int usable = ep_ring->enq_seg->trbs + | ||
2971 | TRBS_PER_SEGMENT - 1 - trb; | ||
2972 | u32 nop_cmd; | ||
2973 | |||
2974 | /* | ||
2975 | * Section 4.11.7.1 TD Fragments states that a link | ||
2976 | * TRB must only occur at the boundary between | ||
2977 | * data bursts (eg 512 bytes for 480M). | ||
2978 | * While it is possible to split a large fragment | ||
2979 | * we don't know the size yet. | ||
2980 | * Simplest solution is to fill the trb before the | ||
2981 | * LINK with nop commands. | ||
2982 | */ | ||
2983 | if (num_trbs == 1 || num_trbs <= usable || usable == 0) | ||
2984 | break; | ||
2985 | |||
2986 | if (ep_ring->type != TYPE_BULK) | ||
2987 | /* | ||
2988 | * While isoc transfers might have a buffer that | ||
2989 | * crosses a 64k boundary it is unlikely. | ||
2990 | * Since we can't add NOPs without generating | ||
2991 | * gaps in the traffic just hope it never | ||
2992 | * happens at the end of the ring. | ||
2993 | * This could be fixed by writing a LINK TRB | ||
2994 | * instead of the first NOP - however the | ||
2995 | * TRB_TYPE_LINK_LE32() calls would all need | ||
2996 | * changing to check the ring length. | ||
2997 | */ | ||
2998 | break; | ||
2999 | |||
3000 | if (num_trbs >= TRBS_PER_SEGMENT) { | ||
3001 | xhci_err(xhci, "Too many fragments %d, max %d\n", | ||
3002 | num_trbs, TRBS_PER_SEGMENT - 1); | ||
3003 | return -EINVAL; | ||
3004 | } | ||
3005 | |||
3006 | nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) | | ||
3007 | ep_ring->cycle_state); | ||
3008 | ep_ring->num_trbs_free -= usable; | ||
3009 | do { | ||
3010 | trb->generic.field[0] = 0; | ||
3011 | trb->generic.field[1] = 0; | ||
3012 | trb->generic.field[2] = 0; | ||
3013 | trb->generic.field[3] = nop_cmd; | ||
3014 | trb++; | ||
3015 | } while (--usable); | ||
3016 | ep_ring->enqueue = trb; | ||
3017 | if (room_on_ring(xhci, ep_ring, num_trbs)) | ||
3018 | break; | ||
3019 | } | ||
3020 | 2972 | ||
3021 | if (ep_ring == xhci->cmd_ring) { | 2973 | if (ep_ring == xhci->cmd_ring) { |
3022 | xhci_err(xhci, "Do not support expand command ring\n"); | 2974 | xhci_err(xhci, "Do not support expand command ring\n"); |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ad364394885a..6fe577d46fa2 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -611,7 +611,7 @@ int xhci_run(struct usb_hcd *hcd) | |||
611 | xhci_dbg(xhci, "Event ring:\n"); | 611 | xhci_dbg(xhci, "Event ring:\n"); |
612 | xhci_debug_ring(xhci, xhci->event_ring); | 612 | xhci_debug_ring(xhci, xhci->event_ring); |
613 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); | 613 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); |
614 | temp_64 = readq(&xhci->ir_set->erst_dequeue); | 614 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
615 | temp_64 &= ~ERST_PTR_MASK; | 615 | temp_64 &= ~ERST_PTR_MASK; |
616 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 616 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
617 | "ERST deq = 64'h%0lx", (long unsigned int) temp_64); | 617 | "ERST deq = 64'h%0lx", (long unsigned int) temp_64); |
@@ -756,11 +756,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci) | |||
756 | { | 756 | { |
757 | xhci->s3.command = readl(&xhci->op_regs->command); | 757 | xhci->s3.command = readl(&xhci->op_regs->command); |
758 | xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); | 758 | xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); |
759 | xhci->s3.dcbaa_ptr = readq(&xhci->op_regs->dcbaa_ptr); | 759 | xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
760 | xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); | 760 | xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); |
761 | xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); | 761 | xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); |
762 | xhci->s3.erst_base = readq(&xhci->ir_set->erst_base); | 762 | xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); |
763 | xhci->s3.erst_dequeue = readq(&xhci->ir_set->erst_dequeue); | 763 | xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
764 | xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); | 764 | xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); |
765 | xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); | 765 | xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); |
766 | } | 766 | } |
@@ -769,11 +769,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) | |||
769 | { | 769 | { |
770 | writel(xhci->s3.command, &xhci->op_regs->command); | 770 | writel(xhci->s3.command, &xhci->op_regs->command); |
771 | writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); | 771 | writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); |
772 | writeq(xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); | 772 | xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); |
773 | writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); | 773 | writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); |
774 | writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); | 774 | writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); |
775 | writeq(xhci->s3.erst_base, &xhci->ir_set->erst_base); | 775 | xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); |
776 | writeq(xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); | 776 | xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); |
777 | writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); | 777 | writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); |
778 | writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); | 778 | writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); |
779 | } | 779 | } |
@@ -783,7 +783,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) | |||
783 | u64 val_64; | 783 | u64 val_64; |
784 | 784 | ||
785 | /* step 2: initialize command ring buffer */ | 785 | /* step 2: initialize command ring buffer */ |
786 | val_64 = readq(&xhci->op_regs->cmd_ring); | 786 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
787 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | 787 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | |
788 | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | 788 | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, |
789 | xhci->cmd_ring->dequeue) & | 789 | xhci->cmd_ring->dequeue) & |
@@ -792,7 +792,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) | |||
792 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 792 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
793 | "// Setting command ring address to 0x%llx", | 793 | "// Setting command ring address to 0x%llx", |
794 | (long unsigned long) val_64); | 794 | (long unsigned long) val_64); |
795 | writeq(val_64, &xhci->op_regs->cmd_ring); | 795 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); |
796 | } | 796 | } |
797 | 797 | ||
798 | /* | 798 | /* |
@@ -3842,7 +3842,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, | |||
3842 | if (ret) { | 3842 | if (ret) { |
3843 | return ret; | 3843 | return ret; |
3844 | } | 3844 | } |
3845 | temp_64 = readq(&xhci->op_regs->dcbaa_ptr); | 3845 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
3846 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | 3846 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
3847 | "Op regs DCBAA ptr = %#016llx", temp_64); | 3847 | "Op regs DCBAA ptr = %#016llx", temp_64); |
3848 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | 3848 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
@@ -4730,11 +4730,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
4730 | struct device *dev = hcd->self.controller; | 4730 | struct device *dev = hcd->self.controller; |
4731 | int retval; | 4731 | int retval; |
4732 | 4732 | ||
4733 | /* Limit the block layer scatter-gather lists to half a segment. */ | 4733 | /* Accept arbitrarily long scatter-gather lists */ |
4734 | hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2; | 4734 | hcd->self.sg_tablesize = ~0; |
4735 | |||
4736 | /* support to build packet from discontinuous buffers */ | ||
4737 | hcd->self.no_sg_constraint = 1; | ||
4738 | 4735 | ||
4739 | /* XHCI controllers don't stop the ep queue on short packets :| */ | 4736 | /* XHCI controllers don't stop the ep queue on short packets :| */ |
4740 | hcd->self.no_stop_on_short = 1; | 4737 | hcd->self.no_stop_on_short = 1; |
@@ -4760,6 +4757,14 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
4760 | /* xHCI private pointer was set in xhci_pci_probe for the second | 4757 | /* xHCI private pointer was set in xhci_pci_probe for the second |
4761 | * registered roothub. | 4758 | * registered roothub. |
4762 | */ | 4759 | */ |
4760 | xhci = hcd_to_xhci(hcd); | ||
4761 | /* | ||
4762 | * Support arbitrarily aligned sg-list entries on hosts without | ||
4763 | * TD fragment rules (which are currently unsupported). | ||
4764 | */ | ||
4765 | if (xhci->hci_version < 0x100) | ||
4766 | hcd->self.no_sg_constraint = 1; | ||
4767 | |||
4763 | return 0; | 4768 | return 0; |
4764 | } | 4769 | } |
4765 | 4770 | ||
@@ -4788,6 +4793,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
4788 | if (xhci->hci_version > 0x96) | 4793 | if (xhci->hci_version > 0x96) |
4789 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; | 4794 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; |
4790 | 4795 | ||
4796 | if (xhci->hci_version < 0x100) | ||
4797 | hcd->self.no_sg_constraint = 1; | ||
4798 | |||
4791 | /* Make sure the HC is halted. */ | 4799 | /* Make sure the HC is halted. */ |
4792 | retval = xhci_halt(xhci); | 4800 | retval = xhci_halt(xhci); |
4793 | if (retval) | 4801 | if (retval) |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index f8416639bf31..58ed9d088e63 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -28,17 +28,6 @@ | |||
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/usb/hcd.h> | 29 | #include <linux/usb/hcd.h> |
30 | 30 | ||
31 | /* | ||
32 | * Registers should always be accessed with double word or quad word accesses. | ||
33 | * | ||
34 | * Some xHCI implementations may support 64-bit address pointers. Registers | ||
35 | * with 64-bit address pointers should be written to with dword accesses by | ||
36 | * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. | ||
37 | * xHCI implementations that do not support 64-bit address pointers will ignore | ||
38 | * the high dword, and write order is irrelevant. | ||
39 | */ | ||
40 | #include <asm-generic/io-64-nonatomic-lo-hi.h> | ||
41 | |||
42 | /* Code sharing between pci-quirks and xhci hcd */ | 31 | /* Code sharing between pci-quirks and xhci hcd */ |
43 | #include "xhci-ext-caps.h" | 32 | #include "xhci-ext-caps.h" |
44 | #include "pci-quirks.h" | 33 | #include "pci-quirks.h" |
@@ -1279,7 +1268,7 @@ union xhci_trb { | |||
1279 | * since the command ring is 64-byte aligned. | 1268 | * since the command ring is 64-byte aligned. |
1280 | * It must also be greater than 16. | 1269 | * It must also be greater than 16. |
1281 | */ | 1270 | */ |
1282 | #define TRBS_PER_SEGMENT 256 | 1271 | #define TRBS_PER_SEGMENT 64 |
1283 | /* Allow two commands + a link TRB, along with any reserved command TRBs */ | 1272 | /* Allow two commands + a link TRB, along with any reserved command TRBs */ |
1284 | #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) | 1273 | #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) |
1285 | #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16) | 1274 | #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16) |
@@ -1614,6 +1603,34 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) | |||
1614 | #define xhci_warn_ratelimited(xhci, fmt, args...) \ | 1603 | #define xhci_warn_ratelimited(xhci, fmt, args...) \ |
1615 | dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) | 1604 | dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) |
1616 | 1605 | ||
1606 | /* | ||
1607 | * Registers should always be accessed with double word or quad word accesses. | ||
1608 | * | ||
1609 | * Some xHCI implementations may support 64-bit address pointers. Registers | ||
1610 | * with 64-bit address pointers should be written to with dword accesses by | ||
1611 | * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. | ||
1612 | * xHCI implementations that do not support 64-bit address pointers will ignore | ||
1613 | * the high dword, and write order is irrelevant. | ||
1614 | */ | ||
1615 | static inline u64 xhci_read_64(const struct xhci_hcd *xhci, | ||
1616 | __le64 __iomem *regs) | ||
1617 | { | ||
1618 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | ||
1619 | u64 val_lo = readl(ptr); | ||
1620 | u64 val_hi = readl(ptr + 1); | ||
1621 | return val_lo + (val_hi << 32); | ||
1622 | } | ||
1623 | static inline void xhci_write_64(struct xhci_hcd *xhci, | ||
1624 | const u64 val, __le64 __iomem *regs) | ||
1625 | { | ||
1626 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | ||
1627 | u32 val_lo = lower_32_bits(val); | ||
1628 | u32 val_hi = upper_32_bits(val); | ||
1629 | |||
1630 | writel(val_lo, ptr); | ||
1631 | writel(val_hi, ptr + 1); | ||
1632 | } | ||
1633 | |||
1617 | static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) | 1634 | static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) |
1618 | { | 1635 | { |
1619 | return xhci->quirks & XHCI_LINK_TRB_QUIRK; | 1636 | return xhci->quirks & XHCI_LINK_TRB_QUIRK; |
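The new xhci_read_64()/xhci_write_64() helpers above replace the generic io-64-nonatomic-lo-hi accessors with explicit low-dword-then-high-dword accesses. A userspace sketch of the same lo-then-hi split, using ordinary memory in place of MMIO registers:

    /* Userspace sketch of the lo-then-hi split; plain memory stands in
     * for the memory-mapped register pair. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t read_64_lo_hi(volatile uint32_t *regs)
    {
        uint64_t lo = regs[0];              /* low dword first */
        uint64_t hi = regs[1];              /* then high dword */
        return lo + (hi << 32);
    }

    static void write_64_lo_hi(uint64_t val, volatile uint32_t *regs)
    {
        regs[0] = (uint32_t)val;            /* low dword first */
        regs[1] = (uint32_t)(val >> 32);    /* high dword second */
    }

    int main(void)
    {
        uint32_t fake_reg[2] = { 0, 0 };

        write_64_lo_hi(0x123456789abcdef0ULL, fake_reg);
        printf("%#llx\n", (unsigned long long)read_64_lo_hi(fake_reg));
        return 0;
    }

As the retained comment notes, controllers that do not implement 64-bit pointers simply ignore the high dword, so the split access is safe either way.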
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index e6f61e4361df..8afa813d690b 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c | |||
@@ -130,7 +130,7 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type) | |||
130 | 130 | ||
131 | phy = __usb_find_phy(&phy_list, type); | 131 | phy = __usb_find_phy(&phy_list, type); |
132 | if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { | 132 | if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { |
133 | pr_err("unable to find transceiver of type %s\n", | 133 | pr_debug("PHY: unable to find transceiver of type %s\n", |
134 | usb_phy_type_string(type)); | 134 | usb_phy_type_string(type)); |
135 | goto err0; | 135 | goto err0; |
136 | } | 136 | } |
@@ -228,7 +228,7 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index) | |||
228 | 228 | ||
229 | phy = __usb_find_phy_dev(dev, &phy_bind_list, index); | 229 | phy = __usb_find_phy_dev(dev, &phy_bind_list, index); |
230 | if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { | 230 | if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { |
231 | pr_err("unable to find transceiver\n"); | 231 | dev_dbg(dev, "unable to find transceiver\n"); |
232 | goto err0; | 232 | goto err0; |
233 | } | 233 | } |
234 | 234 | ||
@@ -424,10 +424,8 @@ int usb_bind_phy(const char *dev_name, u8 index, | |||
424 | unsigned long flags; | 424 | unsigned long flags; |
425 | 425 | ||
426 | phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL); | 426 | phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL); |
427 | if (!phy_bind) { | 427 | if (!phy_bind) |
428 | pr_err("phy_bind(): No memory for phy_bind"); | ||
429 | return -ENOMEM; | 428 | return -ENOMEM; |
430 | } | ||
431 | 429 | ||
432 | phy_bind->dev_name = dev_name; | 430 | phy_bind->dev_name = dev_name; |
433 | phy_bind->phy_dev_name = phy_dev_name; | 431 | phy_bind->phy_dev_name = phy_dev_name; |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index ce0d7b0db012..ee1f00f03c43 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -152,6 +152,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
152 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, | 152 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, |
153 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, | 153 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, |
154 | { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, | 154 | { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, |
155 | { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) }, | ||
155 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, | 156 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, |
156 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, | 157 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, |
157 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, | 158 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, |
@@ -191,6 +192,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
191 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, | 192 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, |
192 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, | 193 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, |
193 | { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, | 194 | { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, |
195 | { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) }, | ||
196 | { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) }, | ||
194 | { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, | 197 | { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, |
195 | { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, | 198 | { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, |
196 | { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, | 199 | { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index a7019d1e3058..1e2d369df86e 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -50,6 +50,7 @@ | |||
50 | #define TI_XDS100V2_PID 0xa6d0 | 50 | #define TI_XDS100V2_PID 0xa6d0 |
51 | 51 | ||
52 | #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ | 52 | #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ |
53 | #define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */ | ||
53 | 54 | ||
54 | /* US Interface Navigator (http://www.usinterface.com/) */ | 55 | /* US Interface Navigator (http://www.usinterface.com/) */ |
55 | #define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */ | 56 | #define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */ |
@@ -363,6 +364,12 @@ | |||
363 | /* Sprog II (Andrew Crosland's SprogII DCC interface) */ | 364 | /* Sprog II (Andrew Crosland's SprogII DCC interface) */ |
364 | #define FTDI_SPROG_II 0xF0C8 | 365 | #define FTDI_SPROG_II 0xF0C8 |
365 | 366 | ||
367 | /* | ||
368 | * Two of the Tagsys RFID Readers | ||
369 | */ | ||
370 | #define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID*/ | ||
371 | #define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID*/ | ||
372 | |||
366 | /* an infrared receiver for user access control with IR tags */ | 373 | /* an infrared receiver for user access control with IR tags */ |
367 | #define FTDI_PIEGROUP_PID 0xF208 /* Product Id */ | 374 | #define FTDI_PIEGROUP_PID 0xF208 /* Product Id */ |
368 | 375 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5c86f57e4afa..216d20affba8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -1362,7 +1362,8 @@ static const struct usb_device_id option_ids[] = { | |||
1362 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, | 1362 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, |
1363 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, | 1363 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, |
1364 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, | 1364 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, |
1365 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) }, | 1365 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff), |
1366 | .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, | ||
1366 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, | 1367 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, |
1367 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, | 1368 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, |
1368 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, | 1369 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index c65437cfd4a2..968a40201e5f 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
@@ -139,6 +139,9 @@ static const struct usb_device_id id_table[] = { | |||
139 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */ | 139 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */ |
140 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */ | 140 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */ |
141 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */ | 141 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */ |
142 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */ | ||
143 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */ | ||
144 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */ | ||
142 | 145 | ||
143 | { } /* Terminating entry */ | 146 | { } /* Terminating entry */ |
144 | }; | 147 | }; |
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index f112b079ddfc..fb79775447b0 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c | |||
@@ -71,7 +71,8 @@ DEVICE(hp4x, HP4X_IDS); | |||
71 | 71 | ||
72 | /* Suunto ANT+ USB Driver */ | 72 | /* Suunto ANT+ USB Driver */ |
73 | #define SUUNTO_IDS() \ | 73 | #define SUUNTO_IDS() \ |
74 | { USB_DEVICE(0x0fcf, 0x1008) } | 74 | { USB_DEVICE(0x0fcf, 0x1008) }, \ |
75 | { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */ | ||
75 | DEVICE(suunto, SUUNTO_IDS); | 76 | DEVICE(suunto, SUUNTO_IDS); |
76 | 77 | ||
77 | /* Siemens USB/MPI adapter */ | 78 | /* Siemens USB/MPI adapter */ |
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index 8470e1b114f2..1dd0604d1911 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig | |||
@@ -18,7 +18,9 @@ config USB_STORAGE | |||
18 | 18 | ||
19 | This option depends on 'SCSI' support being enabled, but you | 19 | This option depends on 'SCSI' support being enabled, but you |
20 | probably also need 'SCSI device support: SCSI disk support' | 20 | probably also need 'SCSI device support: SCSI disk support' |
21 | (BLK_DEV_SD) for most USB storage devices. | 21 | (BLK_DEV_SD) for most USB storage devices. Some devices also |
22 | will require 'Probe all LUNs on each SCSI device' | ||
23 | (SCSI_MULTI_LUN). | ||
22 | 24 | ||
23 | To compile this driver as a module, choose M here: the | 25 | To compile this driver as a module, choose M here: the |
24 | module will be called usb-storage. | 26 | module will be called usb-storage. |
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 18509e6c21ab..9d38ddc8da49 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c | |||
@@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host) | |||
78 | 78 | ||
79 | static int slave_alloc (struct scsi_device *sdev) | 79 | static int slave_alloc (struct scsi_device *sdev) |
80 | { | 80 | { |
81 | struct us_data *us = host_to_us(sdev->host); | ||
82 | |||
81 | /* | 83 | /* |
82 | * Set the INQUIRY transfer length to 36. We don't use any of | 84 | * Set the INQUIRY transfer length to 36. We don't use any of |
83 | * the extra data and many devices choke if asked for more or | 85 | * the extra data and many devices choke if asked for more or |
@@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev) | |||
102 | */ | 104 | */ |
103 | blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); | 105 | blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); |
104 | 106 | ||
107 | /* Tell the SCSI layer if we know there is more than one LUN */ | ||
108 | if (us->protocol == USB_PR_BULK && us->max_lun > 0) | ||
109 | sdev->sdev_bflags |= BLIST_FORCELUN; | ||
110 | |||
105 | return 0; | 111 | return 0; |
106 | } | 112 | } |
107 | 113 | ||
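The slave_alloc() change above sets BLIST_FORCELUN when a bulk-only device reports max_lun > 0, so the SCSI layer probes every LUN (the Kconfig hunk earlier points users at SCSI_MULTI_LUN for the same reason). A tiny sketch of that decision with stand-in constants:

    /* Hypothetical sketch of the "probe all LUNs" decision; the enum
     * values are stand-ins, not the kernel's USB_PR_* constants. */
    #include <stdio.h>
    #include <stdbool.h>

    enum proto { PR_CB, PR_CBI, PR_BULK };

    /* Bulk-only devices report their highest LUN; a non-zero value means
     * more than one LUN exists and all of them should be probed. */
    static bool force_lun_probe(enum proto protocol, unsigned int max_lun)
    {
        return protocol == PR_BULK && max_lun > 0;
    }

    int main(void)
    {
        printf("bulk, max_lun=3 -> %d\n", force_lun_probe(PR_BULK, 3));
        printf("bulk, max_lun=0 -> %d\n", force_lun_probe(PR_BULK, 0));
        printf("cbi,  max_lun=3 -> %d\n", force_lun_probe(PR_CBI, 3));
        return 0;
    }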
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h index 65a6a75066a8..82e8ed0324e3 100644 --- a/drivers/usb/storage/unusual_cypress.h +++ b/drivers/usb/storage/unusual_cypress.h | |||
@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999, | |||
31 | "Cypress ISD-300LP", | 31 | "Cypress ISD-300LP", |
32 | USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), | 32 | USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), |
33 | 33 | ||
34 | UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219, | 34 | UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160, |
35 | "Super Top", | 35 | "Super Top", |
36 | "USB 2.0 SATA BRIDGE", | 36 | "USB 2.0 SATA BRIDGE", |
37 | USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), | 37 | USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index ad06255c2ade..adbeb255616a 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -1455,6 +1455,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100, | |||
1455 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 1455 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
1456 | US_FL_FIX_CAPACITY ), | 1456 | US_FL_FIX_CAPACITY ), |
1457 | 1457 | ||
1458 | /* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */ | ||
1459 | UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201, | ||
1460 | "Research In Motion", | ||
1461 | "BlackBerry Bold 9000", | ||
1462 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
1463 | US_FL_MAX_SECTORS_64 ), | ||
1464 | |||
1458 | /* Reported by Michael Stattmann <michael@stattmann.com> */ | 1465 | /* Reported by Michael Stattmann <michael@stattmann.com> */ |
1459 | UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, | 1466 | UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, |
1460 | "Sony Ericsson", | 1467 | "Sony Ericsson", |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 9a68409580d5..a0fa5de210cf 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -70,7 +70,12 @@ enum { | |||
70 | }; | 70 | }; |
71 | 71 | ||
72 | struct vhost_net_ubuf_ref { | 72 | struct vhost_net_ubuf_ref { |
73 | struct kref kref; | 73 | /* refcount follows semantics similar to kref: |
74 | * 0: object is released | ||
75 | * 1: no outstanding ubufs | ||
76 | * >1: outstanding ubufs | ||
77 | */ | ||
78 | atomic_t refcount; | ||
74 | wait_queue_head_t wait; | 79 | wait_queue_head_t wait; |
75 | struct vhost_virtqueue *vq; | 80 | struct vhost_virtqueue *vq; |
76 | }; | 81 | }; |
@@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq) | |||
116 | vhost_net_zcopy_mask |= 0x1 << vq; | 121 | vhost_net_zcopy_mask |= 0x1 << vq; |
117 | } | 122 | } |
118 | 123 | ||
119 | static void vhost_net_zerocopy_done_signal(struct kref *kref) | ||
120 | { | ||
121 | struct vhost_net_ubuf_ref *ubufs; | ||
122 | |||
123 | ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref); | ||
124 | wake_up(&ubufs->wait); | ||
125 | } | ||
126 | |||
127 | static struct vhost_net_ubuf_ref * | 124 | static struct vhost_net_ubuf_ref * |
128 | vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) | 125 | vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) |
129 | { | 126 | { |
@@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) | |||
134 | ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL); | 131 | ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL); |
135 | if (!ubufs) | 132 | if (!ubufs) |
136 | return ERR_PTR(-ENOMEM); | 133 | return ERR_PTR(-ENOMEM); |
137 | kref_init(&ubufs->kref); | 134 | atomic_set(&ubufs->refcount, 1); |
138 | init_waitqueue_head(&ubufs->wait); | 135 | init_waitqueue_head(&ubufs->wait); |
139 | ubufs->vq = vq; | 136 | ubufs->vq = vq; |
140 | return ubufs; | 137 | return ubufs; |
141 | } | 138 | } |
142 | 139 | ||
143 | static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) | 140 | static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) |
144 | { | 141 | { |
145 | kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); | 142 | int r = atomic_sub_return(1, &ubufs->refcount); |
143 | if (unlikely(!r)) | ||
144 | wake_up(&ubufs->wait); | ||
145 | return r; | ||
146 | } | 146 | } |
147 | 147 | ||
148 | static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) | 148 | static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) |
149 | { | 149 | { |
150 | kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); | 150 | vhost_net_ubuf_put(ubufs); |
151 | wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount)); | 151 | wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); |
152 | } | 152 | } |
153 | 153 | ||
154 | static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) | 154 | static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) |
@@ -306,23 +306,26 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success) | |||
306 | { | 306 | { |
307 | struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; | 307 | struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; |
308 | struct vhost_virtqueue *vq = ubufs->vq; | 308 | struct vhost_virtqueue *vq = ubufs->vq; |
309 | int cnt = atomic_read(&ubufs->kref.refcount); | 309 | int cnt; |
310 | |||
311 | rcu_read_lock_bh(); | ||
310 | 312 | ||
311 | /* set len to mark this desc buffers done DMA */ | 313 | /* set len to mark this desc buffers done DMA */ |
312 | vq->heads[ubuf->desc].len = success ? | 314 | vq->heads[ubuf->desc].len = success ? |
313 | VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; | 315 | VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; |
314 | vhost_net_ubuf_put(ubufs); | 316 | cnt = vhost_net_ubuf_put(ubufs); |
315 | 317 | ||
316 | /* | 318 | /* |
317 | * Trigger polling thread if guest stopped submitting new buffers: | 319 | * Trigger polling thread if guest stopped submitting new buffers: |
318 | * in this case, the refcount after decrement will eventually reach 1 | 320 | * in this case, the refcount after decrement will eventually reach 1. |
319 | * so here it is 2. | ||
320 | * We also trigger polling periodically after each 16 packets | 321 | * We also trigger polling periodically after each 16 packets |
321 | * (the value 16 here is more or less arbitrary, it's tuned to trigger | 322 | * (the value 16 here is more or less arbitrary, it's tuned to trigger |
322 | * less than 10% of times). | 323 | * less than 10% of times). |
323 | */ | 324 | */ |
324 | if (cnt <= 2 || !(cnt % 16)) | 325 | if (cnt <= 1 || !(cnt % 16)) |
325 | vhost_poll_queue(&vq->poll); | 326 | vhost_poll_queue(&vq->poll); |
327 | |||
328 | rcu_read_unlock_bh(); | ||
326 | } | 329 | } |
327 | 330 | ||
328 | /* Expects to be always run from workqueue - which acts as | 331 | /* Expects to be always run from workqueue - which acts as |
@@ -420,7 +423,7 @@ static void handle_tx(struct vhost_net *net) | |||
420 | msg.msg_control = ubuf; | 423 | msg.msg_control = ubuf; |
421 | msg.msg_controllen = sizeof(ubuf); | 424 | msg.msg_controllen = sizeof(ubuf); |
422 | ubufs = nvq->ubufs; | 425 | ubufs = nvq->ubufs; |
423 | kref_get(&ubufs->kref); | 426 | atomic_inc(&ubufs->refcount); |
424 | nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; | 427 | nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; |
425 | } else { | 428 | } else { |
426 | msg.msg_control = NULL; | 429 | msg.msg_control = NULL; |
@@ -780,7 +783,7 @@ static void vhost_net_flush(struct vhost_net *n) | |||
780 | vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); | 783 | vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); |
781 | mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); | 784 | mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); |
782 | n->tx_flush = false; | 785 | n->tx_flush = false; |
783 | kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref); | 786 | atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); |
784 | mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); | 787 | mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); |
785 | } | 788 | } |
786 | } | 789 | } |
@@ -800,6 +803,8 @@ static int vhost_net_release(struct inode *inode, struct file *f) | |||
800 | fput(tx_sock->file); | 803 | fput(tx_sock->file); |
801 | if (rx_sock) | 804 | if (rx_sock) |
802 | fput(rx_sock->file); | 805 | fput(rx_sock->file); |
806 | /* Make sure no callbacks are outstanding */ | ||
807 | synchronize_rcu_bh(); | ||
803 | /* We do an extra flush before freeing memory, | 808 | /* We do an extra flush before freeing memory, |
804 | * since jobs can re-queue themselves. */ | 809 | * since jobs can re-queue themselves. */ |
805 | vhost_net_flush(n); | 810 | vhost_net_flush(n); |
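The vhost-net hunks above replace the kref with a bare atomic counter whose value carries meaning: 0 means released, 1 means no outstanding zero-copy buffers, and >1 means buffers are still in flight; put() wakes waiters only when the count reaches zero, and the callback now tests cnt <= 1 because it reads the value after the decrement rather than before. A userspace model of that counter convention using C11 atomics (no wait queue, just the arithmetic):

    /* Userspace model of the counter convention with C11 atomics:
     * 1 = no outstanding buffers, >1 = buffers in flight, 0 = quiesced.
     * The real code pairs this with a wait queue; only the arithmetic
     * is shown here. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int refcount;

    static void ubuf_get(void)
    {
        atomic_fetch_add(&refcount, 1);
    }

    static int ubuf_put(void)
    {
        int r = atomic_fetch_sub(&refcount, 1) - 1;

        if (r == 0)
            printf("count reached 0: wake up waiters\n");
        return r;
    }

    int main(void)
    {
        atomic_store(&refcount, 1);     /* base reference, no ubufs yet */

        ubuf_get();                     /* two zero-copy sends in flight */
        ubuf_get();

        printf("after completion 1: %d\n", ubuf_put());  /* 2 */
        printf("after completion 2: %d\n", ubuf_put());  /* 1 */
        printf("after final put:    %d\n", ubuf_put());  /* 0 */
        return 0;
    }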
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 22262a3a0e2d..dade5b7699bc 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
@@ -364,7 +364,7 @@ config FB_SA1100 | |||
364 | 364 | ||
365 | config FB_IMX | 365 | config FB_IMX |
366 | tristate "Freescale i.MX1/21/25/27 LCD support" | 366 | tristate "Freescale i.MX1/21/25/27 LCD support" |
367 | depends on FB && IMX_HAVE_PLATFORM_IMX_FB | 367 | depends on FB && ARCH_MXC |
368 | select FB_CFB_FILLRECT | 368 | select FB_CFB_FILLRECT |
369 | select FB_CFB_COPYAREA | 369 | select FB_CFB_COPYAREA |
370 | select FB_CFB_IMAGEBLIT | 370 | select FB_CFB_IMAGEBLIT |
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig index 1129d0e9e640..75c8a8e7efc0 100644 --- a/drivers/video/exynos/Kconfig +++ b/drivers/video/exynos/Kconfig | |||
@@ -22,7 +22,8 @@ config EXYNOS_MIPI_DSI | |||
22 | 22 | ||
23 | config EXYNOS_LCD_S6E8AX0 | 23 | config EXYNOS_LCD_S6E8AX0 |
24 | bool "S6E8AX0 MIPI AMOLED LCD Driver" | 24 | bool "S6E8AX0 MIPI AMOLED LCD Driver" |
25 | depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE) | 25 | depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE |
26 | depends on (LCD_CLASS_DEVICE = y) | ||
26 | default n | 27 | default n |
27 | help | 28 | help |
28 | If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its | 29 | If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its |
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index bbeb8dd7f108..77d6221618f4 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c | |||
@@ -2160,8 +2160,8 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk, | |||
2160 | *five_taps = false; | 2160 | *five_taps = false; |
2161 | 2161 | ||
2162 | do { | 2162 | do { |
2163 | in_height = DIV_ROUND_UP(height, *decim_y); | 2163 | in_height = height / *decim_y; |
2164 | in_width = DIV_ROUND_UP(width, *decim_x); | 2164 | in_width = width / *decim_x; |
2165 | *core_clk = dispc.feat->calc_core_clk(pclk, in_width, | 2165 | *core_clk = dispc.feat->calc_core_clk(pclk, in_width, |
2166 | in_height, out_width, out_height, mem_to_mem); | 2166 | in_height, out_width, out_height, mem_to_mem); |
2167 | error = (in_width > maxsinglelinewidth || !*core_clk || | 2167 | error = (in_width > maxsinglelinewidth || !*core_clk || |
@@ -2199,8 +2199,8 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk, | |||
2199 | dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); | 2199 | dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); |
2200 | 2200 | ||
2201 | do { | 2201 | do { |
2202 | in_height = DIV_ROUND_UP(height, *decim_y); | 2202 | in_height = height / *decim_y; |
2203 | in_width = DIV_ROUND_UP(width, *decim_x); | 2203 | in_width = width / *decim_x; |
2204 | *five_taps = in_height > out_height; | 2204 | *five_taps = in_height > out_height; |
2205 | 2205 | ||
2206 | if (in_width > maxsinglelinewidth) | 2206 | if (in_width > maxsinglelinewidth) |
@@ -2268,7 +2268,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk, | |||
2268 | { | 2268 | { |
2269 | u16 in_width, in_width_max; | 2269 | u16 in_width, in_width_max; |
2270 | int decim_x_min = *decim_x; | 2270 | int decim_x_min = *decim_x; |
2271 | u16 in_height = DIV_ROUND_UP(height, *decim_y); | 2271 | u16 in_height = height / *decim_y; |
2272 | const int maxsinglelinewidth = | 2272 | const int maxsinglelinewidth = |
2273 | dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); | 2273 | dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); |
2274 | const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); | 2274 | const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); |
@@ -2287,7 +2287,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk, | |||
2287 | return -EINVAL; | 2287 | return -EINVAL; |
2288 | 2288 | ||
2289 | do { | 2289 | do { |
2290 | in_width = DIV_ROUND_UP(width, *decim_x); | 2290 | in_width = width / *decim_x; |
2291 | } while (*decim_x <= *x_predecim && | 2291 | } while (*decim_x <= *x_predecim && |
2292 | in_width > maxsinglelinewidth && ++*decim_x); | 2292 | in_width > maxsinglelinewidth && ++*decim_x); |
2293 | 2293 | ||
@@ -2466,8 +2466,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane, | |||
2466 | if (r) | 2466 | if (r) |
2467 | return r; | 2467 | return r; |
2468 | 2468 | ||
2469 | in_width = DIV_ROUND_UP(in_width, x_predecim); | 2469 | in_width = in_width / x_predecim; |
2470 | in_height = DIV_ROUND_UP(in_height, y_predecim); | 2470 | in_height = in_height / y_predecim; |
2471 | 2471 | ||
2472 | if (color_mode == OMAP_DSS_COLOR_YUV2 || | 2472 | if (color_mode == OMAP_DSS_COLOR_YUV2 || |
2473 | color_mode == OMAP_DSS_COLOR_UYVY || | 2473 | color_mode == OMAP_DSS_COLOR_UYVY || |
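The dispc hunks above switch the predecimated input size from DIV_ROUND_UP() to plain truncating division, since decimation by N yields floor(size/N) pixels. A two-line example of the numeric difference, with arbitrary sample values:

    /* Round-up vs truncating division for a predecimated size; the
     * sample numbers are arbitrary. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int width = 1283, decim = 4;

        printf("DIV_ROUND_UP(%u, %u) = %u\n", width, decim,
               DIV_ROUND_UP(width, decim));                     /* 321 */
        printf("%u / %u = %u\n", width, decim, width / decim);  /* 320 */
        return 0;
    }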
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index 7411f2674e16..23ef21ffc2c4 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c | |||
@@ -117,7 +117,7 @@ struct dpi_clk_calc_ctx { | |||
117 | /* outputs */ | 117 | /* outputs */ |
118 | 118 | ||
119 | struct dsi_clock_info dsi_cinfo; | 119 | struct dsi_clock_info dsi_cinfo; |
120 | unsigned long long fck; | 120 | unsigned long fck; |
121 | struct dispc_clock_info dispc_cinfo; | 121 | struct dispc_clock_info dispc_cinfo; |
122 | }; | 122 | }; |
123 | 123 | ||
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index efb9ee9e3c96..ba806c9e7f54 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c | |||
@@ -46,7 +46,7 @@ static struct { | |||
46 | struct sdi_clk_calc_ctx { | 46 | struct sdi_clk_calc_ctx { |
47 | unsigned long pck_min, pck_max; | 47 | unsigned long pck_min, pck_max; |
48 | 48 | ||
49 | unsigned long long fck; | 49 | unsigned long fck; |
50 | struct dispc_clock_info dispc_cinfo; | 50 | struct dispc_clock_info dispc_cinfo; |
51 | }; | 51 | }; |
52 | 52 | ||
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index a06edbfa95ca..1b5d48c578e1 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c | |||
@@ -884,7 +884,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image, | |||
884 | if (done == count) | 884 | if (done == count) |
885 | goto out; | 885 | goto out; |
886 | } | 886 | } |
887 | if ((uintptr_t)addr & 0x2) { | 887 | if ((uintptr_t)(addr + done) & 0x2) { |
888 | if ((count - done) < 2) { | 888 | if ((count - done) < 2) { |
889 | *(u8 *)(buf + done) = ioread8(addr + done); | 889 | *(u8 *)(buf + done) = ioread8(addr + done); |
890 | done += 1; | 890 | done += 1; |
@@ -938,7 +938,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image, | |||
938 | if (done == count) | 938 | if (done == count) |
939 | goto out; | 939 | goto out; |
940 | } | 940 | } |
941 | if ((uintptr_t)addr & 0x2) { | 941 | if ((uintptr_t)(addr + done) & 0x2) { |
942 | if ((count - done) < 2) { | 942 | if ((count - done) < 2) { |
943 | iowrite8(*(u8 *)(buf + done), addr + done); | 943 | iowrite8(*(u8 *)(buf + done), addr + done); |
944 | done += 1; | 944 | done += 1; |
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index 16830d8b777c..9911cd5fddb5 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c | |||
@@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf, | |||
1289 | if (done == count) | 1289 | if (done == count) |
1290 | goto out; | 1290 | goto out; |
1291 | } | 1291 | } |
1292 | if ((uintptr_t)addr & 0x2) { | 1292 | if ((uintptr_t)(addr + done) & 0x2) { |
1293 | if ((count - done) < 2) { | 1293 | if ((count - done) < 2) { |
1294 | *(u8 *)(buf + done) = ioread8(addr + done); | 1294 | *(u8 *)(buf + done) = ioread8(addr + done); |
1295 | done += 1; | 1295 | done += 1; |
@@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, | |||
1371 | if (done == count) | 1371 | if (done == count) |
1372 | goto out; | 1372 | goto out; |
1373 | } | 1373 | } |
1374 | if ((uintptr_t)addr & 0x2) { | 1374 | if ((uintptr_t)(addr + done) & 0x2) { |
1375 | if ((count - done) < 2) { | 1375 | if ((count - done) < 2) { |
1376 | iowrite8(*(u8 *)(buf + done), addr + done); | 1376 | iowrite8(*(u8 *)(buf + done), addr + done); |
1377 | done += 1; | 1377 | done += 1; |
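Both VME bridge fixes above test the 2-byte alignment of the current transfer position (addr + done) instead of the base address, so the byte/word/dword fallback is chosen correctly once part of the buffer has already been copied. A userspace sketch of that width selection (plain integers, no MMIO):

    /* Userspace sketch: pick the widest safe access at base + done,
     * not at the base address alone. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static unsigned int access_width(uintptr_t base, size_t done, size_t count)
    {
        uintptr_t pos = base + done;
        size_t left = count - done;

        if ((pos & 0x1) || left < 2)
            return 1;                   /* byte access */
        if ((pos & 0x2) || left < 4)
            return 2;                   /* 16-bit access */
        return 4;                       /* 32-bit access */
    }

    int main(void)
    {
        /* base is 4-byte aligned, but after one byte the position is not */
        printf("width at done=0: %u\n", access_width(0x1000, 0, 16));
        printf("width at done=1: %u\n", access_width(0x1000, 1, 16));
        printf("width at done=2: %u\n", access_width(0x1000, 2, 16));
        return 0;
    }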
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index d75c811bfa56..45e00afa7f2d 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -16,7 +16,6 @@ xen-pad-$(CONFIG_X86) += xen-acpi-pad.o | |||
16 | dom0-$(CONFIG_X86) += pcpu.o | 16 | dom0-$(CONFIG_X86) += pcpu.o |
17 | obj-$(CONFIG_XEN_DOM0) += $(dom0-y) | 17 | obj-$(CONFIG_XEN_DOM0) += $(dom0-y) |
18 | obj-$(CONFIG_BLOCK) += biomerge.o | 18 | obj-$(CONFIG_BLOCK) += biomerge.o |
19 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o | ||
20 | obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o | 19 | obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o |
21 | obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o | 20 | obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o |
22 | obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o | 21 | obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 4672e003c0ad..f4a9e3311297 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -862,6 +862,8 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
862 | irq = ret; | 862 | irq = ret; |
863 | goto out; | 863 | goto out; |
864 | } | 864 | } |
865 | /* New interdomain events are bound to VCPU 0. */ | ||
866 | bind_evtchn_to_cpu(evtchn, 0); | ||
865 | } else { | 867 | } else { |
866 | struct irq_info *info = info_for_irq(irq); | 868 | struct irq_info *info = info_for_irq(irq); |
867 | WARN_ON(info == NULL || info->type != IRQT_EVTCHN); | 869 | WARN_ON(info == NULL || info->type != IRQT_EVTCHN); |
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c deleted file mode 100644 index 4793fc594549..000000000000 --- a/drivers/xen/xencomm.c +++ /dev/null | |||
@@ -1,219 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
15 | * | ||
16 | * Copyright (C) IBM Corp. 2006 | ||
17 | * | ||
18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
19 | */ | ||
20 | |||
21 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
22 | |||
23 | #include <linux/mm.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <asm/page.h> | ||
26 | #include <xen/xencomm.h> | ||
27 | #include <xen/interface/xen.h> | ||
28 | #include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */ | ||
29 | |||
30 | static int xencomm_init(struct xencomm_desc *desc, | ||
31 | void *buffer, unsigned long bytes) | ||
32 | { | ||
33 | unsigned long recorded = 0; | ||
34 | int i = 0; | ||
35 | |||
36 | while ((recorded < bytes) && (i < desc->nr_addrs)) { | ||
37 | unsigned long vaddr = (unsigned long)buffer + recorded; | ||
38 | unsigned long paddr; | ||
39 | int offset; | ||
40 | int chunksz; | ||
41 | |||
42 | offset = vaddr % PAGE_SIZE; /* handle partial pages */ | ||
43 | chunksz = min(PAGE_SIZE - offset, bytes - recorded); | ||
44 | |||
45 | paddr = xencomm_vtop(vaddr); | ||
46 | if (paddr == ~0UL) { | ||
47 | printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n", | ||
48 | __func__, vaddr); | ||
49 | return -EINVAL; | ||
50 | } | ||
51 | |||
52 | desc->address[i++] = paddr; | ||
53 | recorded += chunksz; | ||
54 | } | ||
55 | |||
56 | if (recorded < bytes) { | ||
57 | printk(KERN_DEBUG | ||
58 | "%s: could only translate %ld of %ld bytes\n", | ||
59 | __func__, recorded, bytes); | ||
60 | return -ENOSPC; | ||
61 | } | ||
62 | |||
63 | /* mark remaining addresses invalid (just for safety) */ | ||
64 | while (i < desc->nr_addrs) | ||
65 | desc->address[i++] = XENCOMM_INVALID; | ||
66 | |||
67 | desc->magic = XENCOMM_MAGIC; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask, | ||
73 | void *buffer, unsigned long bytes) | ||
74 | { | ||
75 | struct xencomm_desc *desc; | ||
76 | unsigned long buffer_ulong = (unsigned long)buffer; | ||
77 | unsigned long start = buffer_ulong & PAGE_MASK; | ||
78 | unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK; | ||
79 | unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT; | ||
80 | unsigned long size = sizeof(*desc) + | ||
81 | sizeof(desc->address[0]) * nr_addrs; | ||
82 | |||
83 | /* | ||
84 | * slab allocator returns at least sizeof(void*) aligned pointer. | ||
85 | * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might | ||
86 | * cross page boundary. | ||
87 | */ | ||
88 | if (sizeof(*desc) > sizeof(void *)) { | ||
89 | unsigned long order = get_order(size); | ||
90 | desc = (struct xencomm_desc *)__get_free_pages(gfp_mask, | ||
91 | order); | ||
92 | if (desc == NULL) | ||
93 | return NULL; | ||
94 | |||
95 | desc->nr_addrs = | ||
96 | ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) / | ||
97 | sizeof(*desc->address); | ||
98 | } else { | ||
99 | desc = kmalloc(size, gfp_mask); | ||
100 | if (desc == NULL) | ||
101 | return NULL; | ||
102 | |||
103 | desc->nr_addrs = nr_addrs; | ||
104 | } | ||
105 | return desc; | ||
106 | } | ||
107 | |||
108 | void xencomm_free(struct xencomm_handle *desc) | ||
109 | { | ||
110 | if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) { | ||
111 | struct xencomm_desc *desc__ = (struct xencomm_desc *)desc; | ||
112 | if (sizeof(*desc__) > sizeof(void *)) { | ||
113 | unsigned long size = sizeof(*desc__) + | ||
114 | sizeof(desc__->address[0]) * desc__->nr_addrs; | ||
115 | unsigned long order = get_order(size); | ||
116 | free_pages((unsigned long)__va(desc), order); | ||
117 | } else | ||
118 | kfree(__va(desc)); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static int xencomm_create(void *buffer, unsigned long bytes, | ||
123 | struct xencomm_desc **ret, gfp_t gfp_mask) | ||
124 | { | ||
125 | struct xencomm_desc *desc; | ||
126 | int rc; | ||
127 | |||
128 | pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes); | ||
129 | |||
130 | if (bytes == 0) { | ||
131 | /* don't create a descriptor; Xen recognizes NULL. */ | ||
132 | BUG_ON(buffer != NULL); | ||
133 | *ret = NULL; | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | BUG_ON(buffer == NULL); /* 'bytes' is non-zero */ | ||
138 | |||
139 | desc = xencomm_alloc(gfp_mask, buffer, bytes); | ||
140 | if (!desc) { | ||
141 | printk(KERN_DEBUG "%s failure\n", "xencomm_alloc"); | ||
142 | return -ENOMEM; | ||
143 | } | ||
144 | |||
145 | rc = xencomm_init(desc, buffer, bytes); | ||
146 | if (rc) { | ||
147 | printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc); | ||
148 | xencomm_free((struct xencomm_handle *)__pa(desc)); | ||
149 | return rc; | ||
150 | } | ||
151 | |||
152 | *ret = desc; | ||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static struct xencomm_handle *xencomm_create_inline(void *ptr) | ||
157 | { | ||
158 | unsigned long paddr; | ||
159 | |||
160 | BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr)); | ||
161 | |||
162 | paddr = (unsigned long)xencomm_pa(ptr); | ||
163 | BUG_ON(paddr & XENCOMM_INLINE_FLAG); | ||
164 | return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG); | ||
165 | } | ||
166 | |||
167 | /* "mini" routine, for stack-based communications: */ | ||
168 | static int xencomm_create_mini(void *buffer, | ||
169 | unsigned long bytes, struct xencomm_mini *xc_desc, | ||
170 | struct xencomm_desc **ret) | ||
171 | { | ||
172 | int rc = 0; | ||
173 | struct xencomm_desc *desc; | ||
174 | BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0); | ||
175 | |||
176 | desc = (void *)xc_desc; | ||
177 | |||
178 | desc->nr_addrs = XENCOMM_MINI_ADDRS; | ||
179 | |||
180 | rc = xencomm_init(desc, buffer, bytes); | ||
181 | if (!rc) | ||
182 | *ret = desc; | ||
183 | |||
184 | return rc; | ||
185 | } | ||
186 | |||
187 | struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes) | ||
188 | { | ||
189 | int rc; | ||
190 | struct xencomm_desc *desc; | ||
191 | |||
192 | if (xencomm_is_phys_contiguous((unsigned long)ptr)) | ||
193 | return xencomm_create_inline(ptr); | ||
194 | |||
195 | rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL); | ||
196 | |||
197 | if (rc || desc == NULL) | ||
198 | return NULL; | ||
199 | |||
200 | return xencomm_pa(desc); | ||
201 | } | ||
202 | |||
203 | struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, | ||
204 | struct xencomm_mini *xc_desc) | ||
205 | { | ||
206 | int rc; | ||
207 | struct xencomm_desc *desc = NULL; | ||
208 | |||
209 | if (xencomm_is_phys_contiguous((unsigned long)ptr)) | ||
210 | return xencomm_create_inline(ptr); | ||
211 | |||
212 | rc = xencomm_create_mini(ptr, bytes, xc_desc, | ||
213 | &desc); | ||
214 | |||
215 | if (rc) | ||
216 | return NULL; | ||
217 | |||
218 | return xencomm_pa(desc); | ||
219 | } | ||
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 0bad24ddc2e7..0129b78a6908 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c | |||
@@ -114,6 +114,14 @@ void bio_integrity_free(struct bio *bio) | |||
114 | } | 114 | } |
115 | EXPORT_SYMBOL(bio_integrity_free); | 115 | EXPORT_SYMBOL(bio_integrity_free); |
116 | 116 | ||
117 | static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip) | ||
118 | { | ||
119 | if (bip->bip_slab == BIO_POOL_NONE) | ||
120 | return BIP_INLINE_VECS; | ||
121 | |||
122 | return bvec_nr_vecs(bip->bip_slab); | ||
123 | } | ||
124 | |||
117 | /** | 125 | /** |
118 | * bio_integrity_add_page - Attach integrity metadata | 126 | * bio_integrity_add_page - Attach integrity metadata |
119 | * @bio: bio to update | 127 | * @bio: bio to update |
@@ -129,7 +137,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, | |||
129 | struct bio_integrity_payload *bip = bio->bi_integrity; | 137 | struct bio_integrity_payload *bip = bio->bi_integrity; |
130 | struct bio_vec *iv; | 138 | struct bio_vec *iv; |
131 | 139 | ||
132 | if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) { | 140 | if (bip->bip_vcnt >= bip_integrity_vecs(bip)) { |
133 | printk(KERN_ERR "%s: bip_vec full\n", __func__); | 141 | printk(KERN_ERR "%s: bip_vec full\n", __func__); |
134 | return 0; | 142 | return 0; |
135 | } | 143 | } |
@@ -226,7 +234,8 @@ unsigned int bio_integrity_tag_size(struct bio *bio) | |||
226 | } | 234 | } |
227 | EXPORT_SYMBOL(bio_integrity_tag_size); | 235 | EXPORT_SYMBOL(bio_integrity_tag_size); |
228 | 236 | ||
229 | int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set) | 237 | static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, |
238 | int set) | ||
230 | { | 239 | { |
231 | struct bio_integrity_payload *bip = bio->bi_integrity; | 240 | struct bio_integrity_payload *bip = bio->bi_integrity; |
232 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); | 241 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -611,7 +611,6 @@ EXPORT_SYMBOL(bio_clone_fast); | |||
611 | struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, | 611 | struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, |
612 | struct bio_set *bs) | 612 | struct bio_set *bs) |
613 | { | 613 | { |
614 | unsigned nr_iovecs = 0; | ||
615 | struct bvec_iter iter; | 614 | struct bvec_iter iter; |
616 | struct bio_vec bv; | 615 | struct bio_vec bv; |
617 | struct bio *bio; | 616 | struct bio *bio; |
@@ -638,10 +637,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, | |||
638 | * __bio_clone_fast() anyways. | 637 | * __bio_clone_fast() anyways. |
639 | */ | 638 | */ |
640 | 639 | ||
641 | bio_for_each_segment(bv, bio_src, iter) | 640 | bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); |
642 | nr_iovecs++; | ||
643 | |||
644 | bio = bio_alloc_bioset(gfp_mask, nr_iovecs, bs); | ||
645 | if (!bio) | 641 | if (!bio) |
646 | return NULL; | 642 | return NULL; |
647 | 643 | ||
@@ -650,9 +646,18 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, | |||
650 | bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; | 646 | bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; |
651 | bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; | 647 | bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; |
652 | 648 | ||
649 | if (bio->bi_rw & REQ_DISCARD) | ||
650 | goto integrity_clone; | ||
651 | |||
652 | if (bio->bi_rw & REQ_WRITE_SAME) { | ||
653 | bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; | ||
654 | goto integrity_clone; | ||
655 | } | ||
656 | |||
653 | bio_for_each_segment(bv, bio_src, iter) | 657 | bio_for_each_segment(bv, bio_src, iter) |
654 | bio->bi_io_vec[bio->bi_vcnt++] = bv; | 658 | bio->bi_io_vec[bio->bi_vcnt++] = bv; |
655 | 659 | ||
660 | integrity_clone: | ||
656 | if (bio_integrity(bio_src)) { | 661 | if (bio_integrity(bio_src)) { |
657 | int ret; | 662 | int ret; |
658 | 663 | ||
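The bio_clone_bioset() change above sizes the clone with bio_segments() and then special-cases the payload copy: discard bios carry no data segments and write-same bios carry exactly one. A small sketch of that segment-count decision, with made-up flag bits:

    /* How many biovecs a clone needs to copy, mirroring the special
     * cases above; the flag bits are made up for the sketch. */
    #include <stdio.h>

    #define REQ_DISCARD     (1u << 0)
    #define REQ_WRITE_SAME  (1u << 1)

    static unsigned int segments_to_copy(unsigned int rw_flags,
                                         unsigned int src_segments)
    {
        if (rw_flags & REQ_DISCARD)
            return 0;                   /* no payload to copy */
        if (rw_flags & REQ_WRITE_SAME)
            return 1;                   /* one repeated buffer */
        return src_segments;            /* ordinary data bio */
    }

    int main(void)
    {
        printf("discard:    %u\n", segments_to_copy(REQ_DISCARD, 8));
        printf("write-same: %u\n", segments_to_copy(REQ_WRITE_SAME, 8));
        printf("normal:     %u\n", segments_to_copy(0, 8));
        return 0;
    }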
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5215f04260b2..81ea55314b1f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -3839,7 +3839,6 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, | |||
3839 | rb_erase(&ref->rb_node, &head->ref_root); | 3839 | rb_erase(&ref->rb_node, &head->ref_root); |
3840 | atomic_dec(&delayed_refs->num_entries); | 3840 | atomic_dec(&delayed_refs->num_entries); |
3841 | btrfs_put_delayed_ref(ref); | 3841 | btrfs_put_delayed_ref(ref); |
3842 | cond_resched_lock(&head->lock); | ||
3843 | } | 3842 | } |
3844 | if (head->must_insert_reserved) | 3843 | if (head->must_insert_reserved) |
3845 | pin_bytes = true; | 3844 | pin_bytes = true; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 184e9cb39647..d3d44486290b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -5154,7 +5154,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, | |||
5154 | return ERR_CAST(inode); | 5154 | return ERR_CAST(inode); |
5155 | } | 5155 | } |
5156 | 5156 | ||
5157 | return d_splice_alias(inode, dentry); | 5157 | return d_materialise_unique(dentry, inode); |
5158 | } | 5158 | } |
5159 | 5159 | ||
5160 | unsigned char btrfs_filetype_table[] = { | 5160 | unsigned char btrfs_filetype_table[] = { |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 383ab455bfa7..a6d8efa46bfe 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -3537,20 +3537,6 @@ out: | |||
3537 | return ret; | 3537 | return ret; |
3538 | } | 3538 | } |
3539 | 3539 | ||
3540 | static long btrfs_ioctl_global_rsv(struct btrfs_root *root, void __user *arg) | ||
3541 | { | ||
3542 | struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv; | ||
3543 | u64 reserved; | ||
3544 | |||
3545 | spin_lock(&block_rsv->lock); | ||
3546 | reserved = block_rsv->reserved; | ||
3547 | spin_unlock(&block_rsv->lock); | ||
3548 | |||
3549 | if (arg && copy_to_user(arg, &reserved, sizeof(reserved))) | ||
3550 | return -EFAULT; | ||
3551 | return 0; | ||
3552 | } | ||
3553 | |||
3554 | /* | 3540 | /* |
3555 | * there are many ways the trans_start and trans_end ioctls can lead | 3541 | * there are many ways the trans_start and trans_end ioctls can lead |
3556 | * to deadlocks. They should only be used by applications that | 3542 | * to deadlocks. They should only be used by applications that |
@@ -4757,8 +4743,6 @@ long btrfs_ioctl(struct file *file, unsigned int | |||
4757 | return btrfs_ioctl_logical_to_ino(root, argp); | 4743 | return btrfs_ioctl_logical_to_ino(root, argp); |
4758 | case BTRFS_IOC_SPACE_INFO: | 4744 | case BTRFS_IOC_SPACE_INFO: |
4759 | return btrfs_ioctl_space_info(root, argp); | 4745 | return btrfs_ioctl_space_info(root, argp); |
4760 | case BTRFS_IOC_GLOBAL_RSV: | ||
4761 | return btrfs_ioctl_global_rsv(root, argp); | ||
4762 | case BTRFS_IOC_SYNC: { | 4746 | case BTRFS_IOC_SYNC: { |
4763 | int ret; | 4747 | int ret; |
4764 | 4748 | ||
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 9c8d1a3fdc3a..9dde9717c1b9 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c | |||
@@ -1332,6 +1332,16 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, " | |||
1332 | } | 1332 | } |
1333 | 1333 | ||
1334 | if (cur_clone_root) { | 1334 | if (cur_clone_root) { |
1335 | if (compressed != BTRFS_COMPRESS_NONE) { | ||
1336 | /* | ||
1337 | * Offsets given by iterate_extent_inodes() are relative | ||
1338 | * to the start of the extent, we need to add logical | ||
1339 | * offset from the file extent item. | ||
1340 | * (See why at backref.c:check_extent_in_eb()) | ||
1341 | */ | ||
1342 | cur_clone_root->offset += btrfs_file_extent_offset(eb, | ||
1343 | fi); | ||
1344 | } | ||
1335 | *found = cur_clone_root; | 1345 | *found = cur_clone_root; |
1336 | ret = 0; | 1346 | ret = 0; |
1337 | } else { | 1347 | } else { |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 97cc24198554..d04db817be5c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -566,7 +566,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
566 | kfree(num); | 566 | kfree(num); |
567 | 567 | ||
568 | if (info->max_inline) { | 568 | if (info->max_inline) { |
569 | info->max_inline = max_t(u64, | 569 | info->max_inline = min_t(u64, |
570 | info->max_inline, | 570 | info->max_inline, |
571 | root->sectorsize); | 571 | root->sectorsize); |
572 | } | 572 | } |
@@ -855,6 +855,7 @@ static struct dentry *get_default_root(struct super_block *sb, | |||
855 | struct btrfs_path *path; | 855 | struct btrfs_path *path; |
856 | struct btrfs_key location; | 856 | struct btrfs_key location; |
857 | struct inode *inode; | 857 | struct inode *inode; |
858 | struct dentry *dentry; | ||
858 | u64 dir_id; | 859 | u64 dir_id; |
859 | int new = 0; | 860 | int new = 0; |
860 | 861 | ||
@@ -925,7 +926,13 @@ setup_root: | |||
925 | return dget(sb->s_root); | 926 | return dget(sb->s_root); |
926 | } | 927 | } |
927 | 928 | ||
928 | return d_obtain_alias(inode); | 929 | dentry = d_obtain_alias(inode); |
930 | if (!IS_ERR(dentry)) { | ||
931 | spin_lock(&dentry->d_lock); | ||
932 | dentry->d_flags &= ~DCACHE_DISCONNECTED; | ||
933 | spin_unlock(&dentry->d_lock); | ||
934 | } | ||
935 | return dentry; | ||
929 | } | 936 | } |
930 | 937 | ||
931 | static int btrfs_fill_super(struct super_block *sb, | 938 | static int btrfs_fill_super(struct super_block *sb, |
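The max_inline hunk above swaps max_t() for min_t(): max_t() would raise the requested value to at least the sector size, while the intent of the fix is to cap it at the sector size. A short example showing the difference, with arbitrary numbers:

    /* max_t() raises a value to a floor, min_t() caps it to a ceiling;
     * the numbers here are arbitrary. */
    #include <stdint.h>
    #include <stdio.h>

    #define min_t(t, a, b)  ((t)(a) < (t)(b) ? (t)(a) : (t)(b))
    #define max_t(t, a, b)  ((t)(a) > (t)(b) ? (t)(a) : (t)(b))

    int main(void)
    {
        uint64_t sectorsize = 4096;
        uint64_t requested = 16384;     /* user asked for more than allowed */

        printf("max_t: %llu\n",
               (unsigned long long)max_t(uint64_t, requested, sectorsize));
        printf("min_t: %llu\n",
               (unsigned long long)min_t(uint64_t, requested, sectorsize));
        return 0;
    }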
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 782374d8fd19..865f4cf9a769 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c | |||
@@ -578,8 +578,14 @@ static int add_device_membership(struct btrfs_fs_info *fs_info) | |||
578 | return -ENOMEM; | 578 | return -ENOMEM; |
579 | 579 | ||
580 | list_for_each_entry(dev, &fs_devices->devices, dev_list) { | 580 | list_for_each_entry(dev, &fs_devices->devices, dev_list) { |
581 | struct hd_struct *disk = dev->bdev->bd_part; | 581 | struct hd_struct *disk; |
582 | struct kobject *disk_kobj = &part_to_dev(disk)->kobj; | 582 | struct kobject *disk_kobj; |
583 | |||
584 | if (!dev->bdev) | ||
585 | continue; | ||
586 | |||
587 | disk = dev->bdev->bd_part; | ||
588 | disk_kobj = &part_to_dev(disk)->kobj; | ||
583 | 589 | ||
584 | error = sysfs_create_link(fs_info->device_dir_kobj, | 590 | error = sysfs_create_link(fs_info->device_dir_kobj, |
585 | disk_kobj, disk_kobj->name); | 591 | disk_kobj, disk_kobj->name); |
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index 4c2d452c4bfc..21887d63dad5 100644 --- a/fs/ceph/acl.c +++ b/fs/ceph/acl.c | |||
@@ -54,11 +54,6 @@ static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode, | |||
54 | return acl; | 54 | return acl; |
55 | } | 55 | } |
56 | 56 | ||
57 | void ceph_forget_all_cached_acls(struct inode *inode) | ||
58 | { | ||
59 | forget_all_cached_acls(inode); | ||
60 | } | ||
61 | |||
62 | struct posix_acl *ceph_get_acl(struct inode *inode, int type) | 57 | struct posix_acl *ceph_get_acl(struct inode *inode, int type) |
63 | { | 58 | { |
64 | int size; | 59 | int size; |
@@ -160,11 +155,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) | |||
160 | goto out_dput; | 155 | goto out_dput; |
161 | } | 156 | } |
162 | 157 | ||
163 | if (value) | 158 | ret = __ceph_setxattr(dentry, name, value, size, 0); |
164 | ret = __ceph_setxattr(dentry, name, value, size, 0); | ||
165 | else | ||
166 | ret = __ceph_removexattr(dentry, name); | ||
167 | |||
168 | if (ret) { | 159 | if (ret) { |
169 | if (new_mode != old_mode) { | 160 | if (new_mode != old_mode) { |
170 | newattrs.ia_mode = old_mode; | 161 | newattrs.ia_mode = old_mode; |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 6da4df84ba30..45eda6d7a40c 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -100,6 +100,14 @@ static unsigned fpos_off(loff_t p) | |||
100 | return p & 0xffffffff; | 100 | return p & 0xffffffff; |
101 | } | 101 | } |
102 | 102 | ||
103 | static int fpos_cmp(loff_t l, loff_t r) | ||
104 | { | ||
105 | int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r)); | ||
106 | if (v) | ||
107 | return v; | ||
108 | return (int)(fpos_off(l) - fpos_off(r)); | ||
109 | } | ||
110 | |||
103 | /* | 111 | /* |
104 | * When possible, we try to satisfy a readdir by peeking at the | 112 | * When possible, we try to satisfy a readdir by peeking at the |
105 | * dcache. We make this work by carefully ordering dentries on | 113 | * dcache. We make this work by carefully ordering dentries on |
@@ -156,7 +164,7 @@ more: | |||
156 | if (!d_unhashed(dentry) && dentry->d_inode && | 164 | if (!d_unhashed(dentry) && dentry->d_inode && |
157 | ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && | 165 | ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && |
158 | ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && | 166 | ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && |
159 | ctx->pos <= di->offset) | 167 | fpos_cmp(ctx->pos, di->offset) <= 0) |
160 | break; | 168 | break; |
161 | dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, | 169 | dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, |
162 | dentry->d_name.len, dentry->d_name.name, di->offset, | 170 | dentry->d_name.len, dentry->d_name.name, di->offset, |
@@ -695,9 +703,8 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry, | |||
695 | ceph_mdsc_put_request(req); | 703 | ceph_mdsc_put_request(req); |
696 | 704 | ||
697 | if (!err) | 705 | if (!err) |
698 | err = ceph_init_acl(dentry, dentry->d_inode, dir); | 706 | ceph_init_acl(dentry, dentry->d_inode, dir); |
699 | 707 | else | |
700 | if (err) | ||
701 | d_drop(dentry); | 708 | d_drop(dentry); |
702 | return err; | 709 | return err; |
703 | } | 710 | } |
@@ -735,7 +742,9 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry, | |||
735 | if (!err && !req->r_reply_info.head->is_dentry) | 742 | if (!err && !req->r_reply_info.head->is_dentry) |
736 | err = ceph_handle_notrace_create(dir, dentry); | 743 | err = ceph_handle_notrace_create(dir, dentry); |
737 | ceph_mdsc_put_request(req); | 744 | ceph_mdsc_put_request(req); |
738 | if (err) | 745 | if (!err) |
746 | ceph_init_acl(dentry, dentry->d_inode, dir); | ||
747 | else | ||
739 | d_drop(dentry); | 748 | d_drop(dentry); |
740 | return err; | 749 | return err; |
741 | } | 750 | } |
@@ -776,7 +785,9 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
776 | err = ceph_handle_notrace_create(dir, dentry); | 785 | err = ceph_handle_notrace_create(dir, dentry); |
777 | ceph_mdsc_put_request(req); | 786 | ceph_mdsc_put_request(req); |
778 | out: | 787 | out: |
779 | if (err < 0) | 788 | if (!err) |
789 | ceph_init_acl(dentry, dentry->d_inode, dir); | ||
790 | else | ||
780 | d_drop(dentry); | 791 | d_drop(dentry); |
781 | return err; | 792 | return err; |
782 | } | 793 | } |
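The readdir fix above compares cached dentry positions with fpos_cmp(), which orders by directory fragment first and byte offset second instead of treating the packed 64-bit position as a single number. A userspace sketch of the same field-by-field comparison; the kernel compares fragments with ceph_frag_compare(), for which a plain numeric compare stands in here:

    /* Userspace sketch: compare (fragment, offset) positions field by
     * field instead of as one 64-bit integer. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fpos_frag(int64_t p) { return (uint32_t)(p >> 32); }
    static uint32_t fpos_off(int64_t p)  { return (uint32_t)(p & 0xffffffff); }

    static int fpos_cmp(int64_t l, int64_t r)
    {
        if (fpos_frag(l) != fpos_frag(r))
            return fpos_frag(l) < fpos_frag(r) ? -1 : 1;
        return (int)(fpos_off(l) - fpos_off(r));
    }

    int main(void)
    {
        int64_t a = ((int64_t)2 << 32) | 10;    /* frag 2, offset 10 */
        int64_t b = ((int64_t)2 << 32) | 40;    /* frag 2, offset 40 */

        printf("cmp(a, b) = %d\n", fpos_cmp(a, b));     /* negative */
        return 0;
    }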
diff --git a/fs/ceph/file.c b/fs/ceph/file.c index dfd2ce3419f8..09c7afe32e49 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c | |||
@@ -286,6 +286,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, | |||
286 | } else { | 286 | } else { |
287 | dout("atomic_open finish_open on dn %p\n", dn); | 287 | dout("atomic_open finish_open on dn %p\n", dn); |
288 | if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { | 288 | if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { |
289 | ceph_init_acl(dentry, dentry->d_inode, dir); | ||
289 | *opened |= FILE_CREATED; | 290 | *opened |= FILE_CREATED; |
290 | } | 291 | } |
291 | err = finish_open(file, dentry, ceph_open, opened); | 292 | err = finish_open(file, dentry, ceph_open, opened); |
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 2df963f1cf5a..10a4ccbf38da 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
@@ -144,7 +144,11 @@ enum { | |||
144 | Opt_ino32, | 144 | Opt_ino32, |
145 | Opt_noino32, | 145 | Opt_noino32, |
146 | Opt_fscache, | 146 | Opt_fscache, |
147 | Opt_nofscache | 147 | Opt_nofscache, |
148 | #ifdef CONFIG_CEPH_FS_POSIX_ACL | ||
149 | Opt_acl, | ||
150 | #endif | ||
151 | Opt_noacl | ||
148 | }; | 152 | }; |
149 | 153 | ||
150 | static match_table_t fsopt_tokens = { | 154 | static match_table_t fsopt_tokens = { |
@@ -172,6 +176,10 @@ static match_table_t fsopt_tokens = { | |||
172 | {Opt_noino32, "noino32"}, | 176 | {Opt_noino32, "noino32"}, |
173 | {Opt_fscache, "fsc"}, | 177 | {Opt_fscache, "fsc"}, |
174 | {Opt_nofscache, "nofsc"}, | 178 | {Opt_nofscache, "nofsc"}, |
179 | #ifdef CONFIG_CEPH_FS_POSIX_ACL | ||
180 | {Opt_acl, "acl"}, | ||
181 | #endif | ||
182 | {Opt_noacl, "noacl"}, | ||
175 | {-1, NULL} | 183 | {-1, NULL} |
176 | }; | 184 | }; |
177 | 185 | ||
@@ -271,6 +279,14 @@ static int parse_fsopt_token(char *c, void *private) | |||
271 | case Opt_nofscache: | 279 | case Opt_nofscache: |
272 | fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE; | 280 | fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE; |
273 | break; | 281 | break; |
282 | #ifdef CONFIG_CEPH_FS_POSIX_ACL | ||
283 | case Opt_acl: | ||
284 | fsopt->sb_flags |= MS_POSIXACL; | ||
285 | break; | ||
286 | #endif | ||
287 | case Opt_noacl: | ||
288 | fsopt->sb_flags &= ~MS_POSIXACL; | ||
289 | break; | ||
274 | default: | 290 | default: |
275 | BUG_ON(token); | 291 | BUG_ON(token); |
276 | } | 292 | } |
@@ -438,6 +454,13 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) | |||
438 | else | 454 | else |
439 | seq_puts(m, ",nofsc"); | 455 | seq_puts(m, ",nofsc"); |
440 | 456 | ||
457 | #ifdef CONFIG_CEPH_FS_POSIX_ACL | ||
458 | if (fsopt->sb_flags & MS_POSIXACL) | ||
459 | seq_puts(m, ",acl"); | ||
460 | else | ||
461 | seq_puts(m, ",noacl"); | ||
462 | #endif | ||
463 | |||
441 | if (fsopt->wsize) | 464 | if (fsopt->wsize) |
442 | seq_printf(m, ",wsize=%d", fsopt->wsize); | 465 | seq_printf(m, ",wsize=%d", fsopt->wsize); |
443 | if (fsopt->rsize != CEPH_RSIZE_DEFAULT) | 466 | if (fsopt->rsize != CEPH_RSIZE_DEFAULT) |
@@ -819,9 +842,6 @@ static int ceph_set_super(struct super_block *s, void *data) | |||
819 | 842 | ||
820 | s->s_flags = fsc->mount_options->sb_flags; | 843 | s->s_flags = fsc->mount_options->sb_flags; |
821 | s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ | 844 | s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ |
822 | #ifdef CONFIG_CEPH_FS_POSIX_ACL | ||
823 | s->s_flags |= MS_POSIXACL; | ||
824 | #endif | ||
825 | 845 | ||
826 | s->s_xattr = ceph_xattr_handlers; | 846 | s->s_xattr = ceph_xattr_handlers; |
827 | s->s_fs_info = fsc; | 847 | s->s_fs_info = fsc; |
@@ -911,6 +931,10 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type, | |||
911 | struct ceph_options *opt = NULL; | 931 | struct ceph_options *opt = NULL; |
912 | 932 | ||
913 | dout("ceph_mount\n"); | 933 | dout("ceph_mount\n"); |
934 | |||
935 | #ifdef CONFIG_CEPH_FS_POSIX_ACL | ||
936 | flags |= MS_POSIXACL; | ||
937 | #endif | ||
914 | err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path); | 938 | err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path); |
915 | if (err < 0) { | 939 | if (err < 0) { |
916 | res = ERR_PTR(err); | 940 | res = ERR_PTR(err); |
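Taken together, the super.c hunks make POSIX ACLs a mount option: when CONFIG_CEPH_FS_POSIX_ACL is built in, ceph_mount() seeds MS_POSIXACL before option parsing, "noacl" clears it, and an explicit "acl" re-sets it; ceph_show_options reports whichever won. A minimal userspace sketch of that decision, using stand-in constants rather than the real kernel definitions:

/* Illustrative sketch of the acl/noacl handling added to fs/ceph/super.c;
 * constants and parsing are simplified stand-ins, not the kernel API. */
#include <stdio.h>
#include <string.h>

#define MODEL_POSIXACL       0x1   /* stands in for MS_POSIXACL */
#define MODEL_ACL_CONFIGURED 1     /* stands in for CONFIG_CEPH_FS_POSIX_ACL */

static unsigned long parse_acl_opts(const char *opts)
{
    unsigned long sb_flags = 0;

    /* ceph_mount() now defaults ACLs on when support is compiled in */
    if (MODEL_ACL_CONFIGURED)
        sb_flags |= MODEL_POSIXACL;

    if (opts && strstr(opts, "noacl"))
        sb_flags &= ~MODEL_POSIXACL;        /* Opt_noacl */
    else if (opts && strstr(opts, "acl") && MODEL_ACL_CONFIGURED)
        sb_flags |= MODEL_POSIXACL;         /* Opt_acl (only exists on ACL builds) */

    return sb_flags;
}

int main(void)
{
    printf("default: %s\n", parse_acl_opts("")      & MODEL_POSIXACL ? "acl" : "noacl");
    printf("noacl:   %s\n", parse_acl_opts("noacl") & MODEL_POSIXACL ? "acl" : "noacl");
    return 0;
}

The net effect is that ACL-enabled builds mount with ACLs on by default and "noacl" opts out, which matches the removal of the unconditional MS_POSIXACL assignment from ceph_set_super().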
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 19793b56d0a7..d8801a95b685 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/wait.h> | 13 | #include <linux/wait.h> |
14 | #include <linux/writeback.h> | 14 | #include <linux/writeback.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/posix_acl.h> | ||
16 | 17 | ||
17 | #include <linux/ceph/libceph.h> | 18 | #include <linux/ceph/libceph.h> |
18 | 19 | ||
@@ -743,7 +744,11 @@ extern const struct xattr_handler *ceph_xattr_handlers[]; | |||
743 | struct posix_acl *ceph_get_acl(struct inode *, int); | 744 | struct posix_acl *ceph_get_acl(struct inode *, int); |
744 | int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type); | 745 | int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type); |
745 | int ceph_init_acl(struct dentry *, struct inode *, struct inode *); | 746 | int ceph_init_acl(struct dentry *, struct inode *, struct inode *); |
746 | void ceph_forget_all_cached_acls(struct inode *inode); | 747 | |
748 | static inline void ceph_forget_all_cached_acls(struct inode *inode) | ||
749 | { | ||
750 | forget_all_cached_acls(inode); | ||
751 | } | ||
747 | 752 | ||
748 | #else | 753 | #else |
749 | 754 | ||
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 898b6565ad3e..a55ec37378c6 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c | |||
@@ -12,6 +12,9 @@ | |||
12 | #define XATTR_CEPH_PREFIX "ceph." | 12 | #define XATTR_CEPH_PREFIX "ceph." |
13 | #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1) | 13 | #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1) |
14 | 14 | ||
15 | static int __remove_xattr(struct ceph_inode_info *ci, | ||
16 | struct ceph_inode_xattr *xattr); | ||
17 | |||
15 | /* | 18 | /* |
16 | * List of handlers for synthetic system.* attributes. Other | 19 | * List of handlers for synthetic system.* attributes. Other |
17 | * attributes are handled directly. | 20 | * attributes are handled directly. |
@@ -319,8 +322,7 @@ static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode, | |||
319 | static int __set_xattr(struct ceph_inode_info *ci, | 322 | static int __set_xattr(struct ceph_inode_info *ci, |
320 | const char *name, int name_len, | 323 | const char *name, int name_len, |
321 | const char *val, int val_len, | 324 | const char *val, int val_len, |
322 | int dirty, | 325 | int flags, int update_xattr, |
323 | int should_free_name, int should_free_val, | ||
324 | struct ceph_inode_xattr **newxattr) | 326 | struct ceph_inode_xattr **newxattr) |
325 | { | 327 | { |
326 | struct rb_node **p; | 328 | struct rb_node **p; |
@@ -349,12 +351,31 @@ static int __set_xattr(struct ceph_inode_info *ci, | |||
349 | xattr = NULL; | 351 | xattr = NULL; |
350 | } | 352 | } |
351 | 353 | ||
354 | if (update_xattr) { | ||
355 | int err = 0; | ||
356 | if (xattr && (flags & XATTR_CREATE)) | ||
357 | err = -EEXIST; | ||
358 | else if (!xattr && (flags & XATTR_REPLACE)) | ||
359 | err = -ENODATA; | ||
360 | if (err) { | ||
361 | kfree(name); | ||
362 | kfree(val); | ||
363 | return err; | ||
364 | } | ||
365 | if (update_xattr < 0) { | ||
366 | if (xattr) | ||
367 | __remove_xattr(ci, xattr); | ||
368 | kfree(name); | ||
369 | return 0; | ||
370 | } | ||
371 | } | ||
372 | |||
352 | if (!xattr) { | 373 | if (!xattr) { |
353 | new = 1; | 374 | new = 1; |
354 | xattr = *newxattr; | 375 | xattr = *newxattr; |
355 | xattr->name = name; | 376 | xattr->name = name; |
356 | xattr->name_len = name_len; | 377 | xattr->name_len = name_len; |
357 | xattr->should_free_name = should_free_name; | 378 | xattr->should_free_name = update_xattr; |
358 | 379 | ||
359 | ci->i_xattrs.count++; | 380 | ci->i_xattrs.count++; |
360 | dout("__set_xattr count=%d\n", ci->i_xattrs.count); | 381 | dout("__set_xattr count=%d\n", ci->i_xattrs.count); |
@@ -364,7 +385,7 @@ static int __set_xattr(struct ceph_inode_info *ci, | |||
364 | if (xattr->should_free_val) | 385 | if (xattr->should_free_val) |
365 | kfree((void *)xattr->val); | 386 | kfree((void *)xattr->val); |
366 | 387 | ||
367 | if (should_free_name) { | 388 | if (update_xattr) { |
368 | kfree((void *)name); | 389 | kfree((void *)name); |
369 | name = xattr->name; | 390 | name = xattr->name; |
370 | } | 391 | } |
@@ -379,8 +400,8 @@ static int __set_xattr(struct ceph_inode_info *ci, | |||
379 | xattr->val = ""; | 400 | xattr->val = ""; |
380 | 401 | ||
381 | xattr->val_len = val_len; | 402 | xattr->val_len = val_len; |
382 | xattr->dirty = dirty; | 403 | xattr->dirty = update_xattr; |
383 | xattr->should_free_val = (val && should_free_val); | 404 | xattr->should_free_val = (val && update_xattr); |
384 | 405 | ||
385 | if (new) { | 406 | if (new) { |
386 | rb_link_node(&xattr->node, parent, p); | 407 | rb_link_node(&xattr->node, parent, p); |
@@ -442,7 +463,7 @@ static int __remove_xattr(struct ceph_inode_info *ci, | |||
442 | struct ceph_inode_xattr *xattr) | 463 | struct ceph_inode_xattr *xattr) |
443 | { | 464 | { |
444 | if (!xattr) | 465 | if (!xattr) |
445 | return -EOPNOTSUPP; | 466 | return -ENODATA; |
446 | 467 | ||
447 | rb_erase(&xattr->node, &ci->i_xattrs.index); | 468 | rb_erase(&xattr->node, &ci->i_xattrs.index); |
448 | 469 | ||
@@ -588,7 +609,7 @@ start: | |||
588 | p += len; | 609 | p += len; |
589 | 610 | ||
590 | err = __set_xattr(ci, name, namelen, val, len, | 611 | err = __set_xattr(ci, name, namelen, val, len, |
591 | 0, 0, 0, &xattrs[numattr]); | 612 | 0, 0, &xattrs[numattr]); |
592 | 613 | ||
593 | if (err < 0) | 614 | if (err < 0) |
594 | goto bad; | 615 | goto bad; |
@@ -850,6 +871,9 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name, | |||
850 | 871 | ||
851 | dout("setxattr value=%.*s\n", (int)size, value); | 872 | dout("setxattr value=%.*s\n", (int)size, value); |
852 | 873 | ||
874 | if (!value) | ||
875 | flags |= CEPH_XATTR_REMOVE; | ||
876 | |||
853 | /* do request */ | 877 | /* do request */ |
854 | req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR, | 878 | req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR, |
855 | USE_AUTH_MDS); | 879 | USE_AUTH_MDS); |
@@ -892,7 +916,7 @@ int __ceph_setxattr(struct dentry *dentry, const char *name, | |||
892 | struct ceph_inode_info *ci = ceph_inode(inode); | 916 | struct ceph_inode_info *ci = ceph_inode(inode); |
893 | int issued; | 917 | int issued; |
894 | int err; | 918 | int err; |
895 | int dirty; | 919 | int dirty = 0; |
896 | int name_len = strlen(name); | 920 | int name_len = strlen(name); |
897 | int val_len = size; | 921 | int val_len = size; |
898 | char *newname = NULL; | 922 | char *newname = NULL; |
@@ -953,12 +977,14 @@ retry: | |||
953 | goto retry; | 977 | goto retry; |
954 | } | 978 | } |
955 | 979 | ||
956 | err = __set_xattr(ci, newname, name_len, newval, | 980 | err = __set_xattr(ci, newname, name_len, newval, val_len, |
957 | val_len, 1, 1, 1, &xattr); | 981 | flags, value ? 1 : -1, &xattr); |
958 | 982 | ||
959 | dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); | 983 | if (!err) { |
960 | ci->i_xattrs.dirty = true; | 984 | dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); |
961 | inode->i_ctime = CURRENT_TIME; | 985 | ci->i_xattrs.dirty = true; |
986 | inode->i_ctime = CURRENT_TIME; | ||
987 | } | ||
962 | 988 | ||
963 | spin_unlock(&ci->i_ceph_lock); | 989 | spin_unlock(&ci->i_ceph_lock); |
964 | if (dirty) | 990 | if (dirty) |
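The reworked __set_xattr() folds the XATTR_CREATE/XATTR_REPLACE policing and the removal path into one place: with update_xattr set, an existing name plus XATTR_CREATE fails with -EEXIST, a missing name plus XATTR_REPLACE fails with -ENODATA, and a negative update_xattr (a NULL value in __ceph_setxattr, which also adds CEPH_XATTR_REMOVE on the wire) removes the cached entry. A minimal model of just that decision, with everything else (rbtree, refcounts) omitted:

/* Sketch of the checks now done inside __set_xattr(); the constants are
 * stand-ins and only the decision logic mirrors the patch. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MODEL_XATTR_CREATE  0x1   /* stand-in for XATTR_CREATE  */
#define MODEL_XATTR_REPLACE 0x2   /* stand-in for XATTR_REPLACE */

static int check_set_xattr(bool exists, int flags, int update_xattr)
{
    if (!update_xattr)
        return 0;                      /* cache-fill path: no policing */
    if (exists && (flags & MODEL_XATTR_CREATE))
        return -EEXIST;                /* create requested, name already present */
    if (!exists && (flags & MODEL_XATTR_REPLACE))
        return -ENODATA;               /* replace requested, name missing */
    if (update_xattr < 0)
        return 0;                      /* NULL value: treat as removal */
    return 1;                          /* proceed with insert/update */
}

int main(void)
{
    printf("%d\n", check_set_xattr(true,  MODEL_XATTR_CREATE,  1));  /* -EEXIST  */
    printf("%d\n", check_set_xattr(false, MODEL_XATTR_REPLACE, 1));  /* -ENODATA */
    printf("%d\n", check_set_xattr(true,  0,                  -1));  /* removal  */
    return 0;
}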
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index c819b0bd491a..7ff866dbb89e 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c | |||
@@ -865,8 +865,8 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, | |||
865 | return rc; | 865 | return rc; |
866 | } | 866 | } |
867 | 867 | ||
868 | static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, | 868 | struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, |
869 | __u16 fid, u32 *pacllen) | 869 | const struct cifs_fid *cifsfid, u32 *pacllen) |
870 | { | 870 | { |
871 | struct cifs_ntsd *pntsd = NULL; | 871 | struct cifs_ntsd *pntsd = NULL; |
872 | unsigned int xid; | 872 | unsigned int xid; |
@@ -877,7 +877,8 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, | |||
877 | return ERR_CAST(tlink); | 877 | return ERR_CAST(tlink); |
878 | 878 | ||
879 | xid = get_xid(); | 879 | xid = get_xid(); |
880 | rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen); | 880 | rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd, |
881 | pacllen); | ||
881 | free_xid(xid); | 882 | free_xid(xid); |
882 | 883 | ||
883 | cifs_put_tlink(tlink); | 884 | cifs_put_tlink(tlink); |
@@ -946,7 +947,7 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb, | |||
946 | if (!open_file) | 947 | if (!open_file) |
947 | return get_cifs_acl_by_path(cifs_sb, path, pacllen); | 948 | return get_cifs_acl_by_path(cifs_sb, path, pacllen); |
948 | 949 | ||
949 | pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen); | 950 | pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen); |
950 | cifsFileInfo_put(open_file); | 951 | cifsFileInfo_put(open_file); |
951 | return pntsd; | 952 | return pntsd; |
952 | } | 953 | } |
@@ -1006,19 +1007,31 @@ out: | |||
1006 | /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ | 1007 | /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ |
1007 | int | 1008 | int |
1008 | cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, | 1009 | cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, |
1009 | struct inode *inode, const char *path, const __u16 *pfid) | 1010 | struct inode *inode, const char *path, |
1011 | const struct cifs_fid *pfid) | ||
1010 | { | 1012 | { |
1011 | struct cifs_ntsd *pntsd = NULL; | 1013 | struct cifs_ntsd *pntsd = NULL; |
1012 | u32 acllen = 0; | 1014 | u32 acllen = 0; |
1013 | int rc = 0; | 1015 | int rc = 0; |
1016 | struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); | ||
1017 | struct cifs_tcon *tcon; | ||
1014 | 1018 | ||
1015 | cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); | 1019 | cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); |
1016 | 1020 | ||
1017 | if (pfid) | 1021 | if (IS_ERR(tlink)) |
1018 | pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen); | 1022 | return PTR_ERR(tlink); |
1019 | else | 1023 | tcon = tlink_tcon(tlink); |
1020 | pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen); | ||
1021 | 1024 | ||
1025 | if (pfid && (tcon->ses->server->ops->get_acl_by_fid)) | ||
1026 | pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid, | ||
1027 | &acllen); | ||
1028 | else if (tcon->ses->server->ops->get_acl) | ||
1029 | pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path, | ||
1030 | &acllen); | ||
1031 | else { | ||
1032 | cifs_put_tlink(tlink); | ||
1033 | return -EOPNOTSUPP; | ||
1034 | } | ||
1022 | /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ | 1035 | /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ |
1023 | if (IS_ERR(pntsd)) { | 1036 | if (IS_ERR(pntsd)) { |
1024 | rc = PTR_ERR(pntsd); | 1037 | rc = PTR_ERR(pntsd); |
@@ -1030,6 +1043,8 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, | |||
1030 | cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc); | 1043 | cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc); |
1031 | } | 1044 | } |
1032 | 1045 | ||
1046 | cifs_put_tlink(tlink); | ||
1047 | |||
1033 | return rc; | 1048 | return rc; |
1034 | } | 1049 | } |
1035 | 1050 | ||
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 86dc28c7aa5c..cf32f0393369 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -398,6 +398,8 @@ struct smb_version_operations { | |||
398 | const struct nls_table *, int); | 398 | const struct nls_table *, int); |
399 | struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *, | 399 | struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *, |
400 | const char *, u32 *); | 400 | const char *, u32 *); |
401 | struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *, | ||
402 | const struct cifs_fid *, u32 *); | ||
401 | int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *, | 403 | int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *, |
402 | int); | 404 | int); |
403 | }; | 405 | }; |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index d00e09dfc452..acc4ee8ed075 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -151,7 +151,7 @@ extern struct inode *cifs_iget(struct super_block *sb, | |||
151 | 151 | ||
152 | extern int cifs_get_inode_info(struct inode **inode, const char *full_path, | 152 | extern int cifs_get_inode_info(struct inode **inode, const char *full_path, |
153 | FILE_ALL_INFO *data, struct super_block *sb, | 153 | FILE_ALL_INFO *data, struct super_block *sb, |
154 | int xid, const __u16 *fid); | 154 | int xid, const struct cifs_fid *fid); |
155 | extern int cifs_get_inode_info_unix(struct inode **pinode, | 155 | extern int cifs_get_inode_info_unix(struct inode **pinode, |
156 | const unsigned char *search_path, | 156 | const unsigned char *search_path, |
157 | struct super_block *sb, unsigned int xid); | 157 | struct super_block *sb, unsigned int xid); |
@@ -162,11 +162,13 @@ extern int cifs_rename_pending_delete(const char *full_path, | |||
162 | const unsigned int xid); | 162 | const unsigned int xid); |
163 | extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, | 163 | extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, |
164 | struct cifs_fattr *fattr, struct inode *inode, | 164 | struct cifs_fattr *fattr, struct inode *inode, |
165 | const char *path, const __u16 *pfid); | 165 | const char *path, const struct cifs_fid *pfid); |
166 | extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64, | 166 | extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64, |
167 | kuid_t, kgid_t); | 167 | kuid_t, kgid_t); |
168 | extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *, | 168 | extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *, |
169 | const char *, u32 *); | 169 | const char *, u32 *); |
170 | extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *, | ||
171 | const struct cifs_fid *, u32 *); | ||
170 | extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, | 172 | extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, |
171 | const char *, int); | 173 | const char *, int); |
172 | 174 | ||
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index d3a6796caa5a..3db0c5fd9a11 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -378,7 +378,7 @@ cifs_create_get_file_info: | |||
378 | xid); | 378 | xid); |
379 | else { | 379 | else { |
380 | rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, | 380 | rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, |
381 | xid, &fid->netfid); | 381 | xid, fid); |
382 | if (newinode) { | 382 | if (newinode) { |
383 | if (server->ops->set_lease_key) | 383 | if (server->ops->set_lease_key) |
384 | server->ops->set_lease_key(newinode, fid); | 384 | server->ops->set_lease_key(newinode, fid); |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 755584684f6c..53c15074bb36 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -244,7 +244,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, | |||
244 | xid); | 244 | xid); |
245 | else | 245 | else |
246 | rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, | 246 | rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, |
247 | xid, &fid->netfid); | 247 | xid, fid); |
248 | 248 | ||
249 | out: | 249 | out: |
250 | kfree(buf); | 250 | kfree(buf); |
@@ -2389,7 +2389,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov, | |||
2389 | unsigned long nr_segs, loff_t *poffset) | 2389 | unsigned long nr_segs, loff_t *poffset) |
2390 | { | 2390 | { |
2391 | unsigned long nr_pages, i; | 2391 | unsigned long nr_pages, i; |
2392 | size_t copied, len, cur_len; | 2392 | size_t bytes, copied, len, cur_len; |
2393 | ssize_t total_written = 0; | 2393 | ssize_t total_written = 0; |
2394 | loff_t offset; | 2394 | loff_t offset; |
2395 | struct iov_iter it; | 2395 | struct iov_iter it; |
@@ -2444,14 +2444,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov, | |||
2444 | 2444 | ||
2445 | save_len = cur_len; | 2445 | save_len = cur_len; |
2446 | for (i = 0; i < nr_pages; i++) { | 2446 | for (i = 0; i < nr_pages; i++) { |
2447 | copied = min_t(const size_t, cur_len, PAGE_SIZE); | 2447 | bytes = min_t(const size_t, cur_len, PAGE_SIZE); |
2448 | copied = iov_iter_copy_from_user(wdata->pages[i], &it, | 2448 | copied = iov_iter_copy_from_user(wdata->pages[i], &it, |
2449 | 0, copied); | 2449 | 0, bytes); |
2450 | cur_len -= copied; | 2450 | cur_len -= copied; |
2451 | iov_iter_advance(&it, copied); | 2451 | iov_iter_advance(&it, copied); |
2452 | /* | ||
2453 | * If we didn't copy as much as we expected, then that | ||
2454 | * may mean we trod into an unmapped area. Stop copying | ||
2455 | * at that point. On the next pass through the big | ||
2456 | * loop, we'll likely end up getting a zero-length | ||
2457 | * write and bailing out of it. | ||
2458 | */ | ||
2459 | if (copied < bytes) | ||
2460 | break; | ||
2452 | } | 2461 | } |
2453 | cur_len = save_len - cur_len; | 2462 | cur_len = save_len - cur_len; |
2454 | 2463 | ||
2464 | /* | ||
2465 | * If we have no data to send, then that probably means that | ||
2466 | * the copy above failed altogether. That's most likely because | ||
2467 | * the address in the iovec was bogus. Set the rc to -EFAULT, | ||
2468 | * free anything we allocated and bail out. | ||
2469 | */ | ||
2470 | if (!cur_len) { | ||
2471 | for (i = 0; i < nr_pages; i++) | ||
2472 | put_page(wdata->pages[i]); | ||
2473 | kfree(wdata); | ||
2474 | rc = -EFAULT; | ||
2475 | break; | ||
2476 | } | ||
2477 | |||
2478 | /* | ||
2479 | * i + 1 now represents the number of pages we actually used in | ||
2480 | * the copy phase above. Bring nr_pages down to that, and free | ||
2481 | * any pages that we didn't use. | ||
2482 | */ | ||
2483 | for ( ; nr_pages > i + 1; nr_pages--) | ||
2484 | put_page(wdata->pages[nr_pages - 1]); | ||
2485 | |||
2455 | wdata->sync_mode = WB_SYNC_ALL; | 2486 | wdata->sync_mode = WB_SYNC_ALL; |
2456 | wdata->nr_pages = nr_pages; | 2487 | wdata->nr_pages = nr_pages; |
2457 | wdata->offset = (__u64)offset; | 2488 | wdata->offset = (__u64)offset; |
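The cifs_iovec_write() change separates the bytes requested from iov_iter_copy_from_user() ("bytes") from the bytes actually copied: a short copy stops the per-page loop, a completely empty copy frees everything and fails with -EFAULT, and pages that were never filled are released before the write is issued. A sketch of that loop shape, where fake_copy() stands in for iov_iter_copy_from_user() and page bookkeeping is reduced to a counter:

/* Illustrative model of the short-copy handling; not the CIFS code itself. */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SZ 4096

/* pretend the source buffer becomes unreadable after 'readable' bytes */
static size_t fake_copy(size_t want, size_t *readable)
{
    size_t got = want < *readable ? want : *readable;
    *readable -= got;
    return got;
}

static long fill_pages(size_t len, size_t readable, size_t nr_pages)
{
    size_t cur_len = len, save_len = len, i;

    for (i = 0; i < nr_pages; i++) {
        size_t bytes = cur_len < PAGE_SZ ? cur_len : PAGE_SZ;
        size_t copied = fake_copy(bytes, &readable);

        cur_len -= copied;
        if (copied < bytes)
            break;                  /* trod into an "unmapped" area: stop copying */
    }
    cur_len = save_len - cur_len;   /* bytes actually staged */

    if (!cur_len)
        return -EFAULT;             /* nothing copied at all: fail the write */

    /* i + 1 is the number of pages actually touched; trim the rest */
    while (nr_pages > i + 1)
        nr_pages--;                 /* the real code put_page()s each of these */

    printf("staged %zu bytes in %zu page(s)\n", cur_len, nr_pages);
    return (long)cur_len;
}

int main(void)
{
    fill_pages(3 * PAGE_SZ, 3 * PAGE_SZ, 3);   /* full copy: 3 pages  */
    fill_pages(3 * PAGE_SZ, PAGE_SZ + 10, 3);  /* short copy: 2 pages */
    return 0;
}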
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index be58b8fcdb3c..aadc2b68678b 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -677,7 +677,7 @@ cgfi_exit: | |||
677 | int | 677 | int |
678 | cifs_get_inode_info(struct inode **inode, const char *full_path, | 678 | cifs_get_inode_info(struct inode **inode, const char *full_path, |
679 | FILE_ALL_INFO *data, struct super_block *sb, int xid, | 679 | FILE_ALL_INFO *data, struct super_block *sb, int xid, |
680 | const __u16 *fid) | 680 | const struct cifs_fid *fid) |
681 | { | 681 | { |
682 | bool validinum = false; | 682 | bool validinum = false; |
683 | __u16 srchflgs; | 683 | __u16 srchflgs; |
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index bfd66d84831e..526fb89f9230 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
@@ -1073,6 +1073,7 @@ struct smb_version_operations smb1_operations = { | |||
1073 | #endif /* CIFS_XATTR */ | 1073 | #endif /* CIFS_XATTR */ |
1074 | #ifdef CONFIG_CIFS_ACL | 1074 | #ifdef CONFIG_CIFS_ACL |
1075 | .get_acl = get_cifs_acl, | 1075 | .get_acl = get_cifs_acl, |
1076 | .get_acl_by_fid = get_cifs_acl_by_fid, | ||
1076 | .set_acl = set_cifs_acl, | 1077 | .set_acl = set_cifs_acl, |
1077 | #endif /* CIFS_ACL */ | 1078 | #endif /* CIFS_ACL */ |
1078 | }; | 1079 | }; |
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h index c38350851b08..bc0bb9c34f72 100644 --- a/fs/cifs/smb2glob.h +++ b/fs/cifs/smb2glob.h | |||
@@ -57,4 +57,7 @@ | |||
57 | #define SMB2_CMACAES_SIZE (16) | 57 | #define SMB2_CMACAES_SIZE (16) |
58 | #define SMB3_SIGNKEY_SIZE (16) | 58 | #define SMB3_SIGNKEY_SIZE (16) |
59 | 59 | ||
60 | /* Maximum buffer size value we can send with 1 credit */ | ||
61 | #define SMB2_MAX_BUFFER_SIZE 65536 | ||
62 | |||
60 | #endif /* _SMB2_GLOB_H */ | 63 | #endif /* _SMB2_GLOB_H */ |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 757da3e54d3d..192f51a12cf1 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -182,11 +182,8 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) | |||
182 | /* start with specified wsize, or default */ | 182 | /* start with specified wsize, or default */ |
183 | wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; | 183 | wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; |
184 | wsize = min_t(unsigned int, wsize, server->max_write); | 184 | wsize = min_t(unsigned int, wsize, server->max_write); |
185 | /* | 185 | /* set it to the maximum buffer size value we can send with 1 credit */ |
186 | * limit write size to 2 ** 16, because we don't support multicredit | 186 | wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); |
187 | * requests now. | ||
188 | */ | ||
189 | wsize = min_t(unsigned int, wsize, 2 << 15); | ||
190 | 187 | ||
191 | return wsize; | 188 | return wsize; |
192 | } | 189 | } |
@@ -200,11 +197,8 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) | |||
200 | /* start with specified rsize, or default */ | 197 | /* start with specified rsize, or default */ |
201 | rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; | 198 | rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; |
202 | rsize = min_t(unsigned int, rsize, server->max_read); | 199 | rsize = min_t(unsigned int, rsize, server->max_read); |
203 | /* | 200 | /* set it to the maximum buffer size value we can send with 1 credit */ |
204 | * limit write size to 2 ** 16, because we don't support multicredit | 201 | rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); |
205 | * requests now. | ||
206 | */ | ||
207 | rsize = min_t(unsigned int, rsize, 2 << 15); | ||
208 | 202 | ||
209 | return rsize; | 203 | return rsize; |
210 | } | 204 | } |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index a3f7a9c3cc69..860344701067 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -413,7 +413,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
413 | 413 | ||
414 | /* SMB2 only has an extended negflavor */ | 414 | /* SMB2 only has an extended negflavor */ |
415 | server->negflavor = CIFS_NEGFLAVOR_EXTENDED; | 415 | server->negflavor = CIFS_NEGFLAVOR_EXTENDED; |
416 | server->maxBuf = le32_to_cpu(rsp->MaxTransactSize); | 416 | /* set it to the maximum buffer size value we can send with 1 credit */ |
417 | server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize), | ||
418 | SMB2_MAX_BUFFER_SIZE); | ||
417 | server->max_read = le32_to_cpu(rsp->MaxReadSize); | 419 | server->max_read = le32_to_cpu(rsp->MaxReadSize); |
418 | server->max_write = le32_to_cpu(rsp->MaxWriteSize); | 420 | server->max_write = le32_to_cpu(rsp->MaxWriteSize); |
419 | /* BB Do we need to validate the SecurityMode? */ | 421 | /* BB Do we need to validate the SecurityMode? */ |
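The three SMB2 hunks replace the open-coded 2 << 15 clamp with a named SMB2_MAX_BUFFER_SIZE and also apply it to the negotiated maxBuf. Since 2 << 15 == 65536, the wsize/rsize limit itself is unchanged; the change names the single-credit ceiling and clamps one more value. A quick check, with a stand-in default in place of CIFS_DEFAULT_IOSIZE:

/* Sanity check of the single-credit clamp; SMB2_MAX_BUFFER_SIZE is copied
 * from the hunk above, the default value is a simplification. */
#include <assert.h>
#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE 65536   /* max payload with 1 credit */

static unsigned int clamp_io_size(unsigned int requested, unsigned int server_max)
{
    /* stand-in default; the kernel starts from CIFS_DEFAULT_IOSIZE */
    unsigned int size = requested ? requested : SMB2_MAX_BUFFER_SIZE;

    if (size > server_max)
        size = server_max;           /* server-advertised limit */
    if (size > SMB2_MAX_BUFFER_SIZE)
        size = SMB2_MAX_BUFFER_SIZE; /* single-credit ceiling */
    return size;
}

int main(void)
{
    assert((2 << 15) == SMB2_MAX_BUFFER_SIZE);           /* old and new limits agree */
    printf("%u\n", clamp_io_size(0, 8u * 1024 * 1024));  /* 65536 */
    printf("%u\n", clamp_io_size(16384, 1u << 20));      /* 16384 */
    return 0;
}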
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ece55565b9cd..d3a534fdc5ff 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -771,6 +771,8 @@ do { \ | |||
771 | if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ | 771 | if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ |
772 | (einode)->xtime.tv_sec = \ | 772 | (einode)->xtime.tv_sec = \ |
773 | (signed)le32_to_cpu((raw_inode)->xtime); \ | 773 | (signed)le32_to_cpu((raw_inode)->xtime); \ |
774 | else \ | ||
775 | (einode)->xtime.tv_sec = 0; \ | ||
774 | if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ | 776 | if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ |
775 | ext4_decode_extra_time(&(einode)->xtime, \ | 777 | ext4_decode_extra_time(&(einode)->xtime, \ |
776 | raw_inode->xtime ## _extra); \ | 778 | raw_inode->xtime ## _extra); \ |
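The EXT4_INODE_GET_XTIME change adds an else branch so that, when the on-disk inode is too small to carry the extended timestamp, the in-memory field is explicitly zeroed instead of keeping whatever stale value was there. A tiny sketch of that "read if present, else default to zero" shape, with a stand-in for EXT4_FITS_IN_INODE:

/* Illustrative only; fits_in_inode and the struct are stand-ins. */
#include <stdio.h>

struct demo_timespec { long tv_sec; long tv_nsec; };

static void get_xtime(struct demo_timespec *out, int fits_in_inode,
                      unsigned int raw_le32)
{
    if (fits_in_inode)
        out->tv_sec = (signed int)raw_le32;  /* signed cast, as in the macro */
    else
        out->tv_sec = 0;                     /* new: don't leave a stale value behind */
}

int main(void)
{
    struct demo_timespec ts = { .tv_sec = 12345, .tv_nsec = 0 };

    get_xtime(&ts, 0, 0);   /* small on-disk inode: field is now zeroed */
    printf("%ld\n", ts.tv_sec);
    return 0;
}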
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 10cff4736b11..74bc2d549c58 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -3906,6 +3906,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
3906 | } else | 3906 | } else |
3907 | err = ret; | 3907 | err = ret; |
3908 | map->m_flags |= EXT4_MAP_MAPPED; | 3908 | map->m_flags |= EXT4_MAP_MAPPED; |
3909 | map->m_pblk = newblock; | ||
3909 | if (allocated > map->m_len) | 3910 | if (allocated > map->m_len) |
3910 | allocated = map->m_len; | 3911 | allocated = map->m_len; |
3911 | map->m_len = allocated; | 3912 | map->m_len = allocated; |
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 6bea80614d77..a2a837f00407 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c | |||
@@ -140,7 +140,7 @@ static long swap_inode_boot_loader(struct super_block *sb, | |||
140 | handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2); | 140 | handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2); |
141 | if (IS_ERR(handle)) { | 141 | if (IS_ERR(handle)) { |
142 | err = -EINVAL; | 142 | err = -EINVAL; |
143 | goto swap_boot_out; | 143 | goto journal_err_out; |
144 | } | 144 | } |
145 | 145 | ||
146 | /* Protect extent tree against block allocations via delalloc */ | 146 | /* Protect extent tree against block allocations via delalloc */ |
@@ -198,6 +198,7 @@ static long swap_inode_boot_loader(struct super_block *sb, | |||
198 | 198 | ||
199 | ext4_double_up_write_data_sem(inode, inode_bl); | 199 | ext4_double_up_write_data_sem(inode, inode_bl); |
200 | 200 | ||
201 | journal_err_out: | ||
201 | ext4_inode_resume_unlocked_dio(inode); | 202 | ext4_inode_resume_unlocked_dio(inode); |
202 | ext4_inode_resume_unlocked_dio(inode_bl); | 203 | ext4_inode_resume_unlocked_dio(inode_bl); |
203 | 204 | ||
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index c5adbb318a90..f3b84cd9de56 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
@@ -243,6 +243,7 @@ static int ext4_alloc_group_tables(struct super_block *sb, | |||
243 | ext4_group_t group; | 243 | ext4_group_t group; |
244 | ext4_group_t last_group; | 244 | ext4_group_t last_group; |
245 | unsigned overhead; | 245 | unsigned overhead; |
246 | __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; | ||
246 | 247 | ||
247 | BUG_ON(flex_gd->count == 0 || group_data == NULL); | 248 | BUG_ON(flex_gd->count == 0 || group_data == NULL); |
248 | 249 | ||
@@ -266,7 +267,7 @@ next_group: | |||
266 | src_group++; | 267 | src_group++; |
267 | for (; src_group <= last_group; src_group++) { | 268 | for (; src_group <= last_group; src_group++) { |
268 | overhead = ext4_group_overhead_blocks(sb, src_group); | 269 | overhead = ext4_group_overhead_blocks(sb, src_group); |
269 | if (overhead != 0) | 270 | if (overhead == 0) |
270 | last_blk += group_data[src_group - group].blocks_count; | 271 | last_blk += group_data[src_group - group].blocks_count; |
271 | else | 272 | else |
272 | break; | 273 | break; |
@@ -280,8 +281,7 @@ next_group: | |||
280 | group = ext4_get_group_number(sb, start_blk - 1); | 281 | group = ext4_get_group_number(sb, start_blk - 1); |
281 | group -= group_data[0].group; | 282 | group -= group_data[0].group; |
282 | group_data[group].free_blocks_count--; | 283 | group_data[group].free_blocks_count--; |
283 | if (flexbg_size > 1) | 284 | flex_gd->bg_flags[group] &= uninit_mask; |
284 | flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; | ||
285 | } | 285 | } |
286 | 286 | ||
287 | /* Allocate inode bitmaps */ | 287 | /* Allocate inode bitmaps */ |
@@ -292,22 +292,30 @@ next_group: | |||
292 | group = ext4_get_group_number(sb, start_blk - 1); | 292 | group = ext4_get_group_number(sb, start_blk - 1); |
293 | group -= group_data[0].group; | 293 | group -= group_data[0].group; |
294 | group_data[group].free_blocks_count--; | 294 | group_data[group].free_blocks_count--; |
295 | if (flexbg_size > 1) | 295 | flex_gd->bg_flags[group] &= uninit_mask; |
296 | flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; | ||
297 | } | 296 | } |
298 | 297 | ||
299 | /* Allocate inode tables */ | 298 | /* Allocate inode tables */ |
300 | for (; it_index < flex_gd->count; it_index++) { | 299 | for (; it_index < flex_gd->count; it_index++) { |
301 | if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk) | 300 | unsigned int itb = EXT4_SB(sb)->s_itb_per_group; |
301 | ext4_fsblk_t next_group_start; | ||
302 | |||
303 | if (start_blk + itb > last_blk) | ||
302 | goto next_group; | 304 | goto next_group; |
303 | group_data[it_index].inode_table = start_blk; | 305 | group_data[it_index].inode_table = start_blk; |
304 | group = ext4_get_group_number(sb, start_blk - 1); | 306 | group = ext4_get_group_number(sb, start_blk); |
307 | next_group_start = ext4_group_first_block_no(sb, group + 1); | ||
305 | group -= group_data[0].group; | 308 | group -= group_data[0].group; |
306 | group_data[group].free_blocks_count -= | ||
307 | EXT4_SB(sb)->s_itb_per_group; | ||
308 | if (flexbg_size > 1) | ||
309 | flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; | ||
310 | 309 | ||
310 | if (start_blk + itb > next_group_start) { | ||
311 | flex_gd->bg_flags[group + 1] &= uninit_mask; | ||
312 | overhead = start_blk + itb - next_group_start; | ||
313 | group_data[group + 1].free_blocks_count -= overhead; | ||
314 | itb -= overhead; | ||
315 | } | ||
316 | |||
317 | group_data[group].free_blocks_count -= itb; | ||
318 | flex_gd->bg_flags[group] &= uninit_mask; | ||
311 | start_blk += EXT4_SB(sb)->s_itb_per_group; | 319 | start_blk += EXT4_SB(sb)->s_itb_per_group; |
312 | } | 320 | } |
313 | 321 | ||
@@ -401,7 +409,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, | |||
401 | start = ext4_group_first_block_no(sb, group); | 409 | start = ext4_group_first_block_no(sb, group); |
402 | group -= flex_gd->groups[0].group; | 410 | group -= flex_gd->groups[0].group; |
403 | 411 | ||
404 | count2 = sb->s_blocksize * 8 - (block - start); | 412 | count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start); |
405 | if (count2 > count) | 413 | if (count2 > count) |
406 | count2 = count; | 414 | count2 = count; |
407 | 415 | ||
@@ -620,7 +628,7 @@ handle_ib: | |||
620 | if (err) | 628 | if (err) |
621 | goto out; | 629 | goto out; |
622 | count = group_table_count[j]; | 630 | count = group_table_count[j]; |
623 | start = group_data[i].block_bitmap; | 631 | start = (&group_data[i].block_bitmap)[j]; |
624 | block = start; | 632 | block = start; |
625 | } | 633 | } |
626 | 634 | ||
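The resize.c hunks handle an inode table whose blocks spill past the group it starts in: the overflow (start_blk + itb - next_group_start) is charged to the following group's free-block count, that group also loses its BLOCK_UNINIT flag via uninit_mask, and only the remainder is charged to the current group. A worked example with invented block numbers; only the arithmetic mirrors the patch:

/* Worked example of the "inode table straddles a group boundary" case. */
#include <stdio.h>

int main(void)
{
    unsigned long long start_blk = 32700;        /* block where the inode table starts */
    unsigned long long next_group_start = 32768; /* first block of the following group */
    unsigned int itb = 128;                      /* s_itb_per_group for this example   */
    unsigned int free_this = 500, free_next = 32768;
    unsigned int overhead = 0;

    if (start_blk + itb > next_group_start) {
        /* table crosses the boundary: charge the spill-over to the next group */
        overhead = (unsigned int)(start_blk + itb - next_group_start);
        free_next -= overhead;
        itb -= overhead;
    }
    free_this -= itb;    /* remainder comes out of this group */

    printf("this group loses %u block(s), next group loses %u\n", itb, overhead);
    printf("free counts now: %u / %u\n", free_this, free_next);
    return 0;
}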
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 1f7784de05b6..710fed2377d4 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -3695,16 +3695,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3695 | for (i = 0; i < 4; i++) | 3695 | for (i = 0; i < 4; i++) |
3696 | sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); | 3696 | sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); |
3697 | sbi->s_def_hash_version = es->s_def_hash_version; | 3697 | sbi->s_def_hash_version = es->s_def_hash_version; |
3698 | i = le32_to_cpu(es->s_flags); | 3698 | if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) { |
3699 | if (i & EXT2_FLAGS_UNSIGNED_HASH) | 3699 | i = le32_to_cpu(es->s_flags); |
3700 | sbi->s_hash_unsigned = 3; | 3700 | if (i & EXT2_FLAGS_UNSIGNED_HASH) |
3701 | else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { | 3701 | sbi->s_hash_unsigned = 3; |
3702 | else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { | ||
3702 | #ifdef __CHAR_UNSIGNED__ | 3703 | #ifdef __CHAR_UNSIGNED__ |
3703 | es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); | 3704 | if (!(sb->s_flags & MS_RDONLY)) |
3704 | sbi->s_hash_unsigned = 3; | 3705 | es->s_flags |= |
3706 | cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); | ||
3707 | sbi->s_hash_unsigned = 3; | ||
3705 | #else | 3708 | #else |
3706 | es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); | 3709 | if (!(sb->s_flags & MS_RDONLY)) |
3710 | es->s_flags |= | ||
3711 | cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); | ||
3707 | #endif | 3712 | #endif |
3713 | } | ||
3708 | } | 3714 | } |
3709 | 3715 | ||
3710 | /* Handle clustersize */ | 3716 | /* Handle clustersize */ |
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c index e1959efad64f..b5ebc2d7d80d 100644 --- a/fs/fscache/object-list.c +++ b/fs/fscache/object-list.c | |||
@@ -50,6 +50,8 @@ void fscache_objlist_add(struct fscache_object *obj) | |||
50 | struct fscache_object *xobj; | 50 | struct fscache_object *xobj; |
51 | struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL; | 51 | struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL; |
52 | 52 | ||
53 | ASSERT(RB_EMPTY_NODE(&obj->objlist_link)); | ||
54 | |||
53 | write_lock(&fscache_object_list_lock); | 55 | write_lock(&fscache_object_list_lock); |
54 | 56 | ||
55 | while (*p) { | 57 | while (*p) { |
@@ -75,6 +77,9 @@ void fscache_objlist_add(struct fscache_object *obj) | |||
75 | */ | 77 | */ |
76 | void fscache_objlist_remove(struct fscache_object *obj) | 78 | void fscache_objlist_remove(struct fscache_object *obj) |
77 | { | 79 | { |
80 | if (RB_EMPTY_NODE(&obj->objlist_link)) | ||
81 | return; | ||
82 | |||
78 | write_lock(&fscache_object_list_lock); | 83 | write_lock(&fscache_object_list_lock); |
79 | 84 | ||
80 | BUG_ON(RB_EMPTY_ROOT(&fscache_object_list)); | 85 | BUG_ON(RB_EMPTY_ROOT(&fscache_object_list)); |
diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 53d35c504240..d3b4539f1651 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c | |||
@@ -314,6 +314,9 @@ void fscache_object_init(struct fscache_object *object, | |||
314 | object->cache = cache; | 314 | object->cache = cache; |
315 | object->cookie = cookie; | 315 | object->cookie = cookie; |
316 | object->parent = NULL; | 316 | object->parent = NULL; |
317 | #ifdef CONFIG_FSCACHE_OBJECT_LIST | ||
318 | RB_CLEAR_NODE(&object->objlist_link); | ||
319 | #endif | ||
317 | 320 | ||
318 | object->oob_event_mask = 0; | 321 | object->oob_event_mask = 0; |
319 | for (t = object->oob_table; t->events; t++) | 322 | for (t = object->oob_table; t->events; t++) |
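The fscache hunks pair RB_CLEAR_NODE() at object init with RB_EMPTY_NODE() checks so that an object which never made it onto the global object list can go through fscache_objlist_remove() harmlessly. The kernel encodes "not on the list" in the rb_node itself; in the sketch below a boolean stands in for that, and the helpers are placeholders rather than the fscache API:

/* Model of the "clear at init, skip removal if never added" guard. */
#include <stdbool.h>
#include <stdio.h>

struct demo_object {
    bool on_list;   /* stand-in for !RB_EMPTY_NODE(&obj->objlist_link) */
};

static void objlist_init(struct demo_object *obj)  { obj->on_list = false; }
static void objlist_add(struct demo_object *obj)   { obj->on_list = true;  }

static void objlist_remove(struct demo_object *obj)
{
    if (!obj->on_list)          /* never added: nothing to unlink */
        return;
    obj->on_list = false;       /* the real code does rb_erase() here */
}

int main(void)
{
    struct demo_object obj;

    objlist_init(&obj);
    objlist_remove(&obj);       /* safe even though it was never added */
    objlist_add(&obj);
    objlist_remove(&obj);
    printf("ok\n");
    return 0;
}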
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 8360674c85bc..60bb365f54a5 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
@@ -514,11 +514,13 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type, | |||
514 | * similarly constrained call sites | 514 | * similarly constrained call sites |
515 | */ | 515 | */ |
516 | ret = start_this_handle(journal, handle, GFP_NOFS); | 516 | ret = start_this_handle(journal, handle, GFP_NOFS); |
517 | if (ret < 0) | 517 | if (ret < 0) { |
518 | jbd2_journal_free_reserved(handle); | 518 | jbd2_journal_free_reserved(handle); |
519 | return ret; | ||
520 | } | ||
519 | handle->h_type = type; | 521 | handle->h_type = type; |
520 | handle->h_line_no = line_no; | 522 | handle->h_line_no = line_no; |
521 | return ret; | 523 | return 0; |
522 | } | 524 | } |
523 | EXPORT_SYMBOL(jbd2_journal_start_reserved); | 525 | EXPORT_SYMBOL(jbd2_journal_start_reserved); |
524 | 526 | ||
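The jbd2 fix makes the error path return immediately after freeing the reserved handle, so h_type and h_line_no are never written into a handle that was just released, and the success path now returns 0 explicitly. A shape-only sketch with placeholder helpers, not the jbd2 API:

/* Free-and-bail pattern from jbd2_journal_start_reserved(), simplified. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_handle { int h_type; int h_line_no; };

/* pretend the journal has no space: stands in for start_this_handle() */
static int start_this_handle(struct demo_handle *h) { (void)h; return -ENOMEM; }

static int start_reserved(struct demo_handle *handle, int type, int line_no)
{
    int ret = start_this_handle(handle);

    if (ret < 0) {
        free(handle);           /* jbd2_journal_free_reserved() equivalent */
        return ret;             /* bail out before touching the freed handle */
    }
    handle->h_type = type;      /* only reached on success now */
    handle->h_line_no = line_no;
    return 0;
}

int main(void)
{
    struct demo_handle *h = malloc(sizeof(*h));

    if (!h)
        return 1;
    printf("%d\n", start_reserved(h, 1, 42));   /* handle already freed on error */
    return 0;
}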
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index e973b85d6afd..5a8ea16eedbc 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c | |||
@@ -86,6 +86,8 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type, | |||
86 | rc = posix_acl_equiv_mode(acl, &inode->i_mode); | 86 | rc = posix_acl_equiv_mode(acl, &inode->i_mode); |
87 | if (rc < 0) | 87 | if (rc < 0) |
88 | return rc; | 88 | return rc; |
89 | inode->i_ctime = CURRENT_TIME; | ||
90 | mark_inode_dirty(inode); | ||
89 | if (rc == 0) | 91 | if (rc == 0) |
90 | acl = NULL; | 92 | acl = NULL; |
91 | break; | 93 | break; |
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index e066a3902973..ab798a88ec1d 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c | |||
@@ -779,6 +779,7 @@ nlmsvc_grant_blocked(struct nlm_block *block) | |||
779 | struct nlm_file *file = block->b_file; | 779 | struct nlm_file *file = block->b_file; |
780 | struct nlm_lock *lock = &block->b_call->a_args.lock; | 780 | struct nlm_lock *lock = &block->b_call->a_args.lock; |
781 | int error; | 781 | int error; |
782 | loff_t fl_start, fl_end; | ||
782 | 783 | ||
783 | dprintk("lockd: grant blocked lock %p\n", block); | 784 | dprintk("lockd: grant blocked lock %p\n", block); |
784 | 785 | ||
@@ -796,9 +797,16 @@ nlmsvc_grant_blocked(struct nlm_block *block) | |||
796 | } | 797 | } |
797 | 798 | ||
798 | /* Try the lock operation again */ | 799 | /* Try the lock operation again */ |
800 | /* vfs_lock_file() can mangle fl_start and fl_end, but we need | ||
801 | * them unchanged for the GRANT_MSG | ||
802 | */ | ||
799 | lock->fl.fl_flags |= FL_SLEEP; | 803 | lock->fl.fl_flags |= FL_SLEEP; |
804 | fl_start = lock->fl.fl_start; | ||
805 | fl_end = lock->fl.fl_end; | ||
800 | error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); | 806 | error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); |
801 | lock->fl.fl_flags &= ~FL_SLEEP; | 807 | lock->fl.fl_flags &= ~FL_SLEEP; |
808 | lock->fl.fl_start = fl_start; | ||
809 | lock->fl.fl_end = fl_end; | ||
802 | 810 | ||
803 | switch (error) { | 811 | switch (error) { |
804 | case 0: | 812 | case 0: |
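The lockd change works around vfs_lock_file() rewriting fl_start/fl_end of the lock it is handed: the GRANT_MSG callback must report the range the client originally asked for, so the two fields are saved across the call and restored afterwards. A minimal sketch of that save/restore, with a dummy struct and a deliberately "mangling" callee standing in for struct file_lock and vfs_lock_file():

/* Save/restore pattern from nlmsvc_grant_blocked(), stand-ins only. */
#include <stdio.h>

struct demo_lock {
    long long fl_start;
    long long fl_end;
};

/* pretend the lock call splits/merges the range */
static int mangle_lock(struct demo_lock *fl)
{
    fl->fl_start += 100;
    fl->fl_end   -= 100;
    return 0;
}

int main(void)
{
    struct demo_lock lock = { .fl_start = 0, .fl_end = 4095 };
    long long fl_start = lock.fl_start;   /* preserve for GRANT_MSG */
    long long fl_end   = lock.fl_end;

    mangle_lock(&lock);

    lock.fl_start = fl_start;             /* restore the original range */
    lock.fl_end   = fl_end;

    printf("reported range: %lld-%lld\n", lock.fl_start, lock.fl_end);
    return 0;
}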
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index d3a587144222..d190e33d0ec2 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c | |||
@@ -151,17 +151,15 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, | |||
151 | pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); | 151 | pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); |
152 | if (IS_ERR(pacl)) | 152 | if (IS_ERR(pacl)) |
153 | return PTR_ERR(pacl); | 153 | return PTR_ERR(pacl); |
154 | /* allocate for worst case: one (deny, allow) pair each: */ | ||
155 | size += 2 * pacl->a_count; | ||
156 | } | 154 | } |
155 | /* allocate for worst case: one (deny, allow) pair each: */ | ||
156 | size += 2 * pacl->a_count; | ||
157 | 157 | ||
158 | if (S_ISDIR(inode->i_mode)) { | 158 | if (S_ISDIR(inode->i_mode)) { |
159 | flags = NFS4_ACL_DIR; | 159 | flags = NFS4_ACL_DIR; |
160 | dpacl = get_acl(inode, ACL_TYPE_DEFAULT); | 160 | dpacl = get_acl(inode, ACL_TYPE_DEFAULT); |
161 | if (dpacl) | 161 | if (dpacl) |
162 | size += 2 * dpacl->a_count; | 162 | size += 2 * dpacl->a_count; |
163 | } else { | ||
164 | dpacl = NULL; | ||
165 | } | 163 | } |
166 | 164 | ||
167 | *acl = nfs4_acl_new(size); | 165 | *acl = nfs4_acl_new(size); |
@@ -170,8 +168,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, | |||
170 | goto out; | 168 | goto out; |
171 | } | 169 | } |
172 | 170 | ||
173 | if (pacl) | 171 | _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT); |
174 | _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT); | ||
175 | 172 | ||
176 | if (dpacl) | 173 | if (dpacl) |
177 | _posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT); | 174 | _posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT); |
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c index 2b7882b508db..9a3c68cf6026 100644 --- a/fs/reiserfs/do_balan.c +++ b/fs/reiserfs/do_balan.c | |||
@@ -324,23 +324,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
324 | switch (flag) { | 324 | switch (flag) { |
325 | case M_INSERT: /* insert item into L[0] */ | 325 | case M_INSERT: /* insert item into L[0] */ |
326 | 326 | ||
327 | if (item_pos == tb->lnum[0] - 1 | 327 | if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { |
328 | && tb->lbytes != -1) { | ||
329 | /* part of new item falls into L[0] */ | 328 | /* part of new item falls into L[0] */ |
330 | int new_item_len; | 329 | int new_item_len; |
331 | int version; | 330 | int version; |
332 | 331 | ||
333 | ret_val = | 332 | ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1); |
334 | leaf_shift_left(tb, tb->lnum[0] - 1, | ||
335 | -1); | ||
336 | 333 | ||
337 | /* Calculate item length to insert to S[0] */ | 334 | /* Calculate item length to insert to S[0] */ |
338 | new_item_len = | 335 | new_item_len = ih_item_len(ih) - tb->lbytes; |
339 | ih_item_len(ih) - tb->lbytes; | ||
340 | /* Calculate and check item length to insert to L[0] */ | 336 | /* Calculate and check item length to insert to L[0] */ |
341 | put_ih_item_len(ih, | 337 | put_ih_item_len(ih, ih_item_len(ih) - new_item_len); |
342 | ih_item_len(ih) - | ||
343 | new_item_len); | ||
344 | 338 | ||
345 | RFALSE(ih_item_len(ih) <= 0, | 339 | RFALSE(ih_item_len(ih) <= 0, |
346 | "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d", | 340 | "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d", |
@@ -349,30 +343,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
349 | /* Insert new item into L[0] */ | 343 | /* Insert new item into L[0] */ |
350 | buffer_info_init_left(tb, &bi); | 344 | buffer_info_init_left(tb, &bi); |
351 | leaf_insert_into_buf(&bi, | 345 | leaf_insert_into_buf(&bi, |
352 | n + item_pos - | 346 | n + item_pos - ret_val, ih, body, |
353 | ret_val, ih, body, | 347 | zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num); |
354 | zeros_num > | ||
355 | ih_item_len(ih) ? | ||
356 | ih_item_len(ih) : | ||
357 | zeros_num); | ||
358 | 348 | ||
359 | version = ih_version(ih); | 349 | version = ih_version(ih); |
360 | 350 | ||
361 | /* Calculate key component, item length and body to insert into S[0] */ | 351 | /* Calculate key component, item length and body to insert into S[0] */ |
362 | set_le_ih_k_offset(ih, | 352 | set_le_ih_k_offset(ih, le_ih_k_offset(ih) + |
363 | le_ih_k_offset(ih) + | 353 | (tb-> lbytes << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0))); |
364 | (tb-> | ||
365 | lbytes << | ||
366 | (is_indirect_le_ih | ||
367 | (ih) ? tb->tb_sb-> | ||
368 | s_blocksize_bits - | ||
369 | UNFM_P_SHIFT : | ||
370 | 0))); | ||
371 | 354 | ||
372 | put_ih_item_len(ih, new_item_len); | 355 | put_ih_item_len(ih, new_item_len); |
373 | if (tb->lbytes > zeros_num) { | 356 | if (tb->lbytes > zeros_num) { |
374 | body += | 357 | body += (tb->lbytes - zeros_num); |
375 | (tb->lbytes - zeros_num); | ||
376 | zeros_num = 0; | 358 | zeros_num = 0; |
377 | } else | 359 | } else |
378 | zeros_num -= tb->lbytes; | 360 | zeros_num -= tb->lbytes; |
@@ -383,15 +365,10 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
383 | } else { | 365 | } else { |
384 | /* new item in whole falls into L[0] */ | 366 | /* new item in whole falls into L[0] */ |
385 | /* Shift lnum[0]-1 items to L[0] */ | 367 | /* Shift lnum[0]-1 items to L[0] */ |
386 | ret_val = | 368 | ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes); |
387 | leaf_shift_left(tb, tb->lnum[0] - 1, | ||
388 | tb->lbytes); | ||
389 | /* Insert new item into L[0] */ | 369 | /* Insert new item into L[0] */ |
390 | buffer_info_init_left(tb, &bi); | 370 | buffer_info_init_left(tb, &bi); |
391 | leaf_insert_into_buf(&bi, | 371 | leaf_insert_into_buf(&bi, n + item_pos - ret_val, ih, body, zeros_num); |
392 | n + item_pos - | ||
393 | ret_val, ih, body, | ||
394 | zeros_num); | ||
395 | tb->insert_size[0] = 0; | 372 | tb->insert_size[0] = 0; |
396 | zeros_num = 0; | 373 | zeros_num = 0; |
397 | } | 374 | } |
@@ -399,264 +376,117 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
399 | 376 | ||
400 | case M_PASTE: /* append item in L[0] */ | 377 | case M_PASTE: /* append item in L[0] */ |
401 | 378 | ||
402 | if (item_pos == tb->lnum[0] - 1 | 379 | if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { |
403 | && tb->lbytes != -1) { | ||
404 | /* we must shift the part of the appended item */ | 380 | /* we must shift the part of the appended item */ |
405 | if (is_direntry_le_ih | 381 | if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) { |
406 | (B_N_PITEM_HEAD(tbS0, item_pos))) { | ||
407 | 382 | ||
408 | RFALSE(zeros_num, | 383 | RFALSE(zeros_num, |
409 | "PAP-12090: invalid parameter in case of a directory"); | 384 | "PAP-12090: invalid parameter in case of a directory"); |
410 | /* directory item */ | 385 | /* directory item */ |
411 | if (tb->lbytes > pos_in_item) { | 386 | if (tb->lbytes > pos_in_item) { |
412 | /* new directory entry falls into L[0] */ | 387 | /* new directory entry falls into L[0] */ |
413 | struct item_head | 388 | struct item_head *pasted; |
414 | *pasted; | 389 | int l_pos_in_item = pos_in_item; |
415 | int l_pos_in_item = | ||
416 | pos_in_item; | ||
417 | 390 | ||
418 | /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */ | 391 | /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */ |
419 | ret_val = | 392 | ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes-1); |
420 | leaf_shift_left(tb, | 393 | if (ret_val && !item_pos) { |
421 | tb-> | 394 | pasted = B_N_PITEM_HEAD(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1); |
422 | lnum | 395 | l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes -1); |
423 | [0], | ||
424 | tb-> | ||
425 | lbytes | ||
426 | - | ||
427 | 1); | ||
428 | if (ret_val | ||
429 | && !item_pos) { | ||
430 | pasted = | ||
431 | B_N_PITEM_HEAD | ||
432 | (tb->L[0], | ||
433 | B_NR_ITEMS | ||
434 | (tb-> | ||
435 | L[0]) - | ||
436 | 1); | ||
437 | l_pos_in_item += | ||
438 | I_ENTRY_COUNT | ||
439 | (pasted) - | ||
440 | (tb-> | ||
441 | lbytes - | ||
442 | 1); | ||
443 | } | 396 | } |
444 | 397 | ||
445 | /* Append given directory entry to directory item */ | 398 | /* Append given directory entry to directory item */ |
446 | buffer_info_init_left(tb, &bi); | 399 | buffer_info_init_left(tb, &bi); |
447 | leaf_paste_in_buffer | 400 | leaf_paste_in_buffer(&bi, n + item_pos - ret_val, l_pos_in_item, tb->insert_size[0], body, zeros_num); |
448 | (&bi, | ||
449 | n + item_pos - | ||
450 | ret_val, | ||
451 | l_pos_in_item, | ||
452 | tb->insert_size[0], | ||
453 | body, zeros_num); | ||
454 | 401 | ||
455 | /* previous string prepared space for pasting new entry, following string pastes this entry */ | 402 | /* previous string prepared space for pasting new entry, following string pastes this entry */ |
456 | 403 | ||
457 | /* when we have merge directory item, pos_in_item has been changed too */ | 404 | /* when we have merge directory item, pos_in_item has been changed too */ |
458 | 405 | ||
459 | /* paste new directory entry. 1 is entry number */ | 406 | /* paste new directory entry. 1 is entry number */ |
460 | leaf_paste_entries(&bi, | 407 | leaf_paste_entries(&bi, n + item_pos - ret_val, l_pos_in_item, |
461 | n + | 408 | 1, (struct reiserfs_de_head *) body, |
462 | item_pos | 409 | body + DEH_SIZE, tb->insert_size[0]); |
463 | - | ||
464 | ret_val, | ||
465 | l_pos_in_item, | ||
466 | 1, | ||
467 | (struct | ||
468 | reiserfs_de_head | ||
469 | *) | ||
470 | body, | ||
471 | body | ||
472 | + | ||
473 | DEH_SIZE, | ||
474 | tb-> | ||
475 | insert_size | ||
476 | [0] | ||
477 | ); | ||
478 | tb->insert_size[0] = 0; | 410 | tb->insert_size[0] = 0; |
479 | } else { | 411 | } else { |
480 | /* new directory item doesn't fall into L[0] */ | 412 | /* new directory item doesn't fall into L[0] */ |
481 | /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */ | 413 | /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */ |
482 | leaf_shift_left(tb, | 414 | leaf_shift_left(tb, tb->lnum[0], tb->lbytes); |
483 | tb-> | ||
484 | lnum[0], | ||
485 | tb-> | ||
486 | lbytes); | ||
487 | } | 415 | } |
488 | /* Calculate new position to append in item body */ | 416 | /* Calculate new position to append in item body */ |
489 | pos_in_item -= tb->lbytes; | 417 | pos_in_item -= tb->lbytes; |
490 | } else { | 418 | } else { |
491 | /* regular object */ | 419 | /* regular object */ |
492 | RFALSE(tb->lbytes <= 0, | 420 | RFALSE(tb->lbytes <= 0, "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", tb->lbytes); |
493 | "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", | 421 | RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)), |
494 | tb->lbytes); | ||
495 | RFALSE(pos_in_item != | ||
496 | ih_item_len | ||
497 | (B_N_PITEM_HEAD | ||
498 | (tbS0, item_pos)), | ||
499 | "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d", | 422 | "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d", |
500 | ih_item_len | 423 | ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),pos_in_item); |
501 | (B_N_PITEM_HEAD | ||
502 | (tbS0, item_pos)), | ||
503 | pos_in_item); | ||
504 | 424 | ||
505 | if (tb->lbytes >= pos_in_item) { | 425 | if (tb->lbytes >= pos_in_item) { |
506 | /* appended item will be in L[0] in whole */ | 426 | /* appended item will be in L[0] in whole */ |
507 | int l_n; | 427 | int l_n; |
508 | 428 | ||
509 | /* this bytes number must be appended to the last item of L[h] */ | 429 | /* this bytes number must be appended to the last item of L[h] */ |
510 | l_n = | 430 | l_n = tb->lbytes - pos_in_item; |
511 | tb->lbytes - | ||
512 | pos_in_item; | ||
513 | 431 | ||
514 | /* Calculate new insert_size[0] */ | 432 | /* Calculate new insert_size[0] */ |
515 | tb->insert_size[0] -= | 433 | tb->insert_size[0] -= l_n; |
516 | l_n; | ||
517 | 434 | ||
518 | RFALSE(tb-> | 435 | RFALSE(tb->insert_size[0] <= 0, |
519 | insert_size[0] <= | ||
520 | 0, | ||
521 | "PAP-12105: there is nothing to paste into L[0]. insert_size=%d", | 436 | "PAP-12105: there is nothing to paste into L[0]. insert_size=%d", |
522 | tb-> | 437 | tb->insert_size[0]); |
523 | insert_size[0]); | 438 | ret_val = leaf_shift_left(tb, tb->lnum[0], ih_item_len |
524 | ret_val = | 439 | (B_N_PITEM_HEAD(tbS0, item_pos))); |
525 | leaf_shift_left(tb, | ||
526 | tb-> | ||
527 | lnum | ||
528 | [0], | ||
529 | ih_item_len | ||
530 | (B_N_PITEM_HEAD | ||
531 | (tbS0, | ||
532 | item_pos))); | ||
533 | /* Append to body of item in L[0] */ | 440 | /* Append to body of item in L[0] */ |
534 | buffer_info_init_left(tb, &bi); | 441 | buffer_info_init_left(tb, &bi); |
535 | leaf_paste_in_buffer | 442 | leaf_paste_in_buffer |
536 | (&bi, | 443 | (&bi, n + item_pos - ret_val, ih_item_len |
537 | n + item_pos - | 444 | (B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val)), |
538 | ret_val, | 445 | l_n, body, |
539 | ih_item_len | 446 | zeros_num > l_n ? l_n : zeros_num); |
540 | (B_N_PITEM_HEAD | ||
541 | (tb->L[0], | ||
542 | n + item_pos - | ||
543 | ret_val)), l_n, | ||
544 | body, | ||
545 | zeros_num > | ||
546 | l_n ? l_n : | ||
547 | zeros_num); | ||
548 | /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */ | 447 | /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */ |
549 | { | 448 | { |
550 | int version; | 449 | int version; |
551 | int temp_l = | 450 | int temp_l = l_n; |
552 | l_n; | 451 | |
553 | 452 | RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)), | |
554 | RFALSE | ||
555 | (ih_item_len | ||
556 | (B_N_PITEM_HEAD | ||
557 | (tbS0, | ||
558 | 0)), | ||
559 | "PAP-12106: item length must be 0"); | 453 | "PAP-12106: item length must be 0"); |
560 | RFALSE | 454 | RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY |
561 | (comp_short_le_keys | 455 | (tb->L[0], n + item_pos - ret_val)), |
562 | (B_N_PKEY | ||
563 | (tbS0, 0), | ||
564 | B_N_PKEY | ||
565 | (tb->L[0], | ||
566 | n + | ||
567 | item_pos | ||
568 | - | ||
569 | ret_val)), | ||
570 | "PAP-12107: items must be of the same file"); | 456 | "PAP-12107: items must be of the same file"); |
571 | if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) { | 457 | if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) { |
572 | temp_l = | 458 | temp_l = l_n << (tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT); |
573 | l_n | ||
574 | << | ||
575 | (tb-> | ||
576 | tb_sb-> | ||
577 | s_blocksize_bits | ||
578 | - | ||
579 | UNFM_P_SHIFT); | ||
580 | } | 459 | } |
581 | /* update key of first item in S0 */ | 460 | /* update key of first item in S0 */ |
582 | version = | 461 | version = ih_version(B_N_PITEM_HEAD(tbS0, 0)); |
583 | ih_version | 462 | set_le_key_k_offset(version, B_N_PKEY(tbS0, 0), |
584 | (B_N_PITEM_HEAD | 463 | le_key_k_offset(version,B_N_PKEY(tbS0, 0)) + temp_l); |
585 | (tbS0, 0)); | ||
586 | set_le_key_k_offset | ||
587 | (version, | ||
588 | B_N_PKEY | ||
589 | (tbS0, 0), | ||
590 | le_key_k_offset | ||
591 | (version, | ||
592 | B_N_PKEY | ||
593 | (tbS0, | ||
594 | 0)) + | ||
595 | temp_l); | ||
596 | /* update left delimiting key */ | 464 | /* update left delimiting key */ |
597 | set_le_key_k_offset | 465 | set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), |
598 | (version, | 466 | le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0])) + temp_l); |
599 | B_N_PDELIM_KEY | ||
600 | (tb-> | ||
601 | CFL[0], | ||
602 | tb-> | ||
603 | lkey[0]), | ||
604 | le_key_k_offset | ||
605 | (version, | ||
606 | B_N_PDELIM_KEY | ||
607 | (tb-> | ||
608 | CFL[0], | ||
609 | tb-> | ||
610 | lkey[0])) | ||
611 | + temp_l); | ||
612 | } | 467 | } |
613 | 468 | ||
614 | /* Calculate new body, position in item and insert_size[0] */ | 469 | /* Calculate new body, position in item and insert_size[0] */ |
615 | if (l_n > zeros_num) { | 470 | if (l_n > zeros_num) { |
616 | body += | 471 | body += (l_n - zeros_num); |
617 | (l_n - | ||
618 | zeros_num); | ||
619 | zeros_num = 0; | 472 | zeros_num = 0; |
620 | } else | 473 | } else |
621 | zeros_num -= | 474 | zeros_num -= l_n; |
622 | l_n; | ||
623 | pos_in_item = 0; | 475 | pos_in_item = 0; |
624 | 476 | ||
625 | RFALSE | 477 | RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1)) |
626 | (comp_short_le_keys | 478 | || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size) |
627 | (B_N_PKEY(tbS0, 0), | 479 | || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), tbS0->b_size), |
628 | B_N_PKEY(tb->L[0], | ||
629 | B_NR_ITEMS | ||
630 | (tb-> | ||
631 | L[0]) - | ||
632 | 1)) | ||
633 | || | ||
634 | !op_is_left_mergeable | ||
635 | (B_N_PKEY(tbS0, 0), | ||
636 | tbS0->b_size) | ||
637 | || | ||
638 | !op_is_left_mergeable | ||
639 | (B_N_PDELIM_KEY | ||
640 | (tb->CFL[0], | ||
641 | tb->lkey[0]), | ||
642 | tbS0->b_size), | ||
643 | "PAP-12120: item must be merge-able with left neighboring item"); | 480 | "PAP-12120: item must be merge-able with left neighboring item"); |
644 | } else { /* only part of the appended item will be in L[0] */ | 481 | } else { /* only part of the appended item will be in L[0] */ |
645 | 482 | ||
646 | /* Calculate position in item for append in S[0] */ | 483 | /* Calculate position in item for append in S[0] */ |
647 | pos_in_item -= | 484 | pos_in_item -= tb->lbytes; |
648 | tb->lbytes; | ||
649 | 485 | ||
650 | RFALSE(pos_in_item <= 0, | 486 | RFALSE(pos_in_item <= 0, "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item); |
651 | "PAP-12125: no place for paste. pos_in_item=%d", | ||
652 | pos_in_item); | ||
653 | 487 | ||
654 | /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ | 488 | /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ |
655 | leaf_shift_left(tb, | 489 | leaf_shift_left(tb, tb->lnum[0], tb->lbytes); |
656 | tb-> | ||
657 | lnum[0], | ||
658 | tb-> | ||
659 | lbytes); | ||
660 | } | 490 | } |
661 | } | 491 | } |
662 | } else { /* appended item will be in L[0] in whole */ | 492 | } else { /* appended item will be in L[0] in whole */ |
@@ -665,52 +495,30 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
665 | 495 | ||
666 | if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */ | 496 | if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */ |
667 | /* then increment pos_in_item by the size of the last item in L[0] */ | 497 | /* then increment pos_in_item by the size of the last item in L[0] */ |
668 | pasted = | 498 | pasted = B_N_PITEM_HEAD(tb->L[0], n - 1); |
669 | B_N_PITEM_HEAD(tb->L[0], | ||
670 | n - 1); | ||
671 | if (is_direntry_le_ih(pasted)) | 499 | if (is_direntry_le_ih(pasted)) |
672 | pos_in_item += | 500 | pos_in_item += ih_entry_count(pasted); |
673 | ih_entry_count | ||
674 | (pasted); | ||
675 | else | 501 | else |
676 | pos_in_item += | 502 | pos_in_item += ih_item_len(pasted); |
677 | ih_item_len(pasted); | ||
678 | } | 503 | } |
679 | 504 | ||
680 | /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ | 505 | /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ |
681 | ret_val = | 506 | ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes); |
682 | leaf_shift_left(tb, tb->lnum[0], | ||
683 | tb->lbytes); | ||
684 | /* Append to body of item in L[0] */ | 507 | /* Append to body of item in L[0] */ |
685 | buffer_info_init_left(tb, &bi); | 508 | buffer_info_init_left(tb, &bi); |
686 | leaf_paste_in_buffer(&bi, | 509 | leaf_paste_in_buffer(&bi, n + item_pos - ret_val, |
687 | n + item_pos - | ||
688 | ret_val, | ||
689 | pos_in_item, | 510 | pos_in_item, |
690 | tb->insert_size[0], | 511 | tb->insert_size[0], |
691 | body, zeros_num); | 512 | body, zeros_num); |
692 | 513 | ||
693 | /* if appended item is directory, paste entry */ | 514 | /* if appended item is directory, paste entry */ |
694 | pasted = | 515 | pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val); |
695 | B_N_PITEM_HEAD(tb->L[0], | ||
696 | n + item_pos - | ||
697 | ret_val); | ||
698 | if (is_direntry_le_ih(pasted)) | 516 | if (is_direntry_le_ih(pasted)) |
699 | leaf_paste_entries(&bi, | 517 | leaf_paste_entries(&bi, n + item_pos - ret_val, |
700 | n + | 518 | pos_in_item, 1, |
701 | item_pos - | 519 | (struct reiserfs_de_head *) body, |
702 | ret_val, | 520 | body + DEH_SIZE, |
703 | pos_in_item, | 521 | tb->insert_size[0]); |
704 | 1, | ||
705 | (struct | ||
706 | reiserfs_de_head | ||
707 | *)body, | ||
708 | body + | ||
709 | DEH_SIZE, | ||
710 | tb-> | ||
711 | insert_size | ||
712 | [0] | ||
713 | ); | ||
714 | /* if appended item is indirect item, put unformatted node into un list */ | 522 | /* if appended item is indirect item, put unformatted node into un list */ |
715 | if (is_indirect_le_ih(pasted)) | 523 | if (is_indirect_le_ih(pasted)) |
716 | set_ih_free_space(pasted, 0); | 524 | set_ih_free_space(pasted, 0); |
@@ -722,13 +530,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
722 | reiserfs_panic(tb->tb_sb, "PAP-12130", | 530 | reiserfs_panic(tb->tb_sb, "PAP-12130", |
723 | "lnum > 0: unexpected mode: " | 531 | "lnum > 0: unexpected mode: " |
724 | " %s(%d)", | 532 | " %s(%d)", |
725 | (flag == | 533 | (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag); |
726 | M_DELETE) ? "DELETE" : ((flag == | ||
727 | M_CUT) | ||
728 | ? "CUT" | ||
729 | : | ||
730 | "UNKNOWN"), | ||
731 | flag); | ||
732 | } | 534 | } |
733 | } else { | 535 | } else { |
734 | /* new item doesn't fall into L[0] */ | 536 | /* new item doesn't fall into L[0] */ |
@@ -748,14 +550,12 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
748 | case M_INSERT: /* insert item */ | 550 | case M_INSERT: /* insert item */ |
749 | if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */ | 551 | if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */ |
750 | if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */ | 552 | if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */ |
751 | loff_t old_key_comp, old_len, | 553 | loff_t old_key_comp, old_len, r_zeros_number; |
752 | r_zeros_number; | ||
753 | const char *r_body; | 554 | const char *r_body; |
754 | int version; | 555 | int version; |
755 | loff_t offset; | 556 | loff_t offset; |
756 | 557 | ||
757 | leaf_shift_right(tb, tb->rnum[0] - 1, | 558 | leaf_shift_right(tb, tb->rnum[0] - 1, -1); |
758 | -1); | ||
759 | 559 | ||
760 | version = ih_version(ih); | 560 | version = ih_version(ih); |
761 | /* Remember key component and item length */ | 561 | /* Remember key component and item length */ |
@@ -763,29 +563,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
763 | old_len = ih_item_len(ih); | 563 | old_len = ih_item_len(ih); |
764 | 564 | ||
765 | /* Calculate key component and item length to insert into R[0] */ | 565 | /* Calculate key component and item length to insert into R[0] */ |
766 | offset = | 566 | offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << (is_indirect_le_ih(ih) ? tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0)); |
767 | le_ih_k_offset(ih) + | ||
768 | ((old_len - | ||
769 | tb-> | ||
770 | rbytes) << (is_indirect_le_ih(ih) | ||
771 | ? tb->tb_sb-> | ||
772 | s_blocksize_bits - | ||
773 | UNFM_P_SHIFT : 0)); | ||
774 | set_le_ih_k_offset(ih, offset); | 567 | set_le_ih_k_offset(ih, offset); |
775 | put_ih_item_len(ih, tb->rbytes); | 568 | put_ih_item_len(ih, tb->rbytes); |
776 | /* Insert part of the item into R[0] */ | 569 | /* Insert part of the item into R[0] */ |
777 | buffer_info_init_right(tb, &bi); | 570 | buffer_info_init_right(tb, &bi); |
778 | if ((old_len - tb->rbytes) > zeros_num) { | 571 | if ((old_len - tb->rbytes) > zeros_num) { |
779 | r_zeros_number = 0; | 572 | r_zeros_number = 0; |
780 | r_body = | 573 | r_body = body + (old_len - tb->rbytes) - zeros_num; |
781 | body + (old_len - | ||
782 | tb->rbytes) - | ||
783 | zeros_num; | ||
784 | } else { | 574 | } else { |
785 | r_body = body; | 575 | r_body = body; |
786 | r_zeros_number = | 576 | r_zeros_number = zeros_num - (old_len - tb->rbytes); |
787 | zeros_num - (old_len - | ||
788 | tb->rbytes); | ||
789 | zeros_num -= r_zeros_number; | 577 | zeros_num -= r_zeros_number; |
790 | } | 578 | } |
791 | 579 | ||
@@ -798,25 +586,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
798 | 586 | ||
799 | /* Calculate key component and item length to insert into S[0] */ | 587 | /* Calculate key component and item length to insert into S[0] */ |
800 | set_le_ih_k_offset(ih, old_key_comp); | 588 | set_le_ih_k_offset(ih, old_key_comp); |
801 | put_ih_item_len(ih, | 589 | put_ih_item_len(ih, old_len - tb->rbytes); |
802 | old_len - tb->rbytes); | ||
803 | 590 | ||
804 | tb->insert_size[0] -= tb->rbytes; | 591 | tb->insert_size[0] -= tb->rbytes; |
805 | 592 | ||
806 | } else { /* whole new item falls into R[0] */ | 593 | } else { /* whole new item falls into R[0] */ |
807 | 594 | ||
808 | /* Shift rnum[0]-1 items to R[0] */ | 595 | /* Shift rnum[0]-1 items to R[0] */ |
809 | ret_val = | 596 | ret_val = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes); |
810 | leaf_shift_right(tb, | ||
811 | tb->rnum[0] - 1, | ||
812 | tb->rbytes); | ||
813 | /* Insert new item into R[0] */ | 597 | /* Insert new item into R[0] */ |
814 | buffer_info_init_right(tb, &bi); | 598 | buffer_info_init_right(tb, &bi); |
815 | leaf_insert_into_buf(&bi, | 599 | leaf_insert_into_buf(&bi, item_pos - n + tb->rnum[0] - 1, |
816 | item_pos - n + | 600 | ih, body, zeros_num); |
817 | tb->rnum[0] - 1, | ||
818 | ih, body, | ||
819 | zeros_num); | ||
820 | 601 | ||
821 | if (item_pos - n + tb->rnum[0] - 1 == 0) { | 602 | if (item_pos - n + tb->rnum[0] - 1 == 0) { |
822 | replace_key(tb, tb->CFR[0], | 603 | replace_key(tb, tb->CFR[0], |
@@ -841,200 +622,97 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
841 | 622 | ||
842 | RFALSE(zeros_num, | 623 | RFALSE(zeros_num, |
843 | "PAP-12145: invalid parameter in case of a directory"); | 624 | "PAP-12145: invalid parameter in case of a directory"); |
844 | entry_count = | 625 | entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD |
845 | I_ENTRY_COUNT(B_N_PITEM_HEAD | 626 | (tbS0, item_pos)); |
846 | (tbS0, | ||
847 | item_pos)); | ||
848 | if (entry_count - tb->rbytes < | 627 | if (entry_count - tb->rbytes < |
849 | pos_in_item) | 628 | pos_in_item) |
850 | /* new directory entry falls into R[0] */ | 629 | /* new directory entry falls into R[0] */ |
851 | { | 630 | { |
852 | int paste_entry_position; | 631 | int paste_entry_position; |
853 | 632 | ||
854 | RFALSE(tb->rbytes - 1 >= | 633 | RFALSE(tb->rbytes - 1 >= entry_count || !tb-> insert_size[0], |
855 | entry_count | ||
856 | || !tb-> | ||
857 | insert_size[0], | ||
858 | "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d", | 634 | "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d", |
859 | tb->rbytes, | 635 | tb->rbytes, entry_count); |
860 | entry_count); | ||
861 | /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */ | 636 | /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */ |
862 | leaf_shift_right(tb, | 637 | leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1); |
863 | tb-> | ||
864 | rnum | ||
865 | [0], | ||
866 | tb-> | ||
867 | rbytes | ||
868 | - 1); | ||
869 | /* Paste given directory entry to directory item */ | 638 | /* Paste given directory entry to directory item */ |
870 | paste_entry_position = | 639 | paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1; |
871 | pos_in_item - | ||
872 | entry_count + | ||
873 | tb->rbytes - 1; | ||
874 | buffer_info_init_right(tb, &bi); | 640 | buffer_info_init_right(tb, &bi); |
875 | leaf_paste_in_buffer | 641 | leaf_paste_in_buffer(&bi, 0, paste_entry_position, tb->insert_size[0], body, zeros_num); |
876 | (&bi, 0, | ||
877 | paste_entry_position, | ||
878 | tb->insert_size[0], | ||
879 | body, zeros_num); | ||
880 | /* paste entry */ | 642 | /* paste entry */ |
881 | leaf_paste_entries(&bi, | 643 | leaf_paste_entries(&bi, 0, paste_entry_position, 1, |
882 | 0, | 644 | (struct reiserfs_de_head *) body, |
883 | paste_entry_position, | 645 | body + DEH_SIZE, tb->insert_size[0]); |
884 | 1, | 646 | |
885 | (struct | 647 | if (paste_entry_position == 0) { |
886 | reiserfs_de_head | ||
887 | *) | ||
888 | body, | ||
889 | body | ||
890 | + | ||
891 | DEH_SIZE, | ||
892 | tb-> | ||
893 | insert_size | ||
894 | [0] | ||
895 | ); | ||
896 | |||
897 | if (paste_entry_position | ||
898 | == 0) { | ||
899 | /* change delimiting keys */ | 648 | /* change delimiting keys */ |
900 | replace_key(tb, | 649 | replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0],0); |
901 | tb-> | ||
902 | CFR | ||
903 | [0], | ||
904 | tb-> | ||
905 | rkey | ||
906 | [0], | ||
907 | tb-> | ||
908 | R | ||
909 | [0], | ||
910 | 0); | ||
911 | } | 650 | } |
912 | 651 | ||
913 | tb->insert_size[0] = 0; | 652 | tb->insert_size[0] = 0; |
914 | pos_in_item++; | 653 | pos_in_item++; |
915 | } else { /* new directory entry doesn't fall into R[0] */ | 654 | } else { /* new directory entry doesn't fall into R[0] */ |
916 | 655 | ||
917 | leaf_shift_right(tb, | 656 | leaf_shift_right(tb, tb->rnum[0], tb->rbytes); |
918 | tb-> | ||
919 | rnum | ||
920 | [0], | ||
921 | tb-> | ||
922 | rbytes); | ||
923 | } | 657 | } |
924 | } else { /* regular object */ | 658 | } else { /* regular object */ |
925 | 659 | ||
926 | int n_shift, n_rem, | 660 | int n_shift, n_rem, r_zeros_number; |
927 | r_zeros_number; | ||
928 | const char *r_body; | 661 | const char *r_body; |
929 | 662 | ||
930 | /* Calculate number of bytes which must be shifted from appended item */ | 663 | /* Calculate number of bytes which must be shifted from appended item */ |
931 | if ((n_shift = | 664 | if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0) |
932 | tb->rbytes - | ||
933 | tb->insert_size[0]) < 0) | ||
934 | n_shift = 0; | 665 | n_shift = 0; |
935 | 666 | ||
936 | RFALSE(pos_in_item != | 667 | RFALSE(pos_in_item != ih_item_len |
937 | ih_item_len | 668 | (B_N_PITEM_HEAD(tbS0, item_pos)), |
938 | (B_N_PITEM_HEAD | ||
939 | (tbS0, item_pos)), | ||
940 | "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d", | 669 | "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d", |
941 | pos_in_item, | 670 | pos_in_item, ih_item_len |
942 | ih_item_len | 671 | (B_N_PITEM_HEAD(tbS0, item_pos))); |
943 | (B_N_PITEM_HEAD | 672 | |
944 | (tbS0, item_pos))); | 673 | leaf_shift_right(tb, tb->rnum[0], n_shift); |
945 | |||
946 | leaf_shift_right(tb, | ||
947 | tb->rnum[0], | ||
948 | n_shift); | ||
949 | /* Calculate number of bytes which must remain in body after appending to R[0] */ | 674 | /* Calculate number of bytes which must remain in body after appending to R[0] */ |
950 | if ((n_rem = | 675 | if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0) |
951 | tb->insert_size[0] - | ||
952 | tb->rbytes) < 0) | ||
953 | n_rem = 0; | 676 | n_rem = 0; |
954 | 677 | ||
955 | { | 678 | { |
956 | int version; | 679 | int version; |
957 | unsigned long temp_rem = | 680 | unsigned long temp_rem = n_rem; |
958 | n_rem; | 681 | |
959 | 682 | version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0)); | |
960 | version = | 683 | if (is_indirect_le_key(version, B_N_PKEY(tb->R[0], 0))) { |
961 | ih_version | 684 | temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT); |
962 | (B_N_PITEM_HEAD | ||
963 | (tb->R[0], 0)); | ||
964 | if (is_indirect_le_key | ||
965 | (version, | ||
966 | B_N_PKEY(tb->R[0], | ||
967 | 0))) { | ||
968 | temp_rem = | ||
969 | n_rem << | ||
970 | (tb->tb_sb-> | ||
971 | s_blocksize_bits | ||
972 | - | ||
973 | UNFM_P_SHIFT); | ||
974 | } | 685 | } |
975 | set_le_key_k_offset | 686 | set_le_key_k_offset(version, B_N_PKEY(tb->R[0], 0), |
976 | (version, | 687 | le_key_k_offset(version, B_N_PKEY(tb->R[0], 0)) + temp_rem); |
977 | B_N_PKEY(tb->R[0], | 688 | set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]), |
978 | 0), | 689 | le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])) + temp_rem); |
979 | le_key_k_offset | ||
980 | (version, | ||
981 | B_N_PKEY(tb->R[0], | ||
982 | 0)) + | ||
983 | temp_rem); | ||
984 | set_le_key_k_offset | ||
985 | (version, | ||
986 | B_N_PDELIM_KEY(tb-> | ||
987 | CFR | ||
988 | [0], | ||
989 | tb-> | ||
990 | rkey | ||
991 | [0]), | ||
992 | le_key_k_offset | ||
993 | (version, | ||
994 | B_N_PDELIM_KEY | ||
995 | (tb->CFR[0], | ||
996 | tb->rkey[0])) + | ||
997 | temp_rem); | ||
998 | } | 690 | } |
999 | /* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem; | 691 | /* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem; |
1000 | k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/ | 692 | k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/ |
1001 | do_balance_mark_internal_dirty | 693 | do_balance_mark_internal_dirty(tb, tb->CFR[0], 0); |
1002 | (tb, tb->CFR[0], 0); | ||
1003 | 694 | ||
1004 | /* Append part of body into R[0] */ | 695 | /* Append part of body into R[0] */ |
1005 | buffer_info_init_right(tb, &bi); | 696 | buffer_info_init_right(tb, &bi); |
1006 | if (n_rem > zeros_num) { | 697 | if (n_rem > zeros_num) { |
1007 | r_zeros_number = 0; | 698 | r_zeros_number = 0; |
1008 | r_body = | 699 | r_body = body + n_rem - zeros_num; |
1009 | body + n_rem - | ||
1010 | zeros_num; | ||
1011 | } else { | 700 | } else { |
1012 | r_body = body; | 701 | r_body = body; |
1013 | r_zeros_number = | 702 | r_zeros_number = zeros_num - n_rem; |
1014 | zeros_num - n_rem; | 703 | zeros_num -= r_zeros_number; |
1015 | zeros_num -= | ||
1016 | r_zeros_number; | ||
1017 | } | 704 | } |
1018 | 705 | ||
1019 | leaf_paste_in_buffer(&bi, 0, | 706 | leaf_paste_in_buffer(&bi, 0, n_shift, |
1020 | n_shift, | 707 | tb->insert_size[0] - n_rem, |
1021 | tb-> | 708 | r_body, r_zeros_number); |
1022 | insert_size | 709 | |
1023 | [0] - | 710 | if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) { |
1024 | n_rem, | ||
1025 | r_body, | ||
1026 | r_zeros_number); | ||
1027 | |||
1028 | if (is_indirect_le_ih | ||
1029 | (B_N_PITEM_HEAD | ||
1030 | (tb->R[0], 0))) { | ||
1031 | #if 0 | 711 | #if 0 |
1032 | RFALSE(n_rem, | 712 | RFALSE(n_rem, |
1033 | "PAP-12160: paste more than one unformatted node pointer"); | 713 | "PAP-12160: paste more than one unformatted node pointer"); |
1034 | #endif | 714 | #endif |
1035 | set_ih_free_space | 715 | set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0); |
1036 | (B_N_PITEM_HEAD | ||
1037 | (tb->R[0], 0), 0); | ||
1038 | } | 716 | } |
1039 | tb->insert_size[0] = n_rem; | 717 | tb->insert_size[0] = n_rem; |
1040 | if (!n_rem) | 718 | if (!n_rem) |
@@ -1044,58 +722,28 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1044 | 722 | ||
1045 | struct item_head *pasted; | 723 | struct item_head *pasted; |
1046 | 724 | ||
1047 | ret_val = | 725 | ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes); |
1048 | leaf_shift_right(tb, tb->rnum[0], | ||
1049 | tb->rbytes); | ||
1050 | /* append item in R[0] */ | 726 | /* append item in R[0] */ |
1051 | if (pos_in_item >= 0) { | 727 | if (pos_in_item >= 0) { |
1052 | buffer_info_init_right(tb, &bi); | 728 | buffer_info_init_right(tb, &bi); |
1053 | leaf_paste_in_buffer(&bi, | 729 | leaf_paste_in_buffer(&bi, item_pos - n + tb->rnum[0], pos_in_item, |
1054 | item_pos - | 730 | tb->insert_size[0], body, zeros_num); |
1055 | n + | ||
1056 | tb-> | ||
1057 | rnum[0], | ||
1058 | pos_in_item, | ||
1059 | tb-> | ||
1060 | insert_size | ||
1061 | [0], body, | ||
1062 | zeros_num); | ||
1063 | } | 731 | } |
1064 | 732 | ||
1065 | /* paste new entry, if item is directory item */ | 733 | /* paste new entry, if item is directory item */ |
1066 | pasted = | 734 | pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]); |
1067 | B_N_PITEM_HEAD(tb->R[0], | 735 | if (is_direntry_le_ih(pasted) && pos_in_item >= 0) { |
1068 | item_pos - n + | 736 | leaf_paste_entries(&bi, item_pos - n + tb->rnum[0], |
1069 | tb->rnum[0]); | 737 | pos_in_item, 1, |
1070 | if (is_direntry_le_ih(pasted) | 738 | (struct reiserfs_de_head *) body, |
1071 | && pos_in_item >= 0) { | 739 | body + DEH_SIZE, tb->insert_size[0]); |
1072 | leaf_paste_entries(&bi, | ||
1073 | item_pos - | ||
1074 | n + | ||
1075 | tb->rnum[0], | ||
1076 | pos_in_item, | ||
1077 | 1, | ||
1078 | (struct | ||
1079 | reiserfs_de_head | ||
1080 | *)body, | ||
1081 | body + | ||
1082 | DEH_SIZE, | ||
1083 | tb-> | ||
1084 | insert_size | ||
1085 | [0] | ||
1086 | ); | ||
1087 | if (!pos_in_item) { | 740 | if (!pos_in_item) { |
1088 | 741 | ||
1089 | RFALSE(item_pos - n + | 742 | RFALSE(item_pos - n + tb->rnum[0], |
1090 | tb->rnum[0], | ||
1091 | "PAP-12165: directory item must be first item of node when pasting is in 0th position"); | 743 | "PAP-12165: directory item must be first item of node when pasting is in 0th position"); |
1092 | 744 | ||
1093 | /* update delimiting keys */ | 745 | /* update delimiting keys */ |
1094 | replace_key(tb, | 746 | replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); |
1095 | tb->CFR[0], | ||
1096 | tb->rkey[0], | ||
1097 | tb->R[0], | ||
1098 | 0); | ||
1099 | } | 747 | } |
1100 | } | 748 | } |
1101 | 749 | ||
@@ -1111,22 +759,16 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1111 | default: /* cases d and t */ | 759 | default: /* cases d and t */ |
1112 | reiserfs_panic(tb->tb_sb, "PAP-12175", | 760 | reiserfs_panic(tb->tb_sb, "PAP-12175", |
1113 | "rnum > 0: unexpected mode: %s(%d)", | 761 | "rnum > 0: unexpected mode: %s(%d)", |
1114 | (flag == | 762 | (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag); |
1115 | M_DELETE) ? "DELETE" : ((flag == | ||
1116 | M_CUT) ? "CUT" | ||
1117 | : "UNKNOWN"), | ||
1118 | flag); | ||
1119 | } | 763 | } |
1120 | 764 | ||
1121 | } | 765 | } |
1122 | 766 | ||
1123 | /* tb->rnum[0] > 0 */ | 767 | /* tb->rnum[0] > 0 */ |
1124 | RFALSE(tb->blknum[0] > 3, | 768 | RFALSE(tb->blknum[0] > 3, |
1125 | "PAP-12180: blknum can not be %d. It must be <= 3", | 769 | "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]); |
1126 | tb->blknum[0]); | ||
1127 | RFALSE(tb->blknum[0] < 0, | 770 | RFALSE(tb->blknum[0] < 0, |
1128 | "PAP-12185: blknum can not be %d. It must be >= 0", | 771 | "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]); |
1129 | tb->blknum[0]); | ||
1130 | 772 | ||
1131 | /* if while adding to a node we discover that it is possible to split | 773 | /* if while adding to a node we discover that it is possible to split |
1132 | it in two, and merge the left part into the left neighbor and the | 774 | it in two, and merge the left part into the left neighbor and the |
@@ -1177,8 +819,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1177 | 819 | ||
1178 | if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */ | 820 | if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */ |
1179 | if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */ | 821 | if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */ |
1180 | int old_key_comp, old_len, | 822 | int old_key_comp, old_len, r_zeros_number; |
1181 | r_zeros_number; | ||
1182 | const char *r_body; | 823 | const char *r_body; |
1183 | int version; | 824 | int version; |
1184 | 825 | ||
@@ -1192,15 +833,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1192 | old_len = ih_item_len(ih); | 833 | old_len = ih_item_len(ih); |
1193 | 834 | ||
1194 | /* Calculate key component and item length to insert into S_new[i] */ | 835 | /* Calculate key component and item length to insert into S_new[i] */ |
1195 | set_le_ih_k_offset(ih, | 836 | set_le_ih_k_offset(ih, le_ih_k_offset(ih) + |
1196 | le_ih_k_offset(ih) + | 837 | ((old_len - sbytes[i]) << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0))); |
1197 | ((old_len - | ||
1198 | sbytes[i]) << | ||
1199 | (is_indirect_le_ih | ||
1200 | (ih) ? tb->tb_sb-> | ||
1201 | s_blocksize_bits - | ||
1202 | UNFM_P_SHIFT : | ||
1203 | 0))); | ||
1204 | 838 | ||
1205 | put_ih_item_len(ih, sbytes[i]); | 839 | put_ih_item_len(ih, sbytes[i]); |
1206 | 840 | ||
@@ -1209,39 +843,29 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1209 | 843 | ||
1210 | if ((old_len - sbytes[i]) > zeros_num) { | 844 | if ((old_len - sbytes[i]) > zeros_num) { |
1211 | r_zeros_number = 0; | 845 | r_zeros_number = 0; |
1212 | r_body = | 846 | r_body = body + (old_len - sbytes[i]) - zeros_num; |
1213 | body + (old_len - | ||
1214 | sbytes[i]) - | ||
1215 | zeros_num; | ||
1216 | } else { | 847 | } else { |
1217 | r_body = body; | 848 | r_body = body; |
1218 | r_zeros_number = | 849 | r_zeros_number = zeros_num - (old_len - sbytes[i]); |
1219 | zeros_num - (old_len - | ||
1220 | sbytes[i]); | ||
1221 | zeros_num -= r_zeros_number; | 850 | zeros_num -= r_zeros_number; |
1222 | } | 851 | } |
1223 | 852 | ||
1224 | leaf_insert_into_buf(&bi, 0, ih, r_body, | 853 | leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeros_number); |
1225 | r_zeros_number); | ||
1226 | 854 | ||
1227 | /* Calculate key component and item length to insert into S[i] */ | 855 | /* Calculate key component and item length to insert into S[i] */ |
1228 | set_le_ih_k_offset(ih, old_key_comp); | 856 | set_le_ih_k_offset(ih, old_key_comp); |
1229 | put_ih_item_len(ih, | 857 | put_ih_item_len(ih, old_len - sbytes[i]); |
1230 | old_len - sbytes[i]); | ||
1231 | tb->insert_size[0] -= sbytes[i]; | 858 | tb->insert_size[0] -= sbytes[i]; |
1232 | } else { /* whole new item falls into S_new[i] */ | 859 | } else { /* whole new item falls into S_new[i] */ |
1233 | 860 | ||
1234 | /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */ | 861 | /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */ |
1235 | leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, | 862 | leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, |
1236 | snum[i] - 1, sbytes[i], | 863 | snum[i] - 1, sbytes[i], S_new[i]); |
1237 | S_new[i]); | ||
1238 | 864 | ||
1239 | /* Insert new item into S_new[i] */ | 865 | /* Insert new item into S_new[i] */ |
1240 | buffer_info_init_bh(tb, &bi, S_new[i]); | 866 | buffer_info_init_bh(tb, &bi, S_new[i]); |
1241 | leaf_insert_into_buf(&bi, | 867 | leaf_insert_into_buf(&bi, item_pos - n + snum[i] - 1, |
1242 | item_pos - n + | 868 | ih, body, zeros_num); |
1243 | snum[i] - 1, ih, | ||
1244 | body, zeros_num); | ||
1245 | 869 | ||
1246 | zeros_num = tb->insert_size[0] = 0; | 870 | zeros_num = tb->insert_size[0] = 0; |
1247 | } | 871 | } |
@@ -1268,150 +892,73 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1268 | 892 | ||
1269 | int entry_count; | 893 | int entry_count; |
1270 | 894 | ||
1271 | entry_count = | 895 | entry_count = ih_entry_count(aux_ih); |
1272 | ih_entry_count(aux_ih); | ||
1273 | 896 | ||
1274 | if (entry_count - sbytes[i] < | 897 | if (entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count) { |
1275 | pos_in_item | ||
1276 | && pos_in_item <= | ||
1277 | entry_count) { | ||
1278 | /* new directory entry falls into S_new[i] */ | 898 | /* new directory entry falls into S_new[i] */ |
1279 | 899 | ||
1280 | RFALSE(!tb-> | 900 | RFALSE(!tb->insert_size[0], "PAP-12215: insert_size is already 0"); |
1281 | insert_size[0], | 901 | RFALSE(sbytes[i] - 1 >= entry_count, |
1282 | "PAP-12215: insert_size is already 0"); | ||
1283 | RFALSE(sbytes[i] - 1 >= | ||
1284 | entry_count, | ||
1285 | "PAP-12220: there are no so much entries (%d), only %d", | 902 | "PAP-12220: there are no so much entries (%d), only %d", |
1286 | sbytes[i] - 1, | 903 | sbytes[i] - 1, entry_count); |
1287 | entry_count); | ||
1288 | 904 | ||
1289 | /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */ | 905 | /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */ |
1290 | leaf_move_items | 906 | leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i] - 1, S_new[i]); |
1291 | (LEAF_FROM_S_TO_SNEW, | ||
1292 | tb, snum[i], | ||
1293 | sbytes[i] - 1, | ||
1294 | S_new[i]); | ||
1295 | /* Paste given directory entry to directory item */ | 907 | /* Paste given directory entry to directory item */ |
1296 | buffer_info_init_bh(tb, &bi, S_new[i]); | 908 | buffer_info_init_bh(tb, &bi, S_new[i]); |
1297 | leaf_paste_in_buffer | 909 | leaf_paste_in_buffer(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, |
1298 | (&bi, 0, | 910 | tb->insert_size[0], body, zeros_num); |
1299 | pos_in_item - | ||
1300 | entry_count + | ||
1301 | sbytes[i] - 1, | ||
1302 | tb->insert_size[0], | ||
1303 | body, zeros_num); | ||
1304 | /* paste new directory entry */ | 911 | /* paste new directory entry */ |
1305 | leaf_paste_entries(&bi, | 912 | leaf_paste_entries(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1, |
1306 | 0, | 913 | (struct reiserfs_de_head *) body, |
1307 | pos_in_item | 914 | body + DEH_SIZE, tb->insert_size[0]); |
1308 | - | ||
1309 | entry_count | ||
1310 | + | ||
1311 | sbytes | ||
1312 | [i] - | ||
1313 | 1, 1, | ||
1314 | (struct | ||
1315 | reiserfs_de_head | ||
1316 | *) | ||
1317 | body, | ||
1318 | body | ||
1319 | + | ||
1320 | DEH_SIZE, | ||
1321 | tb-> | ||
1322 | insert_size | ||
1323 | [0] | ||
1324 | ); | ||
1325 | tb->insert_size[0] = 0; | 915 | tb->insert_size[0] = 0; |
1326 | pos_in_item++; | 916 | pos_in_item++; |
1327 | } else { /* new directory entry doesn't fall into S_new[i] */ | 917 | } else { /* new directory entry doesn't fall into S_new[i] */ |
1328 | leaf_move_items | 918 | leaf_move_items(LEAF_FROM_S_TO_SNEW,tb, snum[i], sbytes[i], S_new[i]); |
1329 | (LEAF_FROM_S_TO_SNEW, | ||
1330 | tb, snum[i], | ||
1331 | sbytes[i], | ||
1332 | S_new[i]); | ||
1333 | } | 919 | } |
1334 | } else { /* regular object */ | 920 | } else { /* regular object */ |
1335 | 921 | ||
1336 | int n_shift, n_rem, | 922 | int n_shift, n_rem, r_zeros_number; |
1337 | r_zeros_number; | ||
1338 | const char *r_body; | 923 | const char *r_body; |
1339 | 924 | ||
1340 | RFALSE(pos_in_item != | 925 | RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)) || tb->insert_size[0] <= 0, |
1341 | ih_item_len | ||
1342 | (B_N_PITEM_HEAD | ||
1343 | (tbS0, item_pos)) | ||
1344 | || tb->insert_size[0] <= | ||
1345 | 0, | ||
1346 | "PAP-12225: item too short or insert_size <= 0"); | 926 | "PAP-12225: item too short or insert_size <= 0"); |
1347 | 927 | ||
1348 | /* Calculate number of bytes which must be shifted from appended item */ | 928 | /* Calculate number of bytes which must be shifted from appended item */ |
1349 | n_shift = | 929 | n_shift = sbytes[i] - tb->insert_size[0]; |
1350 | sbytes[i] - | ||
1351 | tb->insert_size[0]; | ||
1352 | if (n_shift < 0) | 930 | if (n_shift < 0) |
1353 | n_shift = 0; | 931 | n_shift = 0; |
1354 | leaf_move_items | 932 | leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]); |
1355 | (LEAF_FROM_S_TO_SNEW, tb, | ||
1356 | snum[i], n_shift, | ||
1357 | S_new[i]); | ||
1358 | 933 | ||
1359 | /* Calculate number of bytes which must remain in body after append to S_new[i] */ | 934 | /* Calculate number of bytes which must remain in body after append to S_new[i] */ |
1360 | n_rem = | 935 | n_rem = tb->insert_size[0] - sbytes[i]; |
1361 | tb->insert_size[0] - | ||
1362 | sbytes[i]; | ||
1363 | if (n_rem < 0) | 936 | if (n_rem < 0) |
1364 | n_rem = 0; | 937 | n_rem = 0; |
1365 | /* Append part of body into S_new[0] */ | 938 | /* Append part of body into S_new[0] */ |
1366 | buffer_info_init_bh(tb, &bi, S_new[i]); | 939 | buffer_info_init_bh(tb, &bi, S_new[i]); |
1367 | if (n_rem > zeros_num) { | 940 | if (n_rem > zeros_num) { |
1368 | r_zeros_number = 0; | 941 | r_zeros_number = 0; |
1369 | r_body = | 942 | r_body = body + n_rem - zeros_num; |
1370 | body + n_rem - | ||
1371 | zeros_num; | ||
1372 | } else { | 943 | } else { |
1373 | r_body = body; | 944 | r_body = body; |
1374 | r_zeros_number = | 945 | r_zeros_number = zeros_num - n_rem; |
1375 | zeros_num - n_rem; | 946 | zeros_num -= r_zeros_number; |
1376 | zeros_num -= | ||
1377 | r_zeros_number; | ||
1378 | } | 947 | } |
1379 | 948 | ||
1380 | leaf_paste_in_buffer(&bi, 0, | 949 | leaf_paste_in_buffer(&bi, 0, n_shift, |
1381 | n_shift, | 950 | tb->insert_size[0] - n_rem, |
1382 | tb-> | 951 | r_body, r_zeros_number); |
1383 | insert_size | ||
1384 | [0] - | ||
1385 | n_rem, | ||
1386 | r_body, | ||
1387 | r_zeros_number); | ||
1388 | { | 952 | { |
1389 | struct item_head *tmp; | 953 | struct item_head *tmp; |
1390 | 954 | ||
1391 | tmp = | 955 | tmp = B_N_PITEM_HEAD(S_new[i], 0); |
1392 | B_N_PITEM_HEAD(S_new | ||
1393 | [i], | ||
1394 | 0); | ||
1395 | if (is_indirect_le_ih | 956 | if (is_indirect_le_ih |
1396 | (tmp)) { | 957 | (tmp)) { |
1397 | set_ih_free_space | 958 | set_ih_free_space(tmp, 0); |
1398 | (tmp, 0); | 959 | set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT))); |
1399 | set_le_ih_k_offset | ||
1400 | (tmp, | ||
1401 | le_ih_k_offset | ||
1402 | (tmp) + | ||
1403 | (n_rem << | ||
1404 | (tb-> | ||
1405 | tb_sb-> | ||
1406 | s_blocksize_bits | ||
1407 | - | ||
1408 | UNFM_P_SHIFT))); | ||
1409 | } else { | 960 | } else { |
1410 | set_le_ih_k_offset | 961 | set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + n_rem); |
1411 | (tmp, | ||
1412 | le_ih_k_offset | ||
1413 | (tmp) + | ||
1414 | n_rem); | ||
1415 | } | 962 | } |
1416 | } | 963 | } |
1417 | 964 | ||
@@ -1426,8 +973,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1426 | struct item_head *pasted; | 973 | struct item_head *pasted; |
1427 | 974 | ||
1428 | #ifdef CONFIG_REISERFS_CHECK | 975 | #ifdef CONFIG_REISERFS_CHECK |
1429 | struct item_head *ih_check = | 976 | struct item_head *ih_check = B_N_PITEM_HEAD(tbS0, item_pos); |
1430 | B_N_PITEM_HEAD(tbS0, item_pos); | ||
1431 | 977 | ||
1432 | if (!is_direntry_le_ih(ih_check) | 978 | if (!is_direntry_le_ih(ih_check) |
1433 | && (pos_in_item != ih_item_len(ih_check) | 979 | && (pos_in_item != ih_item_len(ih_check) |
@@ -1439,8 +985,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1439 | "to ih_item_len"); | 985 | "to ih_item_len"); |
1440 | #endif /* CONFIG_REISERFS_CHECK */ | 986 | #endif /* CONFIG_REISERFS_CHECK */ |
1441 | 987 | ||
1442 | leaf_mi = | 988 | leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, |
1443 | leaf_move_items(LEAF_FROM_S_TO_SNEW, | ||
1444 | tb, snum[i], | 989 | tb, snum[i], |
1445 | sbytes[i], | 990 | sbytes[i], |
1446 | S_new[i]); | 991 | S_new[i]); |
@@ -1452,30 +997,19 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1452 | /* paste into item */ | 997 | /* paste into item */ |
1453 | buffer_info_init_bh(tb, &bi, S_new[i]); | 998 | buffer_info_init_bh(tb, &bi, S_new[i]); |
1454 | leaf_paste_in_buffer(&bi, | 999 | leaf_paste_in_buffer(&bi, |
1455 | item_pos - n + | 1000 | item_pos - n + snum[i], |
1456 | snum[i], | ||
1457 | pos_in_item, | 1001 | pos_in_item, |
1458 | tb->insert_size[0], | 1002 | tb->insert_size[0], |
1459 | body, zeros_num); | 1003 | body, zeros_num); |
1460 | 1004 | ||
1461 | pasted = | 1005 | pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]); |
1462 | B_N_PITEM_HEAD(S_new[i], | ||
1463 | item_pos - n + | ||
1464 | snum[i]); | ||
1465 | if (is_direntry_le_ih(pasted)) { | 1006 | if (is_direntry_le_ih(pasted)) { |
1466 | leaf_paste_entries(&bi, | 1007 | leaf_paste_entries(&bi, |
1467 | item_pos - | 1008 | item_pos - n + snum[i], |
1468 | n + snum[i], | 1009 | pos_in_item, 1, |
1469 | pos_in_item, | 1010 | (struct reiserfs_de_head *)body, |
1470 | 1, | 1011 | body + DEH_SIZE, |
1471 | (struct | 1012 | tb->insert_size[0] |
1472 | reiserfs_de_head | ||
1473 | *)body, | ||
1474 | body + | ||
1475 | DEH_SIZE, | ||
1476 | tb-> | ||
1477 | insert_size | ||
1478 | [0] | ||
1479 | ); | 1013 | ); |
1480 | } | 1014 | } |
1481 | 1015 | ||
@@ -1495,11 +1029,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1495 | default: /* cases d and t */ | 1029 | default: /* cases d and t */ |
1496 | reiserfs_panic(tb->tb_sb, "PAP-12245", | 1030 | reiserfs_panic(tb->tb_sb, "PAP-12245", |
1497 | "blknum > 2: unexpected mode: %s(%d)", | 1031 | "blknum > 2: unexpected mode: %s(%d)", |
1498 | (flag == | 1032 | (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag); |
1499 | M_DELETE) ? "DELETE" : ((flag == | ||
1500 | M_CUT) ? "CUT" | ||
1501 | : "UNKNOWN"), | ||
1502 | flag); | ||
1503 | } | 1033 | } |
1504 | 1034 | ||
1505 | memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE); | 1035 | memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE); |
@@ -1524,9 +1054,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1524 | /* If we insert the first key change the delimiting key */ | 1054 | /* If we insert the first key change the delimiting key */ |
1525 | if (item_pos == 0) { | 1055 | if (item_pos == 0) { |
1526 | if (tb->CFL[0]) /* can be 0 in reiserfsck */ | 1056 | if (tb->CFL[0]) /* can be 0 in reiserfsck */ |
1527 | replace_key(tb, tb->CFL[0], tb->lkey[0], | 1057 | replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); |
1528 | tbS0, 0); | ||
1529 | |||
1530 | } | 1058 | } |
1531 | break; | 1059 | break; |
1532 | 1060 | ||
@@ -1536,53 +1064,27 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1536 | pasted = B_N_PITEM_HEAD(tbS0, item_pos); | 1064 | pasted = B_N_PITEM_HEAD(tbS0, item_pos); |
1537 | /* when directory, may be new entry already pasted */ | 1065 | /* when directory, may be new entry already pasted */ |
1538 | if (is_direntry_le_ih(pasted)) { | 1066 | if (is_direntry_le_ih(pasted)) { |
1539 | if (pos_in_item >= 0 && | 1067 | if (pos_in_item >= 0 && pos_in_item <= ih_entry_count(pasted)) { |
1540 | pos_in_item <= | ||
1541 | ih_entry_count(pasted)) { | ||
1542 | 1068 | ||
1543 | RFALSE(!tb->insert_size[0], | 1069 | RFALSE(!tb->insert_size[0], |
1544 | "PAP-12260: insert_size is 0 already"); | 1070 | "PAP-12260: insert_size is 0 already"); |
1545 | 1071 | ||
1546 | /* prepare space */ | 1072 | /* prepare space */ |
1547 | buffer_info_init_tbS0(tb, &bi); | 1073 | buffer_info_init_tbS0(tb, &bi); |
1548 | leaf_paste_in_buffer(&bi, | 1074 | leaf_paste_in_buffer(&bi, item_pos, pos_in_item, |
1549 | item_pos, | 1075 | tb->insert_size[0], body, |
1550 | pos_in_item, | ||
1551 | tb-> | ||
1552 | insert_size | ||
1553 | [0], body, | ||
1554 | zeros_num); | 1076 | zeros_num); |
1555 | 1077 | ||
1556 | /* paste entry */ | 1078 | /* paste entry */ |
1557 | leaf_paste_entries(&bi, | 1079 | leaf_paste_entries(&bi, item_pos, pos_in_item, 1, |
1558 | item_pos, | 1080 | (struct reiserfs_de_head *)body, |
1559 | pos_in_item, | 1081 | body + DEH_SIZE, |
1560 | 1, | 1082 | tb->insert_size[0]); |
1561 | (struct | ||
1562 | reiserfs_de_head | ||
1563 | *)body, | ||
1564 | body + | ||
1565 | DEH_SIZE, | ||
1566 | tb-> | ||
1567 | insert_size | ||
1568 | [0] | ||
1569 | ); | ||
1570 | if (!item_pos && !pos_in_item) { | 1083 | if (!item_pos && !pos_in_item) { |
1571 | RFALSE(!tb->CFL[0] | 1084 | RFALSE(!tb->CFL[0] || !tb->L[0], |
1572 | || !tb->L[0], | ||
1573 | "PAP-12270: CFL[0]/L[0] must be specified"); | 1085 | "PAP-12270: CFL[0]/L[0] must be specified"); |
1574 | if (tb->CFL[0]) { | 1086 | if (tb->CFL[0]) |
1575 | replace_key(tb, | 1087 | replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); |
1576 | tb-> | ||
1577 | CFL | ||
1578 | [0], | ||
1579 | tb-> | ||
1580 | lkey | ||
1581 | [0], | ||
1582 | tbS0, | ||
1583 | 0); | ||
1584 | |||
1585 | } | ||
1586 | } | 1088 | } |
1587 | tb->insert_size[0] = 0; | 1089 | tb->insert_size[0] = 0; |
1588 | } | 1090 | } |
@@ -1593,13 +1095,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1593 | "PAP-12275: insert size must not be %d", | 1095 | "PAP-12275: insert size must not be %d", |
1594 | tb->insert_size[0]); | 1096 | tb->insert_size[0]); |
1595 | buffer_info_init_tbS0(tb, &bi); | 1097 | buffer_info_init_tbS0(tb, &bi); |
1596 | leaf_paste_in_buffer(&bi, | 1098 | leaf_paste_in_buffer(&bi, item_pos, pos_in_item, |
1597 | item_pos, | 1099 | tb->insert_size[0], body, zeros_num); |
1598 | pos_in_item, | ||
1599 | tb-> | ||
1600 | insert_size | ||
1601 | [0], body, | ||
1602 | zeros_num); | ||
1603 | 1100 | ||
1604 | if (is_indirect_le_ih(pasted)) { | 1101 | if (is_indirect_le_ih(pasted)) { |
1605 | #if 0 | 1102 | #if 0 |
@@ -1611,8 +1108,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1611 | tb-> | 1108 | tb-> |
1612 | insert_size[0]); | 1109 | insert_size[0]); |
1613 | #endif | 1110 | #endif |
1614 | set_ih_free_space | 1111 | set_ih_free_space(pasted, 0); |
1615 | (pasted, 0); | ||
1616 | } | 1112 | } |
1617 | tb->insert_size[0] = 0; | 1113 | tb->insert_size[0] = 0; |
1618 | } | 1114 | } |
@@ -1620,8 +1116,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h | |||
1620 | else { | 1116 | else { |
1621 | if (tb->insert_size[0]) { | 1117 | if (tb->insert_size[0]) { |
1622 | print_cur_tb("12285"); | 1118 | print_cur_tb("12285"); |
1623 | reiserfs_panic(tb-> | 1119 | reiserfs_panic(tb->tb_sb, |
1624 | tb_sb, | ||
1625 | "PAP-12285", | 1120 | "PAP-12285", |
1626 | "insert_size " | 1121 | "insert_size " |
1627 | "must be 0 " | 1122 | "must be 0 " |
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 8e4f41d9af4d..34c7bdc06014 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -701,6 +701,18 @@ static inline pte_t pte_mknuma(pte_t pte) | |||
701 | } | 701 | } |
702 | #endif | 702 | #endif |
703 | 703 | ||
704 | #ifndef ptep_set_numa | ||
705 | static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, | ||
706 | pte_t *ptep) | ||
707 | { | ||
708 | pte_t ptent = *ptep; | ||
709 | |||
710 | ptent = pte_mknuma(ptent); | ||
711 | set_pte_at(mm, addr, ptep, ptent); | ||
712 | return; | ||
713 | } | ||
714 | #endif | ||
715 | |||
704 | #ifndef pmd_mknuma | 716 | #ifndef pmd_mknuma |
705 | static inline pmd_t pmd_mknuma(pmd_t pmd) | 717 | static inline pmd_t pmd_mknuma(pmd_t pmd) |
706 | { | 718 | { |
@@ -708,6 +720,18 @@ static inline pmd_t pmd_mknuma(pmd_t pmd) | |||
708 | return pmd_clear_flags(pmd, _PAGE_PRESENT); | 720 | return pmd_clear_flags(pmd, _PAGE_PRESENT); |
709 | } | 721 | } |
710 | #endif | 722 | #endif |
723 | |||
724 | #ifndef pmdp_set_numa | ||
725 | static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, | ||
726 | pmd_t *pmdp) | ||
727 | { | ||
728 | pmd_t pmd = *pmdp; | ||
729 | |||
730 | pmd = pmd_mknuma(pmd); | ||
731 | set_pmd_at(mm, addr, pmdp, pmd); | ||
732 | return; | ||
733 | } | ||
734 | #endif | ||
711 | #else | 735 | #else |
712 | extern int pte_numa(pte_t pte); | 736 | extern int pte_numa(pte_t pte); |
713 | extern int pmd_numa(pmd_t pmd); | 737 | extern int pmd_numa(pmd_t pmd); |
@@ -715,6 +739,8 @@ extern pte_t pte_mknonnuma(pte_t pte); | |||
715 | extern pmd_t pmd_mknonnuma(pmd_t pmd); | 739 | extern pmd_t pmd_mknonnuma(pmd_t pmd); |
716 | extern pte_t pte_mknuma(pte_t pte); | 740 | extern pte_t pte_mknuma(pte_t pte); |
717 | extern pmd_t pmd_mknuma(pmd_t pmd); | 741 | extern pmd_t pmd_mknuma(pmd_t pmd); |
742 | extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | ||
743 | extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); | ||
718 | #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ | 744 | #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ |
719 | #else | 745 | #else |
720 | static inline int pmd_numa(pmd_t pmd) | 746 | static inline int pmd_numa(pmd_t pmd) |
@@ -742,10 +768,23 @@ static inline pte_t pte_mknuma(pte_t pte) | |||
742 | return pte; | 768 | return pte; |
743 | } | 769 | } |
744 | 770 | ||
771 | static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, | ||
772 | pte_t *ptep) | ||
773 | { | ||
774 | return; | ||
775 | } | ||
776 | |||
777 | |||
745 | static inline pmd_t pmd_mknuma(pmd_t pmd) | 778 | static inline pmd_t pmd_mknuma(pmd_t pmd) |
746 | { | 779 | { |
747 | return pmd; | 780 | return pmd; |
748 | } | 781 | } |
782 | |||
783 | static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, | ||
784 | pmd_t *pmdp) | ||
785 | { | ||
786 | return ; | ||
787 | } | ||
749 | #endif /* CONFIG_NUMA_BALANCING */ | 788 | #endif /* CONFIG_NUMA_BALANCING */ |
750 | 789 | ||
751 | #endif /* CONFIG_MMU */ | 790 | #endif /* CONFIG_MMU */ |
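Editor's note: the asm-generic/pgtable.h hunk adds ptep_set_numa() and pmdp_set_numa() helpers that read the entry, apply pte_mknuma()/pmd_mknuma() and write it back with set_pte_at()/set_pmd_at() in one step, with empty stubs when NUMA balancing is compiled out. A hedged sketch of how a page-table walker might use the PTE variant; the function name is hypothetical and the caller is assumed to hold the page-table lock:

/* Illustrative only, not the actual mm/mprotect.c code. */
static void foo_mark_pte_for_numa_hinting(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *pte)
{
        pte_t entry = *pte;

        /* Only present, not-yet-NUMA entries are worth converting. */
        if (!pte_present(entry) || pte_numa(entry))
                return;

        /* Marks the entry PROT_NUMA (present cleared, NUMA flag set),
         * so the next access faults into the NUMA hinting path. */
        ptep_set_numa(vma->vm_mm, addr, pte);
}

Doing this through a single helper, rather than a modify_prot_start/commit pair, avoids leaving the entry transiently cleared while it is being converted.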
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 04086c5be930..04a7f31301f8 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -199,6 +199,9 @@ int drm_err(const char *func, const char *format, ...); | |||
199 | #define DRM_INFO(fmt, ...) \ | 199 | #define DRM_INFO(fmt, ...) \ |
200 | printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) | 200 | printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) |
201 | 201 | ||
202 | #define DRM_INFO_ONCE(fmt, ...) \ | ||
203 | printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) | ||
204 | |||
202 | /** | 205 | /** |
203 | * Debug output. | 206 | * Debug output. |
204 | * | 207 | * |
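Editor's note: DRM_INFO_ONCE() is DRM_INFO() built on printk_once(), so a message that would otherwise repeat on every frame or ioctl is emitted only on first use. Hypothetical driver snippet (the condition variable is made up):

        if (!foo_fb_format_supported)
                DRM_INFO_ONCE("unsupported framebuffer format, using fallback\n");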
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 71727b6210ae..8f3dee097579 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
@@ -907,6 +907,9 @@ struct drm_mode_config { | |||
907 | 907 | ||
908 | /* whether async page flip is supported or not */ | 908 | /* whether async page flip is supported or not */ |
909 | bool async_page_flip; | 909 | bool async_page_flip; |
910 | |||
911 | /* cursor size */ | ||
912 | uint32_t cursor_width, cursor_height; | ||
910 | }; | 913 | }; |
911 | 914 | ||
912 | #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) | 915 | #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) |
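Editor's note: the new cursor_width/cursor_height fields let a driver advertise its hardware cursor limits through drm_mode_config instead of userspace assuming 64x64. A hedged sketch of a driver filling them in at init time; the function name and the 256x256 limit are illustrative:

/* Sketch: driver mode-config init, values are hypothetical. */
static void foo_mode_config_init(struct drm_device *dev)
{
        drm_mode_config_init(dev);

        /* Advertise the hardware cursor limits to userspace. */
        dev->mode_config.cursor_width = 256;
        dev->mode_config.cursor_height = 256;
}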
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index d1f61bfe0ebe..49a828425fa2 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <drm/ttm/ttm_bo_driver.h> | 29 | #include <drm/ttm/ttm_bo_driver.h> |
30 | #include <drm/ttm/ttm_memory.h> | 30 | #include <drm/ttm/ttm_memory.h> |
31 | 31 | ||
32 | struct device; | ||
33 | |||
32 | /** | 34 | /** |
33 | * Initialize pool allocator. | 35 | * Initialize pool allocator. |
34 | */ | 36 | */ |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 70654521dab6..5a4d39b4686b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -250,6 +250,17 @@ static inline unsigned bio_segments(struct bio *bio) | |||
250 | struct bio_vec bv; | 250 | struct bio_vec bv; |
251 | struct bvec_iter iter; | 251 | struct bvec_iter iter; |
252 | 252 | ||
253 | /* | ||
254 | * We special case discard/write same, because they interpret bi_size | ||
255 | * differently: | ||
256 | */ | ||
257 | |||
258 | if (bio->bi_rw & REQ_DISCARD) | ||
259 | return 1; | ||
260 | |||
261 | if (bio->bi_rw & REQ_WRITE_SAME) | ||
262 | return 1; | ||
263 | |||
253 | bio_for_each_segment(bv, bio, iter) | 264 | bio_for_each_segment(bv, bio, iter) |
254 | segs++; | 265 | segs++; |
255 | 266 | ||
@@ -332,6 +343,7 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); | |||
332 | extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); | 343 | extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); |
333 | 344 | ||
334 | extern struct bio_set *fs_bio_set; | 345 | extern struct bio_set *fs_bio_set; |
346 | unsigned int bio_integrity_tag_size(struct bio *bio); | ||
335 | 347 | ||
336 | static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) | 348 | static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) |
337 | { | 349 | { |
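Editor's note: the bio_segments() change short-circuits REQ_DISCARD and REQ_WRITE_SAME bios to a single segment because for those requests bi_size describes a device range, not data held in bi_io_vec, so walking the bvecs gives a misleading count; bio_integrity_tag_size() is also re-declared here. A hedged sketch of a call site that benefits; the helper name is hypothetical:

/* Hypothetical call site: size an sg table for one bio. After this
 * change bio_segments() already reports 1 for discard/write-same,
 * so the caller needs no special case of its own. */
static int foo_alloc_sg_for_bio(struct bio *bio, struct scatterlist **sgp)
{
        unsigned int nents = bio_segments(bio);
        struct scatterlist *sg;

        sg = kmalloc_array(nents, sizeof(*sg), GFP_NOIO);
        if (!sg)
                return -ENOMEM;

        sg_init_table(sg, nents);
        *sgp = sg;
        return nents;
}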
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 161b23105b1e..18ba8a627f46 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -83,6 +83,8 @@ struct blk_mq_ops { | |||
83 | */ | 83 | */ |
84 | rq_timed_out_fn *timeout; | 84 | rq_timed_out_fn *timeout; |
85 | 85 | ||
86 | softirq_done_fn *complete; | ||
87 | |||
86 | /* | 88 | /* |
87 | * Override for hctx allocations (should probably go) | 89 | * Override for hctx allocations (should probably go) |
88 | */ | 90 | */ |
@@ -119,11 +121,12 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc | |||
119 | 121 | ||
120 | void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); | 122 | void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); |
121 | 123 | ||
122 | void blk_mq_insert_request(struct request_queue *, struct request *, bool); | 124 | void blk_mq_insert_request(struct request_queue *, struct request *, |
125 | bool, bool); | ||
123 | void blk_mq_run_queues(struct request_queue *q, bool async); | 126 | void blk_mq_run_queues(struct request_queue *q, bool async); |
124 | void blk_mq_free_request(struct request *rq); | 127 | void blk_mq_free_request(struct request *rq); |
125 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *); | 128 | bool blk_mq_can_queue(struct blk_mq_hw_ctx *); |
126 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); | 129 | struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp); |
127 | struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); | 130 | struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); |
128 | struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag); | 131 | struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag); |
129 | 132 | ||
@@ -133,6 +136,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); | |||
133 | 136 | ||
134 | void blk_mq_end_io(struct request *rq, int error); | 137 | void blk_mq_end_io(struct request *rq, int error); |
135 | 138 | ||
139 | void blk_mq_complete_request(struct request *rq); | ||
140 | |||
136 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); | 141 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); |
137 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); | 142 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); |
138 | void blk_mq_stop_hw_queues(struct request_queue *q); | 143 | void blk_mq_stop_hw_queues(struct request_queue *q); |
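Editor's note: the blk-mq hunks add a per-driver softirq_done_fn (.complete) to blk_mq_ops plus blk_mq_complete_request(), so a driver can hand a finished request back to the block layer from hard-IRQ context and defer the heavier completion work to its .complete callback; blk_mq_insert_request() also grows an extra bool. A hedged sketch of the two-stage completion; all foo_* names are hypothetical and the ops table is abbreviated:

static void foo_irq_saw_request_done(struct request *rq)
{
        /* Cheap part, safe in hard-IRQ context: queue the completion. */
        blk_mq_complete_request(rq);
}

static void foo_softirq_done(struct request *rq)
{
        /* Heavier part, runs via the new .complete hook. */
        blk_mq_end_io(rq, rq->errors ? -EIO : 0);
}

static struct blk_mq_ops foo_mq_ops = {
        /* ... queue_rq, map_queue, etc. ... */
        .complete       = foo_softirq_done,
};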
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8678c4322b44..4afa4f8f6090 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -98,7 +98,7 @@ struct request { | |||
98 | struct list_head queuelist; | 98 | struct list_head queuelist; |
99 | union { | 99 | union { |
100 | struct call_single_data csd; | 100 | struct call_single_data csd; |
101 | struct work_struct mq_flush_data; | 101 | struct work_struct mq_flush_work; |
102 | }; | 102 | }; |
103 | 103 | ||
104 | struct request_queue *q; | 104 | struct request_queue *q; |
@@ -448,13 +448,8 @@ struct request_queue { | |||
448 | unsigned long flush_pending_since; | 448 | unsigned long flush_pending_since; |
449 | struct list_head flush_queue[2]; | 449 | struct list_head flush_queue[2]; |
450 | struct list_head flush_data_in_flight; | 450 | struct list_head flush_data_in_flight; |
451 | union { | 451 | struct request *flush_rq; |
452 | struct request flush_rq; | 452 | spinlock_t mq_flush_lock; |
453 | struct { | ||
454 | spinlock_t mq_flush_lock; | ||
455 | struct work_struct mq_flush_work; | ||
456 | }; | ||
457 | }; | ||
458 | 453 | ||
459 | struct mutex sysfs_lock; | 454 | struct mutex sysfs_lock; |
460 | 455 | ||
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 2623cffc73a1..25bfb0eff772 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h | |||
@@ -373,8 +373,9 @@ extern const char *ceph_mds_op_name(int op); | |||
373 | /* | 373 | /* |
374 | * Ceph setxattr request flags. | 374 | * Ceph setxattr request flags. |
375 | */ | 375 | */ |
376 | #define CEPH_XATTR_CREATE 1 | 376 | #define CEPH_XATTR_CREATE (1 << 0) |
377 | #define CEPH_XATTR_REPLACE 2 | 377 | #define CEPH_XATTR_REPLACE (1 << 1) |
378 | #define CEPH_XATTR_REMOVE (1 << 31) | ||
378 | 379 | ||
379 | union ceph_mds_request_args { | 380 | union ceph_mds_request_args { |
380 | struct { | 381 | struct { |
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index ded429966c1f..2507fd2a1eb4 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h | |||
@@ -75,11 +75,7 @@ | |||
75 | * | 75 | * |
76 | * (asm goto is automatically volatile - the naming reflects this.) | 76 | * (asm goto is automatically volatile - the naming reflects this.) |
77 | */ | 77 | */ |
78 | #if GCC_VERSION <= 40801 | 78 | #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) |
79 | # define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) | ||
80 | #else | ||
81 | # define asm_volatile_goto(x...) do { asm goto(x); } while (0) | ||
82 | #endif | ||
83 | 79 | ||
84 | #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP | 80 | #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP |
85 | #if GCC_VERSION >= 40400 | 81 | #if GCC_VERSION >= 40400 |
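Editor's note: the compiler-gcc4.h change stops keying the asm_volatile_goto() workaround off GCC_VERSION and always appends an empty asm statement after the asm goto. The macro is used wherever the kernel expresses a control transfer in inline assembly, most visibly in the jump-label/static-key code. A stripped-down, x86-flavoured, purely illustrative use:

/* Illustrative only; the real users are the jump-label macros. */
static __always_inline bool foo_branch_taken(void)
{
        asm_volatile_goto("jmp %l[l_yes]"
                          : /* no outputs allowed with asm goto */
                          : /* no inputs */
                          : /* no clobbers */
                          : l_yes);
        return false;
l_yes:
        return true;
}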
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index dfac5ed31120..f886985a28b2 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h | |||
@@ -171,7 +171,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, | |||
171 | size_t size, int flags, const char *); | 171 | size_t size, int flags, const char *); |
172 | 172 | ||
173 | #define dma_buf_export(priv, ops, size, flags) \ | 173 | #define dma_buf_export(priv, ops, size, flags) \ |
174 | dma_buf_export_named(priv, ops, size, flags, __FILE__) | 174 | dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME) |
175 | 175 | ||
176 | int dma_buf_fd(struct dma_buf *dmabuf, int flags); | 176 | int dma_buf_fd(struct dma_buf *dmabuf, int flags); |
177 | struct dma_buf *dma_buf_get(int fd); | 177 | struct dma_buf *dma_buf_get(int fd); |
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 4d34dbbbad4d..7a8144fef406 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h | |||
@@ -4,8 +4,6 @@ | |||
4 | #include <linux/err.h> | 4 | #include <linux/err.h> |
5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
6 | 6 | ||
7 | #ifdef CONFIG_GPIOLIB | ||
8 | |||
9 | struct device; | 7 | struct device; |
10 | struct gpio_chip; | 8 | struct gpio_chip; |
11 | 9 | ||
@@ -18,6 +16,8 @@ struct gpio_chip; | |||
18 | */ | 16 | */ |
19 | struct gpio_desc; | 17 | struct gpio_desc; |
20 | 18 | ||
19 | #ifdef CONFIG_GPIOLIB | ||
20 | |||
21 | /* Acquire and dispose GPIOs */ | 21 | /* Acquire and dispose GPIOs */ |
22 | struct gpio_desc *__must_check gpiod_get(struct device *dev, | 22 | struct gpio_desc *__must_check gpiod_get(struct device *dev, |
23 | const char *con_id); | 23 | const char *con_id); |
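Editor's note: the gpio/consumer.h hunk moves the struct device / gpio_chip / gpio_desc forward declarations outside the #ifdef CONFIG_GPIOLIB block, so the prototypes and the inline stubs used when GPIOLIB is disabled can still name those types. A hedged sketch of the same pattern with hypothetical names:

struct foo_desc;                        /* opaque type, always visible */

#ifdef CONFIG_FOO
struct foo_desc *foo_get(struct device *dev);
#else
static inline struct foo_desc *foo_get(struct device *dev)
{
        return NULL;                    /* stub compiles: type is known */
}
#endif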
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 15da677478dd..344883dce584 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
@@ -875,7 +875,7 @@ struct vmbus_channel_relid_released { | |||
875 | struct vmbus_channel_initiate_contact { | 875 | struct vmbus_channel_initiate_contact { |
876 | struct vmbus_channel_message_header header; | 876 | struct vmbus_channel_message_header header; |
877 | u32 vmbus_version_requested; | 877 | u32 vmbus_version_requested; |
878 | u32 padding2; | 878 | u32 target_vcpu; /* The VCPU the host should respond to */ |
879 | u64 interrupt_page; | 879 | u64 interrupt_page; |
880 | u64 monitor_page1; | 880 | u64 monitor_page1; |
881 | u64 monitor_page2; | 881 | u64 monitor_page2; |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 0053adde0ed9..a2678d35b5a2 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -158,6 +158,11 @@ devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, | |||
158 | devname, dev_id); | 158 | devname, dev_id); |
159 | } | 159 | } |
160 | 160 | ||
161 | extern int __must_check | ||
162 | devm_request_any_context_irq(struct device *dev, unsigned int irq, | ||
163 | irq_handler_t handler, unsigned long irqflags, | ||
164 | const char *devname, void *dev_id); | ||
165 | |||
161 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); | 166 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); |
162 | 167 | ||
163 | /* | 168 | /* |
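Editor's note: devm_request_any_context_irq() is the managed counterpart of request_any_context_irq(): it installs either a hardirq or a threaded handler depending on what the underlying irqchip supports, and the devm_ lifetime removes the explicit free_irq() from error and remove paths. A hedged probe-time sketch; all foo_* names are hypothetical:

struct foo_priv {
        void __iomem *regs;             /* hypothetical private data */
};

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *priv;
        int irq, ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        /* Returns a negative errno, or IRQC_IS_HARDIRQ / IRQC_IS_NESTED. */
        ret = devm_request_any_context_irq(&pdev->dev, irq, foo_irq_handler,
                                           IRQF_TRIGGER_FALLING, "foo", priv);
        if (ret < 0)
                return ret;

        return 0;
}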
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 554548cd3dd4..130bc8d77fa5 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -38,8 +38,10 @@ | |||
38 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
39 | #include <linux/spinlock_types.h> | 39 | #include <linux/spinlock_types.h> |
40 | #include <linux/semaphore.h> | 40 | #include <linux/semaphore.h> |
41 | #include <linux/slab.h> | ||
41 | #include <linux/vmalloc.h> | 42 | #include <linux/vmalloc.h> |
42 | #include <linux/radix-tree.h> | 43 | #include <linux/radix-tree.h> |
44 | |||
43 | #include <linux/mlx5/device.h> | 45 | #include <linux/mlx5/device.h> |
44 | #include <linux/mlx5/doorbell.h> | 46 | #include <linux/mlx5/doorbell.h> |
45 | 47 | ||
@@ -227,6 +229,7 @@ struct mlx5_uuar_info { | |||
227 | * protect uuar allocation data structs | 229 | * protect uuar allocation data structs |
228 | */ | 230 | */ |
229 | struct mutex lock; | 231 | struct mutex lock; |
232 | u32 ver; | ||
230 | }; | 233 | }; |
231 | 234 | ||
232 | struct mlx5_bf { | 235 | struct mlx5_bf { |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 891432a994c0..5e4756553c18 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -752,6 +752,9 @@ struct netdev_phys_port_id { | |||
752 | unsigned char id_len; | 752 | unsigned char id_len; |
753 | }; | 753 | }; |
754 | 754 | ||
755 | typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | ||
756 | struct sk_buff *skb); | ||
757 | |||
755 | /* | 758 | /* |
756 | * This structure defines the management hooks for network devices. | 759 | * This structure defines the management hooks for network devices. |
757 | * The following hooks can be defined; unless noted otherwise, they are | 760 | * The following hooks can be defined; unless noted otherwise, they are |
@@ -783,7 +786,7 @@ struct netdev_phys_port_id { | |||
783 | * Required can not be NULL. | 786 | * Required can not be NULL. |
784 | * | 787 | * |
785 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, | 788 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
786 | * void *accel_priv); | 789 | * void *accel_priv, select_queue_fallback_t fallback); |
787 | * Called to decide which queue to use when the device supports multiple | 790 | * Called to decide which queue to use when the device supports multiple |
788 | * transmit queues. | 791 | * transmit queues. |
789 | * | 792 | * |
@@ -1005,7 +1008,8 @@ struct net_device_ops { | |||
1005 | struct net_device *dev); | 1008 | struct net_device *dev); |
1006 | u16 (*ndo_select_queue)(struct net_device *dev, | 1009 | u16 (*ndo_select_queue)(struct net_device *dev, |
1007 | struct sk_buff *skb, | 1010 | struct sk_buff *skb, |
1008 | void *accel_priv); | 1011 | void *accel_priv, |
1012 | select_queue_fallback_t fallback); | ||
1009 | void (*ndo_change_rx_flags)(struct net_device *dev, | 1013 | void (*ndo_change_rx_flags)(struct net_device *dev, |
1010 | int flags); | 1014 | int flags); |
1011 | void (*ndo_set_rx_mode)(struct net_device *dev); | 1015 | void (*ndo_set_rx_mode)(struct net_device *dev); |
@@ -1545,7 +1549,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, | |||
1545 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, | 1549 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
1546 | struct sk_buff *skb, | 1550 | struct sk_buff *skb, |
1547 | void *accel_priv); | 1551 | void *accel_priv); |
1548 | u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); | ||
1549 | 1552 | ||
1550 | /* | 1553 | /* |
1551 | * Net namespace inlines | 1554 | * Net namespace inlines |
@@ -2284,6 +2287,26 @@ static inline void netdev_reset_queue(struct net_device *dev_queue) | |||
2284 | } | 2287 | } |
2285 | 2288 | ||
2286 | /** | 2289 | /** |
2290 | * netdev_cap_txqueue - check if selected tx queue exceeds device queues | ||
2291 | * @dev: network device | ||
2292 | * @queue_index: given tx queue index | ||
2293 | * | ||
2294 | * Returns 0 if given tx queue index >= number of device tx queues, | ||
2295 | * otherwise returns the originally passed tx queue index. | ||
2296 | */ | ||
2297 | static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) | ||
2298 | { | ||
2299 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | ||
2300 | net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", | ||
2301 | dev->name, queue_index, | ||
2302 | dev->real_num_tx_queues); | ||
2303 | return 0; | ||
2304 | } | ||
2305 | |||
2306 | return queue_index; | ||
2307 | } | ||
2308 | |||
2309 | /** | ||
2287 | * netif_running - test if up | 2310 | * netif_running - test if up |
2288 | * @dev: network device | 2311 | * @dev: network device |
2289 | * | 2312 | * |
@@ -3076,7 +3099,12 @@ void netdev_change_features(struct net_device *dev); | |||
3076 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, | 3099 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, |
3077 | struct net_device *dev); | 3100 | struct net_device *dev); |
3078 | 3101 | ||
3079 | netdev_features_t netif_skb_features(struct sk_buff *skb); | 3102 | netdev_features_t netif_skb_dev_features(struct sk_buff *skb, |
3103 | const struct net_device *dev); | ||
3104 | static inline netdev_features_t netif_skb_features(struct sk_buff *skb) | ||
3105 | { | ||
3106 | return netif_skb_dev_features(skb, skb->dev); | ||
3107 | } | ||
3080 | 3108 | ||
3081 | static inline bool net_gso_ok(netdev_features_t features, int gso_type) | 3109 | static inline bool net_gso_ok(netdev_features_t features, int gso_type) |
3082 | { | 3110 | { |
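Taken together, the netdevice.h hunks give ndo_select_queue() a fallback selector and add netdev_cap_txqueue() for clamping out-of-range choices. A hedged sketch of how a multiqueue driver might use both; the handler name and the priority rule are invented for illustration:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Illustrative policy: pin prioritised skbs to queue 0, let the core's
     * fallback pick for everything else, then clamp to the real queue count. */
    static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
                               void *accel_priv, select_queue_fallback_t fallback)
    {
        u16 queue = skb->priority ? 0 : fallback(dev, skb);

        return netdev_cap_txqueue(dev, queue);
    }

    static const struct net_device_ops my_netdev_ops = {
        .ndo_select_queue = my_select_queue,
        /* ... remaining ops (ndo_start_xmit etc.) elided ... */
    };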
diff --git a/include/linux/of.h b/include/linux/of.h index 70c64ba17fa5..435cb995904d 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -169,35 +169,15 @@ static inline const char *of_node_full_name(const struct device_node *np) | |||
169 | 169 | ||
170 | extern struct device_node *of_find_node_by_name(struct device_node *from, | 170 | extern struct device_node *of_find_node_by_name(struct device_node *from, |
171 | const char *name); | 171 | const char *name); |
172 | #define for_each_node_by_name(dn, name) \ | ||
173 | for (dn = of_find_node_by_name(NULL, name); dn; \ | ||
174 | dn = of_find_node_by_name(dn, name)) | ||
175 | extern struct device_node *of_find_node_by_type(struct device_node *from, | 172 | extern struct device_node *of_find_node_by_type(struct device_node *from, |
176 | const char *type); | 173 | const char *type); |
177 | #define for_each_node_by_type(dn, type) \ | ||
178 | for (dn = of_find_node_by_type(NULL, type); dn; \ | ||
179 | dn = of_find_node_by_type(dn, type)) | ||
180 | extern struct device_node *of_find_compatible_node(struct device_node *from, | 174 | extern struct device_node *of_find_compatible_node(struct device_node *from, |
181 | const char *type, const char *compat); | 175 | const char *type, const char *compat); |
182 | #define for_each_compatible_node(dn, type, compatible) \ | ||
183 | for (dn = of_find_compatible_node(NULL, type, compatible); dn; \ | ||
184 | dn = of_find_compatible_node(dn, type, compatible)) | ||
185 | extern struct device_node *of_find_matching_node_and_match( | 176 | extern struct device_node *of_find_matching_node_and_match( |
186 | struct device_node *from, | 177 | struct device_node *from, |
187 | const struct of_device_id *matches, | 178 | const struct of_device_id *matches, |
188 | const struct of_device_id **match); | 179 | const struct of_device_id **match); |
189 | static inline struct device_node *of_find_matching_node( | 180 | |
190 | struct device_node *from, | ||
191 | const struct of_device_id *matches) | ||
192 | { | ||
193 | return of_find_matching_node_and_match(from, matches, NULL); | ||
194 | } | ||
195 | #define for_each_matching_node(dn, matches) \ | ||
196 | for (dn = of_find_matching_node(NULL, matches); dn; \ | ||
197 | dn = of_find_matching_node(dn, matches)) | ||
198 | #define for_each_matching_node_and_match(dn, matches, match) \ | ||
199 | for (dn = of_find_matching_node_and_match(NULL, matches, match); \ | ||
200 | dn; dn = of_find_matching_node_and_match(dn, matches, match)) | ||
201 | extern struct device_node *of_find_node_by_path(const char *path); | 181 | extern struct device_node *of_find_node_by_path(const char *path); |
202 | extern struct device_node *of_find_node_by_phandle(phandle handle); | 182 | extern struct device_node *of_find_node_by_phandle(phandle handle); |
203 | extern struct device_node *of_get_parent(const struct device_node *node); | 183 | extern struct device_node *of_get_parent(const struct device_node *node); |
@@ -209,43 +189,11 @@ extern struct device_node *of_get_next_available_child( | |||
209 | 189 | ||
210 | extern struct device_node *of_get_child_by_name(const struct device_node *node, | 190 | extern struct device_node *of_get_child_by_name(const struct device_node *node, |
211 | const char *name); | 191 | const char *name); |
212 | #define for_each_child_of_node(parent, child) \ | ||
213 | for (child = of_get_next_child(parent, NULL); child != NULL; \ | ||
214 | child = of_get_next_child(parent, child)) | ||
215 | |||
216 | #define for_each_available_child_of_node(parent, child) \ | ||
217 | for (child = of_get_next_available_child(parent, NULL); child != NULL; \ | ||
218 | child = of_get_next_available_child(parent, child)) | ||
219 | |||
220 | static inline int of_get_child_count(const struct device_node *np) | ||
221 | { | ||
222 | struct device_node *child; | ||
223 | int num = 0; | ||
224 | |||
225 | for_each_child_of_node(np, child) | ||
226 | num++; | ||
227 | |||
228 | return num; | ||
229 | } | ||
230 | |||
231 | static inline int of_get_available_child_count(const struct device_node *np) | ||
232 | { | ||
233 | struct device_node *child; | ||
234 | int num = 0; | ||
235 | |||
236 | for_each_available_child_of_node(np, child) | ||
237 | num++; | ||
238 | |||
239 | return num; | ||
240 | } | ||
241 | 192 | ||
242 | /* cache lookup */ | 193 | /* cache lookup */ |
243 | extern struct device_node *of_find_next_cache_node(const struct device_node *); | 194 | extern struct device_node *of_find_next_cache_node(const struct device_node *); |
244 | extern struct device_node *of_find_node_with_property( | 195 | extern struct device_node *of_find_node_with_property( |
245 | struct device_node *from, const char *prop_name); | 196 | struct device_node *from, const char *prop_name); |
246 | #define for_each_node_with_property(dn, prop_name) \ | ||
247 | for (dn = of_find_node_with_property(NULL, prop_name); dn; \ | ||
248 | dn = of_find_node_with_property(dn, prop_name)) | ||
249 | 197 | ||
250 | extern struct property *of_find_property(const struct device_node *np, | 198 | extern struct property *of_find_property(const struct device_node *np, |
251 | const char *name, | 199 | const char *name, |
@@ -367,42 +315,53 @@ static inline struct device_node *of_find_node_by_name(struct device_node *from, | |||
367 | return NULL; | 315 | return NULL; |
368 | } | 316 | } |
369 | 317 | ||
370 | static inline struct device_node *of_get_parent(const struct device_node *node) | 318 | static inline struct device_node *of_find_node_by_type(struct device_node *from, |
319 | const char *type) | ||
371 | { | 320 | { |
372 | return NULL; | 321 | return NULL; |
373 | } | 322 | } |
374 | 323 | ||
375 | static inline bool of_have_populated_dt(void) | 324 | static inline struct device_node *of_find_matching_node_and_match( |
325 | struct device_node *from, | ||
326 | const struct of_device_id *matches, | ||
327 | const struct of_device_id **match) | ||
376 | { | 328 | { |
377 | return false; | 329 | return NULL; |
378 | } | 330 | } |
379 | 331 | ||
380 | /* Kill an unused variable warning on a device_node pointer */ | 332 | static inline struct device_node *of_get_parent(const struct device_node *node) |
381 | static inline void __of_use_dn(const struct device_node *np) | ||
382 | { | 333 | { |
334 | return NULL; | ||
383 | } | 335 | } |
384 | 336 | ||
385 | #define for_each_child_of_node(parent, child) \ | 337 | static inline struct device_node *of_get_next_child( |
386 | while (__of_use_dn(parent), __of_use_dn(child), 0) | 338 | const struct device_node *node, struct device_node *prev) |
339 | { | ||
340 | return NULL; | ||
341 | } | ||
387 | 342 | ||
388 | #define for_each_available_child_of_node(parent, child) \ | 343 | static inline struct device_node *of_get_next_available_child( |
389 | while (0) | 344 | const struct device_node *node, struct device_node *prev) |
345 | { | ||
346 | return NULL; | ||
347 | } | ||
390 | 348 | ||
391 | static inline struct device_node *of_get_child_by_name( | 349 | static inline struct device_node *of_find_node_with_property( |
392 | const struct device_node *node, | 350 | struct device_node *from, const char *prop_name) |
393 | const char *name) | ||
394 | { | 351 | { |
395 | return NULL; | 352 | return NULL; |
396 | } | 353 | } |
397 | 354 | ||
398 | static inline int of_get_child_count(const struct device_node *np) | 355 | static inline bool of_have_populated_dt(void) |
399 | { | 356 | { |
400 | return 0; | 357 | return false; |
401 | } | 358 | } |
402 | 359 | ||
403 | static inline int of_get_available_child_count(const struct device_node *np) | 360 | static inline struct device_node *of_get_child_by_name( |
361 | const struct device_node *node, | ||
362 | const char *name) | ||
404 | { | 363 | { |
405 | return 0; | 364 | return NULL; |
406 | } | 365 | } |
407 | 366 | ||
408 | static inline int of_device_is_compatible(const struct device_node *device, | 367 | static inline int of_device_is_compatible(const struct device_node *device, |
@@ -569,6 +528,13 @@ extern int of_node_to_nid(struct device_node *np); | |||
569 | static inline int of_node_to_nid(struct device_node *device) { return 0; } | 528 | static inline int of_node_to_nid(struct device_node *device) { return 0; } |
570 | #endif | 529 | #endif |
571 | 530 | ||
531 | static inline struct device_node *of_find_matching_node( | ||
532 | struct device_node *from, | ||
533 | const struct of_device_id *matches) | ||
534 | { | ||
535 | return of_find_matching_node_and_match(from, matches, NULL); | ||
536 | } | ||
537 | |||
572 | /** | 538 | /** |
573 | * of_property_read_bool - Find a property | 539 | * of_property_read_bool - Find a property |
574 | * @np: device node from which the property value is to be read. | 540 | * @np: device node from which the property value is to be read. |
@@ -618,6 +584,55 @@ static inline int of_property_read_u32(const struct device_node *np, | |||
618 | s; \ | 584 | s; \ |
619 | s = of_prop_next_string(prop, s)) | 585 | s = of_prop_next_string(prop, s)) |
620 | 586 | ||
587 | #define for_each_node_by_name(dn, name) \ | ||
588 | for (dn = of_find_node_by_name(NULL, name); dn; \ | ||
589 | dn = of_find_node_by_name(dn, name)) | ||
590 | #define for_each_node_by_type(dn, type) \ | ||
591 | for (dn = of_find_node_by_type(NULL, type); dn; \ | ||
592 | dn = of_find_node_by_type(dn, type)) | ||
593 | #define for_each_compatible_node(dn, type, compatible) \ | ||
594 | for (dn = of_find_compatible_node(NULL, type, compatible); dn; \ | ||
595 | dn = of_find_compatible_node(dn, type, compatible)) | ||
596 | #define for_each_matching_node(dn, matches) \ | ||
597 | for (dn = of_find_matching_node(NULL, matches); dn; \ | ||
598 | dn = of_find_matching_node(dn, matches)) | ||
599 | #define for_each_matching_node_and_match(dn, matches, match) \ | ||
600 | for (dn = of_find_matching_node_and_match(NULL, matches, match); \ | ||
601 | dn; dn = of_find_matching_node_and_match(dn, matches, match)) | ||
602 | |||
603 | #define for_each_child_of_node(parent, child) \ | ||
604 | for (child = of_get_next_child(parent, NULL); child != NULL; \ | ||
605 | child = of_get_next_child(parent, child)) | ||
606 | #define for_each_available_child_of_node(parent, child) \ | ||
607 | for (child = of_get_next_available_child(parent, NULL); child != NULL; \ | ||
608 | child = of_get_next_available_child(parent, child)) | ||
609 | |||
610 | #define for_each_node_with_property(dn, prop_name) \ | ||
611 | for (dn = of_find_node_with_property(NULL, prop_name); dn; \ | ||
612 | dn = of_find_node_with_property(dn, prop_name)) | ||
613 | |||
614 | static inline int of_get_child_count(const struct device_node *np) | ||
615 | { | ||
616 | struct device_node *child; | ||
617 | int num = 0; | ||
618 | |||
619 | for_each_child_of_node(np, child) | ||
620 | num++; | ||
621 | |||
622 | return num; | ||
623 | } | ||
624 | |||
625 | static inline int of_get_available_child_count(const struct device_node *np) | ||
626 | { | ||
627 | struct device_node *child; | ||
628 | int num = 0; | ||
629 | |||
630 | for_each_available_child_of_node(np, child) | ||
631 | num++; | ||
632 | |||
633 | return num; | ||
634 | } | ||
635 | |||
621 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE) | 636 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE) |
622 | extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *); | 637 | extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *); |
623 | extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop); | 638 | extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop); |
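The of.h reorganisation only moves the iterator macros and child-count helpers below the CONFIG_OF stubs, so callers keep working whether or not OF is enabled. A short usage sketch (the function name and the idea of a "ports" parent node are illustrative):

    #include <linux/of.h>
    #include <linux/printk.h>

    /* Dump the children of a parent node, e.g. a hypothetical "ports" node. */
    static void my_dump_children(struct device_node *parent)
    {
        struct device_node *child;

        pr_info("%d children, %d available\n",
                of_get_child_count(parent),
                of_get_available_child_count(parent));

        for_each_available_child_of_node(parent, child)
            pr_info("  %s\n", of_node_full_name(child));
    }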
diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 8d7dd6768cb7..ef370210ffb2 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h | |||
@@ -78,11 +78,13 @@ static inline int of_device_uevent_modalias(struct device *dev, | |||
78 | 78 | ||
79 | static inline void of_device_node_put(struct device *dev) { } | 79 | static inline void of_device_node_put(struct device *dev) { } |
80 | 80 | ||
81 | static inline const struct of_device_id *of_match_device( | 81 | static inline const struct of_device_id *__of_match_device( |
82 | const struct of_device_id *matches, const struct device *dev) | 82 | const struct of_device_id *matches, const struct device *dev) |
83 | { | 83 | { |
84 | return NULL; | 84 | return NULL; |
85 | } | 85 | } |
86 | #define of_match_device(matches, dev) \ | ||
87 | __of_match_device(of_match_ptr(matches), (dev)) | ||
86 | 88 | ||
87 | static inline struct device_node *of_cpu_device_node_get(int cpu) | 89 | static inline struct device_node *of_cpu_device_node_get(int cpu) |
88 | { | 90 | { |
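The !CONFIG_OF stub of of_match_device() becomes a macro over __of_match_device(of_match_ptr(matches), dev); since of_match_ptr() is NULL without OF support, the match table is no longer referenced on non-OF builds. A hedged probe-time sketch (compatible string and names are made up):

    #include <linux/of.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    static const struct of_device_id my_of_match[] = {
        { .compatible = "vendor,my-device" },   /* illustrative */
        { }
    };

    static int my_probe(struct platform_device *pdev)
    {
        const struct of_device_id *match;

        /* With CONFIG_OF=n this now compiles to
         * __of_match_device(NULL, &pdev->dev) and simply returns NULL. */
        match = of_match_device(my_of_match, &pdev->dev);
        if (!match)
            return -ENODEV;

        return 0;
    }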
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index e273e5ac19c9..3f83459dbb20 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h | |||
@@ -146,7 +146,9 @@ static inline void phy_set_bus_width(struct phy *phy, int bus_width) | |||
146 | phy->attrs.bus_width = bus_width; | 146 | phy->attrs.bus_width = bus_width; |
147 | } | 147 | } |
148 | struct phy *phy_get(struct device *dev, const char *string); | 148 | struct phy *phy_get(struct device *dev, const char *string); |
149 | struct phy *phy_optional_get(struct device *dev, const char *string); | ||
149 | struct phy *devm_phy_get(struct device *dev, const char *string); | 150 | struct phy *devm_phy_get(struct device *dev, const char *string); |
151 | struct phy *devm_phy_optional_get(struct device *dev, const char *string); | ||
150 | void phy_put(struct phy *phy); | 152 | void phy_put(struct phy *phy); |
151 | void devm_phy_put(struct device *dev, struct phy *phy); | 153 | void devm_phy_put(struct device *dev, struct phy *phy); |
152 | struct phy *of_phy_simple_xlate(struct device *dev, | 154 | struct phy *of_phy_simple_xlate(struct device *dev, |
@@ -232,11 +234,23 @@ static inline struct phy *phy_get(struct device *dev, const char *string) | |||
232 | return ERR_PTR(-ENOSYS); | 234 | return ERR_PTR(-ENOSYS); |
233 | } | 235 | } |
234 | 236 | ||
237 | static inline struct phy *phy_optional_get(struct device *dev, | ||
238 | const char *string) | ||
239 | { | ||
240 | return ERR_PTR(-ENOSYS); | ||
241 | } | ||
242 | |||
235 | static inline struct phy *devm_phy_get(struct device *dev, const char *string) | 243 | static inline struct phy *devm_phy_get(struct device *dev, const char *string) |
236 | { | 244 | { |
237 | return ERR_PTR(-ENOSYS); | 245 | return ERR_PTR(-ENOSYS); |
238 | } | 246 | } |
239 | 247 | ||
248 | static inline struct phy *devm_phy_optional_get(struct device *dev, | ||
249 | const char *string) | ||
250 | { | ||
251 | return ERR_PTR(-ENOSYS); | ||
252 | } | ||
253 | |||
240 | static inline void phy_put(struct phy *phy) | 254 | static inline void phy_put(struct phy *phy) |
241 | { | 255 | { |
242 | } | 256 | } |
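The new *_optional_get() variants target drivers whose PHY may legitimately be absent. A hedged example of the intended call pattern; the consumer name is invented, and the NULL-on-absent behaviour is an assumption based on the naming rather than something shown in this hunk:

    #include <linux/phy/phy.h>
    #include <linux/err.h>

    static int my_phy_setup(struct device *dev)
    {
        struct phy *phy;
        int ret;

        /* "sata-phy" is an illustrative consumer name. */
        phy = devm_phy_optional_get(dev, "sata-phy");
        if (IS_ERR(phy))
            return PTR_ERR(phy);    /* genuine error, e.g. -EPROBE_DEFER */

        /* Assumption: an absent optional PHY comes back as NULL and the
         * phy_init()/phy_power_on() helpers treat NULL as a no-op. */
        ret = phy_init(phy);
        if (ret)
            return ret;

        return phy_power_on(phy);
    }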
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f589c9af8cbf..3ebbbe7b6d05 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2916,5 +2916,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb) | |||
2916 | { | 2916 | { |
2917 | return !skb->head_frag || skb_cloned(skb); | 2917 | return !skb->head_frag || skb_cloned(skb); |
2918 | } | 2918 | } |
2919 | |||
2920 | /** | ||
2921 | * skb_gso_network_seglen - Return length of individual segments of a gso packet | ||
2922 | * | ||
2923 | * @skb: GSO skb | ||
2924 | * | ||
2925 | * skb_gso_network_seglen is used to determine the real size of the | ||
2926 | * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). | ||
2927 | * | ||
2928 | * The MAC/L2 header is not accounted for. | ||
2929 | */ | ||
2930 | static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) | ||
2931 | { | ||
2932 | unsigned int hdr_len = skb_transport_header(skb) - | ||
2933 | skb_network_header(skb); | ||
2934 | return hdr_len + skb_gso_transport_seglen(skb); | ||
2935 | } | ||
2919 | #endif /* __KERNEL__ */ | 2936 | #endif /* __KERNEL__ */ |
2920 | #endif /* _LINUX_SKBUFF_H */ | 2937 | #endif /* _LINUX_SKBUFF_H */ |
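skb_gso_network_seglen() reports the per-segment size of a GSO skb from the network header up, which is exactly what a forwarding path needs for an MTU check. A small sketch using it (the helper is invented; the non-GSO branch mirrors the usual skb->len comparison):

    #include <linux/skbuff.h>

    /* Would every segment of this skb fit in @mtu at the network layer
     * (MAC/L2 header excluded, as the kernel-doc above notes)? */
    static bool my_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
    {
        if (!skb_is_gso(skb))
            return skb->len <= mtu;

        return skb_gso_network_seglen(skb) <= mtu;
    }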
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index a1d4ca290862..4203c66d8803 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -273,7 +273,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
273 | * message while queuing transfers that arrive in the meantime. When the | 273 | * message while queuing transfers that arrive in the meantime. When the |
274 | * driver is finished with this message, it must call | 274 | * driver is finished with this message, it must call |
275 | * spi_finalize_current_message() so the subsystem can issue the next | 275 | * spi_finalize_current_message() so the subsystem can issue the next |
276 | * transfer | 276 | * message |
277 | * @unprepare_transfer_hardware: there are currently no more messages on the | 277 | * @unprepare_transfer_hardware: there are currently no more messages on the |
278 | * queue so the subsystem notifies the driver that it may relax the | 278 | * queue so the subsystem notifies the driver that it may relax the |
279 | * hardware by issuing this call | 279 | * hardware by issuing this call |
@@ -287,7 +287,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
287 | * - return 1 if the transfer is still in progress. When | 287 | * - return 1 if the transfer is still in progress. When |
288 | * the driver is finished with this transfer it must | 288 | * the driver is finished with this transfer it must |
289 | * call spi_finalize_current_transfer() so the subsystem | 289 | * call spi_finalize_current_transfer() so the subsystem |
290 | * can issue the next transfer | 290 | * can issue the next transfer. Note: transfer_one and |
291 | * transfer_one_message are mutually exclusive; when both | ||
292 | * are set, the generic subsystem does not call your | ||
293 | * transfer_one callback. | ||
291 | * @unprepare_message: undo any work done by prepare_message(). | 294 | * @unprepare_message: undo any work done by prepare_message(). |
292 | * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS | 295 | * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS |
293 | * number. Any individual value may be -ENOENT for CS lines that | 296 | * number. Any individual value may be -ENOENT for CS lines that |
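The clarified kernel-doc is worth restating: a controller driver implements either transfer_one_message() (it owns the whole message) or transfer_one() (the core walks the transfers); with both set, transfer_one is ignored. A hedged skeleton of the per-transfer style, all names placeholders:

    #include <linux/spi/spi.h>

    /* Return 0 when the transfer completed here; return 1 and call
     * spi_finalize_current_transfer() later if it runs asynchronously. */
    static int my_transfer_one(struct spi_master *master, struct spi_device *spi,
                               struct spi_transfer *xfer)
    {
        /* ... shift xfer->tx_buf out / fill xfer->rx_buf ... */
        return 0;
    }

    static void my_setup_master(struct spi_master *master)
    {
        master->transfer_one = my_transfer_one;
        /* Leave ->transfer_one_message unset: per the comment above, the
         * core ignores transfer_one when both are provided. */
    }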
diff --git a/include/linux/usb.h b/include/linux/usb.h index c716da18c668..7f6eb859873e 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -1265,8 +1265,6 @@ typedef void (*usb_complete_t)(struct urb *); | |||
1265 | * @sg: scatter gather buffer list, the buffer size of each element in | 1265 | * @sg: scatter gather buffer list, the buffer size of each element in |
1266 | * the list (except the last) must be divisible by the endpoint's | 1266 | * the list (except the last) must be divisible by the endpoint's |
1267 | * max packet size if no_sg_constraint isn't set in 'struct usb_bus' | 1267 | * max packet size if no_sg_constraint isn't set in 'struct usb_bus' |
1268 | * (FIXME: scatter-gather under xHCI is broken for periodic transfers. | ||
1269 | * Do not use urb->sg for interrupt endpoints for now, only bulk.) | ||
1270 | * @num_mapped_sgs: (internal) number of mapped sg entries | 1268 | * @num_mapped_sgs: (internal) number of mapped sg entries |
1271 | * @num_sgs: number of entries in the sg list | 1269 | * @num_sgs: number of entries in the sg list |
1272 | * @transfer_buffer_length: How big is transfer_buffer. The transfer may | 1270 | * @transfer_buffer_length: How big is transfer_buffer. The transfer may |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index d992ca3145fe..6ee76c804893 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -1653,17 +1653,6 @@ struct sctp_association { | |||
1653 | /* This is the last advertised value of rwnd over a SACK chunk. */ | 1653 | /* This is the last advertised value of rwnd over a SACK chunk. */ |
1654 | __u32 a_rwnd; | 1654 | __u32 a_rwnd; |
1655 | 1655 | ||
1656 | /* Number of bytes by which the rwnd has slopped. The rwnd is allowed | ||
1657 | * to slop over a maximum of the association's frag_point. | ||
1658 | */ | ||
1659 | __u32 rwnd_over; | ||
1660 | |||
1661 | /* Keeps treack of rwnd pressure. This happens when we have | ||
1662 | * a window, but not recevie buffer (i.e small packets). This one | ||
1663 | * is releases slowly (1 PMTU at a time ). | ||
1664 | */ | ||
1665 | __u32 rwnd_press; | ||
1666 | |||
1667 | /* This is the sndbuf size in use for the association. | 1656 | /* This is the sndbuf size in use for the association. |
1668 | * This corresponds to the sndbuf size for the association, | 1657 | * This corresponds to the sndbuf size for the association, |
1669 | * as specified in the sk->sndbuf. | 1658 | * as specified in the sk->sndbuf. |
@@ -1892,8 +1881,7 @@ void sctp_assoc_update(struct sctp_association *old, | |||
1892 | __u32 sctp_association_get_next_tsn(struct sctp_association *); | 1881 | __u32 sctp_association_get_next_tsn(struct sctp_association *); |
1893 | 1882 | ||
1894 | void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); | 1883 | void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); |
1895 | void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); | 1884 | void sctp_assoc_rwnd_update(struct sctp_association *, bool); |
1896 | void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); | ||
1897 | void sctp_assoc_set_primary(struct sctp_association *, | 1885 | void sctp_assoc_set_primary(struct sctp_association *, |
1898 | struct sctp_transport *); | 1886 | struct sctp_transport *); |
1899 | void sctp_assoc_del_nonprimary_peers(struct sctp_association *, | 1887 | void sctp_assoc_del_nonprimary_peers(struct sctp_association *, |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 8d4a1c06f7e4..6793f32ccb58 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -226,7 +226,8 @@ enum ib_port_cap_flags { | |||
226 | IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, | 226 | IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, |
227 | IB_PORT_BOOT_MGMT_SUP = 1 << 23, | 227 | IB_PORT_BOOT_MGMT_SUP = 1 << 23, |
228 | IB_PORT_LINK_LATENCY_SUP = 1 << 24, | 228 | IB_PORT_LINK_LATENCY_SUP = 1 << 24, |
229 | IB_PORT_CLIENT_REG_SUP = 1 << 25 | 229 | IB_PORT_CLIENT_REG_SUP = 1 << 25, |
230 | IB_PORT_IP_BASED_GIDS = 1 << 26 | ||
230 | }; | 231 | }; |
231 | 232 | ||
232 | enum ib_port_width { | 233 | enum ib_port_width { |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index c9c791209cd1..1772fadcff62 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -525,7 +525,6 @@ struct se_cmd { | |||
525 | #define CMD_T_COMPLETE (1 << 2) | 525 | #define CMD_T_COMPLETE (1 << 2) |
526 | #define CMD_T_SENT (1 << 4) | 526 | #define CMD_T_SENT (1 << 4) |
527 | #define CMD_T_STOP (1 << 5) | 527 | #define CMD_T_STOP (1 << 5) |
528 | #define CMD_T_FAILED (1 << 6) | ||
529 | #define CMD_T_DEV_ACTIVE (1 << 7) | 528 | #define CMD_T_DEV_ACTIVE (1 << 7) |
530 | #define CMD_T_REQUEST_STOP (1 << 8) | 529 | #define CMD_T_REQUEST_STOP (1 << 8) |
531 | #define CMD_T_BUSY (1 << 9) | 530 | #define CMD_T_BUSY (1 << 9) |
diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 9e9475c85de5..e5bf9a76f169 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h | |||
@@ -42,7 +42,6 @@ TRACE_EVENT(pstate_sample, | |||
42 | u32 state, | 42 | u32 state, |
43 | u64 mperf, | 43 | u64 mperf, |
44 | u64 aperf, | 44 | u64 aperf, |
45 | u32 energy, | ||
46 | u32 freq | 45 | u32 freq |
47 | ), | 46 | ), |
48 | 47 | ||
@@ -51,7 +50,6 @@ TRACE_EVENT(pstate_sample, | |||
51 | state, | 50 | state, |
52 | mperf, | 51 | mperf, |
53 | aperf, | 52 | aperf, |
54 | energy, | ||
55 | freq | 53 | freq |
56 | ), | 54 | ), |
57 | 55 | ||
@@ -61,7 +59,6 @@ TRACE_EVENT(pstate_sample, | |||
61 | __field(u32, state) | 59 | __field(u32, state) |
62 | __field(u64, mperf) | 60 | __field(u64, mperf) |
63 | __field(u64, aperf) | 61 | __field(u64, aperf) |
64 | __field(u32, energy) | ||
65 | __field(u32, freq) | 62 | __field(u32, freq) |
66 | 63 | ||
67 | ), | 64 | ), |
@@ -72,17 +69,15 @@ TRACE_EVENT(pstate_sample, | |||
72 | __entry->state = state; | 69 | __entry->state = state; |
73 | __entry->mperf = mperf; | 70 | __entry->mperf = mperf; |
74 | __entry->aperf = aperf; | 71 | __entry->aperf = aperf; |
75 | __entry->energy = energy; | ||
76 | __entry->freq = freq; | 72 | __entry->freq = freq; |
77 | ), | 73 | ), |
78 | 74 | ||
79 | TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu energy=%lu freq=%lu ", | 75 | TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ", |
80 | (unsigned long)__entry->core_busy, | 76 | (unsigned long)__entry->core_busy, |
81 | (unsigned long)__entry->scaled_busy, | 77 | (unsigned long)__entry->scaled_busy, |
82 | (unsigned long)__entry->state, | 78 | (unsigned long)__entry->state, |
83 | (unsigned long long)__entry->mperf, | 79 | (unsigned long long)__entry->mperf, |
84 | (unsigned long long)__entry->aperf, | 80 | (unsigned long long)__entry->aperf, |
85 | (unsigned long)__entry->energy, | ||
86 | (unsigned long)__entry->freq | 81 | (unsigned long)__entry->freq |
87 | ) | 82 | ) |
88 | 83 | ||
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 3c9a833992e8..b06c8ed68707 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h | |||
@@ -619,6 +619,8 @@ struct drm_gem_open { | |||
619 | #define DRM_PRIME_CAP_EXPORT 0x2 | 619 | #define DRM_PRIME_CAP_EXPORT 0x2 |
620 | #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 | 620 | #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 |
621 | #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 | 621 | #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 |
622 | #define DRM_CAP_CURSOR_WIDTH 0x8 | ||
623 | #define DRM_CAP_CURSOR_HEIGHT 0x9 | ||
622 | 624 | ||
623 | /** DRM_IOCTL_GET_CAP ioctl argument type */ | 625 | /** DRM_IOCTL_GET_CAP ioctl argument type */ |
624 | struct drm_get_cap { | 626 | struct drm_get_cap { |
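The two new capability IDs are read through the existing DRM_IOCTL_GET_CAP path using the struct drm_get_cap shown just below them. A userspace-style sketch, assuming the kernel uapi header is available as <drm/drm.h> and treating 64x64 as an assumed fallback when the kernel predates these caps:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    static void query_cursor_size(int drm_fd)
    {
        struct drm_get_cap cap = { .capability = DRM_CAP_CURSOR_WIDTH };
        uint64_t w = 64, h = 64;    /* assumed fallback for older kernels */

        if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &cap) == 0)
            w = cap.value;

        cap.capability = DRM_CAP_CURSOR_HEIGHT;
        cap.value = 0;
        if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &cap) == 0)
            h = cap.value;

        printf("preferred cursor size: %llu x %llu\n",
               (unsigned long long)w, (unsigned long long)h);
    }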
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 9971c560ed9a..87792a5fee3b 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h | |||
@@ -87,6 +87,7 @@ | |||
87 | #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 | 87 | #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 |
88 | #define DRM_VMW_PARAM_3D_CAPS_SIZE 8 | 88 | #define DRM_VMW_PARAM_3D_CAPS_SIZE 8 |
89 | #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 | 89 | #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 |
90 | #define DRM_VMW_PARAM_MAX_MOB_SIZE 10 | ||
90 | 91 | ||
91 | /** | 92 | /** |
92 | * struct drm_vmw_getparam_arg | 93 | * struct drm_vmw_getparam_arg |
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 1b8a0f4c9590..b4d69092fbdb 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h | |||
@@ -558,7 +558,6 @@ static inline char *btrfs_err_str(enum btrfs_err_code err_code) | |||
558 | #define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, __u64) | 558 | #define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, __u64) |
559 | #define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \ | 559 | #define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \ |
560 | struct btrfs_ioctl_space_args) | 560 | struct btrfs_ioctl_space_args) |
561 | #define BTRFS_IOC_GLOBAL_RSV _IOR(BTRFS_IOCTL_MAGIC, 20, __u64) | ||
562 | #define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64) | 561 | #define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64) |
563 | #define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) | 562 | #define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) |
564 | #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \ | 563 | #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \ |
diff --git a/include/uapi/linux/mic_ioctl.h b/include/uapi/linux/mic_ioctl.h index 7fabba5059cf..feb0b4c0814c 100644 --- a/include/uapi/linux/mic_ioctl.h +++ b/include/uapi/linux/mic_ioctl.h | |||
@@ -39,7 +39,7 @@ struct mic_copy_desc { | |||
39 | #else | 39 | #else |
40 | struct iovec *iov; | 40 | struct iovec *iov; |
41 | #endif | 41 | #endif |
42 | int iovcnt; | 42 | __u32 iovcnt; |
43 | __u8 vr_idx; | 43 | __u8 vr_idx; |
44 | __u8 update_used; | 44 | __u8 update_used; |
45 | __u32 out_len; | 45 | __u32 out_len; |
diff --git a/include/uapi/xen/Kbuild b/include/uapi/xen/Kbuild index 61257cb14653..5c459628e8c7 100644 --- a/include/uapi/xen/Kbuild +++ b/include/uapi/xen/Kbuild | |||
@@ -1,3 +1,5 @@ | |||
1 | # UAPI Header export list | 1 | # UAPI Header export list |
2 | header-y += evtchn.h | 2 | header-y += evtchn.h |
3 | header-y += gntalloc.h | ||
4 | header-y += gntdev.h | ||
3 | header-y += privcmd.h | 5 | header-y += privcmd.h |
diff --git a/include/xen/gntalloc.h b/include/uapi/xen/gntalloc.h index 76bd58065f4f..76bd58065f4f 100644 --- a/include/xen/gntalloc.h +++ b/include/uapi/xen/gntalloc.h | |||
diff --git a/include/xen/gntdev.h b/include/uapi/xen/gntdev.h index 5304bd3c84c5..5304bd3c84c5 100644 --- a/include/xen/gntdev.h +++ b/include/uapi/xen/gntdev.h | |||
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index ae665ac59c36..32ec05a6572f 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h | |||
@@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t; | |||
113 | * it's less than the number provided by the backend. The indirect_grefs field | 113 | * it's less than the number provided by the backend. The indirect_grefs field |
114 | * in blkif_request_indirect should be filled by the frontend with the | 114 | * in blkif_request_indirect should be filled by the frontend with the |
115 | * grant references of the pages that are holding the indirect segments. | 115 | * grant references of the pages that are holding the indirect segments. |
116 | * This pages are filled with an array of blkif_request_segment_aligned | 116 | * These pages are filled with an array of blkif_request_segment that hold the |
117 | * that hold the information about the segments. The number of indirect | 117 | * information about the segments. The number of indirect pages to use is |
118 | * pages to use is determined by the maximum number of segments | 118 | * determined by the number of segments an indirect request contains. Every |
119 | * a indirect request contains. Every indirect page can contain a maximum | 119 | * indirect page can contain a maximum of |
120 | * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)), | 120 | * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to |
121 | * so to calculate the number of indirect pages to use we have to do | 121 | * calculate the number of indirect pages to use we have to do |
122 | * ceil(indirect_segments/512). | 122 | * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))). |
123 | * | 123 | * |
124 | * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* | 124 | * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not* |
125 | * create the "feature-max-indirect-segments" node! | 125 | * create the "feature-max-indirect-segments" node! |
@@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t; | |||
135 | 135 | ||
136 | #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 | 136 | #define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8 |
137 | 137 | ||
138 | struct blkif_request_segment_aligned { | 138 | struct blkif_request_segment { |
139 | grant_ref_t gref; /* reference to I/O buffer frame */ | 139 | grant_ref_t gref; /* reference to I/O buffer frame */ |
140 | /* @first_sect: first sector in frame to transfer (inclusive). */ | 140 | /* @first_sect: first sector in frame to transfer (inclusive). */ |
141 | /* @last_sect: last sector in frame to transfer (inclusive). */ | 141 | /* @last_sect: last sector in frame to transfer (inclusive). */ |
142 | uint8_t first_sect, last_sect; | 142 | uint8_t first_sect, last_sect; |
143 | uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */ | 143 | }; |
144 | } __attribute__((__packed__)); | ||
145 | 144 | ||
146 | struct blkif_request_rw { | 145 | struct blkif_request_rw { |
147 | uint8_t nr_segments; /* number of segments */ | 146 | uint8_t nr_segments; /* number of segments */ |
@@ -151,12 +150,7 @@ struct blkif_request_rw { | |||
151 | #endif | 150 | #endif |
152 | uint64_t id; /* private guest value, echoed in resp */ | 151 | uint64_t id; /* private guest value, echoed in resp */ |
153 | blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ | 152 | blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ |
154 | struct blkif_request_segment { | 153 | struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
155 | grant_ref_t gref; /* reference to I/O buffer frame */ | ||
156 | /* @first_sect: first sector in frame to transfer (inclusive). */ | ||
157 | /* @last_sect: last sector in frame to transfer (inclusive). */ | ||
158 | uint8_t first_sect, last_sect; | ||
159 | } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
160 | } __attribute__((__packed__)); | 154 | } __attribute__((__packed__)); |
161 | 155 | ||
162 | struct blkif_request_discard { | 156 | struct blkif_request_discard { |
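The reworded comment pins down the arithmetic: each indirect page now holds PAGE_SIZE / sizeof(struct blkif_request_segment) entries (512 with 4 KiB pages and the naturally aligned 8-byte segment), and a request needs the ceiling of its segment count over that. A small sketch of the calculation (macro and helper names are invented):

    #include <linux/kernel.h>   /* DIV_ROUND_UP */
    #include <linux/mm.h>       /* PAGE_SIZE */
    #include <xen/interface/io/blkif.h>

    #define MY_SEGS_PER_INDIRECT_FRAME \
        (PAGE_SIZE / sizeof(struct blkif_request_segment))

    /* Indirect grant pages needed for a request with @nr_segments segments. */
    static inline unsigned int my_indirect_pages(unsigned int nr_segments)
    {
        return DIV_ROUND_UP(nr_segments, MY_SEGS_PER_INDIRECT_FRAME);
    }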
diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h deleted file mode 100644 index ac45e0712afa..000000000000 --- a/include/xen/interface/xencomm.h +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /* | ||
2 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
3 | * of this software and associated documentation files (the "Software"), to | ||
4 | * deal in the Software without restriction, including without limitation the | ||
5 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
6 | * sell copies of the Software, and to permit persons to whom the Software is | ||
7 | * furnished to do so, subject to the following conditions: | ||
8 | * | ||
9 | * The above copyright notice and this permission notice shall be included in | ||
10 | * all copies or substantial portions of the Software. | ||
11 | * | ||
12 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
13 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
15 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
16 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
17 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
18 | * DEALINGS IN THE SOFTWARE. | ||
19 | * | ||
20 | * Copyright (C) IBM Corp. 2006 | ||
21 | */ | ||
22 | |||
23 | #ifndef _XEN_XENCOMM_H_ | ||
24 | #define _XEN_XENCOMM_H_ | ||
25 | |||
26 | /* A xencomm descriptor is a scatter/gather list containing physical | ||
27 | * addresses corresponding to a virtually contiguous memory area. The | ||
28 | * hypervisor translates these physical addresses to machine addresses to copy | ||
29 | * to and from the virtually contiguous area. | ||
30 | */ | ||
31 | |||
32 | #define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */ | ||
33 | #define XENCOMM_INVALID (~0UL) | ||
34 | |||
35 | struct xencomm_desc { | ||
36 | uint32_t magic; | ||
37 | uint32_t nr_addrs; /* the number of entries in address[] */ | ||
38 | uint64_t address[0]; | ||
39 | }; | ||
40 | |||
41 | #endif /* _XEN_XENCOMM_H_ */ | ||
diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h deleted file mode 100644 index e43b039be112..000000000000 --- a/include/xen/xencomm.h +++ /dev/null | |||
@@ -1,77 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
15 | * | ||
16 | * Copyright (C) IBM Corp. 2006 | ||
17 | * | ||
18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
19 | * Jerone Young <jyoung5@us.ibm.com> | ||
20 | */ | ||
21 | |||
22 | #ifndef _LINUX_XENCOMM_H_ | ||
23 | #define _LINUX_XENCOMM_H_ | ||
24 | |||
25 | #include <xen/interface/xencomm.h> | ||
26 | |||
27 | #define XENCOMM_MINI_ADDRS 3 | ||
28 | struct xencomm_mini { | ||
29 | struct xencomm_desc _desc; | ||
30 | uint64_t address[XENCOMM_MINI_ADDRS]; | ||
31 | }; | ||
32 | |||
33 | /* To avoid additionnal virt to phys conversion, an opaque structure is | ||
34 | presented. */ | ||
35 | struct xencomm_handle; | ||
36 | |||
37 | extern void xencomm_free(struct xencomm_handle *desc); | ||
38 | extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes); | ||
39 | extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, | ||
40 | unsigned long bytes, struct xencomm_mini *xc_area); | ||
41 | |||
42 | #if 0 | ||
43 | #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ | ||
44 | struct xencomm_mini xc_desc ## _base[(n)] \ | ||
45 | __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \ | ||
46 | struct xencomm_mini *xc_desc = &xc_desc ## _base[0]; | ||
47 | #else | ||
48 | /* | ||
49 | * gcc bug workaround: | ||
50 | * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660 | ||
51 | * gcc doesn't handle properly stack variable with | ||
52 | * __attribute__((__align__(sizeof(struct xencomm_mini)))) | ||
53 | */ | ||
54 | #define XENCOMM_MINI_ALIGNED(xc_desc, n) \ | ||
55 | unsigned char xc_desc ## _base[((n) + 1 ) * \ | ||
56 | sizeof(struct xencomm_mini)]; \ | ||
57 | struct xencomm_mini *xc_desc = (struct xencomm_mini *) \ | ||
58 | ((unsigned long)xc_desc ## _base + \ | ||
59 | (sizeof(struct xencomm_mini) - \ | ||
60 | ((unsigned long)xc_desc ## _base) % \ | ||
61 | sizeof(struct xencomm_mini))); | ||
62 | #endif | ||
63 | #define xencomm_map_no_alloc(ptr, bytes) \ | ||
64 | ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \ | ||
65 | __xencomm_map_no_alloc(ptr, bytes, xc_desc); }) | ||
66 | |||
67 | /* provided by architecture code: */ | ||
68 | extern unsigned long xencomm_vtop(unsigned long vaddr); | ||
69 | |||
70 | static inline void *xencomm_pa(void *ptr) | ||
71 | { | ||
72 | return (void *)xencomm_vtop((unsigned long)ptr); | ||
73 | } | ||
74 | |||
75 | #define xen_guest_handle(hnd) ((hnd).p) | ||
76 | |||
77 | #endif /* _LINUX_XENCOMM_H_ */ | ||
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c index bd8e788d71e0..1ef0606797c9 100644 --- a/kernel/irq/devres.c +++ b/kernel/irq/devres.c | |||
@@ -73,6 +73,51 @@ int devm_request_threaded_irq(struct device *dev, unsigned int irq, | |||
73 | EXPORT_SYMBOL(devm_request_threaded_irq); | 73 | EXPORT_SYMBOL(devm_request_threaded_irq); |
74 | 74 | ||
75 | /** | 75 | /** |
76 | * devm_request_any_context_irq - allocate an interrupt line for a managed device | ||
77 | * @dev: device to request interrupt for | ||
78 | * @irq: Interrupt line to allocate | ||
79 | * @handler: Function to be called when the IRQ occurs | ||
80 | * @thread_fn: function to be called in a threaded interrupt context. NULL | ||
81 | * for devices which handle everything in @handler | ||
82 | * @irqflags: Interrupt type flags | ||
83 | * @devname: An ascii name for the claiming device | ||
84 | * @dev_id: A cookie passed back to the handler function | ||
85 | * | ||
86 | * Except for the extra @dev argument, this function takes the | ||
87 | * same arguments and performs the same function as | ||
88 | * request_any_context_irq(). IRQs requested with this function will be | ||
89 | * automatically freed on driver detach. | ||
90 | * | ||
91 | * If an IRQ allocated with this function needs to be freed | ||
92 | * separately, devm_free_irq() must be used. | ||
93 | */ | ||
94 | int devm_request_any_context_irq(struct device *dev, unsigned int irq, | ||
95 | irq_handler_t handler, unsigned long irqflags, | ||
96 | const char *devname, void *dev_id) | ||
97 | { | ||
98 | struct irq_devres *dr; | ||
99 | int rc; | ||
100 | |||
101 | dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres), | ||
102 | GFP_KERNEL); | ||
103 | if (!dr) | ||
104 | return -ENOMEM; | ||
105 | |||
106 | rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id); | ||
107 | if (rc) { | ||
108 | devres_free(dr); | ||
109 | return rc; | ||
110 | } | ||
111 | |||
112 | dr->irq = irq; | ||
113 | dr->dev_id = dev_id; | ||
114 | devres_add(dev, dr); | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | EXPORT_SYMBOL(devm_request_any_context_irq); | ||
119 | |||
120 | /** | ||
76 | * devm_free_irq - free an interrupt | 121 | * devm_free_irq - free an interrupt |
77 | * @dev: device to free interrupt for | 122 | * @dev: device to free interrupt for |
78 | * @irq: Interrupt line to free | 123 | * @irq: Interrupt line to free |
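As the kernel-doc above describes, this is the managed counterpart of request_any_context_irq(): the line is released automatically on driver detach. A hedged probe-time example checking for a negative return; the device, handler and flags are placeholders:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t my_irq_handler(int irq, void *dev_id)
    {
        /* ... acknowledge the (hypothetical) device ... */
        return IRQ_HANDLED;
    }

    static int my_probe(struct platform_device *pdev)
    {
        int irq = platform_get_irq(pdev, 0);
        int ret;

        if (irq < 0)
            return irq;

        /* May land as a hard or nested/threaded IRQ depending on the parent
         * irq chip; either way it is freed when the driver detaches. */
        ret = devm_request_any_context_irq(&pdev->dev, irq, my_irq_handler,
                                           0, dev_name(&pdev->dev), pdev);
        if (ret < 0)
            return ret;

        return 0;
    }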
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 192a302d6cfd..8ab8e9390297 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
@@ -274,6 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq) | |||
274 | { | 274 | { |
275 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; | 275 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; |
276 | } | 276 | } |
277 | EXPORT_SYMBOL(irq_to_desc); | ||
277 | 278 | ||
278 | static void free_desc(unsigned int irq) | 279 | static void free_desc(unsigned int irq) |
279 | { | 280 | { |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index b1d255f04135..4dae9cbe9259 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -1076,7 +1076,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear) | |||
1076 | next_seq = log_next_seq; | 1076 | next_seq = log_next_seq; |
1077 | 1077 | ||
1078 | len = 0; | 1078 | len = 0; |
1079 | prev = 0; | ||
1080 | while (len >= 0 && seq < next_seq) { | 1079 | while (len >= 0 && seq < next_seq) { |
1081 | struct printk_log *msg = log_from_idx(idx); | 1080 | struct printk_log *msg = log_from_idx(idx); |
1082 | int textlen; | 1081 | int textlen; |
@@ -2788,7 +2787,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, | |||
2788 | next_idx = idx; | 2787 | next_idx = idx; |
2789 | 2788 | ||
2790 | l = 0; | 2789 | l = 0; |
2791 | prev = 0; | ||
2792 | while (seq < dumper->next_seq) { | 2790 | while (seq < dumper->next_seq) { |
2793 | struct printk_log *msg = log_from_idx(idx); | 2791 | struct printk_log *msg = log_from_idx(idx); |
2794 | 2792 | ||
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 7a925ba456fb..a6a5bf53e86d 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c | |||
@@ -51,7 +51,13 @@ | |||
51 | * HZ shrinks, so values greater than 8 overflow 32bits when | 51 | * HZ shrinks, so values greater than 8 overflow 32bits when |
52 | * HZ=100. | 52 | * HZ=100. |
53 | */ | 53 | */ |
54 | #if HZ < 34 | ||
55 | #define JIFFIES_SHIFT 6 | ||
56 | #elif HZ < 67 | ||
57 | #define JIFFIES_SHIFT 7 | ||
58 | #else | ||
54 | #define JIFFIES_SHIFT 8 | 59 | #define JIFFIES_SHIFT 8 |
60 | #endif | ||
55 | 61 | ||
56 | static cycle_t jiffies_read(struct clocksource *cs) | 62 | static cycle_t jiffies_read(struct clocksource *cs) |
57 | { | 63 | { |
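The new #if ladder keeps the jiffies clocksource's fixed-point multiplier inside 32 bits at low HZ: the multiplier is roughly (10^9 / HZ) << JIFFIES_SHIFT, which with a shift of 8 only fits in a u32 for HZ above about 60, and with a shift of 7 for HZ above about 30; the 67 and 34 cutoffs sit safely above those bounds. A compile-time restatement of that constraint, written as if it sat next to the definitions above (the macro is illustrative, not the file's real NSEC_PER_JIFFY):

    #include <linux/bug.h>

    /* Rough nanoseconds per tick; the real file derives this more carefully. */
    #define MY_APPROX_NSEC_PER_JIFFY    (1000000000ULL / HZ)

    static inline void my_check_jiffies_shift(void)
    {
        BUILD_BUG_ON((MY_APPROX_NSEC_PER_JIFFY << JIFFIES_SHIFT) >= (1ULL << 32));
    }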
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 43780ab5e279..98977a57ac72 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -756,6 +756,7 @@ out: | |||
756 | static void tick_broadcast_clear_oneshot(int cpu) | 756 | static void tick_broadcast_clear_oneshot(int cpu) |
757 | { | 757 | { |
758 | cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); | 758 | cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); |
759 | cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); | ||
759 | } | 760 | } |
760 | 761 | ||
761 | static void tick_broadcast_init_next_event(struct cpumask *mask, | 762 | static void tick_broadcast_init_next_event(struct cpumask *mask, |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 294b8a271a04..fc4da2d97f9b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2397,6 +2397,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
2397 | write &= RB_WRITE_MASK; | 2397 | write &= RB_WRITE_MASK; |
2398 | tail = write - length; | 2398 | tail = write - length; |
2399 | 2399 | ||
2400 | /* | ||
2401 | * If this is the first commit on the page, then it has the same | ||
2402 | * timestamp as the page itself. | ||
2403 | */ | ||
2404 | if (!tail) | ||
2405 | delta = 0; | ||
2406 | |||
2400 | /* See if we shot pass the end of this buffer page */ | 2407 | /* See if we shot pass the end of this buffer page */ |
2401 | if (unlikely(write > BUF_PAGE_SIZE)) | 2408 | if (unlikely(write > BUF_PAGE_SIZE)) |
2402 | return rb_move_tail(cpu_buffer, length, tail, | 2409 | return rb_move_tail(cpu_buffer, length, tail, |
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 7be235f1a70b..93d145e5539c 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c | |||
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr, | |||
54 | /* | 54 | /* |
55 | * Try to steal tags from a remote cpu's percpu freelist. | 55 | * Try to steal tags from a remote cpu's percpu freelist. |
56 | * | 56 | * |
57 | * We first check how many percpu freelists have tags - we don't steal tags | 57 | * We first check how many percpu freelists have tags |
58 | * unless enough percpu freelists have tags on them that it's possible more than | ||
59 | * half the total tags could be stuck on remote percpu freelists. | ||
60 | * | 58 | * |
61 | * Then we iterate through the cpus until we find some tags - we don't attempt | 59 | * Then we iterate through the cpus until we find some tags - we don't attempt |
62 | * to find the "best" cpu to steal from, to keep cacheline bouncing to a | 60 | * to find the "best" cpu to steal from, to keep cacheline bouncing to a |
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool, | |||
69 | struct percpu_ida_cpu *remote; | 67 | struct percpu_ida_cpu *remote; |
70 | 68 | ||
71 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); | 69 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); |
72 | cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2; | 70 | cpus_have_tags; cpus_have_tags--) { |
73 | cpus_have_tags--) { | ||
74 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); | 71 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); |
75 | 72 | ||
76 | if (cpu >= nr_cpu_ids) { | 73 | if (cpu >= nr_cpu_ids) { |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 82166bf974e1..da23eb96779f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1545,6 +1545,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1545 | entry = pmd_mknonnuma(entry); | 1545 | entry = pmd_mknonnuma(entry); |
1546 | entry = pmd_modify(entry, newprot); | 1546 | entry = pmd_modify(entry, newprot); |
1547 | ret = HPAGE_PMD_NR; | 1547 | ret = HPAGE_PMD_NR; |
1548 | set_pmd_at(mm, addr, pmd, entry); | ||
1548 | BUG_ON(pmd_write(entry)); | 1549 | BUG_ON(pmd_write(entry)); |
1549 | } else { | 1550 | } else { |
1550 | struct page *page = pmd_page(*pmd); | 1551 | struct page *page = pmd_page(*pmd); |
@@ -1557,16 +1558,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1557 | */ | 1558 | */ |
1558 | if (!is_huge_zero_page(page) && | 1559 | if (!is_huge_zero_page(page) && |
1559 | !pmd_numa(*pmd)) { | 1560 | !pmd_numa(*pmd)) { |
1560 | entry = *pmd; | 1561 | pmdp_set_numa(mm, addr, pmd); |
1561 | entry = pmd_mknuma(entry); | ||
1562 | ret = HPAGE_PMD_NR; | 1562 | ret = HPAGE_PMD_NR; |
1563 | } | 1563 | } |
1564 | } | 1564 | } |
1565 | |||
1566 | /* Set PMD if cleared earlier */ | ||
1567 | if (ret == HPAGE_PMD_NR) | ||
1568 | set_pmd_at(mm, addr, pmd, entry); | ||
1569 | |||
1570 | spin_unlock(ptl); | 1565 | spin_unlock(ptl); |
1571 | } | 1566 | } |
1572 | 1567 | ||
diff --git a/mm/mprotect.c b/mm/mprotect.c index 7332c1785744..769a67a15803 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
@@ -58,36 +58,27 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
58 | if (pte_numa(ptent)) | 58 | if (pte_numa(ptent)) |
59 | ptent = pte_mknonnuma(ptent); | 59 | ptent = pte_mknonnuma(ptent); |
60 | ptent = pte_modify(ptent, newprot); | 60 | ptent = pte_modify(ptent, newprot); |
61 | /* | ||
62 | * Avoid taking write faults for pages we | ||
63 | * know to be dirty. | ||
64 | */ | ||
65 | if (dirty_accountable && pte_dirty(ptent)) | ||
66 | ptent = pte_mkwrite(ptent); | ||
67 | ptep_modify_prot_commit(mm, addr, pte, ptent); | ||
61 | updated = true; | 68 | updated = true; |
62 | } else { | 69 | } else { |
63 | struct page *page; | 70 | struct page *page; |
64 | 71 | ||
65 | ptent = *pte; | ||
66 | page = vm_normal_page(vma, addr, oldpte); | 72 | page = vm_normal_page(vma, addr, oldpte); |
67 | if (page && !PageKsm(page)) { | 73 | if (page && !PageKsm(page)) { |
68 | if (!pte_numa(oldpte)) { | 74 | if (!pte_numa(oldpte)) { |
69 | ptent = pte_mknuma(ptent); | 75 | ptep_set_numa(mm, addr, pte); |
70 | set_pte_at(mm, addr, pte, ptent); | ||
71 | updated = true; | 76 | updated = true; |
72 | } | 77 | } |
73 | } | 78 | } |
74 | } | 79 | } |
75 | |||
76 | /* | ||
77 | * Avoid taking write faults for pages we know to be | ||
78 | * dirty. | ||
79 | */ | ||
80 | if (dirty_accountable && pte_dirty(ptent)) { | ||
81 | ptent = pte_mkwrite(ptent); | ||
82 | updated = true; | ||
83 | } | ||
84 | |||
85 | if (updated) | 80 | if (updated) |
86 | pages++; | 81 | pages++; |
87 | |||
88 | /* Only !prot_numa always clears the pte */ | ||
89 | if (!prot_numa) | ||
90 | ptep_modify_prot_commit(mm, addr, pte, ptent); | ||
91 | } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { | 82 | } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { |
92 | swp_entry_t entry = pte_to_swp_entry(oldpte); | 83 | swp_entry_t entry = pte_to_swp_entry(oldpte); |
93 | 84 | ||
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 512159bf607f..8323bced8e5b 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
@@ -241,19 +241,19 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr) | |||
241 | size = bat_priv->num_ifaces * sizeof(uint8_t); | 241 | size = bat_priv->num_ifaces * sizeof(uint8_t); |
242 | orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC); | 242 | orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC); |
243 | if (!orig_node->bat_iv.bcast_own_sum) | 243 | if (!orig_node->bat_iv.bcast_own_sum) |
244 | goto free_bcast_own; | 244 | goto free_orig_node; |
245 | 245 | ||
246 | hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, | 246 | hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, |
247 | batadv_choose_orig, orig_node, | 247 | batadv_choose_orig, orig_node, |
248 | &orig_node->hash_entry); | 248 | &orig_node->hash_entry); |
249 | if (hash_added != 0) | 249 | if (hash_added != 0) |
250 | goto free_bcast_own; | 250 | goto free_orig_node; |
251 | 251 | ||
252 | return orig_node; | 252 | return orig_node; |
253 | 253 | ||
254 | free_bcast_own: | ||
255 | kfree(orig_node->bat_iv.bcast_own); | ||
256 | free_orig_node: | 254 | free_orig_node: |
255 | /* free twice, as batadv_orig_node_new sets refcount to 2 */ | ||
256 | batadv_orig_node_free_ref(orig_node); | ||
257 | batadv_orig_node_free_ref(orig_node); | 257 | batadv_orig_node_free_ref(orig_node); |
258 | 258 | ||
259 | return NULL; | 259 | return NULL; |
@@ -266,7 +266,7 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, | |||
266 | struct batadv_orig_node *orig_neigh) | 266 | struct batadv_orig_node *orig_neigh) |
267 | { | 267 | { |
268 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 268 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
269 | struct batadv_neigh_node *neigh_node; | 269 | struct batadv_neigh_node *neigh_node, *tmp_neigh_node; |
270 | 270 | ||
271 | neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node); | 271 | neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node); |
272 | if (!neigh_node) | 272 | if (!neigh_node) |
@@ -281,14 +281,24 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, | |||
281 | neigh_node->orig_node = orig_neigh; | 281 | neigh_node->orig_node = orig_neigh; |
282 | neigh_node->if_incoming = hard_iface; | 282 | neigh_node->if_incoming = hard_iface; |
283 | 283 | ||
284 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | ||
285 | "Creating new neighbor %pM for orig_node %pM on interface %s\n", | ||
286 | neigh_addr, orig_node->orig, hard_iface->net_dev->name); | ||
287 | |||
288 | spin_lock_bh(&orig_node->neigh_list_lock); | 284 | spin_lock_bh(&orig_node->neigh_list_lock); |
289 | hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); | 285 | tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface, |
286 | neigh_addr); | ||
287 | if (!tmp_neigh_node) { | ||
288 | hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); | ||
289 | } else { | ||
290 | kfree(neigh_node); | ||
291 | batadv_hardif_free_ref(hard_iface); | ||
292 | neigh_node = tmp_neigh_node; | ||
293 | } | ||
290 | spin_unlock_bh(&orig_node->neigh_list_lock); | 294 | spin_unlock_bh(&orig_node->neigh_list_lock); |
291 | 295 | ||
296 | if (!tmp_neigh_node) | ||
297 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | ||
298 | "Creating new neighbor %pM for orig_node %pM on interface %s\n", | ||
299 | neigh_addr, orig_node->orig, | ||
300 | hard_iface->net_dev->name); | ||
301 | |||
292 | out: | 302 | out: |
293 | return neigh_node; | 303 | return neigh_node; |
294 | } | 304 | } |
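The batadv_iv_ogm_neigh_new() change allocates the neighbour speculatively and, while holding neigh_list_lock, asks batadv_neigh_node_get() whether an entry for that address and interface already exists: only the first creator inserts its node, later callers free their copy and reuse the existing one, and the debug message is printed only for the caller that actually created the entry. A small user-space sketch of that allocate-then-check-under-lock pattern follows; the types, names and pthread mutex are stand-ins, not the batman-adv API.

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct neigh {
        struct neigh *next;
        char addr[6];
    };

    static struct neigh *neigh_list;
    static pthread_mutex_t neigh_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct neigh *neigh_find_locked(const char *addr)
    {
        struct neigh *n;

        for (n = neigh_list; n; n = n->next)
            if (!memcmp(n->addr, addr, sizeof(n->addr)))
                return n;
        return NULL;
    }

    static struct neigh *neigh_get_or_create(const char *addr)
    {
        struct neigh *fresh, *found;

        fresh = calloc(1, sizeof(*fresh));     /* speculative allocation */
        if (!fresh)
            return NULL;
        memcpy(fresh->addr, addr, sizeof(fresh->addr));

        pthread_mutex_lock(&neigh_lock);
        found = neigh_find_locked(addr);
        if (!found) {
            fresh->next = neigh_list;          /* first caller wins */
            neigh_list = fresh;
            found = fresh;
        }
        pthread_mutex_unlock(&neigh_lock);

        if (found != fresh)
            free(fresh);                       /* lost the race: drop our copy */
        return found;
    }

    int main(void)
    {
        return neigh_get_or_create("\x02\x00\x00\x00\x00\x01") ? 0 : 1;
    }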
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 3d417d3641c6..b851cc580853 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -241,7 +241,7 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) | |||
241 | { | 241 | { |
242 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); | 242 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); |
243 | const struct batadv_hard_iface *hard_iface; | 243 | const struct batadv_hard_iface *hard_iface; |
244 | int min_mtu = ETH_DATA_LEN; | 244 | int min_mtu = INT_MAX; |
245 | 245 | ||
246 | rcu_read_lock(); | 246 | rcu_read_lock(); |
247 | list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { | 247 | list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { |
@@ -256,8 +256,6 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) | |||
256 | } | 256 | } |
257 | rcu_read_unlock(); | 257 | rcu_read_unlock(); |
258 | 258 | ||
259 | atomic_set(&bat_priv->packet_size_max, min_mtu); | ||
260 | |||
261 | if (atomic_read(&bat_priv->fragmentation) == 0) | 259 | if (atomic_read(&bat_priv->fragmentation) == 0) |
262 | goto out; | 260 | goto out; |
263 | 261 | ||
@@ -268,13 +266,21 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) | |||
268 | min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE); | 266 | min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE); |
269 | min_mtu -= sizeof(struct batadv_frag_packet); | 267 | min_mtu -= sizeof(struct batadv_frag_packet); |
270 | min_mtu *= BATADV_FRAG_MAX_FRAGMENTS; | 268 | min_mtu *= BATADV_FRAG_MAX_FRAGMENTS; |
271 | atomic_set(&bat_priv->packet_size_max, min_mtu); | ||
272 | |||
273 | /* with fragmentation enabled we can fragment external packets easily */ | ||
274 | min_mtu = min_t(int, min_mtu, ETH_DATA_LEN); | ||
275 | 269 | ||
276 | out: | 270 | out: |
277 | return min_mtu - batadv_max_header_len(); | 271 | /* report to the other components the maximum amount of bytes that |
272 | * batman-adv can send over the wire (without considering the payload | ||
273 | * overhead). For example, this value is used by TT to compute the | ||
273 | * maximum local table size | ||
275 | */ | ||
276 | atomic_set(&bat_priv->packet_size_max, min_mtu); | ||
277 | |||
278 | /* the real soft-interface MTU is computed by removing the payload | ||
279 | * overhead from the maximum amount of bytes that was just computed. | ||
280 | * | ||
281 | * However batman-adv does not support MTUs bigger than ETH_DATA_LEN | ||
282 | */ | ||
283 | return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN); | ||
278 | } | 284 | } |
279 | 285 | ||
280 | /* adjusts the MTU if a new interface with a smaller MTU appeared. */ | 286 | /* adjusts the MTU if a new interface with a smaller MTU appeared. */ |
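Per the new comments in batadv_hardif_min_mtu(), the function now stores in packet_size_max the largest payload batman-adv can move over the wire (which can exceed 1500 bytes when fragmentation is enabled), while the value it returns as the soft-interface MTU is the same figure minus the batman-adv header overhead, additionally capped at ETH_DATA_LEN. A small arithmetic sketch with illustrative numbers; the header length below is a placeholder, not the real batadv_max_header_len().

    #include <stdio.h>

    #define ETH_DATA_LEN 1500

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        int min_mtu = 2800;       /* example: fragmentation allows > 1500      */
        int header_len = 46;      /* placeholder for batadv_max_header_len()   */

        int packet_size_max = min_mtu;   /* reported to TT and friends         */
        int soft_iface_mtu = min_int(min_mtu - header_len, ETH_DATA_LEN);

        printf("packet_size_max=%d soft_iface_mtu=%d\n",
               packet_size_max, soft_iface_mtu);
        return 0;
    }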
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 6df12a2e3605..853941629dc1 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
@@ -458,6 +458,42 @@ out: | |||
458 | } | 458 | } |
459 | 459 | ||
460 | /** | 460 | /** |
461 | * batadv_neigh_node_get - retrieve a neighbour from the list | ||
462 | * @orig_node: originator which the neighbour belongs to | ||
463 | * @hard_iface: the interface where this neighbour is connected to | ||
464 | * @addr: the address of the neighbour | ||
465 | * | ||
466 | * Looks for and possibly returns a neighbour belonging to this originator list | ||
467 | * which is connected through the provided hard interface. | ||
468 | * Returns NULL if the neighbour is not found. | ||
469 | */ | ||
470 | struct batadv_neigh_node * | ||
471 | batadv_neigh_node_get(const struct batadv_orig_node *orig_node, | ||
472 | const struct batadv_hard_iface *hard_iface, | ||
473 | const uint8_t *addr) | ||
474 | { | ||
475 | struct batadv_neigh_node *tmp_neigh_node, *res = NULL; | ||
476 | |||
477 | rcu_read_lock(); | ||
478 | hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) { | ||
479 | if (!batadv_compare_eth(tmp_neigh_node->addr, addr)) | ||
480 | continue; | ||
481 | |||
482 | if (tmp_neigh_node->if_incoming != hard_iface) | ||
483 | continue; | ||
484 | |||
485 | if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) | ||
486 | continue; | ||
487 | |||
488 | res = tmp_neigh_node; | ||
489 | break; | ||
490 | } | ||
491 | rcu_read_unlock(); | ||
492 | |||
493 | return res; | ||
494 | } | ||
495 | |||
496 | /** | ||
461 | * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object | 497 | * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object |
462 | * @rcu: rcu pointer of the orig_ifinfo object | 498 | * @rcu: rcu pointer of the orig_ifinfo object |
463 | */ | 499 | */ |
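The new batadv_neigh_node_get() walks the neighbour list under rcu_read_lock() and only hands back an entry after atomic_inc_not_zero() succeeds, skipping objects whose refcount has already dropped to zero and which are therefore about to be freed. A user-space C11 sketch of that "increment only while still non-zero" primitive:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool refcount_inc_not_zero(_Atomic int *ref)
    {
        int old = atomic_load(ref);

        while (old != 0) {
            if (atomic_compare_exchange_weak(ref, &old, old + 1))
                return true;      /* took a reference                     */
            /* old was reloaded by the failed CAS; loop and retry         */
        }
        return false;             /* object already dying, skip it        */
    }

    int main(void)
    {
        _Atomic int ref = 2;

        printf("live object:  %d\n", refcount_inc_not_zero(&ref));   /* 1 */
        atomic_store(&ref, 0);
        printf("dying object: %d\n", refcount_inc_not_zero(&ref));   /* 0 */
        return 0;
    }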
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 37be290f63f6..db3a9ed734cb 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h | |||
@@ -29,6 +29,10 @@ void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node); | |||
29 | struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, | 29 | struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, |
30 | const uint8_t *addr); | 30 | const uint8_t *addr); |
31 | struct batadv_neigh_node * | 31 | struct batadv_neigh_node * |
32 | batadv_neigh_node_get(const struct batadv_orig_node *orig_node, | ||
33 | const struct batadv_hard_iface *hard_iface, | ||
34 | const uint8_t *addr); | ||
35 | struct batadv_neigh_node * | ||
32 | batadv_neigh_node_new(struct batadv_hard_iface *hard_iface, | 36 | batadv_neigh_node_new(struct batadv_hard_iface *hard_iface, |
33 | const uint8_t *neigh_addr, | 37 | const uint8_t *neigh_addr, |
34 | struct batadv_orig_node *orig_node); | 38 | struct batadv_orig_node *orig_node); |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 1ed9f7c9ecea..a953d5b196a3 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -688,7 +688,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, | |||
688 | int is_old_ttvn; | 688 | int is_old_ttvn; |
689 | 689 | ||
690 | /* check if there is enough data before accessing it */ | 690 | /* check if there is enough data before accessing it */ |
691 | if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0) | 691 | if (!pskb_may_pull(skb, hdr_len + ETH_HLEN)) |
692 | return 0; | 692 | return 0; |
693 | 693 | ||
694 | /* create a copy of the skb (in case of re-routing) to modify it. */ | 694 | /* create a copy of the skb (in case of re-routing) to modify it. */ |
@@ -918,6 +918,8 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb, | |||
918 | 918 | ||
919 | if (ret != NET_RX_SUCCESS) | 919 | if (ret != NET_RX_SUCCESS) |
920 | ret = batadv_route_unicast_packet(skb, recv_if); | 920 | ret = batadv_route_unicast_packet(skb, recv_if); |
921 | else | ||
922 | consume_skb(skb); | ||
921 | 923 | ||
922 | return ret; | 924 | return ret; |
923 | } | 925 | } |
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 579f5f00a385..843febd1e519 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
@@ -254,9 +254,9 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv, | |||
254 | struct batadv_orig_node *orig_node, | 254 | struct batadv_orig_node *orig_node, |
255 | unsigned short vid) | 255 | unsigned short vid) |
256 | { | 256 | { |
257 | struct ethhdr *ethhdr = (struct ethhdr *)skb->data; | 257 | struct ethhdr *ethhdr; |
258 | struct batadv_unicast_packet *unicast_packet; | 258 | struct batadv_unicast_packet *unicast_packet; |
259 | int ret = NET_XMIT_DROP; | 259 | int ret = NET_XMIT_DROP, hdr_size; |
260 | 260 | ||
261 | if (!orig_node) | 261 | if (!orig_node) |
262 | goto out; | 262 | goto out; |
@@ -265,12 +265,16 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv, | |||
265 | case BATADV_UNICAST: | 265 | case BATADV_UNICAST: |
266 | if (!batadv_send_skb_prepare_unicast(skb, orig_node)) | 266 | if (!batadv_send_skb_prepare_unicast(skb, orig_node)) |
267 | goto out; | 267 | goto out; |
268 | |||
269 | hdr_size = sizeof(*unicast_packet); | ||
268 | break; | 270 | break; |
269 | case BATADV_UNICAST_4ADDR: | 271 | case BATADV_UNICAST_4ADDR: |
270 | if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, | 272 | if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, |
271 | orig_node, | 273 | orig_node, |
272 | packet_subtype)) | 274 | packet_subtype)) |
273 | goto out; | 275 | goto out; |
276 | |||
277 | hdr_size = sizeof(struct batadv_unicast_4addr_packet); | ||
274 | break; | 278 | break; |
275 | default: | 279 | default: |
276 | /* this function supports UNICAST and UNICAST_4ADDR only. It | 280 | /* this function supports UNICAST and UNICAST_4ADDR only. It |
@@ -279,6 +283,7 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv, | |||
279 | goto out; | 283 | goto out; |
280 | } | 284 | } |
281 | 285 | ||
286 | ethhdr = (struct ethhdr *)(skb->data + hdr_size); | ||
282 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 287 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
283 | 288 | ||
284 | /* inform the destination node that we are still missing a correct route | 289 | /* inform the destination node that we are still missing a correct route |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index b6071f675a3e..959dde721c46 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -1975,6 +1975,7 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
1975 | struct hlist_head *head; | 1975 | struct hlist_head *head; |
1976 | uint32_t i, crc_tmp, crc = 0; | 1976 | uint32_t i, crc_tmp, crc = 0; |
1977 | uint8_t flags; | 1977 | uint8_t flags; |
1978 | __be16 tmp_vid; | ||
1978 | 1979 | ||
1979 | for (i = 0; i < hash->size; i++) { | 1980 | for (i = 0; i < hash->size; i++) { |
1980 | head = &hash->table[i]; | 1981 | head = &hash->table[i]; |
@@ -2011,8 +2012,11 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv, | |||
2011 | orig_node)) | 2012 | orig_node)) |
2012 | continue; | 2013 | continue; |
2013 | 2014 | ||
2014 | crc_tmp = crc32c(0, &tt_common->vid, | 2015 | /* use network order to read the VID: this ensures that |
2015 | sizeof(tt_common->vid)); | 2016 | * every node reads the bytes in the same order. |
2017 | */ | ||
2018 | tmp_vid = htons(tt_common->vid); | ||
2019 | crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid)); | ||
2016 | 2020 | ||
2017 | /* compute the CRC on flags that have to be kept in sync | 2021 | /* compute the CRC on flags that have to be kept in sync |
2018 | * among nodes | 2022 | * among nodes |
@@ -2046,6 +2050,7 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv, | |||
2046 | struct hlist_head *head; | 2050 | struct hlist_head *head; |
2047 | uint32_t i, crc_tmp, crc = 0; | 2051 | uint32_t i, crc_tmp, crc = 0; |
2048 | uint8_t flags; | 2052 | uint8_t flags; |
2053 | __be16 tmp_vid; | ||
2049 | 2054 | ||
2050 | for (i = 0; i < hash->size; i++) { | 2055 | for (i = 0; i < hash->size; i++) { |
2051 | head = &hash->table[i]; | 2056 | head = &hash->table[i]; |
@@ -2064,8 +2069,11 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv, | |||
2064 | if (tt_common->flags & BATADV_TT_CLIENT_NEW) | 2069 | if (tt_common->flags & BATADV_TT_CLIENT_NEW) |
2065 | continue; | 2070 | continue; |
2066 | 2071 | ||
2067 | crc_tmp = crc32c(0, &tt_common->vid, | 2072 | /* use network order to read the VID: this ensures that |
2068 | sizeof(tt_common->vid)); | 2073 | * every node reads the bytes in the same order. |
2074 | */ | ||
2075 | tmp_vid = htons(tt_common->vid); | ||
2076 | crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid)); | ||
2069 | 2077 | ||
2070 | /* compute the CRC on flags that have to be kept in sync | 2078 | /* compute the CRC on flags that have to be kept in sync |
2071 | * among nodes | 2079 | * among nodes |
@@ -2262,6 +2270,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node, | |||
2262 | { | 2270 | { |
2263 | struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp; | 2271 | struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp; |
2264 | struct batadv_orig_node_vlan *vlan; | 2272 | struct batadv_orig_node_vlan *vlan; |
2273 | uint32_t crc; | ||
2265 | int i; | 2274 | int i; |
2266 | 2275 | ||
2267 | /* check if each received CRC matches the locally stored one */ | 2276 | /* check if each received CRC matches the locally stored one */ |
@@ -2281,7 +2290,10 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node, | |||
2281 | if (!vlan) | 2290 | if (!vlan) |
2282 | return false; | 2291 | return false; |
2283 | 2292 | ||
2284 | if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc)) | 2293 | crc = vlan->tt.crc; |
2294 | batadv_orig_node_vlan_free_ref(vlan); | ||
2295 | |||
2296 | if (crc != ntohl(tt_vlan_tmp->crc)) | ||
2285 | return false; | 2297 | return false; |
2286 | } | 2298 | } |
2287 | 2299 | ||
@@ -3218,7 +3230,6 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv, | |||
3218 | 3230 | ||
3219 | spin_lock_bh(&orig_node->tt_lock); | 3231 | spin_lock_bh(&orig_node->tt_lock); |
3220 | 3232 | ||
3221 | tt_change = (struct batadv_tvlv_tt_change *)tt_buff; | ||
3222 | batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes, | 3233 | batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes, |
3223 | ttvn, tt_change); | 3234 | ttvn, tt_change); |
3224 | 3235 | ||
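Both translation-table CRC helpers now convert the VID with htons() before feeding it to crc32c(), so little-endian and big-endian nodes hash the same two bytes in the same order and agree on the checksum. A tiny endianness demonstration (the bytes are printed instead of running crc32c, which is a kernel library function):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t vid = 0x0123;                 /* host order: layout differs per CPU */
        uint16_t tmp_vid = htons(vid);         /* network order: same bytes anywhere */
        const unsigned char *p = (const unsigned char *)&tmp_vid;

        /* Prints "01 23" on every architecture, so any checksum computed
         * over these bytes is architecture independent. */
        printf("%02x %02x\n", p[0], p[1]);
        return 0;
    }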
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 292e619db896..d9fb93451442 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c | |||
@@ -430,6 +430,16 @@ static void hidp_del_timer(struct hidp_session *session) | |||
430 | del_timer(&session->timer); | 430 | del_timer(&session->timer); |
431 | } | 431 | } |
432 | 432 | ||
433 | static void hidp_process_report(struct hidp_session *session, | ||
434 | int type, const u8 *data, int len, int intr) | ||
435 | { | ||
436 | if (len > HID_MAX_BUFFER_SIZE) | ||
437 | len = HID_MAX_BUFFER_SIZE; | ||
438 | |||
439 | memcpy(session->input_buf, data, len); | ||
440 | hid_input_report(session->hid, type, session->input_buf, len, intr); | ||
441 | } | ||
442 | |||
433 | static void hidp_process_handshake(struct hidp_session *session, | 443 | static void hidp_process_handshake(struct hidp_session *session, |
434 | unsigned char param) | 444 | unsigned char param) |
435 | { | 445 | { |
@@ -502,7 +512,8 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb, | |||
502 | hidp_input_report(session, skb); | 512 | hidp_input_report(session, skb); |
503 | 513 | ||
504 | if (session->hid) | 514 | if (session->hid) |
505 | hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); | 515 | hidp_process_report(session, HID_INPUT_REPORT, |
516 | skb->data, skb->len, 0); | ||
506 | break; | 517 | break; |
507 | 518 | ||
508 | case HIDP_DATA_RTYPE_OTHER: | 519 | case HIDP_DATA_RTYPE_OTHER: |
@@ -584,7 +595,8 @@ static void hidp_recv_intr_frame(struct hidp_session *session, | |||
584 | hidp_input_report(session, skb); | 595 | hidp_input_report(session, skb); |
585 | 596 | ||
586 | if (session->hid) { | 597 | if (session->hid) { |
587 | hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1); | 598 | hidp_process_report(session, HID_INPUT_REPORT, |
599 | skb->data, skb->len, 1); | ||
588 | BT_DBG("report len %d", skb->len); | 600 | BT_DBG("report len %d", skb->len); |
589 | } | 601 | } |
590 | } else { | 602 | } else { |
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h index ab5241400cf7..8798492a6e99 100644 --- a/net/bluetooth/hidp/hidp.h +++ b/net/bluetooth/hidp/hidp.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #define __HIDP_H | 24 | #define __HIDP_H |
25 | 25 | ||
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/hid.h> | ||
27 | #include <linux/kref.h> | 28 | #include <linux/kref.h> |
28 | #include <net/bluetooth/bluetooth.h> | 29 | #include <net/bluetooth/bluetooth.h> |
29 | #include <net/bluetooth/l2cap.h> | 30 | #include <net/bluetooth/l2cap.h> |
@@ -179,6 +180,9 @@ struct hidp_session { | |||
179 | 180 | ||
180 | /* Used in hidp_output_raw_report() */ | 181 | /* Used in hidp_output_raw_report() */ |
181 | int output_report_success; /* boolean */ | 182 | int output_report_success; /* boolean */ |
183 | |||
184 | /* temporary input buffer */ | ||
185 | u8 input_buf[HID_MAX_BUFFER_SIZE]; | ||
182 | }; | 186 | }; |
183 | 187 | ||
184 | /* HIDP init defines */ | 188 | /* HIDP init defines */ |
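The HIDP hunks route incoming reports through the new hidp_process_report(), which copies the data into a fixed per-session buffer and clamps the length to HID_MAX_BUFFER_SIZE before calling hid_input_report(), so the length handed on is always bounded by the buffer size. The pattern in isolation; buffer size and names below are illustrative, not the HIDP definitions.

    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 64   /* stands in for HID_MAX_BUFFER_SIZE */

    static unsigned char input_buf[BUF_SIZE];

    static size_t copy_report(const unsigned char *data, size_t len)
    {
        if (len > sizeof(input_buf))
            len = sizeof(input_buf);          /* clamp before copying        */
        memcpy(input_buf, data, len);
        return len;                           /* pass the clamped length on  */
    }

    int main(void)
    {
        unsigned char frame[200] = { 0 };

        printf("copied %zu bytes\n", copy_report(frame, sizeof(frame)));
        return 0;
    }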
diff --git a/net/core/dev.c b/net/core/dev.c index 4ad1b78c9c77..b1b0c8d4d7df 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2420,7 +2420,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault); | |||
2420 | * 2. No high memory really exists on this machine. | 2420 | * 2. No high memory really exists on this machine. |
2421 | */ | 2421 | */ |
2422 | 2422 | ||
2423 | static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) | 2423 | static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb) |
2424 | { | 2424 | { |
2425 | #ifdef CONFIG_HIGHMEM | 2425 | #ifdef CONFIG_HIGHMEM |
2426 | int i; | 2426 | int i; |
@@ -2495,34 +2495,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) | |||
2495 | } | 2495 | } |
2496 | 2496 | ||
2497 | static netdev_features_t harmonize_features(struct sk_buff *skb, | 2497 | static netdev_features_t harmonize_features(struct sk_buff *skb, |
2498 | netdev_features_t features) | 2498 | const struct net_device *dev, |
2499 | netdev_features_t features) | ||
2499 | { | 2500 | { |
2500 | if (skb->ip_summed != CHECKSUM_NONE && | 2501 | if (skb->ip_summed != CHECKSUM_NONE && |
2501 | !can_checksum_protocol(features, skb_network_protocol(skb))) { | 2502 | !can_checksum_protocol(features, skb_network_protocol(skb))) { |
2502 | features &= ~NETIF_F_ALL_CSUM; | 2503 | features &= ~NETIF_F_ALL_CSUM; |
2503 | } else if (illegal_highdma(skb->dev, skb)) { | 2504 | } else if (illegal_highdma(dev, skb)) { |
2504 | features &= ~NETIF_F_SG; | 2505 | features &= ~NETIF_F_SG; |
2505 | } | 2506 | } |
2506 | 2507 | ||
2507 | return features; | 2508 | return features; |
2508 | } | 2509 | } |
2509 | 2510 | ||
2510 | netdev_features_t netif_skb_features(struct sk_buff *skb) | 2511 | netdev_features_t netif_skb_dev_features(struct sk_buff *skb, |
2512 | const struct net_device *dev) | ||
2511 | { | 2513 | { |
2512 | __be16 protocol = skb->protocol; | 2514 | __be16 protocol = skb->protocol; |
2513 | netdev_features_t features = skb->dev->features; | 2515 | netdev_features_t features = dev->features; |
2514 | 2516 | ||
2515 | if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) | 2517 | if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs) |
2516 | features &= ~NETIF_F_GSO_MASK; | 2518 | features &= ~NETIF_F_GSO_MASK; |
2517 | 2519 | ||
2518 | if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { | 2520 | if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { |
2519 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; | 2521 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; |
2520 | protocol = veh->h_vlan_encapsulated_proto; | 2522 | protocol = veh->h_vlan_encapsulated_proto; |
2521 | } else if (!vlan_tx_tag_present(skb)) { | 2523 | } else if (!vlan_tx_tag_present(skb)) { |
2522 | return harmonize_features(skb, features); | 2524 | return harmonize_features(skb, dev, features); |
2523 | } | 2525 | } |
2524 | 2526 | ||
2525 | features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | | 2527 | features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | |
2526 | NETIF_F_HW_VLAN_STAG_TX); | 2528 | NETIF_F_HW_VLAN_STAG_TX); |
2527 | 2529 | ||
2528 | if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) | 2530 | if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) |
@@ -2530,9 +2532,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) | |||
2530 | NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | | 2532 | NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | |
2531 | NETIF_F_HW_VLAN_STAG_TX; | 2533 | NETIF_F_HW_VLAN_STAG_TX; |
2532 | 2534 | ||
2533 | return harmonize_features(skb, features); | 2535 | return harmonize_features(skb, dev, features); |
2534 | } | 2536 | } |
2535 | EXPORT_SYMBOL(netif_skb_features); | 2537 | EXPORT_SYMBOL(netif_skb_dev_features); |
2536 | 2538 | ||
2537 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 2539 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
2538 | struct netdev_queue *txq) | 2540 | struct netdev_queue *txq) |
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 87577d447554..e29e810663d7 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -323,17 +323,6 @@ u32 __skb_get_poff(const struct sk_buff *skb) | |||
323 | return poff; | 323 | return poff; |
324 | } | 324 | } |
325 | 325 | ||
326 | static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) | ||
327 | { | ||
328 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | ||
329 | net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", | ||
330 | dev->name, queue_index, | ||
331 | dev->real_num_tx_queues); | ||
332 | return 0; | ||
333 | } | ||
334 | return queue_index; | ||
335 | } | ||
336 | |||
337 | static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) | 326 | static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) |
338 | { | 327 | { |
339 | #ifdef CONFIG_XPS | 328 | #ifdef CONFIG_XPS |
@@ -372,7 +361,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) | |||
372 | #endif | 361 | #endif |
373 | } | 362 | } |
374 | 363 | ||
375 | u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) | 364 | static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) |
376 | { | 365 | { |
377 | struct sock *sk = skb->sk; | 366 | struct sock *sk = skb->sk; |
378 | int queue_index = sk_tx_queue_get(sk); | 367 | int queue_index = sk_tx_queue_get(sk); |
@@ -392,7 +381,6 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) | |||
392 | 381 | ||
393 | return queue_index; | 382 | return queue_index; |
394 | } | 383 | } |
395 | EXPORT_SYMBOL(__netdev_pick_tx); | ||
396 | 384 | ||
397 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, | 385 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
398 | struct sk_buff *skb, | 386 | struct sk_buff *skb, |
@@ -403,13 +391,13 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev, | |||
403 | if (dev->real_num_tx_queues != 1) { | 391 | if (dev->real_num_tx_queues != 1) { |
404 | const struct net_device_ops *ops = dev->netdev_ops; | 392 | const struct net_device_ops *ops = dev->netdev_ops; |
405 | if (ops->ndo_select_queue) | 393 | if (ops->ndo_select_queue) |
406 | queue_index = ops->ndo_select_queue(dev, skb, | 394 | queue_index = ops->ndo_select_queue(dev, skb, accel_priv, |
407 | accel_priv); | 395 | __netdev_pick_tx); |
408 | else | 396 | else |
409 | queue_index = __netdev_pick_tx(dev, skb); | 397 | queue_index = __netdev_pick_tx(dev, skb); |
410 | 398 | ||
411 | if (!accel_priv) | 399 | if (!accel_priv) |
412 | queue_index = dev_cap_txqueue(dev, queue_index); | 400 | queue_index = netdev_cap_txqueue(dev, queue_index); |
413 | } | 401 | } |
414 | 402 | ||
415 | skb_set_queue_mapping(skb, queue_index); | 403 | skb_set_queue_mapping(skb, queue_index); |
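With this hunk __netdev_pick_tx() becomes static and is no longer exported; instead the core passes it to the driver as the fallback argument of ndo_select_queue(), and for non-accelerated paths the chosen index is capped with netdev_cap_txqueue() (the mac80211 and af_packet hunks further down adapt to the same signature). A sketch of the shape of that callback contract, with stub types that are not the in-kernel signatures:

    #include <stdio.h>

    struct dev {
        unsigned short real_num_tx_queues;
        unsigned short (*select_queue)(struct dev *dev, int pkt,
                                       unsigned short (*fallback)(struct dev *, int));
    };

    static unsigned short core_fallback(struct dev *dev, int pkt)
    {
        return (unsigned short)(pkt % dev->real_num_tx_queues);
    }

    static unsigned short cap_txqueue(struct dev *dev, unsigned short q)
    {
        return q < dev->real_num_tx_queues ? q : 0;   /* out of range -> 0 */
    }

    static unsigned short pick_tx(struct dev *dev, int pkt)
    {
        unsigned short q;

        if (dev->select_queue)
            q = dev->select_queue(dev, pkt, core_fallback);
        else
            q = core_fallback(dev, pkt);
        return cap_txqueue(dev, q);
    }

    /* A "driver" that special-cases packet 0 and defers otherwise. */
    static unsigned short drv_select(struct dev *dev, int pkt,
                                     unsigned short (*fallback)(struct dev *, int))
    {
        return pkt == 0 ? 0 : fallback(dev, pkt);
    }

    int main(void)
    {
        struct dev d = { .real_num_tx_queues = 4, .select_queue = drv_select };

        printf("queue for pkt 7: %u\n", pick_tx(&d, 7));
        return 0;
    }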
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 7b2ad564b303..fc122fdb266a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -1968,16 +1968,21 @@ replay: | |||
1968 | 1968 | ||
1969 | dev->ifindex = ifm->ifi_index; | 1969 | dev->ifindex = ifm->ifi_index; |
1970 | 1970 | ||
1971 | if (ops->newlink) | 1971 | if (ops->newlink) { |
1972 | err = ops->newlink(net, dev, tb, data); | 1972 | err = ops->newlink(net, dev, tb, data); |
1973 | else | 1973 | /* Drivers should call free_netdev() in ->destructor |
1974 | * and unregister it on failure so that the device can be | ||
1975 | * finally freed in rtnl_unlock. | ||
1976 | */ | ||
1977 | if (err < 0) | ||
1978 | goto out; | ||
1979 | } else { | ||
1974 | err = register_netdevice(dev); | 1980 | err = register_netdevice(dev); |
1975 | 1981 | if (err < 0) { | |
1976 | if (err < 0) { | 1982 | free_netdev(dev); |
1977 | free_netdev(dev); | 1983 | goto out; |
1978 | goto out; | 1984 | } |
1979 | } | 1985 | } |
1980 | |||
1981 | err = rtnl_configure_link(dev, ifm); | 1986 | err = rtnl_configure_link(dev, ifm); |
1982 | if (err < 0) | 1987 | if (err < 0) |
1983 | unregister_netdevice(dev); | 1988 | unregister_netdevice(dev); |
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c index c073b81a1f3e..62b5828acde0 100644 --- a/net/dccp/ccids/lib/tfrc.c +++ b/net/dccp/ccids/lib/tfrc.c | |||
@@ -8,7 +8,7 @@ | |||
8 | #include "tfrc.h" | 8 | #include "tfrc.h" |
9 | 9 | ||
10 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG | 10 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG |
11 | static bool tfrc_debug; | 11 | bool tfrc_debug; |
12 | module_param(tfrc_debug, bool, 0644); | 12 | module_param(tfrc_debug, bool, 0644); |
13 | MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages"); | 13 | MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages"); |
14 | #endif | 14 | #endif |
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h index a3d8f7c76ae0..40ee7d62b652 100644 --- a/net/dccp/ccids/lib/tfrc.h +++ b/net/dccp/ccids/lib/tfrc.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #include "packet_history.h" | 21 | #include "packet_history.h" |
22 | 22 | ||
23 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG | 23 | #ifdef CONFIG_IP_DCCP_TFRC_DEBUG |
24 | extern bool tfrc_debug; | ||
24 | #define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a) | 25 | #define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a) |
25 | #else | 26 | #else |
26 | #define tfrc_pr_debug(format, a...) | 27 | #define tfrc_pr_debug(format, a...) |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index d9d929042a89..be8abe73bb9f 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -39,6 +39,71 @@ | |||
39 | #include <net/route.h> | 39 | #include <net/route.h> |
40 | #include <net/xfrm.h> | 40 | #include <net/xfrm.h> |
41 | 41 | ||
42 | static bool ip_may_fragment(const struct sk_buff *skb) | ||
43 | { | ||
44 | return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || | ||
45 | !skb->local_df; | ||
46 | } | ||
47 | |||
48 | static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) | ||
49 | { | ||
50 | if (skb->len <= mtu || skb->local_df) | ||
51 | return false; | ||
52 | |||
53 | if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) | ||
54 | return false; | ||
55 | |||
56 | return true; | ||
57 | } | ||
58 | |||
59 | static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb) | ||
60 | { | ||
61 | unsigned int mtu; | ||
62 | |||
63 | if (skb->local_df || !skb_is_gso(skb)) | ||
64 | return false; | ||
65 | |||
66 | mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true); | ||
67 | |||
68 | /* if seglen > mtu, do software segmentation for IP fragmentation on | ||
69 | * output. DF bit cannot be set since ip_forward would have sent | ||
70 | * icmp error. | ||
71 | */ | ||
72 | return skb_gso_network_seglen(skb) > mtu; | ||
73 | } | ||
74 | |||
75 | /* called if GSO skb needs to be fragmented on forward */ | ||
76 | static int ip_forward_finish_gso(struct sk_buff *skb) | ||
77 | { | ||
78 | struct dst_entry *dst = skb_dst(skb); | ||
79 | netdev_features_t features; | ||
80 | struct sk_buff *segs; | ||
81 | int ret = 0; | ||
82 | |||
83 | features = netif_skb_dev_features(skb, dst->dev); | ||
84 | segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); | ||
85 | if (IS_ERR(segs)) { | ||
86 | kfree_skb(skb); | ||
87 | return -ENOMEM; | ||
88 | } | ||
89 | |||
90 | consume_skb(skb); | ||
91 | |||
92 | do { | ||
93 | struct sk_buff *nskb = segs->next; | ||
94 | int err; | ||
95 | |||
96 | segs->next = NULL; | ||
97 | err = dst_output(segs); | ||
98 | |||
99 | if (err && ret == 0) | ||
100 | ret = err; | ||
101 | segs = nskb; | ||
102 | } while (segs); | ||
103 | |||
104 | return ret; | ||
105 | } | ||
106 | |||
42 | static int ip_forward_finish(struct sk_buff *skb) | 107 | static int ip_forward_finish(struct sk_buff *skb) |
43 | { | 108 | { |
44 | struct ip_options *opt = &(IPCB(skb)->opt); | 109 | struct ip_options *opt = &(IPCB(skb)->opt); |
@@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb) | |||
49 | if (unlikely(opt->optlen)) | 114 | if (unlikely(opt->optlen)) |
50 | ip_forward_options(skb); | 115 | ip_forward_options(skb); |
51 | 116 | ||
117 | if (ip_gso_exceeds_dst_mtu(skb)) | ||
118 | return ip_forward_finish_gso(skb); | ||
119 | |||
52 | return dst_output(skb); | 120 | return dst_output(skb); |
53 | } | 121 | } |
54 | 122 | ||
@@ -92,8 +160,7 @@ int ip_forward(struct sk_buff *skb) | |||
92 | 160 | ||
93 | IPCB(skb)->flags |= IPSKB_FORWARDED; | 161 | IPCB(skb)->flags |= IPSKB_FORWARDED; |
94 | mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); | 162 | mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); |
95 | if (unlikely(skb->len > mtu && !skb_is_gso(skb) && | 163 | if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) { |
96 | (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { | ||
97 | IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); | 164 | IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); |
98 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, | 165 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, |
99 | htonl(mtu)); | 166 | htonl(mtu)); |
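When a GSO packet's per-segment length exceeds the route MTU, ip_forward_finish() now hands it to ip_forward_finish_gso(), which software-segments it (GSO features masked off), frees the original with consume_skb(), sends each segment through dst_output() and reports the first error encountered. A user-space sketch of that walk-and-send loop over a singly linked segment list; the types are stubs and the send function merely stands in for dst_output().

    #include <stdio.h>

    struct seg {
        struct seg *next;
        int id;
    };

    static int send_one(struct seg *s)
    {
        printf("sending segment %d\n", s->id);
        return s->id == 2 ? -5 : 0;    /* pretend segment 2 fails */
    }

    /* Mirrors the shape of ip_forward_finish_gso(): detach each segment,
     * send it, and report the first error seen. */
    static int send_segments(struct seg *segs)
    {
        int ret = 0;

        while (segs) {
            struct seg *next = segs->next;
            int err;

            segs->next = NULL;
            err = send_one(segs);
            if (err && ret == 0)
                ret = err;
            segs = next;
        }
        return ret;
    }

    int main(void)
    {
        struct seg c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };

        printf("first error: %d\n", send_segments(&a));
        return 0;
    }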
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index efa1138fa523..b3e86ea7b71b 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -273,7 +273,7 @@ static int __init ic_open_devs(void) | |||
273 | 273 | ||
274 | msleep(1); | 274 | msleep(1); |
275 | 275 | ||
276 | if time_before(jiffies, next_msg) | 276 | if (time_before(jiffies, next_msg)) |
277 | continue; | 277 | continue; |
278 | 278 | ||
279 | elapsed = jiffies_to_msecs(jiffies - start); | 279 | elapsed = jiffies_to_msecs(jiffies - start); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 271554c61276..11e4384daaf9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1596,6 +1596,7 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1596 | rth->rt_gateway = 0; | 1596 | rth->rt_gateway = 0; |
1597 | rth->rt_uses_gateway = 0; | 1597 | rth->rt_uses_gateway = 0; |
1598 | INIT_LIST_HEAD(&rth->rt_uncached); | 1598 | INIT_LIST_HEAD(&rth->rt_uncached); |
1599 | RT_CACHE_STAT_INC(in_slow_tot); | ||
1599 | 1600 | ||
1600 | rth->dst.input = ip_forward; | 1601 | rth->dst.input = ip_forward; |
1601 | rth->dst.output = ip_output; | 1602 | rth->dst.output = ip_output; |
@@ -1694,10 +1695,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1694 | fl4.daddr = daddr; | 1695 | fl4.daddr = daddr; |
1695 | fl4.saddr = saddr; | 1696 | fl4.saddr = saddr; |
1696 | err = fib_lookup(net, &fl4, &res); | 1697 | err = fib_lookup(net, &fl4, &res); |
1697 | if (err != 0) | 1698 | if (err != 0) { |
1699 | if (!IN_DEV_FORWARD(in_dev)) | ||
1700 | err = -EHOSTUNREACH; | ||
1698 | goto no_route; | 1701 | goto no_route; |
1699 | 1702 | } | |
1700 | RT_CACHE_STAT_INC(in_slow_tot); | ||
1701 | 1703 | ||
1702 | if (res.type == RTN_BROADCAST) | 1704 | if (res.type == RTN_BROADCAST) |
1703 | goto brd_input; | 1705 | goto brd_input; |
@@ -1711,8 +1713,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1711 | goto local_input; | 1713 | goto local_input; |
1712 | } | 1714 | } |
1713 | 1715 | ||
1714 | if (!IN_DEV_FORWARD(in_dev)) | 1716 | if (!IN_DEV_FORWARD(in_dev)) { |
1717 | err = -EHOSTUNREACH; | ||
1715 | goto no_route; | 1718 | goto no_route; |
1719 | } | ||
1716 | if (res.type != RTN_UNICAST) | 1720 | if (res.type != RTN_UNICAST) |
1717 | goto martian_destination; | 1721 | goto martian_destination; |
1718 | 1722 | ||
@@ -1767,6 +1771,7 @@ local_input: | |||
1767 | rth->rt_gateway = 0; | 1771 | rth->rt_gateway = 0; |
1768 | rth->rt_uses_gateway = 0; | 1772 | rth->rt_uses_gateway = 0; |
1769 | INIT_LIST_HEAD(&rth->rt_uncached); | 1773 | INIT_LIST_HEAD(&rth->rt_uncached); |
1774 | RT_CACHE_STAT_INC(in_slow_tot); | ||
1770 | if (res.type == RTN_UNREACHABLE) { | 1775 | if (res.type == RTN_UNREACHABLE) { |
1771 | rth->dst.input= ip_error; | 1776 | rth->dst.input= ip_error; |
1772 | rth->dst.error= -err; | 1777 | rth->dst.error= -err; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ad235690684c..fdbfeca36d63 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -2783,6 +2783,8 @@ static void addrconf_gre_config(struct net_device *dev) | |||
2783 | ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); | 2783 | ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); |
2784 | if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) | 2784 | if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) |
2785 | addrconf_add_linklocal(idev, &addr); | 2785 | addrconf_add_linklocal(idev, &addr); |
2786 | else | ||
2787 | addrconf_prefix_route(&addr, 64, dev, 0, 0); | ||
2786 | } | 2788 | } |
2787 | #endif | 2789 | #endif |
2788 | 2790 | ||
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index ef02b26ccf81..070a2fae2375 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -342,6 +342,20 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) | |||
342 | return mtu; | 342 | return mtu; |
343 | } | 343 | } |
344 | 344 | ||
345 | static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) | ||
346 | { | ||
347 | if (skb->len <= mtu || skb->local_df) | ||
348 | return false; | ||
349 | |||
350 | if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) | ||
351 | return true; | ||
352 | |||
353 | if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) | ||
354 | return false; | ||
355 | |||
356 | return true; | ||
357 | } | ||
358 | |||
345 | int ip6_forward(struct sk_buff *skb) | 359 | int ip6_forward(struct sk_buff *skb) |
346 | { | 360 | { |
347 | struct dst_entry *dst = skb_dst(skb); | 361 | struct dst_entry *dst = skb_dst(skb); |
@@ -466,8 +480,7 @@ int ip6_forward(struct sk_buff *skb) | |||
466 | if (mtu < IPV6_MIN_MTU) | 480 | if (mtu < IPV6_MIN_MTU) |
467 | mtu = IPV6_MIN_MTU; | 481 | mtu = IPV6_MIN_MTU; |
468 | 482 | ||
469 | if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) || | 483 | if (ip6_pkt_too_big(skb, mtu)) { |
470 | (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { | ||
471 | /* Again, force OUTPUT device used as source address */ | 484 | /* Again, force OUTPUT device used as source address */ |
472 | skb->dev = dst->dev; | 485 | skb->dev = dst->dev; |
473 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 486 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
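The IPv6 forwarding size check is factored into ip6_pkt_too_big(): a packet passes if it fits the MTU or has local_df set, fails if the recorded frag_max_size exceeds the MTU, and for GSO packets the per-segment length is what gets compared against the MTU. The same decision tree as a stand-alone predicate, with stub fields rather than struct sk_buff:

    #include <stdbool.h>
    #include <stdio.h>

    struct pkt {
        unsigned int len;
        unsigned int frag_max_size;   /* 0 if not a reassembled packet */
        unsigned int gso_seglen;      /* 0 if not GSO                  */
        bool local_df;
    };

    static bool pkt_too_big(const struct pkt *p, unsigned int mtu)
    {
        if (p->len <= mtu || p->local_df)
            return false;
        if (p->frag_max_size && p->frag_max_size > mtu)
            return true;
        if (p->gso_seglen && p->gso_seglen <= mtu)
            return false;             /* GSO: segments fit on their own */
        return true;
    }

    int main(void)
    {
        struct pkt gso = { .len = 9000, .gso_seglen = 1400 };

        printf("too big? %d\n", pkt_too_big(&gso, 1500));   /* prints 0 */
        return 0;
    }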
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index d6d1f1df9119..ce1c44370610 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1057,7 +1057,8 @@ static void ieee80211_uninit(struct net_device *dev) | |||
1057 | 1057 | ||
1058 | static u16 ieee80211_netdev_select_queue(struct net_device *dev, | 1058 | static u16 ieee80211_netdev_select_queue(struct net_device *dev, |
1059 | struct sk_buff *skb, | 1059 | struct sk_buff *skb, |
1060 | void *accel_priv) | 1060 | void *accel_priv, |
1061 | select_queue_fallback_t fallback) | ||
1061 | { | 1062 | { |
1062 | return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); | 1063 | return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); |
1063 | } | 1064 | } |
@@ -1075,7 +1076,8 @@ static const struct net_device_ops ieee80211_dataif_ops = { | |||
1075 | 1076 | ||
1076 | static u16 ieee80211_monitor_select_queue(struct net_device *dev, | 1077 | static u16 ieee80211_monitor_select_queue(struct net_device *dev, |
1077 | struct sk_buff *skb, | 1078 | struct sk_buff *skb, |
1078 | void *accel_priv) | 1079 | void *accel_priv, |
1080 | select_queue_fallback_t fallback) | ||
1079 | { | 1081 | { |
1080 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1082 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1081 | struct ieee80211_local *local = sdata->local; | 1083 | struct ieee80211_local *local = sdata->local; |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 6a2bb37506c5..48a6a93db296 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -308,11 +308,27 @@ static bool packet_use_direct_xmit(const struct packet_sock *po) | |||
308 | return po->xmit == packet_direct_xmit; | 308 | return po->xmit == packet_direct_xmit; |
309 | } | 309 | } |
310 | 310 | ||
311 | static u16 packet_pick_tx_queue(struct net_device *dev) | 311 | static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) |
312 | { | 312 | { |
313 | return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; | 313 | return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; |
314 | } | 314 | } |
315 | 315 | ||
316 | static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) | ||
317 | { | ||
318 | const struct net_device_ops *ops = dev->netdev_ops; | ||
319 | u16 queue_index; | ||
320 | |||
321 | if (ops->ndo_select_queue) { | ||
322 | queue_index = ops->ndo_select_queue(dev, skb, NULL, | ||
323 | __packet_pick_tx_queue); | ||
324 | queue_index = netdev_cap_txqueue(dev, queue_index); | ||
325 | } else { | ||
326 | queue_index = __packet_pick_tx_queue(dev, skb); | ||
327 | } | ||
328 | |||
329 | skb_set_queue_mapping(skb, queue_index); | ||
330 | } | ||
331 | |||
316 | /* register_prot_hook must be invoked with the po->bind_lock held, | 332 | /* register_prot_hook must be invoked with the po->bind_lock held, |
317 | * or from a context in which asynchronous accesses to the packet | 333 | * or from a context in which asynchronous accesses to the packet |
318 | * socket is not possible (packet_create()). | 334 | * socket is not possible (packet_create()). |
@@ -2285,7 +2301,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | |||
2285 | } | 2301 | } |
2286 | } | 2302 | } |
2287 | 2303 | ||
2288 | skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); | 2304 | packet_pick_tx_queue(dev, skb); |
2305 | |||
2289 | skb->destructor = tpacket_destruct_skb; | 2306 | skb->destructor = tpacket_destruct_skb; |
2290 | __packet_set_status(po, ph, TP_STATUS_SENDING); | 2307 | __packet_set_status(po, ph, TP_STATUS_SENDING); |
2291 | packet_inc_pending(&po->tx_ring); | 2308 | packet_inc_pending(&po->tx_ring); |
@@ -2499,7 +2516,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2499 | skb->dev = dev; | 2516 | skb->dev = dev; |
2500 | skb->priority = sk->sk_priority; | 2517 | skb->priority = sk->sk_priority; |
2501 | skb->mark = sk->sk_mark; | 2518 | skb->mark = sk->sk_mark; |
2502 | skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); | 2519 | |
2520 | packet_pick_tx_queue(dev, skb); | ||
2503 | 2521 | ||
2504 | if (po->has_vnet_hdr) { | 2522 | if (po->has_vnet_hdr) { |
2505 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | 2523 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
@@ -3786,7 +3804,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
3786 | */ | 3804 | */ |
3787 | if (!tx_ring) | 3805 | if (!tx_ring) |
3788 | init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); | 3806 | init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); |
3789 | break; | 3807 | break; |
3790 | default: | 3808 | default: |
3791 | break; | 3809 | break; |
3792 | } | 3810 | } |
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index a255d0200a59..fefeeb73f15f 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c | |||
@@ -15,6 +15,11 @@ | |||
15 | * | 15 | * |
16 | * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no> | 16 | * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no> |
17 | * University of Oslo, Norway. | 17 | * University of Oslo, Norway. |
18 | * | ||
19 | * References: | ||
20 | * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00 | ||
21 | * IEEE Conference on High Performance Switching and Routing 2013: | ||
22 | * "PIE: A Lightweight Control Scheme to Address the Bufferbloat Problem" | ||
18 | */ | 23 | */ |
19 | 24 | ||
20 | #include <linux/module.h> | 25 | #include <linux/module.h> |
@@ -36,7 +41,7 @@ struct pie_params { | |||
36 | psched_time_t target; /* user specified target delay in pschedtime */ | 41 | psched_time_t target; /* user specified target delay in pschedtime */ |
37 | u32 tupdate; /* timer frequency (in jiffies) */ | 42 | u32 tupdate; /* timer frequency (in jiffies) */ |
38 | u32 limit; /* number of packets that can be enqueued */ | 43 | u32 limit; /* number of packets that can be enqueued */ |
39 | u32 alpha; /* alpha and beta are between -4 and 4 */ | 44 | u32 alpha; /* alpha and beta are between 0 and 32 */ |
40 | u32 beta; /* and are used for shift relative to 1 */ | 45 | u32 beta; /* and are used for shift relative to 1 */ |
41 | bool ecn; /* true if ecn is enabled */ | 46 | bool ecn; /* true if ecn is enabled */ |
42 | bool bytemode; /* to scale drop early prob based on pkt size */ | 47 | bool bytemode; /* to scale drop early prob based on pkt size */ |
@@ -326,10 +331,16 @@ static void calculate_probability(struct Qdisc *sch) | |||
326 | if (qdelay == 0 && qlen != 0) | 331 | if (qdelay == 0 && qlen != 0) |
327 | update_prob = false; | 332 | update_prob = false; |
328 | 333 | ||
329 | /* Add ranges for alpha and beta, more aggressive for high dropping | 334 | /* In the algorithm, alpha and beta are between 0 and 2 with typical |
330 | * mode and gentle steps for light dropping mode | 335 | * value for alpha as 0.125. In this implementation, we use values 0-32 |
331 | * In light dropping mode, take gentle steps; in medium dropping mode, | 336 | * passed from user space to represent this. Also, alpha and beta have |
332 | * take medium steps; in high dropping mode, take big steps. | 337 | * unit of HZ and need to be scaled before they can used to update |
338 | * probability. alpha/beta are updated locally below by 1) scaling them | ||
339 | * appropriately 2) scaling down by 16 to come to 0-2 range. | ||
340 | * Please see paper for details. | ||
341 | * | ||
342 | * We scale alpha and beta differently depending on whether we are in | ||
343 | * light, medium or high dropping mode. | ||
333 | */ | 344 | */ |
334 | if (q->vars.prob < MAX_PROB / 100) { | 345 | if (q->vars.prob < MAX_PROB / 100) { |
335 | alpha = | 346 | alpha = |
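The rewritten PIE comment states that alpha and beta arrive from user space as integers in the 0-32 range standing for the paper's 0-2 range, so scaling down by 16 recovers the real coefficient and the typical alpha of 0.125 is configured as 2. A two-line arithmetic check of that mapping:

    #include <stdio.h>

    int main(void)
    {
        unsigned int user_alpha = 2;              /* value passed from user space */
        double real_alpha = user_alpha / 16.0;    /* back to the paper's 0..2 range */

        printf("alpha = %.3f\n", real_alpha);     /* 0.125, the typical value */
        return 0;
    }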
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 5ae609200674..f558433537b8 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1367,44 +1367,35 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc) | |||
1367 | return false; | 1367 | return false; |
1368 | } | 1368 | } |
1369 | 1369 | ||
1370 | /* Increase asoc's rwnd by len and send any window update SACK if needed. */ | 1370 | /* Update asoc's rwnd for the approximated state in the buffer, |
1371 | void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) | 1371 | * and check whether SACK needs to be sent. |
1372 | */ | ||
1373 | void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer) | ||
1372 | { | 1374 | { |
1375 | int rx_count; | ||
1373 | struct sctp_chunk *sack; | 1376 | struct sctp_chunk *sack; |
1374 | struct timer_list *timer; | 1377 | struct timer_list *timer; |
1375 | 1378 | ||
1376 | if (asoc->rwnd_over) { | 1379 | if (asoc->ep->rcvbuf_policy) |
1377 | if (asoc->rwnd_over >= len) { | 1380 | rx_count = atomic_read(&asoc->rmem_alloc); |
1378 | asoc->rwnd_over -= len; | 1381 | else |
1379 | } else { | 1382 | rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); |
1380 | asoc->rwnd += (len - asoc->rwnd_over); | ||
1381 | asoc->rwnd_over = 0; | ||
1382 | } | ||
1383 | } else { | ||
1384 | asoc->rwnd += len; | ||
1385 | } | ||
1386 | 1383 | ||
1387 | /* If we had window pressure, start recovering it | 1384 | if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0) |
1388 | * once our rwnd had reached the accumulated pressure | 1385 | asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1; |
1389 | * threshold. The idea is to recover slowly, but up | 1386 | else |
1390 | * to the initial advertised window. | 1387 | asoc->rwnd = 0; |
1391 | */ | ||
1392 | if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) { | ||
1393 | int change = min(asoc->pathmtu, asoc->rwnd_press); | ||
1394 | asoc->rwnd += change; | ||
1395 | asoc->rwnd_press -= change; | ||
1396 | } | ||
1397 | 1388 | ||
1398 | pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", | 1389 | pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n", |
1399 | __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, | 1390 | __func__, asoc, asoc->rwnd, rx_count, |
1400 | asoc->a_rwnd); | 1391 | asoc->base.sk->sk_rcvbuf); |
1401 | 1392 | ||
1402 | /* Send a window update SACK if the rwnd has increased by at least the | 1393 | /* Send a window update SACK if the rwnd has increased by at least the |
1403 | * minimum of the association's PMTU and half of the receive buffer. | 1394 | * minimum of the association's PMTU and half of the receive buffer. |
1404 | * The algorithm used is similar to the one described in | 1395 | * The algorithm used is similar to the one described in |
1405 | * Section 4.2.3.3 of RFC 1122. | 1396 | * Section 4.2.3.3 of RFC 1122. |
1406 | */ | 1397 | */ |
1407 | if (sctp_peer_needs_update(asoc)) { | 1398 | if (update_peer && sctp_peer_needs_update(asoc)) { |
1408 | asoc->a_rwnd = asoc->rwnd; | 1399 | asoc->a_rwnd = asoc->rwnd; |
1409 | 1400 | ||
1410 | pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " | 1401 | pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " |
@@ -1426,45 +1417,6 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) | |||
1426 | } | 1417 | } |
1427 | } | 1418 | } |
1428 | 1419 | ||
1429 | /* Decrease asoc's rwnd by len. */ | ||
1430 | void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) | ||
1431 | { | ||
1432 | int rx_count; | ||
1433 | int over = 0; | ||
1434 | |||
1435 | if (unlikely(!asoc->rwnd || asoc->rwnd_over)) | ||
1436 | pr_debug("%s: association:%p has asoc->rwnd:%u, " | ||
1437 | "asoc->rwnd_over:%u!\n", __func__, asoc, | ||
1438 | asoc->rwnd, asoc->rwnd_over); | ||
1439 | |||
1440 | if (asoc->ep->rcvbuf_policy) | ||
1441 | rx_count = atomic_read(&asoc->rmem_alloc); | ||
1442 | else | ||
1443 | rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); | ||
1444 | |||
1445 | /* If we've reached or overflowed our receive buffer, announce | ||
1446 | * a 0 rwnd if rwnd would still be positive. Store the | ||
1447 | * the potential pressure overflow so that the window can be restored | ||
1448 | * back to original value. | ||
1449 | */ | ||
1450 | if (rx_count >= asoc->base.sk->sk_rcvbuf) | ||
1451 | over = 1; | ||
1452 | |||
1453 | if (asoc->rwnd >= len) { | ||
1454 | asoc->rwnd -= len; | ||
1455 | if (over) { | ||
1456 | asoc->rwnd_press += asoc->rwnd; | ||
1457 | asoc->rwnd = 0; | ||
1458 | } | ||
1459 | } else { | ||
1460 | asoc->rwnd_over = len - asoc->rwnd; | ||
1461 | asoc->rwnd = 0; | ||
1462 | } | ||
1463 | |||
1464 | pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n", | ||
1465 | __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, | ||
1466 | asoc->rwnd_press); | ||
1467 | } | ||
1468 | 1420 | ||
1469 | /* Build the bind address list for the association based on info from the | 1421 | /* Build the bind address list for the association based on info from the |
1470 | * local endpoint and the remote peer. | 1422 | * local endpoint and the remote peer. |
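The SCTP rework replaces the explicit rwnd increase/decrease bookkeeping with sctp_assoc_rwnd_update(), which derives the window from the current buffer state: half of whatever room is left in the receive buffer (rx_count coming from either the association or the socket memory counter, depending on rcvbuf_policy), or zero if the buffer is full or overshot. The core computation in isolation, with illustrative numbers:

    #include <stdio.h>

    static unsigned int rwnd_from_rcvbuf(int sk_rcvbuf, int rx_count)
    {
        int room = sk_rcvbuf - rx_count;

        return room > 0 ? (unsigned int)room >> 1 : 0;  /* advertise half the room */
    }

    int main(void)
    {
        printf("%u\n", rwnd_from_rcvbuf(212992, 50000));    /* plenty of room */
        printf("%u\n", rwnd_from_rcvbuf(212992, 300000));   /* overshot -> 0  */
        return 0;
    }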
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 483dcd71b3c5..591b44d3b7de 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -6176,7 +6176,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
6176 | * PMTU. In cases, such as loopback, this might be a rather | 6176 | * PMTU. In cases, such as loopback, this might be a rather |
6177 | * large spill over. | 6177 | * large spill over. |
6178 | */ | 6178 | */ |
6179 | if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || | 6179 | if ((!chunk->data_accepted) && (!asoc->rwnd || |
6180 | (datalen > asoc->rwnd + asoc->frag_point))) { | 6180 | (datalen > asoc->rwnd + asoc->frag_point))) { |
6181 | 6181 | ||
6182 | /* If this is the next TSN, consider reneging to make | 6182 | /* If this is the next TSN, consider reneging to make |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9e91d6e5df63..981aaf8b6ace 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #include <linux/crypto.h> | 64 | #include <linux/crypto.h> |
65 | #include <linux/slab.h> | 65 | #include <linux/slab.h> |
66 | #include <linux/file.h> | 66 | #include <linux/file.h> |
67 | #include <linux/compat.h> | ||
67 | 68 | ||
68 | #include <net/ip.h> | 69 | #include <net/ip.h> |
69 | #include <net/icmp.h> | 70 | #include <net/icmp.h> |
@@ -1368,11 +1369,19 @@ static int sctp_setsockopt_connectx(struct sock *sk, | |||
1368 | /* | 1369 | /* |
1369 | * New (hopefully final) interface for the API. | 1370 | * New (hopefully final) interface for the API. |
1370 | * We use the sctp_getaddrs_old structure so that user-space library | 1371 | * We use the sctp_getaddrs_old structure so that user-space library |
1371 | * can avoid any unnecessary allocations. The only defferent part | 1372 | * can avoid any unnecessary allocations. The only different part |
1372 | * is that we store the actual length of the address buffer into the | 1373 | * is that we store the actual length of the address buffer into the |
1373 | * addrs_num structure member. That way we can re-use the existing | 1374 | * addrs_num structure member. That way we can re-use the existing |
1374 | * code. | 1375 | * code. |
1375 | */ | 1376 | */ |
1377 | #ifdef CONFIG_COMPAT | ||
1378 | struct compat_sctp_getaddrs_old { | ||
1379 | sctp_assoc_t assoc_id; | ||
1380 | s32 addr_num; | ||
1381 | compat_uptr_t addrs; /* struct sockaddr * */ | ||
1382 | }; | ||
1383 | #endif | ||
1384 | |||
1376 | static int sctp_getsockopt_connectx3(struct sock *sk, int len, | 1385 | static int sctp_getsockopt_connectx3(struct sock *sk, int len, |
1377 | char __user *optval, | 1386 | char __user *optval, |
1378 | int __user *optlen) | 1387 | int __user *optlen) |
@@ -1381,16 +1390,30 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len, | |||
1381 | sctp_assoc_t assoc_id = 0; | 1390 | sctp_assoc_t assoc_id = 0; |
1382 | int err = 0; | 1391 | int err = 0; |
1383 | 1392 | ||
1384 | if (len < sizeof(param)) | 1393 | #ifdef CONFIG_COMPAT |
1385 | return -EINVAL; | 1394 | if (is_compat_task()) { |
1395 | struct compat_sctp_getaddrs_old param32; | ||
1386 | 1396 | ||
1387 | if (copy_from_user(¶m, optval, sizeof(param))) | 1397 | if (len < sizeof(param32)) |
1388 | return -EFAULT; | 1398 | return -EINVAL; |
1399 | if (copy_from_user(¶m32, optval, sizeof(param32))) | ||
1400 | return -EFAULT; | ||
1389 | 1401 | ||
1390 | err = __sctp_setsockopt_connectx(sk, | 1402 | param.assoc_id = param32.assoc_id; |
1391 | (struct sockaddr __user *)param.addrs, | 1403 | param.addr_num = param32.addr_num; |
1392 | param.addr_num, &assoc_id); | 1404 | param.addrs = compat_ptr(param32.addrs); |
1405 | } else | ||
1406 | #endif | ||
1407 | { | ||
1408 | if (len < sizeof(param)) | ||
1409 | return -EINVAL; | ||
1410 | if (copy_from_user(¶m, optval, sizeof(param))) | ||
1411 | return -EFAULT; | ||
1412 | } | ||
1393 | 1413 | ||
1414 | err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) | ||
1415 | param.addrs, param.addr_num, | ||
1416 | &assoc_id); | ||
1394 | if (err == 0 || err == -EINPROGRESS) { | 1417 | if (err == 0 || err == -EINPROGRESS) { |
1395 | if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) | 1418 | if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) |
1396 | return -EFAULT; | 1419 | return -EFAULT; |
@@ -2092,12 +2115,6 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
2092 | sctp_skb_pull(skb, copied); | 2115 | sctp_skb_pull(skb, copied); |
2093 | skb_queue_head(&sk->sk_receive_queue, skb); | 2116 | skb_queue_head(&sk->sk_receive_queue, skb); |
2094 | 2117 | ||
2095 | /* When only partial message is copied to the user, increase | ||
2096 | * rwnd by that amount. If all the data in the skb is read, | ||
2097 | * rwnd is updated when the event is freed. | ||
2098 | */ | ||
2099 | if (!sctp_ulpevent_is_notification(event)) | ||
2100 | sctp_assoc_rwnd_increase(event->asoc, copied); | ||
2101 | goto out; | 2118 | goto out; |
2102 | } else if ((event->msg_flags & MSG_NOTIFICATION) || | 2119 | } else if ((event->msg_flags & MSG_NOTIFICATION) || |
2103 | (event->msg_flags & MSG_EOR)) | 2120 | (event->msg_flags & MSG_EOR)) |
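The connectx3 getsockopt path gains a compat_sctp_getaddrs_old variant because a 32-bit caller passes a structure whose addrs member is a 32-bit user pointer (compat_uptr_t), giving it a different size and layout than the native struct; under is_compat_task() the handler copies the 32-bit layout in and widens the pointer with compat_ptr(). A user-space illustration of the layout difference on a typical 64-bit build; the structs below approximate the two layouts and are not the kernel definitions.

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t assoc_id_t;

    struct getaddrs_old_native {        /* what a 64-bit caller passes          */
        assoc_id_t assoc_id;
        int32_t addr_num;
        void *addrs;                    /* 8-byte pointer, 8-byte aligned       */
    };

    struct getaddrs_old_compat {        /* what a 32-bit caller passes          */
        assoc_id_t assoc_id;
        int32_t addr_num;
        uint32_t addrs;                 /* 32-bit user pointer (compat_uptr_t)  */
    };

    int main(void)
    {
        printf("native: %zu bytes, compat: %zu bytes\n",
               sizeof(struct getaddrs_old_native),
               sizeof(struct getaddrs_old_compat));   /* e.g. 16 vs 12 */
        return 0;
    }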
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 7135e617ab0f..35c8923b5554 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
@@ -151,6 +151,7 @@ static struct ctl_table sctp_net_table[] = { | |||
151 | }, | 151 | }, |
152 | { | 152 | { |
153 | .procname = "cookie_hmac_alg", | 153 | .procname = "cookie_hmac_alg", |
154 | .data = &init_net.sctp.sctp_hmac_alg, | ||
154 | .maxlen = 8, | 155 | .maxlen = 8, |
155 | .mode = 0644, | 156 | .mode = 0644, |
156 | .proc_handler = proc_sctp_do_hmac_alg, | 157 | .proc_handler = proc_sctp_do_hmac_alg, |
@@ -401,15 +402,18 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, | |||
401 | 402 | ||
402 | int sctp_sysctl_net_register(struct net *net) | 403 | int sctp_sysctl_net_register(struct net *net) |
403 | { | 404 | { |
404 | struct ctl_table *table; | 405 | struct ctl_table *table = sctp_net_table; |
405 | int i; | 406 | |
407 | if (!net_eq(net, &init_net)) { | ||
408 | int i; | ||
406 | 409 | ||
407 | table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); | 410 | table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); |
408 | if (!table) | 411 | if (!table) |
409 | return -ENOMEM; | 412 | return -ENOMEM; |
410 | 413 | ||
411 | for (i = 0; table[i].data; i++) | 414 | for (i = 0; table[i].data; i++) |
412 | table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; | 415 | table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; |
416 | } | ||
413 | 417 | ||
414 | net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); | 418 | net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); |
415 | return 0; | 419 | return 0; |
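[Illustration] The sysctl change above only kmemdup()s and rebases the template table for namespaces other than init_net, and the cookie_hmac_alg entry gains the .data pointer it was missing; since the rebasing loop stops at the first NULL .data, a missing pointer there would otherwise cut the loop short. The rebase itself shifts each .data pointer by the byte distance between the two per-namespace structs. A small userspace sketch of that pointer-rebasing idiom, using invented demo_* names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_ns_cfg {
        int rto_min;
        int rto_max;
};

struct demo_ctl {
        const char *name;
        void *data;             /* points into some demo_ns_cfg instance */
};

static struct demo_ns_cfg demo_init_cfg = { .rto_min = 1, .rto_max = 60 };

/* Template table: every data pointer references the "init" instance. */
static struct demo_ctl demo_template[] = {
        { "rto_min", &demo_init_cfg.rto_min },
        { "rto_max", &demo_init_cfg.rto_max },
        { NULL, NULL },         /* a NULL .data terminates the rebase loop */
};

int main(void)
{
        struct demo_ns_cfg other = { .rto_min = 5, .rto_max = 30 };
        struct demo_ctl *table;
        int i;

        table = malloc(sizeof(demo_template));
        if (!table)
                return 1;
        memcpy(table, demo_template, sizeof(demo_template));

        /* Shift every data pointer by the byte distance between the two
         * instances, mirroring the kernel idiom in the hunk above. */
        for (i = 0; table[i].data; i++)
                table[i].data = (char *)table[i].data +
                                ((char *)&other - (char *)&demo_init_cfg);

        printf("%s = %d\n", table[0].name, *(int *)table[0].data);      /* 5 */
        free(table);
        return 0;
}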
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 85c64658bd0b..8d198ae03606 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, | |||
989 | skb = sctp_event2skb(event); | 989 | skb = sctp_event2skb(event); |
990 | /* Set the owner and charge rwnd for bytes received. */ | 990 | /* Set the owner and charge rwnd for bytes received. */ |
991 | sctp_ulpevent_set_owner(event, asoc); | 991 | sctp_ulpevent_set_owner(event, asoc); |
992 | sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); | 992 | sctp_assoc_rwnd_update(asoc, false); |
993 | 993 | ||
994 | if (!skb->data_len) | 994 | if (!skb->data_len) |
995 | return; | 995 | return; |
@@ -1011,6 +1011,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) | |||
1011 | { | 1011 | { |
1012 | struct sk_buff *skb, *frag; | 1012 | struct sk_buff *skb, *frag; |
1013 | unsigned int len; | 1013 | unsigned int len; |
1014 | struct sctp_association *asoc; | ||
1014 | 1015 | ||
1015 | /* Current stack structures assume that the rcv buffer is | 1016 | /* Current stack structures assume that the rcv buffer is |
1016 | * per socket. For UDP style sockets this is not true as | 1017 | * per socket. For UDP style sockets this is not true as |
@@ -1035,8 +1036,11 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) | |||
1035 | } | 1036 | } |
1036 | 1037 | ||
1037 | done: | 1038 | done: |
1038 | sctp_assoc_rwnd_increase(event->asoc, len); | 1039 | asoc = event->asoc; |
1040 | sctp_association_hold(asoc); | ||
1039 | sctp_ulpevent_release_owner(event); | 1041 | sctp_ulpevent_release_owner(event); |
1042 | sctp_assoc_rwnd_update(asoc, true); | ||
1043 | sctp_association_put(asoc); | ||
1040 | } | 1044 | } |
1041 | 1045 | ||
1042 | static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) | 1046 | static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) |
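[Illustration] In sctp_ulpevent_release_data() above, the association is pinned with sctp_association_hold() before the event's owner reference is released, so the subsequent rwnd update still has a valid object to work on, and the pin is dropped afterwards. A hedged userspace sketch of that hold/put pattern, with invented demo_* names:

#include <stdio.h>
#include <stdlib.h>

struct demo_assoc {
        int refcnt;
        unsigned int rwnd;
};

static void demo_hold(struct demo_assoc *a)
{
        a->refcnt++;
}

static void demo_put(struct demo_assoc *a)
{
        if (--a->refcnt == 0) {
                printf("last reference dropped, freeing\n");
                free(a);
        }
}

/* Stands in for releasing the event's owner: drops one reference. */
static void demo_release_owner(struct demo_assoc *a)
{
        demo_put(a);
}

int main(void)
{
        struct demo_assoc *a = calloc(1, sizeof(*a));

        if (!a)
                return 1;
        a->refcnt = 1;          /* the reference held on behalf of the event */
        a->rwnd = 4096;

        demo_hold(a);           /* pin the object before releasing the owner */
        demo_release_owner(a);  /* may have dropped what kept it alive */
        a->rwnd += 100;         /* still safe: our pin keeps it valid */
        demo_put(a);            /* drop the pin; frees on the last reference */
        return 0;
}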
diff --git a/net/tipc/core.h b/net/tipc/core.h index 1ff477b0450d..5569d96b4da3 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
@@ -192,6 +192,7 @@ static inline void k_term_timer(struct timer_list *timer) | |||
192 | 192 | ||
193 | struct tipc_skb_cb { | 193 | struct tipc_skb_cb { |
194 | void *handle; | 194 | void *handle; |
195 | bool deferred; | ||
195 | }; | 196 | }; |
196 | 197 | ||
197 | #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) | 198 | #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) |
diff --git a/net/tipc/link.c b/net/tipc/link.c index e4f233d58d35..284d6383ad6c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1408,6 +1408,12 @@ static int link_recv_buf_validate(struct sk_buff *buf) | |||
1408 | u32 hdr_size; | 1408 | u32 hdr_size; |
1409 | u32 min_hdr_size; | 1409 | u32 min_hdr_size; |
1410 | 1410 | ||
1411 | /* If this packet comes from the defer queue, the skb has already | ||
1412 | * been validated | ||
1413 | */ | ||
1414 | if (unlikely(TIPC_SKB_CB(buf)->deferred)) | ||
1415 | return 1; | ||
1416 | |||
1411 | if (unlikely(buf->len < MIN_H_SIZE)) | 1417 | if (unlikely(buf->len < MIN_H_SIZE)) |
1412 | return 0; | 1418 | return 0; |
1413 | 1419 | ||
@@ -1717,6 +1723,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, | |||
1717 | &l_ptr->newest_deferred_in, buf)) { | 1723 | &l_ptr->newest_deferred_in, buf)) { |
1718 | l_ptr->deferred_inqueue_sz++; | 1724 | l_ptr->deferred_inqueue_sz++; |
1719 | l_ptr->stats.deferred_recv++; | 1725 | l_ptr->stats.deferred_recv++; |
1726 | TIPC_SKB_CB(buf)->deferred = true; | ||
1720 | if ((l_ptr->deferred_inqueue_sz % 16) == 1) | 1727 | if ((l_ptr->deferred_inqueue_sz % 16) == 1) |
1721 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); | 1728 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); |
1722 | } else | 1729 | } else |
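[Illustration] The TIPC change stores a 'deferred' flag in the skb control block when a buffer is parked on the out-of-sequence queue, and link_recv_buf_validate() returns early for such buffers because they were already validated on first arrival. The sketch below shows the control-block-in-scratch-space idiom and the early-return check, with invented demo_* names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_buf {
        unsigned char cb[48];   /* opaque scratch space, like skb->cb */
        size_t len;
};

/* Private per-buffer state overlaid on the scratch area. */
struct demo_cb {
        void *handle;
        bool deferred;          /* set once the buffer has been validated */
};

#define DEMO_CB(buf)    ((struct demo_cb *)&(buf)->cb[0])

static int demo_validate(struct demo_buf *buf)
{
        /* A buffer coming back from the deferred queue was validated when it
         * first arrived, so skip the checks the second time around. */
        if (DEMO_CB(buf)->deferred)
                return 1;
        return buf->len >= 24;  /* placeholder for the real header checks */
}

int main(void)
{
        struct demo_buf buf = { .len = 100 };

        printf("first pass: %d\n", demo_validate(&buf));
        DEMO_CB(&buf)->deferred = true;         /* parked on the defer queue */
        printf("after requeue: %d\n", demo_validate(&buf));
        return 0;
}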
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 23708636b05c..25e5cb0aaef6 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c | |||
@@ -210,8 +210,8 @@ static void do_usb_entry(void *symval, | |||
210 | range_lo < 0x9 ? "[%X-9" : "[%X", | 210 | range_lo < 0x9 ? "[%X-9" : "[%X", |
211 | range_lo); | 211 | range_lo); |
212 | sprintf(alias + strlen(alias), | 212 | sprintf(alias + strlen(alias), |
213 | range_hi > 0xA ? "a-%X]" : "%X]", | 213 | range_hi > 0xA ? "A-%X]" : "%X]", |
214 | range_lo); | 214 | range_hi); |
215 | } | 215 | } |
216 | } | 216 | } |
217 | if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1)) | 217 | if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1)) |
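[Illustration] The file2alias fix above corrects the upper half of the bcdDevice match range: the closing part of the character class must use range_hi (not range_lo) and an uppercase 'A', since the generated alias matches uppercase hex digits. A standalone sketch of the corrected range emission, using an invented demo_append_range() helper:

#include <stdio.h>
#include <string.h>

/* Appends a character class matching one hex digit in [range_lo, range_hi],
 * e.g. 0x3..0xC becomes "[3-9A-C]". */
static void demo_append_range(char *alias, size_t size,
                              unsigned int range_lo, unsigned int range_hi)
{
        size_t len = strlen(alias);

        /* Decimal part of the class: lo..9 (or just lo when lo >= 9). */
        snprintf(alias + len, size - len,
                 range_lo < 0x9 ? "[%X-9" : "[%X", range_lo);
        len = strlen(alias);
        /* Hex-letter part: A..hi, closing the class.  The bug fixed above
         * printed range_lo here and used a lowercase 'a'. */
        snprintf(alias + len, size - len,
                 range_hi > 0xA ? "A-%X]" : "%X]", range_hi);
}

int main(void)
{
        char alias[64] = "usb:v1234p5678d";

        demo_append_range(alias, sizeof(alias), 0x3, 0xC);
        printf("%s\n", alias);          /* usb:v1234p5678d[3-9A-C] */
        return 0;
}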
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index ec4536c8d8d4..dafcf82139e2 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -932,7 +932,7 @@ int snd_hda_bus_new(struct snd_card *card, | |||
932 | } | 932 | } |
933 | EXPORT_SYMBOL_GPL(snd_hda_bus_new); | 933 | EXPORT_SYMBOL_GPL(snd_hda_bus_new); |
934 | 934 | ||
935 | #ifdef CONFIG_SND_HDA_GENERIC | 935 | #if IS_ENABLED(CONFIG_SND_HDA_GENERIC) |
936 | #define is_generic_config(codec) \ | 936 | #define is_generic_config(codec) \ |
937 | (codec->modelname && !strcmp(codec->modelname, "generic")) | 937 | (codec->modelname && !strcmp(codec->modelname, "generic")) |
938 | #else | 938 | #else |
@@ -1339,23 +1339,15 @@ get_hda_cvt_setup(struct hda_codec *codec, hda_nid_t nid) | |||
1339 | /* | 1339 | /* |
1340 | * Dynamic symbol binding for the codec parsers | 1340 | * Dynamic symbol binding for the codec parsers |
1341 | */ | 1341 | */ |
1342 | #ifdef MODULE | ||
1343 | #define load_parser_sym(sym) ((int (*)(struct hda_codec *))symbol_request(sym)) | ||
1344 | #define unload_parser_addr(addr) symbol_put_addr(addr) | ||
1345 | #else | ||
1346 | #define load_parser_sym(sym) (sym) | ||
1347 | #define unload_parser_addr(addr) do {} while (0) | ||
1348 | #endif | ||
1349 | 1342 | ||
1350 | #define load_parser(codec, sym) \ | 1343 | #define load_parser(codec, sym) \ |
1351 | ((codec)->parser = load_parser_sym(sym)) | 1344 | ((codec)->parser = (int (*)(struct hda_codec *))symbol_request(sym)) |
1352 | 1345 | ||
1353 | static void unload_parser(struct hda_codec *codec) | 1346 | static void unload_parser(struct hda_codec *codec) |
1354 | { | 1347 | { |
1355 | if (codec->parser) { | 1348 | if (codec->parser) |
1356 | unload_parser_addr(codec->parser); | 1349 | symbol_put_addr(codec->parser); |
1357 | codec->parser = NULL; | 1350 | codec->parser = NULL; |
1358 | } | ||
1359 | } | 1351 | } |
1360 | 1352 | ||
1361 | /* | 1353 | /* |
@@ -1570,7 +1562,7 @@ int snd_hda_codec_update_widgets(struct hda_codec *codec) | |||
1570 | EXPORT_SYMBOL_GPL(snd_hda_codec_update_widgets); | 1562 | EXPORT_SYMBOL_GPL(snd_hda_codec_update_widgets); |
1571 | 1563 | ||
1572 | 1564 | ||
1573 | #ifdef CONFIG_SND_HDA_CODEC_HDMI | 1565 | #if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) |
1574 | /* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */ | 1566 | /* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */ |
1575 | static bool is_likely_hdmi_codec(struct hda_codec *codec) | 1567 | static bool is_likely_hdmi_codec(struct hda_codec *codec) |
1576 | { | 1568 | { |
@@ -1620,12 +1612,20 @@ int snd_hda_codec_configure(struct hda_codec *codec) | |||
1620 | patch = codec->preset->patch; | 1612 | patch = codec->preset->patch; |
1621 | if (!patch) { | 1613 | if (!patch) { |
1622 | unload_parser(codec); /* to be sure */ | 1614 | unload_parser(codec); /* to be sure */ |
1623 | if (is_likely_hdmi_codec(codec)) | 1615 | if (is_likely_hdmi_codec(codec)) { |
1616 | #if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI) | ||
1624 | patch = load_parser(codec, snd_hda_parse_hdmi_codec); | 1617 | patch = load_parser(codec, snd_hda_parse_hdmi_codec); |
1625 | #ifdef CONFIG_SND_HDA_GENERIC | 1618 | #elif IS_BUILTIN(CONFIG_SND_HDA_CODEC_HDMI) |
1626 | if (!patch) | 1619 | patch = snd_hda_parse_hdmi_codec; |
1620 | #endif | ||
1621 | } | ||
1622 | if (!patch) { | ||
1623 | #if IS_MODULE(CONFIG_SND_HDA_GENERIC) | ||
1627 | patch = load_parser(codec, snd_hda_parse_generic_codec); | 1624 | patch = load_parser(codec, snd_hda_parse_generic_codec); |
1625 | #elif IS_BUILTIN(CONFIG_SND_HDA_GENERIC) | ||
1626 | patch = snd_hda_parse_generic_codec; | ||
1628 | #endif | 1627 | #endif |
1628 | } | ||
1629 | if (!patch) { | 1629 | if (!patch) { |
1630 | printk(KERN_ERR "hda-codec: No codec parser is available\n"); | 1630 | printk(KERN_ERR "hda-codec: No codec parser is available\n"); |
1631 | return -ENODEV; | 1631 | return -ENODEV; |
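[Illustration] Several hda_codec.c hunks above replace '#ifdef CONFIG_FOO' with IS_ENABLED(), and pick between IS_MODULE()/IS_BUILTIN() when binding the HDMI and generic parsers. The point is that a tristate option built as a module defines CONFIG_FOO_MODULE rather than CONFIG_FOO, so a plain #ifdef misses the =m case. Below is a simplified, self-contained rendition of the preprocessor trick behind such macros (the real ones live in linux/kconfig.h); all DEMO_/demo_ names are invented:

#include <stdio.h>

/* Pretend configuration: the feature is built as a module (=m), so only the
 * _MODULE symbol is defined, exactly as kconfig would do. */
#define CONFIG_DEMO_FEATURE_MODULE 1

#define __DEMO_ARG_PLACEHOLDER_1 0,
#define __demo_take_second_arg(ignored, val, ...) val

/* Expands to 1 if 'cfg' is defined to 1, otherwise to 0. */
#define __demo_is_defined(cfg)           ___demo_is_defined(cfg)
#define ___demo_is_defined(val)          ____demo_is_defined(__DEMO_ARG_PLACEHOLDER_##val)
#define ____demo_is_defined(arg_or_junk) __demo_take_second_arg(arg_or_junk 1, 0)

#define DEMO_IS_BUILTIN(option) __demo_is_defined(option)
#define DEMO_IS_MODULE(option)  __demo_is_defined(option##_MODULE)
#define DEMO_IS_ENABLED(option) (DEMO_IS_BUILTIN(option) || DEMO_IS_MODULE(option))

int main(void)
{
        /* #ifdef CONFIG_DEMO_FEATURE would be false here, but the feature is
         * still enabled because =m defines the _MODULE variant. */
        printf("builtin=%d module=%d enabled=%d\n",
               DEMO_IS_BUILTIN(CONFIG_DEMO_FEATURE),
               DEMO_IS_MODULE(CONFIG_DEMO_FEATURE),
               DEMO_IS_ENABLED(CONFIG_DEMO_FEATURE));
        return 0;
}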
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 8321a97d5c05..d9a09bdd09db 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -3269,7 +3269,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol, | |||
3269 | mutex_unlock(&codec->control_mutex); | 3269 | mutex_unlock(&codec->control_mutex); |
3270 | snd_hda_codec_flush_cache(codec); /* flush the updates */ | 3270 | snd_hda_codec_flush_cache(codec); /* flush the updates */ |
3271 | if (err >= 0 && spec->cap_sync_hook) | 3271 | if (err >= 0 && spec->cap_sync_hook) |
3272 | spec->cap_sync_hook(codec, ucontrol); | 3272 | spec->cap_sync_hook(codec, kcontrol, ucontrol); |
3273 | return err; | 3273 | return err; |
3274 | } | 3274 | } |
3275 | 3275 | ||
@@ -3390,7 +3390,7 @@ static int cap_single_sw_put(struct snd_kcontrol *kcontrol, | |||
3390 | return ret; | 3390 | return ret; |
3391 | 3391 | ||
3392 | if (spec->cap_sync_hook) | 3392 | if (spec->cap_sync_hook) |
3393 | spec->cap_sync_hook(codec, ucontrol); | 3393 | spec->cap_sync_hook(codec, kcontrol, ucontrol); |
3394 | 3394 | ||
3395 | return ret; | 3395 | return ret; |
3396 | } | 3396 | } |
@@ -3795,7 +3795,7 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx, | |||
3795 | return 0; | 3795 | return 0; |
3796 | snd_hda_activate_path(codec, path, true, false); | 3796 | snd_hda_activate_path(codec, path, true, false); |
3797 | if (spec->cap_sync_hook) | 3797 | if (spec->cap_sync_hook) |
3798 | spec->cap_sync_hook(codec, NULL); | 3798 | spec->cap_sync_hook(codec, NULL, NULL); |
3799 | path_power_down_sync(codec, old_path); | 3799 | path_power_down_sync(codec, old_path); |
3800 | return 1; | 3800 | return 1; |
3801 | } | 3801 | } |
@@ -5270,7 +5270,7 @@ static void init_input_src(struct hda_codec *codec) | |||
5270 | } | 5270 | } |
5271 | 5271 | ||
5272 | if (spec->cap_sync_hook) | 5272 | if (spec->cap_sync_hook) |
5273 | spec->cap_sync_hook(codec, NULL); | 5273 | spec->cap_sync_hook(codec, NULL, NULL); |
5274 | } | 5274 | } |
5275 | 5275 | ||
5276 | /* set right pin controls for digital I/O */ | 5276 | /* set right pin controls for digital I/O */ |
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 07f767231c9f..c908afbe4d94 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h | |||
@@ -274,6 +274,7 @@ struct hda_gen_spec { | |||
274 | void (*init_hook)(struct hda_codec *codec); | 274 | void (*init_hook)(struct hda_codec *codec); |
275 | void (*automute_hook)(struct hda_codec *codec); | 275 | void (*automute_hook)(struct hda_codec *codec); |
276 | void (*cap_sync_hook)(struct hda_codec *codec, | 276 | void (*cap_sync_hook)(struct hda_codec *codec, |
277 | struct snd_kcontrol *kcontrol, | ||
277 | struct snd_ctl_elem_value *ucontrol); | 278 | struct snd_ctl_elem_value *ucontrol); |
278 | 279 | ||
279 | /* PCM hooks */ | 280 | /* PCM hooks */ |
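[Illustration] The cap_sync_hook prototype above gains a struct snd_kcontrol * argument, and every implementation and call site in the surrounding hunks (conexant, realtek, sigmatel, thinkpad_helper) is updated, with NULL passed where no control is at hand. A hedged sketch of widening a hook signature this way, using invented demo_* names:

#include <stdio.h>
#include <stddef.h>

struct demo_kcontrol { const char *name; };
struct demo_value    { long val; };
struct demo_codec;

struct demo_spec {
        /* New shape: the control that fired is passed through; callers that
         * have no control in hand (init paths) pass NULL for both pointers. */
        void (*cap_sync_hook)(struct demo_codec *codec,
                              struct demo_kcontrol *kcontrol,
                              struct demo_value *ucontrol);
};

struct demo_codec { struct demo_spec *spec; };

static void demo_led_hook(struct demo_codec *codec,
                          struct demo_kcontrol *kcontrol,
                          struct demo_value *ucontrol)
{
        (void)codec;
        if (!kcontrol || !ucontrol)
                return;                 /* init-time call: nothing to report */
        printf("control '%s' changed to %ld\n", kcontrol->name, ucontrol->val);
}

int main(void)
{
        struct demo_spec spec = { .cap_sync_hook = demo_led_hook };
        struct demo_codec codec = { .spec = &spec };
        struct demo_kcontrol kc = { .name = "Capture Switch" };
        struct demo_value v = { .val = 1 };

        codec.spec->cap_sync_hook(&codec, &kc, &v);     /* control callback */
        codec.spec->cap_sync_hook(&codec, NULL, NULL);  /* init path */
        return 0;
}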
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index fa2879a21a50..e354ab1ec20f 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -198,7 +198,7 @@ MODULE_DESCRIPTION("Intel HDA driver"); | |||
198 | #endif | 198 | #endif |
199 | 199 | ||
200 | #if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO) | 200 | #if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO) |
201 | #ifdef CONFIG_SND_HDA_CODEC_HDMI | 201 | #if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) |
202 | #define SUPPORT_VGA_SWITCHEROO | 202 | #define SUPPORT_VGA_SWITCHEROO |
203 | #endif | 203 | #endif |
204 | #endif | 204 | #endif |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 4e0ec146553d..bcf91bea3317 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -3291,7 +3291,8 @@ static void cxt_update_headset_mode(struct hda_codec *codec) | |||
3291 | } | 3291 | } |
3292 | 3292 | ||
3293 | static void cxt_update_headset_mode_hook(struct hda_codec *codec, | 3293 | static void cxt_update_headset_mode_hook(struct hda_codec *codec, |
3294 | struct snd_ctl_elem_value *ucontrol) | 3294 | struct snd_kcontrol *kcontrol, |
3295 | struct snd_ctl_elem_value *ucontrol) | ||
3295 | { | 3296 | { |
3296 | cxt_update_headset_mode(codec); | 3297 | cxt_update_headset_mode(codec); |
3297 | } | 3298 | } |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d9693ca9546f..a9a83b85517a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -708,7 +708,8 @@ static void alc_inv_dmic_sync(struct hda_codec *codec, bool force) | |||
708 | } | 708 | } |
709 | 709 | ||
710 | static void alc_inv_dmic_hook(struct hda_codec *codec, | 710 | static void alc_inv_dmic_hook(struct hda_codec *codec, |
711 | struct snd_ctl_elem_value *ucontrol) | 711 | struct snd_kcontrol *kcontrol, |
712 | struct snd_ctl_elem_value *ucontrol) | ||
712 | { | 713 | { |
713 | alc_inv_dmic_sync(codec, false); | 714 | alc_inv_dmic_sync(codec, false); |
714 | } | 715 | } |
@@ -3218,7 +3219,8 @@ static void alc269_fixup_hp_gpio_mute_hook(void *private_data, int enabled) | |||
3218 | 3219 | ||
3219 | /* turn on/off mic-mute LED per capture hook */ | 3220 | /* turn on/off mic-mute LED per capture hook */ |
3220 | static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec, | 3221 | static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec, |
3221 | struct snd_ctl_elem_value *ucontrol) | 3222 | struct snd_kcontrol *kcontrol, |
3223 | struct snd_ctl_elem_value *ucontrol) | ||
3222 | { | 3224 | { |
3223 | struct alc_spec *spec = codec->spec; | 3225 | struct alc_spec *spec = codec->spec; |
3224 | unsigned int oldval = spec->gpio_led; | 3226 | unsigned int oldval = spec->gpio_led; |
@@ -3528,7 +3530,8 @@ static void alc_update_headset_mode(struct hda_codec *codec) | |||
3528 | } | 3530 | } |
3529 | 3531 | ||
3530 | static void alc_update_headset_mode_hook(struct hda_codec *codec, | 3532 | static void alc_update_headset_mode_hook(struct hda_codec *codec, |
3531 | struct snd_ctl_elem_value *ucontrol) | 3533 | struct snd_kcontrol *kcontrol, |
3534 | struct snd_ctl_elem_value *ucontrol) | ||
3532 | { | 3535 | { |
3533 | alc_update_headset_mode(codec); | 3536 | alc_update_headset_mode(codec); |
3534 | } | 3537 | } |
@@ -4329,6 +4332,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
4329 | SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), | 4332 | SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), |
4330 | SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), | 4333 | SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), |
4331 | SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101), | 4334 | SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101), |
4335 | SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), | ||
4332 | SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), | 4336 | SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), |
4333 | SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2), | 4337 | SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2), |
4334 | SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), | 4338 | SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), |
@@ -4434,9 +4438,6 @@ static void alc269_fill_coef(struct hda_codec *codec) | |||
4434 | 4438 | ||
4435 | if (spec->codec_variant != ALC269_TYPE_ALC269VB) | 4439 | if (spec->codec_variant != ALC269_TYPE_ALC269VB) |
4436 | return; | 4440 | return; |
4437 | /* ALC271X doesn't seem to support these COEFs (bko#52181) */ | ||
4438 | if (!strcmp(codec->chip_name, "ALC271X")) | ||
4439 | return; | ||
4440 | 4441 | ||
4441 | if ((alc_get_coef0(codec) & 0x00ff) < 0x015) { | 4442 | if ((alc_get_coef0(codec) & 0x00ff) < 0x015) { |
4442 | alc_write_coef_idx(codec, 0xf, 0x960b); | 4443 | alc_write_coef_idx(codec, 0xf, 0x960b); |
@@ -5106,6 +5107,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { | |||
5106 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), | 5107 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), |
5107 | SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 5108 | SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
5108 | SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 5109 | SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
5110 | SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | ||
5109 | SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_AUTO_MUTE), | 5111 | SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_AUTO_MUTE), |
5110 | SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_AUTO_MUTE), | 5112 | SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_AUTO_MUTE), |
5111 | SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 5113 | SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 6998cf29b9bc..7311badf6a94 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -194,7 +194,7 @@ struct sigmatel_spec { | |||
194 | int default_polarity; | 194 | int default_polarity; |
195 | 195 | ||
196 | unsigned int mic_mute_led_gpio; /* capture mute LED GPIO */ | 196 | unsigned int mic_mute_led_gpio; /* capture mute LED GPIO */ |
197 | bool mic_mute_led_on; /* current mic mute state */ | 197 | unsigned int mic_enabled; /* current mic mute state (bitmask) */ |
198 | 198 | ||
199 | /* stream */ | 199 | /* stream */ |
200 | unsigned int stream_delay; | 200 | unsigned int stream_delay; |
@@ -324,19 +324,26 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask, | |||
324 | 324 | ||
325 | /* hook for controlling mic-mute LED GPIO */ | 325 | /* hook for controlling mic-mute LED GPIO */ |
326 | static void stac_capture_led_hook(struct hda_codec *codec, | 326 | static void stac_capture_led_hook(struct hda_codec *codec, |
327 | struct snd_ctl_elem_value *ucontrol) | 327 | struct snd_kcontrol *kcontrol, |
328 | struct snd_ctl_elem_value *ucontrol) | ||
328 | { | 329 | { |
329 | struct sigmatel_spec *spec = codec->spec; | 330 | struct sigmatel_spec *spec = codec->spec; |
330 | bool mute; | 331 | unsigned int mask; |
332 | bool cur_mute, prev_mute; | ||
331 | 333 | ||
332 | if (!ucontrol) | 334 | if (!kcontrol || !ucontrol) |
333 | return; | 335 | return; |
334 | 336 | ||
335 | mute = !(ucontrol->value.integer.value[0] || | 337 | mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); |
336 | ucontrol->value.integer.value[1]); | 338 | prev_mute = !spec->mic_enabled; |
337 | if (spec->mic_mute_led_on != mute) { | 339 | if (ucontrol->value.integer.value[0] || |
338 | spec->mic_mute_led_on = mute; | 340 | ucontrol->value.integer.value[1]) |
339 | if (mute) | 341 | spec->mic_enabled |= mask; |
342 | else | ||
343 | spec->mic_enabled &= ~mask; | ||
344 | cur_mute = !spec->mic_enabled; | ||
345 | if (cur_mute != prev_mute) { | ||
346 | if (cur_mute) | ||
340 | spec->gpio_data |= spec->mic_mute_led_gpio; | 347 | spec->gpio_data |= spec->mic_mute_led_gpio; |
341 | else | 348 | else |
342 | spec->gpio_data &= ~spec->mic_mute_led_gpio; | 349 | spec->gpio_data &= ~spec->mic_mute_led_gpio; |
@@ -4462,7 +4469,7 @@ static void stac_setup_gpio(struct hda_codec *codec) | |||
4462 | if (spec->mic_mute_led_gpio) { | 4469 | if (spec->mic_mute_led_gpio) { |
4463 | spec->gpio_mask |= spec->mic_mute_led_gpio; | 4470 | spec->gpio_mask |= spec->mic_mute_led_gpio; |
4464 | spec->gpio_dir |= spec->mic_mute_led_gpio; | 4471 | spec->gpio_dir |= spec->mic_mute_led_gpio; |
4465 | spec->mic_mute_led_on = true; | 4472 | spec->mic_enabled = 0; |
4466 | spec->gpio_data |= spec->mic_mute_led_gpio; | 4473 | spec->gpio_data |= spec->mic_mute_led_gpio; |
4467 | 4474 | ||
4468 | spec->gen.cap_sync_hook = stac_capture_led_hook; | 4475 | spec->gen.cap_sync_hook = stac_capture_led_hook; |
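[Illustration] The sigmatel change replaces the single mic_mute_led_on bool with a mic_enabled bitmask indexed via snd_ctl_get_ioffidx(), so the mute LED only changes state when every capture switch is off. A small sketch of that bitmask bookkeeping, with invented demo_* names:

#include <stdbool.h>
#include <stdio.h>

static unsigned int demo_mic_enabled;   /* bit n set => capture switch n is on */

static void demo_switch_put(unsigned int index, bool on)
{
        unsigned int mask = 1U << index;
        bool prev_mute = !demo_mic_enabled;
        bool cur_mute;

        if (on)
                demo_mic_enabled |= mask;
        else
                demo_mic_enabled &= ~mask;

        /* The LED means "everything muted", so it only toggles when the
         * whole mask transitions between zero and non-zero. */
        cur_mute = !demo_mic_enabled;
        if (cur_mute != prev_mute)
                printf("mic-mute LED %s\n", cur_mute ? "on" : "off");
}

int main(void)
{
        demo_switch_put(0, true);       /* one switch on: LED goes off */
        demo_switch_put(1, true);       /* second on: no LED change */
        demo_switch_put(0, false);      /* one off, one still on: no change */
        demo_switch_put(1, false);      /* all off again: LED back on */
        return 0;
}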
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index 5799fbc24c28..8fe3b8c18ed4 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c | |||
@@ -39,6 +39,7 @@ static void update_tpacpi_mute_led(void *private_data, int enabled) | |||
39 | } | 39 | } |
40 | 40 | ||
41 | static void update_tpacpi_micmute_led(struct hda_codec *codec, | 41 | static void update_tpacpi_micmute_led(struct hda_codec *codec, |
42 | struct snd_kcontrol *kcontrol, | ||
42 | struct snd_ctl_elem_value *ucontrol) | 43 | struct snd_ctl_elem_value *ucontrol) |
43 | { | 44 | { |
44 | if (!ucontrol || !led_set_func) | 45 | if (!ucontrol || !led_set_func) |
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index be456ce264d0..8ca405cd7c1a 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/of.h> | 24 | #include <linux/of.h> |
25 | #include <linux/of_address.h> | 25 | #include <linux/of_address.h> |
26 | #include <linux/of_irq.h> | 26 | #include <linux/of_irq.h> |
27 | #include <linux/uaccess.h> | ||
27 | 28 | ||
28 | #include <linux/irqchip/arm-gic.h> | 29 | #include <linux/irqchip/arm-gic.h> |
29 | 30 | ||
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index 88b2fe3ddf42..00d86427af0f 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c | |||
@@ -154,17 +154,13 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, | |||
154 | list_add_tail(&dev->list, &kvm->coalesced_zones); | 154 | list_add_tail(&dev->list, &kvm->coalesced_zones); |
155 | mutex_unlock(&kvm->slots_lock); | 155 | mutex_unlock(&kvm->slots_lock); |
156 | 156 | ||
157 | return ret; | 157 | return 0; |
158 | 158 | ||
159 | out_free_dev: | 159 | out_free_dev: |
160 | mutex_unlock(&kvm->slots_lock); | 160 | mutex_unlock(&kvm->slots_lock); |
161 | |||
162 | kfree(dev); | 161 | kfree(dev); |
163 | 162 | ||
164 | if (dev == NULL) | 163 | return ret; |
165 | return -ENXIO; | ||
166 | |||
167 | return 0; | ||
168 | } | 164 | } |
169 | 165 | ||
170 | int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, | 166 | int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, |
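[Illustration] The coalesced-MMIO hunk straightens out the error handling: the success path returns 0 directly, the failure path unwinds through out_free_dev and returns the real error, and the dead 'if (dev == NULL)' check after kfree() goes away. A hedged sketch of that goto-unwind style, with invented demo_* names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_dev { char name[16]; };

static int demo_register(const char *name, int fail_the_check)
{
        struct demo_dev *dev;
        int ret;

        dev = calloc(1, sizeof(*dev));
        if (!dev)
                return -ENOMEM;

        ret = fail_the_check ? -EINVAL : 0;     /* pretend a validation step */
        if (ret < 0)
                goto out_free_dev;

        snprintf(dev->name, sizeof(dev->name), "%s", name);
        printf("registered %s\n", dev->name);
        return 0;               /* success: dev stays registered, nothing to undo */

out_free_dev:
        free(dev);
        return ret;             /* propagate the real error */
}

int main(void)
{
        printf("ok path: %d\n", demo_register("zone0", 0));
        printf("error path: %d\n", demo_register("zone1", 1));
        return 0;
}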